OS: Ubuntu 16.04
ceph: jewel 10.2.7
Deployment tool: ceph-deploy
Admin (deploy) node: node0
Storage nodes: node1 node2 node3 node4 node5
OSD data disks: /dev/xvdb /dev/xvdc
準(zhǔn)備工作:
mkdir -p ~/ceph-cluster
cd ~/ceph-cluster
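ceph-deploy assumes node0 can resolve every node by hostname and reach it over passwordless SSH; these notes don't record that step, so roughly (illustrative):
ssh-keygen
for h in node1 node2 node3 node4 node5; do ssh-copy-id $h; done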
Wipe any previous data:
ceph-deploy purge node1 node2 node3 node4 node5
ceph-deploy purgedata node1 node2 node3 node4 node5
ceph-deploy forgetkeys
創(chuàng)建新集群的配置文件
ceph-deploy new node1 node2 node3 node4 node5
vim ceph.conf and add the following:
filestore_xattr_use_omap = true
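For reference, a sketch of what ceph.conf might look like after the edit; the fsid, mon_initial_members and mon_host values are generated by ceph-deploy new, and the last line is the one added above:
[global]
fsid = <generated>
mon_initial_members = node1, node2, node3, node4, node5
mon_host = <node IPs, generated>
filestore_xattr_use_omap = true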
Deployment commands:
# 安裝程序
ceph-deploy install node0
ceph-deploy install node1 node2 node3 node4 node5
# initialize the monitors
ceph-deploy mon create-initial
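Once the monitors form quorum, ceph-deploy gathers the keyrings into the working directory; a quick check:
ls ~/ceph-cluster
# expect ceph.conf, ceph.mon.keyring, ceph.client.admin.keyring,
# and the ceph.bootstrap-{osd,mds,rgw}.keyring files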
# zap the OSD disks and format them as xfs
ceph-deploy --overwrite-conf osd create --zap-disk node1:/dev/xvdb node2:/dev/xvdb node3:/dev/xvdb node4:/dev/xvdb node5:/dev/xvdb
ceph-deploy --overwrite-conf osd create --zap-disk node1:/dev/xvdc node2:/dev/xvdc node3:/dev/xvdc node4:/dev/xvdc node5:/dev/xvdc
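jewel's ceph-deploy also accepts a HOST:DATA:JOURNAL triple for putting the journal on a separate device; /dev/xvdf below is hypothetical, not part of this setup:
# ceph-deploy --overwrite-conf osd create --zap-disk node1:/dev/xvdb:/dev/xvdf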
# 下發(fā)配置到節(jié)點(diǎn)
ceph-deploy --overwrite-conf admin node1 node2 node3 node4 node5
ceph-deploy --overwrite-conf admin node0
# 設(shè)置只讀權(quán)限
sudo chmod +r /etc/ceph/ceph.client.admin.keyring
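At this point the cluster can be queried from any node holding the admin keyring; a healthy cluster shows all 10 OSDs up and in:
sudo ceph -s
sudo ceph osd tree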
# deploy rgw, to provide the S3 API
ceph-deploy rgw create node1 node2 node3 node4 node5
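In jewel, rgw listens on civetweb port 7480 by default; a quick smoke test (node1 as an example):
curl http://node1:7480
# expect an anonymous ListAllMyBucketsResult XML response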
# deploy mds, to support CephFS
ceph-deploy mds create node1 node2 node3 node4 node5
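mds alone does not give you a filesystem; the data/metadata pools and the fs itself still have to be created. Pool names and pg counts here are illustrative, not from the original notes:
ceph osd pool create cephfs_data 64
ceph osd pool create cephfs_metadata 64
ceph fs new cephfs cephfs_metadata cephfs_data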
# set pg_num for pools
sudo ceph osd pool set rbd pg_num 256
sudo ceph osd pool set rbd pgp_num 256
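Rule of thumb for sizing: total PGs ≈ (OSD count × 100) / replica count, rounded to a power of two. Here: 10 OSDs × 100 / 3 ≈ 333, so 256 (or 512) is a reasonable pg_num for the main pool.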
# batch-adjust pg_num and pgp_num for all pools (note: pg_num can only be increased, so this fails for pools already above 64)
for poolname in $(rados lspools); do
    ceph osd pool set $poolname pg_num 64
    ceph osd pool set $poolname pgp_num 64
done
# check pg_num
sudo ceph osd dump
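To see just the pool lines instead of the whole dump:
sudo ceph osd dump | grep pg_num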
Pull an existing cluster's config into ceph-deploy:
mkdir -p cluster1
cd cluster1
ceph-deploy config pull HOST
ceph-deploy gatherkeys HOST
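HOST is any existing monitor host; for example, assuming node1 is a mon (hostname illustrative):
ceph-deploy config pull node1
ceph-deploy gatherkeys node1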
Add one more disk /dev/xvde to every node. Note: the commands below use the newer ceph-deploy 2.x syntax (osd create --data DEVICE HOST); on the jewel-era ceph-deploy the equivalent is osd create --zap-disk HOST:DISK.
node1=host1
node2=host2
node3=host3
disk="/dev/xvde"
ceph-deploy --overwrite-conf osd create --data $disk $node1
ceph-deploy --overwrite-conf osd create --data $disk $node2
ceph-deploy --overwrite-conf osd create --data $disk $node3
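Then confirm the new OSDs joined and watch the backfill until the cluster is HEALTH_OK again:
ceph osd tree
ceph -w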
TODO:
multisite deployment, to support multiple datacenters
http://docs.ceph.com/docs/master/radosgw/multisite/
http://www.reibang.com/p/31a6f8df9a8f