Cluster install
ca-01 ---------- cm-01
                 |---- co-01
                 |---- co-02
On all nodes
# install ceph-deploy
echo deb http://download.ceph.com/debian-infernalis/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
sudo apt-get update && sudo apt-get install ceph-deploy -y
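As a quick sanity check, ceph-deploy should now be on the PATH (the exact version string varies by release):
ceph-deploy --version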
On the admin node
# ssh key
ssh-keygen
ssh-copy-id ceph-bj-monitor-01
ssh-copy-id ceph-bj-osd-01
ssh-copy-id ceph-bj-osd-02
# edit ~/.ssh/config
Host ceph-bj-monitor-01
    Hostname ceph-bj-monitor-01
    User root
Host ceph-bj-osd-01
    Hostname ceph-bj-osd-01
    User root
Host ceph-bj-osd-02
    Hostname ceph-bj-osd-02
    User root
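It may be worth confirming that passwordless SSH now works for each host in the config:
ssh ceph-bj-monitor-01 hostname
ssh ceph-bj-osd-01 hostname
ssh ceph-bj-osd-02 hostname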
# local repo
export CEPH_DEPLOY_REPO_URL=http://mirrors.163.com/ceph/debian-luminous/
export CEPH_DEPLOY_GPG_URL=https://mirrors.163.com/ceph/keys/release.asc
# deploy
ceph-deploy new ceph-bj-monitor-01
# check ceph.conf as follows:
[global]
fsid = 144cec2f-ceae-460c-84f7-5df374685e9d
mon_initial_members = ceph-bj-monitor-01
mon_host = 172.16.182.175
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public network = 172.16.182.0/24
osd pool default size = 2
# note the two lines below especially if you are using ext4 for OSDs; see http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/ for more
osd_max_object_name_len = 256
osd_max_object_namespace_len = 64
osd journal size = 1024
filestore xattr use omap = true
osd pool default min size = 1
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
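If you change ceph.conf later, after the nodes are installed, the updated file can be pushed out with ceph-deploy; a sketch using the node names above (--overwrite-conf replaces their existing copies):
ceph-deploy --overwrite-conf config push ceph-bj-monitor-01 ceph-bj-osd-01 ceph-bj-osd-02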
# continue the deployment; add --no-adjust-repos if you are behind a firewall or using a proxy
ceph-deploy install dcg-bj-02 ceph-bj-monitor-01 ceph-bj-osd-01 ceph-bj-osd-02 --no-adjust-repos
ceph-deploy mon create-initial
# create data directories on the OSD nodes
# emulating an OSD with a local directory; skip this if a dedicated disk is available
ssh ceph-bj-osd-01
sudo rm -rf /var/local/osd1
sudo mkdir /var/local/osd1
sudo chmod 777 /var/local/osd1
exit
# emulating an OSD with a local directory; skip this if a dedicated disk is available
ssh ceph-bj-osd-02
sudo rm -rf /var/local/osd2
sudo mkdir /var/local/osd2
sudo chmod 777 /var/local/osd2
exit
ceph-deploy disk zap ceph-bj-osd-01:nvme0n1
ceph-deploy disk zap ceph-bj-osd-01:nvme1n1
ceph-deploy disk zap ceph-bj-osd-02:nvme0n1
ceph-deploy disk zap ceph-bj-osd-02:nvme1n1
ceph-deploy osd prepare ceph-bj-osd-01:/dev/nvme0n1:/dev/nvme1n1 ceph-bj-osd-02:/dev/nvme0n1:/dev/nvme1n1
ceph-deploy osd activate ceph-bj-osd-01:/dev/nvme0n1p1:/dev/nvme1n1p1 ceph-bj-osd-02:/dev/nvme0n1p1:/dev/nvme1n1p1
# distribute key
ceph-deploy admin dcg-bj-02 ceph-bj-monitor-01 ceph-bj-osd-01 ceph-bj-osd-02
sudo chmod +r /etc/ceph/ceph.client.admin.keyring
# uninstall
ceph-deploy uninstall dcg-bj-02 ceph-bj-monitor-01 ceph-bj-osd-01 ceph-bj-osd-02
ceph-deploy purgedata dcg-bj-02 ceph-bj-monitor-01 ceph-bj-osd-01 ceph-bj-osd-02
sudo rm -rf --one-file-system -- /var/lib/ceph
sudo rm -rf --one-file-system -- /etc/ceph/
rm -rf ./my_ceph
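For a fuller teardown, ceph-deploy also offers purge (removes the packages as well as data) and forgetkeys; a sketch with the same node list:
ceph-deploy purge dcg-bj-02 ceph-bj-monitor-01 ceph-bj-osd-01 ceph-bj-osd-02
ceph-deploy forgetkeys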
# check
ceph health
ceph -w
ceph osd tree
RBD (kernel + librbd)
# ceph -w
    cluster 44eb667d-b061-4ef0-900b-6a173559d702
     health HEALTH_OK
     monmap e1: 1 mons at {ceph-bj-monitor-01=172.16.182.175:6789/0}
            election epoch 4, quorum 0 ceph-bj-monitor-01
     osdmap e17: 2 osds: 2 up, 2 in
            flags sortbitwise,require_jewel_osds
      pgmap v2095: 64 pgs, 1 pools, 0 bytes data, 0 objects
            15476 MB used, 862 GB / 924 GB avail
                  64 active+clean
2018-10-25 03:14:06.925774 [INF] pgmap v2095: 64 pgs: 64 active+clean; 0 bytes data, 15476 MB used, 862 GB / 924 GB avail
# ceph osd tree
ID WEIGHT  TYPE NAME                UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 0.90309 root default
-2 0.45689     host ceph-bj-osd-01
 0 0.45689         osd.0                 up  1.00000          1.00000
-3 0.44620     host ceph-bj-osd-02
 1 0.44620         osd.1                 up  1.00000          1.00000
# rbd create test_image --size 10240
# rbd list
test_image
# rbd info test_image
rbd image 'test_image':
        size 10GiB in 2560 objects
        order 22 (4MiB objects)
        block_name_prefix: rbd_data.37196b8b4567
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        flags:
An issue happens when mapping the image to a block device:
# rbd map test_image
rbd: sysfs write failed
RBD image feature set mismatch. Try disabling features unsupported by the kernel with "rbd feature disable".
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (6) No such device or address
# dmesg | tail
[351821.997061] usb 3-7: Manufacturer: Dell
[351821.997310] usb 3-7: ep 0x81 - rounding interval to 128 microframes, ep desc says 192 microframes
[351822.023940] input: Dell Dell USB Keyboard as /devices/pci0000:00/0000:00:14.0/usb3/3-7/3-7:1.0/0003:413C:2105.0003/input/input3
[351822.078560] hid-generic 0003:413C:2105.0003: input,hidraw2: USB HID v1.10 Keyboard [Dell Dell USB Keyboard] on usb-0000:00:14.0-7/input0
[351833.800882] usb 3-7: USB disconnect, device number 3
[1109182.054573] Key type ceph registered
[1109182.055604] libceph: loaded (mon/osd proto 15/24)
[1109182.059629] rbd: loaded
[1109682.929947] libceph: client14108 fsid 44eb667d-b061-4ef0-900b-6a173559d702
[1109682.932270] libceph: mon0 172.16.182.175:6789 session established
# rbd showmapped
# fdisk -l
Disk /dev/sda: 931.5 GiB, 1000204886016 bytes, 1953525168 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0x67de61a9
Device     Boot      Start        End    Sectors   Size Id Type
/dev/sda1  *          2048 1951522815 1951520768 930.6G 83 Linux
/dev/sda2      1951524862 1953523711    1998850   976M  5 Extended
/dev/sda5      1951524864 1953523711    1998848   976M 82 Linux swap / Solaris
#
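As the error message suggests, one option is to disable the unsupported features on the existing image; exactly which features the kernel client supports depends on the kernel version, so this particular set is a common workaround rather than a rule:
# rbd feature disable test_image exclusive-lock object-map fast-diff deep-flatten
# rbd map test_image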
Another way to avoid the issue is to create the image with only kernel-supported features from the start:
# rbd create test_image2 --size 10G --image-format 2 --image-feature layering
# rbd ls
test_image
test_image2
# rbd map test_image2
/dev/rbd0
# rbd showmapped
id pool image       snap device
0  rbd  test_image2 -    /dev/rbd0
# fdisk -l
Disk /dev/sda: 931.5 GiB, 1000204886016 bytes, 1953525168 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0x67de61a9
Device     Boot      Start        End    Sectors   Size Id Type
/dev/sda1  *          2048 1951522815 1951520768 930.6G 83 Linux
/dev/sda2      1951524862 1953523711    1998850   976M  5 Extended
/dev/sda5      1951524864 1953523711    1998848   976M 82 Linux swap / Solaris
Disk /dev/rbd0: 10 GiB, 10737418240 bytes, 20971520 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
# ll /dev/rbd0
brw-rw---- 1 root disk 251, 0 Oct 24 19:33 /dev/rbd0
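Once mapped, /dev/rbd0 behaves like any other block device; a minimal usage sketch (the ext4 filesystem and /mnt mount point are arbitrary choices):
# mkfs.ext4 /dev/rbd0
# mount /dev/rbd0 /mnt
# umount /mnt
# rbd unmap /dev/rbd0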
# rados mkpool pool
successfully created pool pool
# rados lspools
rbd
pool
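Note that rados mkpool has since been deprecated; on newer releases the equivalent is ceph osd pool create (the PG count of 64 here is an arbitrary choice for a small test cluster):
# ceph osd pool create pool 64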
# rbd create pool/image1 --size 1G --image-format 2 --image-feature layering
# rbd list
test_image
test_image2
# rbd list pool
image1
# rbd info pool/image1
rbd image 'image1':
? ? ? ? size 1GiB in 256 objects
? ? ? ? order 22 (4MiB objects)
? ? ? ? block_name_prefix: rbd_data.37276b8b4567
? ? ? ? format: 2
? ? ? ? features: layering
? ? ? ? flags:
# rbd create pool/image2 --size 1G --order 24 --image-format 2 --image-feature layering
rbd: --order is deprecated, use --object-size
# rbd list pool
image1
image2
# rbd info pool/image2
rbd image 'image2':
? ? ? ? size 1GiB in 64 objects
? ? ? ? order 24 (16MiB objects)
? ? ? ? block_name_prefix: rbd_data.372b6b8b4567
? ? ? ? format: 2
? ? ? ? features: layering
? ? ? ? flags:
# rbd rm pool/image2
Removing image: 100% complete...done.
snapshot
# rbd snap create pool/image1@image1_snap
# rbd snap list
rbd: image name was not specified
# rbd snap list pool/image1
SNAPID NAME        SIZE TIMESTAMP
     4 image1_snap 1GiB
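A snapshot can also be rolled back onto its image; note this overwrites the image's current contents with the snapshot data:
# rbd snap rollback pool/image1@image1_snap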
# rbd ls pool -l
NAME               SIZE PARENT FMT PROT LOCK
image1             1GiB          2
image1@image1_snap 1GiB          2
# rbd info pool/image1@image1_snap
rbd image 'image1':
        size 1GiB in 256 objects
        order 22 (4MiB objects)
        block_name_prefix: rbd_data.37276b8b4567
        format: 2
        features: layering
        flags:
        protected: False
# rbd snap protect pool/image1@image1_snap
# rbd info pool/image1@image1_snap
rbd image 'image1':
        size 1GiB in 256 objects
        order 22 (4MiB objects)
        block_name_prefix: rbd_data.37276b8b4567
        format: 2
        features: layering
        flags:
        protected: True
# rbd clone pool/image1@image1_snap rbd/image2
# rbd ls rbd -l
NAME        SIZE  PARENT                  FMT PROT LOCK
image2      1GiB  pool/image1@image1_snap   2
test_image  10GiB                           2
test_image2 10GiB                           2
# rbd children pool/image1@image1_snap
rbd/image2
# rbd flatten rbd/image2
Image flatten: 100% complete...done.
# rbd ls rbd -l
NAME        SIZE  PARENT FMT PROT LOCK
image2      1GiB           2
test_image  10GiB          2
test_image2 10GiB          2
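Deleting a protected snapshot requires unprotecting it first, which only succeeds once no clones depend on it (image2 was flattened above, so this would now work):
# rbd snap unprotect pool/image1@image1_snap
# rbd snap rm pool/image1@image1_snap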
export/import
# rbd export pool/image1 /tmp/image1_export
Exporting image: 100% complete...done.
# ls -alh /tmp/image1_export
-rw-r--r-- 1 root root 1.0G Oct 24 19:47 /tmp/image1_export
# rbd import /tmp/image1_export pool/image2 --image-format 2
Importing image: 100% complete...done.
# rbd ls pool -l
NAME               SIZE PARENT FMT PROT LOCK
image1             1GiB          2
image1@image1_snap 1GiB          2 yes
image2             1GiB          2
# rbd ls -l
NAME        SIZE  PARENT FMT PROT LOCK
image2      1GiB           2
test_image  10GiB          2
test_image2 10GiB          2
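For incremental backups, rbd also offers diff-based export/import against a snapshot; a sketch, assuming the destination image's content matches the diff's starting point (the file path is arbitrary):
# rbd export-diff pool/image1@image1_snap /tmp/image1_diff
# rbd import-diff /tmp/image1_diff pool/image2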