Setting up a Ceph cluster

Cluster layout (ca = admin node, cm = monitor node, co = OSD node):

ca-01 ---------- cm-01
                   |---- co-01
                   |---- co-02

On all nodes:

# install ceph-deploy

echo deb http://download.ceph.com/debian-infernalis/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list

wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -

sudo apt-get update && sudo apt-get install ceph-deploy -y
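A quick sanity check (optional, not part of the original steps) is to confirm the tool is available on each node:

ceph-deploy --version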

On the admin node:

# ssh key

ssh-keygen

ssh-copy-id ceph-bj-monitor-01

ssh-copy-id ceph-bj-osd-01

ssh-copy-id ceph-bj-osd-02

# edit ~/.ssh/config

Host ceph-bj-monitor-01
    Hostname ceph-bj-monitor-01
    User root

Host ceph-bj-osd-01
    Hostname ceph-bj-osd-01
    User root

Host ceph-bj-osd-02
    Hostname ceph-bj-osd-02
    User root
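Before moving on, it is worth confirming that passwordless SSH works from the admin node to each host (a simple check, not in the original write-up):

ssh ceph-bj-monitor-01 hostname
ssh ceph-bj-osd-01 hostname
ssh ceph-bj-osd-02 hostname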

# local repo

export CEPH_DEPLOY_REPO_URL=http://mirrors.163.com/ceph/debian-luminous/

export CEPH_DEPLOY_GPG_URL=https://mirrors.163.com/ceph/keys/release.asc

# deploy

ceph-deploy new ceph-bj-monitor-01

# edit/check ceph.conf so that it looks like the following:

[global]

fsid = 144cec2f-ceae-460c-84f7-5df374685e9d

mon_initial_members = ceph-bj-monitor-01

mon_host = 172.16.182.175

auth_cluster_required = cephx

auth_service_required = cephx

auth_client_required = cephx

public network = 172.16.182.0/24

osd pool default size = 2

# note the two lines below in particular if your OSDs use ext4; see http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/ for details

osd_max_object_name_len = 256

osd_max_object_namespace_len = 64

osd journal size = 1024

filestore xattr use omap = true

osd pool default min size = 1

osd pool default pg num = 333

osd pool default pgp num = 333

osd crush chooseleaf type = 1
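If you change ceph.conf after the nodes are already deployed, push the updated file out with ceph-deploy (standard usage; the node list matches the hosts used in this post):

ceph-deploy --overwrite-conf config push dcg-bj-02 ceph-bj-monitor-01 ceph-bj-osd-01 ceph-bj-osd-02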

# continue the deployment; add --no-adjust-repos if you are behind a firewall or using a proxy

ceph-deploy install dcg-bj-02 ceph-bj-monitor-01 ceph-bj-osd-01 ceph-bj-osd-02 --no-adjust-repos

ceph-deploy mon create-initial
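Once create-initial succeeds, the monitor keyrings are gathered into the working directory. As an optional check (run from a node that has the admin keyring, e.g. after the ceph-deploy admin step below), confirm the monitor has formed a quorum:

ceph quorum_status --format json-pretty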

# create working directories on the OSD nodes

# emulate an OSD with a local directory; skip this if a dedicated data disk is available

ssh ceph-bj-osd-01

rm -rf /var/local/osd1

sudo mkdir /var/local/osd1

chmod 777 /var/local/osd1

exit

# emulate an OSD with a local directory; skip this if a dedicated data disk is available

ssh ceph-bj-osd-02

rm -rf /var/local/osd2

sudo mkdir /var/local/osd2

chmod 777 /var/local/osd2

exit

ceph-deploy disk zap ceph-bj-osd-01:nvme0n1

ceph-deploy disk zap ceph-bj-osd-01:nvme1n1

ceph-deploy disk zap ceph-bj-osd-02:nvme0n1

ceph-deploy disk zap ceph-bj-osd-02:nvme1n1

ceph-deploy osd prepare ceph-bj-osd-01:/dev/nvme0n1:/dev/nvme1n1 ceph-bj-osd-02:/dev/nvme0n1:/dev/nvme1n1

ceph-deploy osd activate ceph-bj-osd-01:/dev/nvme0n1p1:/dev/nvme1n1p1 ceph-bj-osd-02:/dev/nvme0n1p1:/dev/nvme1n1p1
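After activation, both OSDs should be reported as up and in (run from a node with the admin keyring, e.g. the monitor, or after the key distribution step below):

ceph osd stat
ceph osd tree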

# distribute the admin keyring

ceph-deploy admin dcg-bj-02 ceph-bj-monitor-01 ceph-bj-osd-01 ceph-bj-osd-02

sudo chmod +r /etc/ceph/ceph.client.admin.keyring
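With the keyring distributed and readable, each node should be able to talk to the cluster; a quick sanity check over SSH (using the root login configured earlier):

ssh ceph-bj-osd-01 ceph health
ssh ceph-bj-osd-02 ceph health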

# uninstall

ceph-deploy uninstall dcg-bj-02 ceph-bj-monitor-01 ceph-bj-osd-01 ceph-bj-osd-02

ceph-deploy purgedata dcg-bj-02 ceph-bj-monitor-01 ceph-bj-osd-01 ceph-bj-osd-02

sudo rm -rf --one-file-system -- /var/lib/ceph

sudo rm -rf --one-file-system -- /etc/ceph/

rm -rf ./my_ceph

# check

ceph health

ceph -w

ceph osd tree

RBD (kernel + librbd)

# ceph -w

    cluster 44eb667d-b061-4ef0-900b-6a173559d702
     health HEALTH_OK
     monmap e1: 1 mons at {ceph-bj-monitor-01=172.16.182.175:6789/0}
            election epoch 4, quorum 0 ceph-bj-monitor-01
     osdmap e17: 2 osds: 2 up, 2 in
            flags sortbitwise,require_jewel_osds
      pgmap v2095: 64 pgs, 1 pools, 0 bytes data, 0 objects
            15476 MB used, 862 GB / 924 GB avail
                  64 active+clean

2018-10-25 03:14:06.925774 [INF] pgmap v2095: 64 pgs: 64 active+clean; 0 bytes data, 15476 MB used, 862 GB / 924 GB avail

# ceph osd tree

ID WEIGHT  TYPE NAME                UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 0.90309 root default
-2 0.45689     host ceph-bj-osd-01
 0 0.45689         osd.0                 up  1.00000          1.00000
-3 0.44620     host ceph-bj-osd-02
 1 0.44620         osd.1                 up  1.00000          1.00000

# rbd create test_image --size 10240

# rbd list

test_image

# rbd info test_image

rbd image 'test_image':

        size 10GiB in 2560 objects
        order 22 (4MiB objects)
        block_name_prefix: rbd_data.37196b8b4567
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        flags:

An issue occurred when mapping the image to a block device:

# rbd map test_image

rbd: sysfs write failed

RBD image feature set mismatch. Try disabling features unsupported by the kernel with "rbd feature disable".

In some cases useful info is found in syslog - try "dmesg | tail".

rbd: map failed: (6) No such device or address

# dmesg | tail

[351821.997061] usb 3-7: Manufacturer: Dell

[351821.997310] usb 3-7: ep 0x81 - rounding interval to 128 microframes, ep desc says 192 microframes

[351822.023940] input: Dell Dell USB Keyboard as /devices/pci0000:00/0000:00:14.0/usb3/3-7/3-7:1.0/0003:413C:2105.0003/input/input3

[351822.078560] hid-generic 0003:413C:2105.0003: input,hidraw2: USB HID v1.10 Keyboard [Dell Dell USB Keyboard] on usb-0000:00:14.0-7/input0

[351833.800882] usb 3-7: USB disconnect, device number 3

[1109182.054573] Key type ceph registered

[1109182.055604] libceph: loaded (mon/osd proto 15/24)

[1109182.059629] rbd: loaded

[1109682.929947] libceph: client14108 fsid 44eb667d-b061-4ef0-900b-6a173559d702

[1109682.932270] libceph: mon0 172.16.182.175:6789 session established

# rbd showmapped

# fdisk -l

Disk /dev/sda: 931.5 GiB, 1000204886016 bytes, 1953525168 sectors

Units: sectors of 1 * 512 = 512 bytes

Sector size (logical/physical): 512 bytes / 512 bytes

I/O size (minimum/optimal): 512 bytes / 512 bytes

Disklabel type: dos

Disk identifier: 0x67de61a9

Device     Boot      Start        End    Sectors   Size Id Type
/dev/sda1  *          2048 1951522815 1951520768 930.6G 83 Linux
/dev/sda2       1951524862 1953523711    1998850   976M  5 Extended
/dev/sda5       1951524864 1953523711    1998848   976M 82 Linux swap / Solaris

#
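One way out, suggested by the error message itself, is to disable the features the kernel client does not support on the existing image (standard rbd feature disable usage; the feature list mirrors what rbd info showed above):

# rbd feature disable test_image deep-flatten fast-diff object-map exclusive-lock
# rbd map test_image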

Alternatively, resolve the issue at creation time by specifying only the image features the kernel supports:

# rbd create test_image2 --size 10G --image-format 2 --image-feature layering

# rbd ls

test_image

test_image2

# rbd map test_image2

/dev/rbd0

# rbd showmapped

id pool image       snap device
0  rbd  test_image2 -    /dev/rbd0

# fdisk -l

Disk /dev/sda: 931.5 GiB, 1000204886016 bytes, 1953525168 sectors

Units: sectors of 1 * 512 = 512 bytes

Sector size (logical/physical): 512 bytes / 512 bytes

I/O size (minimum/optimal): 512 bytes / 512 bytes

Disklabel type: dos

Disk identifier: 0x67de61a9

Device     Boot      Start        End    Sectors   Size Id Type
/dev/sda1  *          2048 1951522815 1951520768 930.6G 83 Linux
/dev/sda2       1951524862 1953523711    1998850   976M  5 Extended
/dev/sda5       1951524864 1953523711    1998848   976M 82 Linux swap / Solaris

Disk /dev/rbd0: 10 GiB, 10737418240 bytes, 20971520 sectors

Units: sectors of 1 * 512 = 512 bytes

Sector size (logical/physical): 512 bytes / 512 bytes

I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes

# ll /dev/rbd0

brw-rw---- 1 root disk 251, 0 Oct 24 19:33 /dev/rbd0
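From here the mapped device behaves like any other block device. A minimal sketch (filesystem type and mount point are arbitrary choices, not from the original):

# mkfs.ext4 /dev/rbd0
# mkdir -p /mnt/rbd0
# mount /dev/rbd0 /mnt/rbd0
# df -h /mnt/rbd0
# umount /mnt/rbd0
# rbd unmap /dev/rbd0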

# rados mkpool pool

successfully created pool pool

# rados lspools

rbd

pool
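Note that rados mkpool has since been deprecated; on Luminous and later the equivalent is to create the pool explicitly (64 placement groups here is an arbitrary example) and initialize it for RBD:

# ceph osd pool create pool 64 64
# rbd pool init pool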

# rbd create pool/image1 --size 1G --image-format 2 --image-feature layering

# rbd list

test_image

test_image2

# rbd list pool

image1

# rbd info pool/image1

rbd image 'image1':

        size 1GiB in 256 objects
        order 22 (4MiB objects)
        block_name_prefix: rbd_data.37276b8b4567
        format: 2
        features: layering
        flags:

# rbd create pool/image2 --size 1G --order 24 --image-format 2 --image-feature layering

rbd: --order is deprecated, use --object-size

# rbd list pool

image1

image2

# rbd info pool/image2

rbd image 'image2':

        size 1GiB in 64 objects
        order 24 (16MiB objects)
        block_name_prefix: rbd_data.372b6b8b4567
        format: 2
        features: layering
        flags:

# rbd rm pool/image2

Removing image: 100% complete...done.

snapshot

# rbd snap create pool/image1@image1_snap

# rbd snap list

rbd: image name was not specified

# rbd snap list pool/image1

SNAPID NAME        SIZE TIMESTAMP
     4 image1_snap 1GiB

# rbd ls pool -l

NAME               SIZE PARENT FMT PROT LOCK
image1             1GiB          2
image1@image1_snap 1GiB          2

# rbd info pool/image1@image1_snap

rbd image 'image1':

        size 1GiB in 256 objects
        order 22 (4MiB objects)
        block_name_prefix: rbd_data.37276b8b4567
        format: 2
        features: layering
        flags:
        protected: False

# rbd snap protect pool/image1@image1_snap

# rbd info pool/image1@image1_snap

rbd image 'image1':

        size 1GiB in 256 objects
        order 22 (4MiB objects)
        block_name_prefix: rbd_data.37276b8b4567
        format: 2
        features: layering
        flags:
        protected: True

# rbd clone pool/image1@image1_snap rbd/image2

# rbd ls rbd -l

NAME        SIZE  PARENT                  FMT PROT LOCK
image2      1GiB  pool/image1@image1_snap   2
test_image  10GiB                           2
test_image2 10GiB                           2

# rbd children pool/image1@image1_snap

rbd/image2

# rbd flatten rbd/image2

Image flatten: 100% complete...done.

# rbd ls rbd -l

NAME        SIZE  PARENT FMT PROT LOCK
image2      1GiB           2
test_image  10GiB          2
test_image2 10GiB          2

export/import

# rbd export pool/image1 /tmp/image1_export

Exporting image: 100% complete...done.

# ls -alh /tmp/image1_export

-rw-r--r-- 1 root root 1.0G Oct 24 19:47 /tmp/image1_export

# rbd import /tmp/image1_export pool/image2 --image-format 2

Importing image: 100% complete...done.

# rbd ls pool -l

NAME               SIZE PARENT FMT PROT LOCK
image1             1GiB          2
image1@image1_snap 1GiB          2 yes
image2             1GiB          2

# rbd ls -l

NAME        SIZE  PARENT FMT PROT LOCK
image2      1GiB           2
test_image  10GiB          2
test_image2 10GiB          2
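Two optional follow-ups, both standard rbd usage rather than part of the original walk-through. First, since rbd/image2 was flattened earlier, image1_snap no longer has children and can be unprotected and removed once it is no longer needed:

# rbd snap unprotect pool/image1@image1_snap
# rbd snap rm pool/image1@image1_snap

Second, for backups rbd can export only the changes between two snapshots instead of the full image (the snapshot name snap2 below is arbitrary):

# rbd snap create pool/image1@snap2
# rbd export-diff --from-snap image1_snap pool/image1@snap2 /tmp/image1_diff

The resulting diff can later be applied with rbd import-diff to a copy of the image that already carries the image1_snap snapshot.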
