Stateful applications deployed on k8s need persistent data, so storage is indispensable. Kubernetes supports many storage backends; at my company we currently offer applications GlusterFS (deployed both containerized and on bare metal) and NFS. This post is a quick hands-on walkthrough of using GlusterFS as the data store for k8s.
Introduction
GlusterFS is a scalable network file system. Compared with other distributed file systems it offers high scalability, high availability, and high performance, and because its design has no metadata server, the service has no single point of failure.
Planning
- 192.168.209.130 master-01
- 192.168.209.131 master-02
- 192.168.209.132 master-03
The three machines above serve both as the Kubernetes masters and as the GlusterFS cluster nodes.
Add hosts entries
[root@master-01 gluster]# cat /etc/hosts
192.168.209.130 master-01
192.168.209.131 master-02
192.168.209.132 master-03
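Since peers are probed by hostname below, the same entries must exist on every node. A convenience sketch for pushing them out, assuming root SSH access between the nodes (not part of the original setup):
# Append the cluster hostnames to /etc/hosts on the other nodes
for h in master-02 master-03; do
  ssh root@$h 'cat >> /etc/hosts' <<'EOF'
192.168.209.130 master-01
192.168.209.131 master-02
192.168.209.132 master-03
EOF
done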
Install GlusterFS
GlusterFS's default config file is /etc/glusterfs/glusterd.vol, and glusterd's default working directory is /var/lib/glusterd. Install and start it on all three nodes:
[root@master-01 ~]#yum install -y centos-release-gluster glusterfs glusterfs-server glusterfs-fuse
[root@master-01 ~]#systemctl start glusterd
[root@master-01 ~]# systemctl status glusterd
● glusterd.service - GlusterFS, a clustered file-system server
   Loaded: loaded (/usr/lib/systemd/system/glusterd.service; disabled; vendor preset: disabled)
   Active: active (running) since Sun 2019-04-28 13:20:08 CST; 2min 57s ago
     Docs: man:glusterd(8)
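Note the unit is still disabled in the output above; to have glusterd come back after a reboot, enable it on each node as well (standard systemd usage):
[root@master-01 ~]# systemctl enable glusterd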
Cluster configuration
[root@master-01 ~]# gluster peer probe master-01
peer probe: success. Probe on localhost not needed
[root@master-01 ~]# gluster peer probe master-02
peer probe: success.
[root@master-01 ~]# gluster peer probe master-03
peer probe: success.
Create and start a volume
[root@master-01 ~]# gluster volume create gv0 replica 3 master-01:/opt/data/gv0 master-02:/opt/data/gv0 master-03:/opt/data/gv0 force
volume create: gv0: success: please start the volume to access data
[root@master-01 ~]# gluster volume start gv0
volume start: gv0: success
- By default Gluster refuses to create bricks on the / partition; I have no separate data disk here, so I used force to create the volume anyway.
- The /opt/data/gv0 directory must already exist on all three machines; see the sketch below.
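A minimal sketch for creating the brick directory on every node, again assuming SSH access from master-01:
# Create the brick directory on all three nodes
for h in master-01 master-02 master-03; do
  ssh root@$h 'mkdir -p /opt/data/gv0'
done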
Check peer and volume information
[root@master-01 ~]# gluster peer status
Number of Peers: 2
Hostname: master-02
Uuid: 6f5a823c-4044-4dd9-95bc-129018b2eec0
State: Peer in Cluster (Connected)
Hostname: master-03
Uuid: dfeb6d81-c90b-4628-82f2-e379ed0ceded
State: Peer in Cluster (Connected)
[root@master-01 ~]# gluster volume info
Volume Name: gv0
Type: Replicate
Volume ID: 1891c9f8-8acb-48c2-94d9-29e2a625f9a0
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: master-01:/opt/data/gv0
Brick2: master-02:/opt/data/gv0
Brick3: master-03:/opt/data/gv0
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
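Beyond gluster volume info, gluster volume status shows whether each brick process is actually online and which port it is serving on; it's worth running after starting a volume (output varies per deployment):
[root@master-01 ~]# gluster volume status gv0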
Check listening ports
[root@master-01 ~]# netstat -tunlp|grep glusterd
tcp 0 0 0.0.0.0:24007 0.0.0.0:* LISTEN 19533/glusterd
Test mounting
To use or mount GlusterFS volumes from a Kubernetes cluster, every node that will mount them needs the glusterfs-fuse client installed:
[root@harbor-01 harbor]# yum install -y centos-release-gluster glusterfs-fuse
[root@master-01 ~]# mount -t glusterfs master-01:/gv0 /mnt
[root@master-01 ~]# df -h
...
master-01:/gv0 38G 13G 25G 34% /mnt
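To confirm replication works end to end, write a file through the mount and check that it shows up in the brick directory on each node (paths are the ones used in this post; the ssh step assumes the access set up earlier):
# Write through the Gluster mount, then verify on the bricks
[root@master-01 ~]# echo test > /mnt/hello.txt
[root@master-01 ~]# ls /opt/data/gv0/hello.txt
[root@master-01 ~]# ssh master-02 ls /opt/data/gv0/hello.txt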
Common commands
# Delete a volume
gluster volume stop gv0
gluster volume delete gv0
# Remove a machine from the cluster
gluster peer detach master-01
# Only allow specific networks to access the volume
gluster volume set gv0 auth.allow 172.28.26.*
gluster volume set gv0 auth.allow 192.168.222.1,192.168.*.*
# Probe new machines before adding them to a volume (with the replica count set to 2, bricks must be added in multiples of 2: 2, 4, 6, ...)
gluster peer probe master-02
gluster peer probe master-03
# Add bricks
gluster volume add-brick gv0 replica 2 master-02:/data/gluster master-03:/data/gluster force
# Remove bricks
gluster volume remove-brick gv0 replica 2 master-02:/opt/data/gv0 master-03:/opt/data/gv0 start
gluster volume remove-brick gv0 replica 2 master-02:/opt/data/gv0 master-03:/opt/data/gv0 status
gluster volume remove-brick gv0 replica 2 master-02:/opt/data/gv0 master-03:/opt/data/gv0 commit
Note: when expanding or shrinking a volume, the number of bricks added or removed must satisfy the requirements of the volume type (our gv0 is replica 3, so it grows in multiples of 3).
# After expanding or shrinking a volume, rebalance its data.
gluster volume rebalance mamm-volume start|stop|status
###########################################################
Migrating a brick: move data online from an old brick to a new one
# Start the migration
gluster volume replace-brick gv0 master-02:/opt/data/gv0 master-02:/opt/test start force
# Check migration status
gluster volume replace-brick gv0 master-02:/opt/data/gv0 master-02:/opt/test status
# Commit once the migration finishes
gluster volume replace-brick gv0 master-02:/opt/data/gv0 master-02:/opt/test commit
# If the machine has failed, force the commit
gluster volume replace-brick gv0 master-02:/opt/data/gv0 master-02:/opt/test commit force
(Note: newer GlusterFS releases keep only the replace-brick ... commit force form; the start/status sub-commands were removed.)
###########################################################
Trigger replica self-heal
gluster volume heal mamm-volume        # heal only files that need it
gluster volume heal mamm-volume full   # heal all files
gluster volume heal mamm-volume info   # show self-heal details
#####################################################
data-self-heal, metadata-self-heal and entry-self-heal
These options enable or disable self-healing of file contents, file metadata, and directory entries; all three default to on.
# Example of turning one of them off:
gluster volume set gv0 entry-self-heal off
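To inspect the current values of these options, gluster volume get works on any recent release (the grep is just to narrow the output):
gluster volume get gv0 all | grep self-heal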
At this point GlusterFS is fully deployed; next we put it to work in k8s.
Create the Endpoints
The Service below deliberately has no selector: it exists only so the manually maintained Endpoints pointing at the Gluster nodes can be referenced by the name glusterfs-cluster (the port number is required by the API but is essentially arbitrary for GlusterFS mounts).
# Endpoints manifest
[root@master-01 gluster]# cat gluster-ep.yaml
apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-cluster
subsets:
- addresses:
  - ip: 192.168.209.130
  - ip: 192.168.209.131
  - ip: 192.168.209.132
  ports:
  - port: 1990
    protocol: TCP
---
kind: Service
apiVersion: v1
metadata:
  name: glusterfs-cluster
spec:
  ports:
  - port: 1990
# Create the resources
[root@master-01 gluster]# kubectl apply -f gluster-ep.yaml
endpoints/glusterfs-cluster created
service/glusterfs-cluster created
# Check the result
[root@master-01 gluster]# kubectl get ep,svc|grep glusterfs
endpoints/glusterfs-cluster 192.168.209.130:1990,192.168.209.131:1990,192.168.209.132:1990 2m4s
service/glusterfs-cluster ClusterIP 10.254.120.33 <none> 1990/TCP 2m4s
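You can also confirm that the selector-less Service picked up the manually created Endpoints; the Endpoints line in the describe output should list all three Gluster nodes:
[root@master-01 gluster]# kubectl describe svc glusterfs-cluster | grep -i endpoints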
Create the PV and PVC
Note that PVs are cluster-scoped, so the PV itself takes no namespace; the PVC's label selector pins it to exactly this PV.
# Manifest
[root@master-01 gluster]# cat gluster-pvc.yaml
---
apiVersion: v1
kind: PersistentVolume   # PV
metadata:
  name: test-pv
  labels:
    alicloud-pvname: test-pv
spec:                    # PV attributes
  capacity:              # capacity
    storage: 2Gi         # storage size
  accessModes:           # access modes: ReadWriteOnce, ReadOnlyMany, or ReadWriteMany
  - ReadWriteMany
  glusterfs:
    endpoints: 'glusterfs-cluster'
    path: 'gv0'
    readOnly: false
  persistentVolumeReclaimPolicy: Recycle
---
kind: PersistentVolumeClaim   # PVC
apiVersion: v1
metadata:
  name: test-pvc
  namespace: default
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
  selector:
    matchLabels:
      alicloud-pvname: test-pv
# Create the resources
[root@master-01 gluster]# kubectl apply -f gluster-pvc.yaml
persistentvolume/test-pv created
persistentvolumeclaim/test-pvc created
# Check the result
[root@master-01 gluster]# kubectl get pv,pvc
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
persistentvolume/test-pv 2Gi RWX Recycle Bound default/test-pvc 50s
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
persistentvolumeclaim/test-pvc Bound test-pv 2Gi RWX 50s
Create a Pod
# Manifest
[root@master-01 gluster]# cat ng-deploy.yaml
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  labels:
    run: nginx01
  name: nginx01
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-pod1
  template:
    metadata:
      labels:
        app: nginx-pod1
    spec:
      containers:
      - name: nginx11
        image: nginx
        imagePullPolicy: Always
        volumeMounts:
        - mountPath: /usr/share/nginx/html   # mount path inside the container
          name: glu
      restartPolicy: Always
      volumes:
      - name: glu                            # define the volume
        persistentVolumeClaim:
          claimName: test-pvc                # use the PVC
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx1
  name: nginx1
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30003
  selector:
    app: nginx-pod1
  type: NodePort
# Create
[root@master-01 gluster]# kubectl apply -f ng-deploy.yaml
deployment.apps/nginx01 created
service/nginx1 created
Check status
[root@master-01 gluster]# kubectl get pod,svc|grep nginx
pod/nginx01-7df547bf8c-cxvht 1/1 Running 0 10m
service/nginx1 NodePort 10.254.55.187 <none> 80:30003/TCP 79m
[root@master-01 gluster]# kubectl describe po nginx01-7df547bf8c-cxvht
Name: nginx01-7df547bf8c-cxvht
......
Volumes:
glu:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: test-pvc
ReadOnly: false
default-token-tb5bg:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-tb5bg
Test
Create an index.html in the GlusterFS volume and write some content into it. (I'm writing into the brick path directly as a quick test; strictly speaking you should write through a Gluster mount such as the /mnt one from earlier, since writes made directly to a brick bypass GlusterFS replication.)
[root@master-01 gluster]# vi /opt/data/gv0/index.html
[root@master-01 gluster]# kubectl exec -ti nginx01-7df547bf8c-cxvht bash
root@nginx01-7df547bf8c-cxvht:/# ls /usr/share/nginx/html/
index.html
root@nginx01-7df547bf8c-cxvht:/# cat /usr/share/nginx/html/index.html
<h1>hello glusterfs</h1>
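Before the browser test, the NodePort can be hit from the command line; any node's IP works for a NodePort Service:
[root@master-01 gluster]# curl http://192.168.209.130:30003
<h1>hello glusterfs</h1>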
Test access from a browser
[screenshot: browser showing the hello glusterfs page]
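To verify the durability claim below, delete the pod and confirm its replacement serves the same content (the pod name comes from my environment; yours will differ):
# The Deployment recreates the pod; the GlusterFS-backed data is still there
[root@master-01 gluster]# kubectl delete pod nginx01-7df547bf8c-cxvht
[root@master-01 gluster]# kubectl get pod          # wait for the new pod to be Running
[root@master-01 gluster]# curl http://192.168.209.130:30003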
Now even if the pod dies, the data is not lost. One caveat: if you adopt GlusterFS, make sure someone on the team can provide deep technical backup for it; otherwise production problems get very painful.