k8s Advanced (Part 2)

5. Using Ceph storage for data persistence and sharing in k8s

5.1 Using Ceph RBD

To let pods in k8s use RBD images provided by Ceph as block storage, you need to create the RBD pool and image on the Ceph side, and make sure the k8s nodes can authenticate against the Ceph cluster.

5.1.1 Create and initialize the RBD pool

# Create the RBD pool
cephadm@ceph-deploy:~/ceph-cluster$ sudo ceph osd pool create pop-rbd-pool1 32 32
pool 'pop-rbd-pool1' created

# List the pools to verify
cephadm@ceph-deploy:~/ceph-cluster$ sudo ceph osd pool ls
device_health_metrics
popool
poprbd1
popcephfsmetadata
popcephfsdata
pop-rbd-pool1

# Enable the rbd application on the pool
cephadm@ceph-deploy:~/ceph-cluster$ sudo ceph osd pool application enable pop-rbd-pool1 rbd
enabled application 'rbd' on pool 'pop-rbd-pool1'

# Initialize the pool for RBD
cephadm@ceph-deploy:~/ceph-cluster$ sudo rbd pool init -p pop-rbd-pool1
cephadm@ceph-deploy:~/ceph-cluster$ 

5.1.2 Create an image

# Create
cephadm@ceph-deploy:~/ceph-cluster$ sudo rbd create pop-img-img1 --size 3G --pool pop-rbd-pool1 --image-format 2 --image-feature layering
# Verify
cephadm@ceph-deploy:~/ceph-cluster$ sudo rbd ls --pool pop-rbd-pool1
pop-img-img1

cephadm@ceph-deploy:~/ceph-cluster$ sudo rbd --image pop-img-img1 --pool pop-rbd-pool1 info
rbd image 'pop-img-img1':
    size 3 GiB in 768 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 3802ba74ebe8
    block_name_prefix: rbd_data.3802ba74ebe8
    format: 2
    features: layering
    op_features: 
    flags: 
    create_timestamp: Mon Oct 18 23:01:53 2021
    access_timestamp: Mon Oct 18 23:01:53 2021
    modify_timestamp: Mon Oct 18 23:01:53 2021

5.1.3 Install the ceph-common client

The ceph-common package needs to be installed on all k8s master and node hosts.

# Configure the Ceph APT repository; run on all k8s master and node hosts
$ wget -q -O- 'https://mirrors.tuna.tsinghua.edu.cn/ceph/keys/release.asc' | sudo apt-key add -
$ sudo apt-add-repository 'deb https://mirrors.aliyun.com/ceph/debian-pacific/ focal main'
$ sudo apt update

Install ceph-common:

root@k8s-master-1:~# apt-cache madison ceph-common
ceph-common | 16.2.6-1focal | https://mirrors.aliyun.com/ceph/debian-pacific focal/main amd64 Packages
ceph-common | 15.2.13-0ubuntu0.20.04.2 | https://mirrors.tuna.tsinghua.edu.cn/ubuntu focal-updates/main amd64 Packages
ceph-common | 15.2.12-0ubuntu0.20.04.1 | https://mirrors.tuna.tsinghua.edu.cn/ubuntu focal-security/main amd64 Packages
ceph-common | 15.2.1-0ubuntu1 | https://mirrors.tuna.tsinghua.edu.cn/ubuntu focal/main amd64 Packages

root@k8s-master-1:~# apt install ceph-common=16.2.6-1focal -y
root@k8s-master-2:~# apt install ceph-common=16.2.6-1focal -y
root@k8s-master-3:~# apt install ceph-common=16.2.6-1focal -y
root@k8s-work-1:~# apt install ceph-common=16.2.6-1focal -y
root@k8s-work-2:~# apt install ceph-common=16.2.6-1focal -y
root@k8s-work-3:~# apt install ceph-common=16.2.6-1focal -y

5.1.4 Create a Ceph user and grant permissions

cephadm@ceph-deploy:~/ceph-cluster$ sudo ceph auth get-or-create client.defult-pop mon 'allow r' osd 'allow * pool=pop-rbd-pool1'
[client.defult-pop]
    key = AQDRkG1h66ffBxAAwoN/k3Ai5UhaSINtv/fVZw==

# Verify
cephadm@ceph-deploy:~/ceph-cluster$ sudo ceph auth get client.defult-pop
[client.defult-pop]
    key = AQDRkG1h66ffBxAAwoN/k3Ai5UhaSINtv/fVZw==
    caps mon = "allow r"
    caps osd = "allow * pool=pop-rbd-pool1"
exported keyring for client.defult-pop

# Export the user's credentials to a keyring file
cephadm@ceph-deploy:~/ceph-cluster$ sudo ceph auth get client.defult-pop -o ceph.client.defult-pop.keyring
exported keyring for client.defult-pop

# scp ceph.conf and the keyring to the k8s master and node hosts
cephadm@ceph-deploy:~/ceph-cluster$ sudo scp ceph.conf ceph.client.defult-pop.keyring root@192.168.20.201:/etc/ceph
...

Verify

# The Ceph cluster's host entries also need to be added to /etc/hosts on the master and node hosts
root@k8s-master-1:/etc/ceph# sudo ceph --id defult-pop -s
  cluster:
    id:     f4254a97-6052-4c5a-b29e-b8cb43dff1d8
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph-node1,ceph-node2,ceph-node3 (age 6d)
    mgr: ceph-node1(active, since 6d), standbys: ceph-node2
    mds: 2/2 daemons up, 1 standby
    osd: 9 osds: 9 up (since 6d), 9 in (since 6d)
 
  data:
    volumes: 1/1 healthy
    pools:   6 pools, 672 pgs
    objects: 86 objects, 80 MiB
    usage:   421 MiB used, 450 GiB / 450 GiB avail
    pgs:     672 active+clean
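
With the keyring in place, the image can optionally be mapped by hand from one of the nodes as a quick sanity check before wiring it into a pod (a sketch; the prompt is illustrative and the image should be unmapped again afterwards):

# map, inspect, then unmap the test image
root@k8s-master-1:/etc/ceph# rbd map pop-rbd-pool1/pop-img-img1 --id defult-pop
root@k8s-master-1:/etc/ceph# lsblk | grep rbd
root@k8s-master-1:/etc/ceph# rbd unmap pop-rbd-pool1/pop-img-img1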

5.1.5 Mount RBD via a keyring

There are two ways to use Ceph RBD volumes in k8s: mount the RBD using the keyring file on the host, or store the key from the keyring as a k8s secret and let the pod mount the RBD through that secret.

root@k8s-ansible-client:~/yaml/20211010/04# cat busybox-keyring.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - image: busybox 
    command:
      - sleep
      - "3600"
    imagePullPolicy: Always 
    name: busybox
    #restartPolicy: Always
    volumeMounts:
    - name: rbd-data1
      mountPath: /data
  volumes:
    - name: rbd-data1
      rbd:
        monitors:
        - '10.10.0.62:6789'
        - '10.10.0.30:6789'
        - '10.10.0.190:6789'
        pool: pop-rbd-pool1
        image: pop-img-img1
        fsType: ext4
        readOnly: false
        user: defult-pop
        keyring: /etc/ceph/ceph.client.defult-pop.keyring

root@k8s-ansible-client:~/yaml/20211010/04# kubectl apply -f busybox-keyring.yaml 
pod/busybox created
root@k8s-ansible-client:~/yaml/20211010/04# kubectl get pods -o wide
NAME                         READY   STATUS    RESTARTS         AGE   IP              NODE             NOMINATED NODE   READINESS GATES
alpine-test                  1/1     Running   41 (5h2m ago)    23d   172.20.108.65   192.168.20.236   <none>           <none>
busybox                      1/1     Running   0                88s   172.20.213.12   192.168.20.253   <none>           <none>
kube100-site                 2/2     Running   0                9d    172.20.213.6    192.168.20.253   <none>           <none>
nginx-test-001               1/1     Running   17 (5h38m ago)   10d   172.20.191.10   192.168.20.147   <none>           <none>
nginx-test1                  1/1     Running   41 (5h11m ago)   23d   172.20.191.2    192.168.20.147   <none>           <none>
nginx-test2                  1/1     Running   41 (5h11m ago)   23d   172.20.213.3    192.168.20.253   <none>           <none>
nginx-test3                  1/1     Running   41 (5h11m ago)   23d   172.20.191.3    192.168.20.147   <none>           <none>
zookeeper1-cdbb7fbc-5pgdg    1/1     Running   1 (26h ago)      26h   172.20.191.27   192.168.20.147   <none>           <none>
zookeeper2-f4944446d-2xnjd   1/1     Running   0                26h   172.20.108.81   192.168.20.236   <none>           <none>
zookeeper3-589f6bc7-2mnz6    1/1     Running   0                26h   172.20.191.28   192.168.20.147   <none>           <none>
root@k8s-ansible-client:~/yaml/20211010/04# kubectl exec -it busybox sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # df -Th
Filesystem           Type            Size      Used Available Use% Mounted on
overlay              overlay        19.6G      8.2G     10.3G  44% /
tmpfs                tmpfs          64.0M         0     64.0M   0% /dev
tmpfs                tmpfs           1.9G         0      1.9G   0% /sys/fs/cgroup
/dev/rbd0            ext4            2.9G      9.0M      2.9G   0% /data
/dev/mapper/ubuntu--vg-ubuntu--lv
                     ext4           19.6G      8.2G     10.3G  44% /dev/termination-log
/dev/mapper/ubuntu--vg-ubuntu--lv
                     ext4           19.6G      8.2G     10.3G  44% /etc/resolv.conf
/dev/mapper/ubuntu--vg-ubuntu--lv
                     ext4           19.6G      8.2G     10.3G  44% /etc/hostname
/dev/mapper/ubuntu--vg-ubuntu--lv
                     ext4           19.6G      8.2G     10.3G  44% /etc/hosts
shm                  tmpfs          64.0M         0     64.0M   0% /dev/shm
tmpfs                tmpfs           3.2G     12.0K      3.2G   0% /var/run/secrets/kubernetes.io/serviceaccount
tmpfs                tmpfs           1.9G         0      1.9G   0% /proc/acpi
tmpfs                tmpfs          64.0M         0     64.0M   0% /proc/kcore
tmpfs                tmpfs          64.0M         0     64.0M   0% /proc/keys
tmpfs                tmpfs          64.0M         0     64.0M   0% /proc/timer_list
tmpfs                tmpfs          64.0M         0     64.0M   0% /proc/sched_debug
tmpfs                tmpfs           1.9G         0      1.9G   0% /proc/scsi
tmpfs                tmpfs           1.9G         0      1.9G   0% /sys/firmware
/ # 

The RBD is mapped through the Linux kernel on the node, so the mapping can be inspected there with the rbd showmapped command.

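For example, on the node where the busybox pod landed (192.168.20.253 in the output above), something along these lines should show the mapping created by the kubelet (a sketch; output omitted):

# run on the node hosting the busybox pod
rbd showmapped
mount | grep rbd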

5.1.6 Mount RBD via a secret

# Write the secret manifest
root@k8s-ansible-client:~/yaml/20211010/04# cat secret-client-pop.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-defult-pop
type: "kubernetes.io/rbd"
data:
  key: QVFEUmtHMWg2NmZmQnhBQXdvTi9rM0FpNVVoYVNJTnR2L2ZWWnc9PQo=

# The key value is generated by base64-encoding the key from the ceph.client.defult-pop.keyring file
cephadm@ceph-deploy:~/ceph-cluster$ cat ceph.client.defult-pop.keyring 
[client.defult-pop]
    key = AQDRkG1h66ffBxAAwoN/k3Ai5UhaSINtv/fVZw==
    caps mon = "allow r"
    caps osd = "allow * pool=pop-rbd-pool1"
cephadm@ceph-deploy:~/ceph-cluster$ echo AQDRkG1h66ffBxAAwoN/k3Ai5UhaSINtv/fVZw== | base64
QVFEUmtHMWg2NmZmQnhBQXdvTi9rM0FpNVVoYVNJTnR2L2ZWWnc9PQo=
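
Note that echo without -n also encodes the trailing newline, which is why kubectl describe below reports the key as 41 bytes (the 40-character key plus a newline). It worked in this setup, but encoding only the key itself is safer:

cephadm@ceph-deploy:~/ceph-cluster$ echo -n AQDRkG1h66ffBxAAwoN/k3Ai5UhaSINtv/fVZw== | base64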

root@k8s-ansible-client:~/yaml/20211010/04# kubectl get secret
NAME                     TYPE                                  DATA   AGE
ceph-secret-defult-pop   kubernetes.io/rbd                     1      9s
default-token-6vzjr      kubernetes.io/service-account-token   3      24d
root@k8s-ansible-client:~/yaml/20211010/04# kubectl describe secret
Name:         ceph-secret-defult-pop
Namespace:    default
Labels:       <none>
Annotations:  <none>

Type:  kubernetes.io/rbd

Data
====
key:  41 bytes


Name:         default-token-6vzjr
Namespace:    default
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: default
              kubernetes.io/service-account.uid: 4c268866-16d3-4c67-8074-f92b12b3e2b7

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1350 bytes
namespace:  7 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IlA3YjBkSVE3QWlJdzRNOVlfcGpHWWI3dTU3OUhtczZTVGJldk91TS1pejQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImRlZmF1bHQtdG9rZW4tNnZ6anIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGVmYXVsdCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjRjMjY4ODY2LTE2ZDMtNGM2Ny04MDc0LWY5MmIxMmIzZTJiNyIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZWZhdWx0OmRlZmF1bHQifQ.R5nsdT9MpS8NZqYUr3ue5pG66ydEK52-WGqbUvl5u_Ao9FHPdrjL3e4T-qycmy9R-rDspB1Lyl16fVvaAw91esHcjcGKWKgsdW46M5xNr6RW7GbdOfJRlgQr1ovlMft66PkXtk9GvVOBSW6zlTfjyg9-V94ArUPrACaIw08eG4IylEG082SXs9YU9yNLkGKj9sCoQif2SM2Y8qfKFJ-oIXhE2BKvO3zgUKA5HYik7avN5lDf1MIEiDcu3ROZevkj2H6KGCRkVNEISoUM7oT64dQkToMJOltk3SiATbx__JAbFS6pX8yTNrnZ3NuynrvfzC-v-eIIoIhbO0QlRWl68g

nginx mounts the RBD through the secret

root@k8s-ansible-client:~/yaml/20211010/04# cat nginx-secret.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80

        volumeMounts:
        - name: rbd-data1
          mountPath: /data
      volumes:
        - name: rbd-data1
          rbd:
            monitors:
            - '10.10.0.62:6789'
            - '10.10.0.30:6789'
            - '10.10.0.190:6789'
            pool: pop-rbd-pool1
            image: pop-img-img1
            fsType: ext4
            readOnly: false
            user: defult-pop
            secretRef:
              name: ceph-secret-defult-pop

root@k8s-ansible-client:~/yaml/20211010/04# kubectl apply -f nginx-secret.yaml 
deployment.apps/nginx-deployment created
root@k8s-ansible-client:~/yaml/20211010/04# kubectl get pods,deploy
NAME                                    READY   STATUS    RESTARTS         AGE
pod/alpine-test                         1/1     Running   41 (5h23m ago)   23d
pod/kube100-site                        2/2     Running   0                9d
pod/nginx-deployment-66489c5879-j6vdr   1/1     Running   0                11s
pod/nginx-test-001                      1/1     Running   17 (6h ago)      10d
pod/nginx-test1                         1/1     Running   41 (5h33m ago)   23d
pod/nginx-test2                         1/1     Running   41 (5h33m ago)   23d
pod/nginx-test3                         1/1     Running   41 (5h33m ago)   23d
pod/zookeeper1-cdbb7fbc-5pgdg           1/1     Running   1 (26h ago)      26h
pod/zookeeper2-f4944446d-2xnjd          1/1     Running   0                26h
pod/zookeeper3-589f6bc7-2mnz6           1/1     Running   0                26h

NAME                               READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/nginx-deployment   1/1     1            1           11s
deployment.apps/zookeeper1         1/1     1            1           26h
deployment.apps/zookeeper2         1/1     1            1           26h
deployment.apps/zookeeper3         1/1     1            1           26h

Verify

root@k8s-ansible-client:~/yaml/20211010/04# kubectl exec -it pod/nginx-deployment-66489c5879-j6vdr base
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
OCI runtime exec failed: exec failed: container_linux.go:349: starting container process caused "exec: \"base\": executable file not found in $PATH": unknown
command terminated with exit code 126
root@k8s-ansible-client:~/yaml/20211010/04# kubectl exec -it pod/nginx-deployment-66489c5879-j6vdr bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-deployment-66489c5879-j6vdr:/# ls
bin  boot  data  dev  docker-entrypoint.d  docker-entrypoint.sh  etc  home  lib  lib64  media  mnt  opt  proc  root  run  sbin  srv  sys  tmp  usr  var
root@nginx-deployment-66489c5879-j6vdr:/# df -Th
Filesystem                        Type     Size  Used Avail Use% Mounted on
overlay                           overlay   20G  8.3G   11G  45% /
tmpfs                             tmpfs     64M     0   64M   0% /dev
tmpfs                             tmpfs    2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/rbd0                         ext4     2.9G  9.0M  2.9G   1% /data
/dev/mapper/ubuntu--vg-ubuntu--lv ext4      20G  8.3G   11G  45% /etc/hosts
shm                               tmpfs     64M     0   64M   0% /dev/shm
tmpfs                             tmpfs    3.2G   12K  3.2G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                             tmpfs    2.0G     0  2.0G   0% /proc/acpi
tmpfs                             tmpfs    2.0G     0  2.0G   0% /proc/scsi
tmpfs                             tmpfs    2.0G     0  2.0G   0% /sys/firmware
root@nginx-deployment-66489c5879-j6vdr:/# cd /data/
root@nginx-deployment-66489c5879-j6vdr:/data# echo "11232" >> test.txt
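
Since the data is written to the RBD image rather than the container's writable layer, it should survive a pod restart. A quick check (a sketch; the new pod name will differ):

root@k8s-ansible-client:~/yaml/20211010/04# kubectl delete pod nginx-deployment-66489c5879-j6vdr
root@k8s-ansible-client:~/yaml/20211010/04# kubectl get pods -l app=ng-deploy-80
root@k8s-ansible-client:~/yaml/20211010/04# kubectl exec deploy/nginx-deployment -- cat /data/test.txt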

5.1.7 Dynamically provision PVs with a StorageClass

Create a k8s secret from the Ceph admin account

cephadm@ceph-deploy:~/ceph-cluster$ cat ceph.client.admin.keyring 
[client.admin]
    key = AQBylWVhhc28KhAA5RU3J89wwaVv1c6FLZDcsg==
    caps mds = "allow *"
    caps mgr = "allow *"
    caps mon = "allow *"
    caps osd = "allow *"
cephadm@ceph-deploy:~/ceph-cluster$ echo AQBylWVhhc28KhAA5RU3J89wwaVv1c6FLZDcsg== | base64
QVFCeWxXVmhoYzI4S2hBQTVSVTNKODl3d2FWdjFjNkZMWkRjc2c9PQo=

# Create the secret
root@k8s-ansible-client:~/yaml/20211010/04# cat secret-admin.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-admin
type: "kubernetes.io/rbd"
data:
  key: QVFCeWxXVmhoYzI4S2hBQTVSVTNKODl3d2FWdjFjNkZMWkRjc2c9PQo=
root@k8s-ansible-client:~/yaml/20211010/04# kubectl apply -f secret-admin.yaml
secret/ceph-secret-admin created
root@k8s-ansible-client:~/yaml/20211010/04# kubectl get secret
NAME                  TYPE                                  DATA   AGE
ceph-secret-admin     kubernetes.io/rbd                     1      16s
default-token-6vzjr   kubernetes.io/service-account-token   3      24d

The StorageClass ties into Ceph through the secrets

root@k8s-ansible-client:~/yaml/20211010/04# cat ceph-storage-class.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-storage-class-pop
  annotations:
    storageclass.kubernetes.io/is-default-class: "true" # set as the default storage class
provisioner: kubernetes.io/rbd
parameters:
  monitors: 10.10.0.62:6789,10.10.0.30:6789,10.10.0.190:6789
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: default 
  pool: pop-rbd-pool1
  userId: defult-pop
  userSecretName: ceph-secret-defult-pop

root@k8s-ansible-client:~/yaml/20211010/04# kubectl apply -f ceph-storage-class.yaml 
storageclass.storage.k8s.io/ceph-storage-class-pop created
root@k8s-ansible-client:~/yaml/20211010/04# kubectl get storageclass
NAME                               PROVISIONER         RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
ceph-storage-class-pop (default)   kubernetes.io/rbd   Delete          Immediate           false                  13s

Use the StorageClass to dynamically provision a PV as persistent storage for MySQL

# Create a PVC for MySQL
root@k8s-ansible-client:~/yaml/20211010/04# cat mysql-pvc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ceph-storage-class-pop
  resources:
    requests:
      storage: '5Gi'

root@k8s-ansible-client:~/yaml/20211010/04# kubectl apply -f mysql-pvc.yaml 
persistentvolumeclaim/mysql-data-pvc created
root@k8s-ansible-client:~/yaml/20211010/04# kubectl get pvc
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS             AGE
mysql-data-pvc   Bound    pvc-82b6546c-0eef-4204-92d3-1a556dd8e835   5Gi        RWO            ceph-storage-class-pop   6s
zk-pop-pvc-1     Bound    zk-pop-pv-1                                3Gi        RWO                                     27h
zk-pop-pvc-2     Bound    zk-pop-pv-2                                3Gi        RWO                                     27h
zk-pop-pvc-3     Bound    zk-pop-pv-3                                3Gi        RWO                                     27h
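
Behind the scenes, the kubernetes.io/rbd provisioner creates a new image in the pool for each PVC (typically named kubernetes-dynamic-pvc-<uuid>); this can be checked from the deploy node (a sketch, output omitted):

# a new image should appear next to pop-img-img1
cephadm@ceph-deploy:~/ceph-cluster$ sudo rbd ls --pool pop-rbd-pool1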

# Start MySQL
root@k8s-ansible-client:~/yaml/20211010/04# cat mysql-single.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - image: harbor.openscp.com/base/mysql:5.6.46
        name: mysql
        env:
          # Use secret in real usage
        - name: MYSQL_ROOT_PASSWORD
          value: pop123456
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-persistent-storage
          mountPath: /var/lib/mysql
      volumes:
      - name: mysql-persistent-storage
        persistentVolumeClaim:
          claimName: mysql-data-pvc 


---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: mysql-service-label 
  name: mysql-service
spec:
  type: NodePort
  ports:
  - name: http
    port: 3306
    protocol: TCP
    targetPort: 3306
    nodePort: 31306
  selector:
    app: mysql

root@k8s-ansible-client:~/yaml/20211010/04# kubectl apply -f mysql-single.yaml 
deployment.apps/mysql created
service/mysql-service created
root@k8s-ansible-client:~/yaml/20211010/04# kubectl get pods,deploy
NAME                             READY   STATUS    RESTARTS         AGE
pod/alpine-test                  1/1     Running   41 (6h7m ago)    23d
pod/kube100-site                 2/2     Running   0                9d
pod/mysql-555747bdd-8ktbh        1/1     Running   0                82s
pod/nginx-test-001               1/1     Running   17 (6h44m ago)   10d
pod/nginx-test1                  1/1     Running   41 (6h17m ago)   23d
pod/nginx-test2                  1/1     Running   41 (6h16m ago)   23d
pod/nginx-test3                  1/1     Running   41 (6h17m ago)   23d
pod/zookeeper1-cdbb7fbc-5pgdg    1/1     Running   1 (27h ago)      27h
pod/zookeeper2-f4944446d-2xnjd   1/1     Running   0                27h
pod/zookeeper3-589f6bc7-2mnz6    1/1     Running   0                27h

NAME                         READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/mysql        1/1     1            1           82s
deployment.apps/zookeeper1   1/1     1            1           27h
deployment.apps/zookeeper2   1/1     1            1           27h
deployment.apps/zookeeper3   1/1     1            1           27h
root@k8s-ansible-client:~/yaml/20211010/04# kubectl get svc
NAME            TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                                        AGE
kubernetes      ClusterIP   10.68.0.1       <none>        443/TCP                                        24d
mysql-service   NodePort    10.68.108.194   <none>        3306:31306/TCP                                 15s
zookeeper1      NodePort    10.68.42.189    <none>        2181:32181/TCP,2888:30923/TCP,3888:30168/TCP   27h
zookeeper2      NodePort    10.68.78.146    <none>        2181:32182/TCP,2888:31745/TCP,3888:30901/TCP   27h
zookeeper3      NodePort    10.68.199.44    <none>        2181:32183/TCP,2888:32488/TCP,3888:31621/TCP   27h

# Verify
root@k8s-ansible-client:~/yaml/20211010/04# kubectl get pv,pvc
NAME                                                        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                    STORAGECLASS             REASON   AGE
persistentvolume/pvc-82b6546c-0eef-4204-92d3-1a556dd8e835   5Gi        RWO            Delete           Bound    default/mysql-data-pvc   ceph-storage-class-pop            15m
persistentvolume/zk-pop-pv-1                                3Gi        RWO            Retain           Bound    default/zk-pop-pvc-1                                       27h
persistentvolume/zk-pop-pv-2                                3Gi        RWO            Retain           Bound    default/zk-pop-pvc-2                                       27h
persistentvolume/zk-pop-pv-3                                3Gi        RWO            Retain           Bound    default/zk-pop-pvc-3                                       27h

NAME                                   STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS             AGE
persistentvolumeclaim/mysql-data-pvc   Bound    pvc-82b6546c-0eef-4204-92d3-1a556dd8e835   5Gi        RWO            ceph-storage-class-pop   15m
persistentvolumeclaim/zk-pop-pvc-1     Bound    zk-pop-pv-1                                3Gi        RWO                                     27h
persistentvolumeclaim/zk-pop-pvc-2     Bound    zk-pop-pv-2                                3Gi        RWO                                     27h
persistentvolumeclaim/zk-pop-pvc-3     Bound    zk-pop-pv-3                                3Gi        RWO                                     27h


root@k8s-ansible-client:~/yaml/20211010/04# kubectl exec -it pod/mysql-555747bdd-8ktbh /bin/bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@mysql-555747bdd-8ktbh:/# mysql -uroot -ppop123456
Warning: Using a password on the command line interface can be insecure.
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 2
Server version: 5.6.46 MySQL Community Server (GPL)

Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql>
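
To confirm the data really lands on the RBD-backed volume, create something, delete the pod, and check again once the Deployment has recreated it (a sketch; the pod name changes after the restart):

mysql> create database pop_test;
mysql> exit
root@mysql-555747bdd-8ktbh:/# exit
root@k8s-ansible-client:~/yaml/20211010/04# kubectl delete pod mysql-555747bdd-8ktbh
root@k8s-ansible-client:~/yaml/20211010/04# kubectl exec -it deploy/mysql -- mysql -uroot -ppop123456 -e "show databases;"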

5.2 Using CephFS

root@k8s-ansible-client:~/yaml/20211010/04# cat nginx-cephfs.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80

        volumeMounts:
        - name: pop-staticdata-cephfs 
          mountPath: /usr/share/nginx/html/ 
      volumes:
        - name: pop-staticdata-cephfs
          cephfs:
            monitors:
            - '10.10.0.62:6789'
            - '10.10.0.30:6789'
            - '10.10.0.190:6789'
            path: /
            user: admin
            secretRef:
              name: ceph-secret-admin

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: nginx-cephfs-service-label
  name: nginx-cephfs
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 31080
  selector:
    app: ng-deploy-80


# Verify that CephFS is already deployed on the Ceph cluster
cephadm@ceph-deploy:~/ceph-cluster$ sudo ceph osd pool ls
device_health_metrics
popool
poprbd1
popcephfsmetadata
popcephfsdata
pop-rbd-pool1
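
The pools alone do not show whether the filesystem is usable; the CephFS itself and its MDS daemons can be checked as well (a sketch, output omitted):

cephadm@ceph-deploy:~/ceph-cluster$ sudo ceph fs ls
cephadm@ceph-deploy:~/ceph-cluster$ sudo ceph mds stat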

# The admin keyring file must be copied to the node hosts in advance
root@k8s-ansible-client:~/yaml/20211010/04# kubectl apply -f nginx-cephfs.yaml 
deployment.apps/nginx-deployment created
service/nginx-cephfs created
root@k8s-ansible-client:~/yaml/20211010/04# kubectl get pods,deploy
NAME                                    READY   STATUS    RESTARTS       AGE
pod/alpine-test                         1/1     Running   43 (21m ago)   24d
pod/kube100-site                        2/2     Running   0              9d
pod/nginx-deployment-78679d6df9-626qq   1/1     Running   0              4s
pod/nginx-deployment-78679d6df9-8sdk9   1/1     Running   0              4s
pod/nginx-deployment-78679d6df9-j7bcd   1/1     Running   0              4s
pod/nginx-test-001                      1/1     Running   19 (58m ago)   11d
pod/nginx-test1                         1/1     Running   43 (31m ago)   24d
pod/nginx-test2                         1/1     Running   43 (30m ago)   24d
pod/nginx-test3                         1/1     Running   43 (31m ago)   24d
pod/zookeeper1-cdbb7fbc-5pgdg           1/1     Running   1 (2d1h ago)   2d1h
pod/zookeeper2-f4944446d-2xnjd          1/1     Running   0              2d1h
pod/zookeeper3-589f6bc7-2mnz6           1/1     Running   0              2d1h

NAME                               READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/nginx-deployment   3/3     3            3           5s
deployment.apps/zookeeper1         1/1     1            1           2d1h
deployment.apps/zookeeper2         1/1     1            1           2d1h
deployment.apps/zookeeper3         1/1     1            1           2d1h

root@k8s-ansible-client:~/yaml/20211010/04# kubectl get svc
NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                                        AGE
kubernetes     ClusterIP   10.68.0.1       <none>        443/TCP                                        25d
nginx-cephfs   NodePort    10.68.150.199   <none>        80:31080/TCP                                   9s
zookeeper1     NodePort    10.68.42.189    <none>        2181:32181/TCP,2888:30923/TCP,3888:30168/TCP   2d1h
zookeeper2     NodePort    10.68.78.146    <none>        2181:32182/TCP,2888:31745/TCP,3888:30901/TCP   2d1h
zookeeper3     NodePort    10.68.199.44    <none>        2181:32183/TCP,2888:32488/TCP,3888:31621/TCP   2d1h

Verify

root@k8s-ansible-client:~/yaml/20211010/04# kubectl exec -it pod/nginx-deployment-78679d6df9-j7bcd bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-deployment-78679d6df9-j7bcd:/# df -Th
Filesystem                                         Type     Size  Used Avail Use% Mounted on
overlay                                            overlay   20G  8.6G   11G  46% /
tmpfs                                              tmpfs     64M     0   64M   0% /dev
tmpfs                                              tmpfs    2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/mapper/ubuntu--vg-ubuntu--lv                  ext4      20G  8.6G   11G  46% /etc/hosts
shm                                                tmpfs     64M     0   64M   0% /dev/shm
10.10.0.62:6789,10.10.0.30:6789,10.10.0.190:6789:/ ceph     143G     0  143G   0% /usr/share/nginx/html
tmpfs                                              tmpfs    3.2G   12K  3.2G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                                              tmpfs    2.0G     0  2.0G   0% /proc/acpi
tmpfs                                              tmpfs    2.0G     0  2.0G   0% /proc/scsi
tmpfs                                              tmpfs    2.0G     0  2.0G   0% /sys/firmware
root@nginx-deployment-78679d6df9-j7bcd:/usr/share/nginx/html# echo "cephfs is pop" > index.html

Refresh the browser a few times; the result is the same every time.


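The same thing can be checked from the command line through the NodePort service; any node IP works, since every replica serves the same CephFS-backed directory (a sketch):

# 192.168.20.253 is one of the node IPs used earlier; expected response: cephfs is pop
root@k8s-ansible-client:~# curl http://192.168.20.253:31080/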

6. k8s readiness and liveness probes

6.1 Purpose and differences

liveness probe: determines when to restart a container. For example, if an application is running but cannot make any further progress, the liveness probe catches the deadlock and restarts the container, so the application can keep running despite the bug.
readiness probe: determines whether a container is ready to accept traffic. The kubelet considers a pod ready only when all of its containers are ready. This signal controls which pods are used as backends for a service: pods that are not ready are removed from the service's load balancer.


6.2 Liveness probes

6.2.1 Define a liveness command

Many long-running applications eventually transition into a broken state from which they cannot recover without a restart. Kubernetes provides liveness probes to detect and remedy this situation.

In this exercise we create a pod that runs a single container based on the busybox image. Here is the pod's configuration file, exec-liveness.yaml:

root@k8s-ansible-client:~/yaml/20211010/05# cat exec-liveness.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    test: liveness
  name: liveness-exec
spec:
  containers:
  - name: liveness
    image: busybox
    args:
    - /bin/sh
    - -c
    - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
    livenessProbe:
      exec:
        command:
        - cat
        - /tmp/healthy
      initialDelaySeconds: 5
      periodSeconds: 5

In this configuration file the pod has a single container. The periodSeconds field tells the kubelet to run the liveness probe every 5 seconds, and initialDelaySeconds tells it to wait 5 seconds before the first probe. The kubelet executes cat /tmp/healthy inside the container to perform the probe: if the command succeeds and returns 0, the kubelet considers the container healthy; if it returns a non-zero value, the kubelet kills the container and restarts it.

When the container starts, it runs this command:

/bin/sh -c "touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600"

During the first 30 seconds of the container's life the /tmp/healthy file exists, so cat /tmp/healthy returns a success code. After 30 seconds, cat /tmp/healthy returns a failure code.
Create the pod:

root@k8s-ansible-client:~/yaml/20211010/05# kubectl apply -f exec-liveness.yaml 
pod/liveness-exec created

Verify

root@k8s-ansible-client:~/yaml/20211010/05# kubectl describe pod liveness-exec
...
Events:
  Type     Reason     Age                    From               Message
  ----     ------     ----                   ----               -------
  Normal   Scheduled  5m56s                  default-scheduler  Successfully assigned default/liveness-exec to 192.168.20.253
  Normal   Pulled     5m55s                  kubelet            Successfully pulled image "busybox" in 506.529221ms
  Normal   Pulled     4m41s                  kubelet            Successfully pulled image "busybox" in 539.024238ms
  Normal   Created    3m26s (x3 over 5m55s)  kubelet            Created container liveness
  Normal   Pulled     3m26s                  kubelet            Successfully pulled image "busybox" in 525.35563ms
  Warning  Unhealthy  2m42s (x9 over 5m22s)  kubelet            Liveness probe failed: cat: can't open '/tmp/healthy': No such file or directory
  Normal   Killing    2m42s (x3 over 5m12s)  kubelet            Container liveness failed liveness probe, will be restarted
  Normal   Pulling    2m12s (x4 over 5m56s)  kubelet            Pulling image "busybox"
  Normal   Started    56s (x5 over 5m55s)    kubelet            Started container liveness

# The output shows the RESTARTS count has climbed to 4
root@k8s-ansible-client:~/yaml/20211010/05# kubectl get pods
NAME                         READY   STATUS    RESTARTS       AGE
alpine-test                  1/1     Running   43 (58m ago)   24d
kube100-site                 2/2     Running   0              10d
liveness-exec                1/1     Running   4 (75s ago)    6m15s
nginx-test-001               1/1     Running   19 (95m ago)   11d
nginx-test1                  1/1     Running   43 (68m ago)   24d
nginx-test2                  1/1     Running   43 (68m ago)   24d
nginx-test3                  1/1     Running   43 (68m ago)   24d
zookeeper1-cdbb7fbc-5pgdg    1/1     Running   1 (2d2h ago)   2d2h
zookeeper2-f4944446d-2xnjd   1/1     Running   0              2d2h
zookeeper3-589f6bc7-2mnz6    1/1     Running   0              2d2h

After the fifth restart the pod goes into CrashLoopBackOff:

root@k8s-ansible-client:~/yaml/20211010/05# kubectl get pods
NAME                         READY   STATUS             RESTARTS       AGE
alpine-test                  1/1     Running            43 (60m ago)   24d
kube100-site                 2/2     Running            0              10d
liveness-exec                0/1     CrashLoopBackOff   5 (32s ago)    8m2s
nginx-test-001               1/1     Running            19 (96m ago)   11d
nginx-test1                  1/1     Running            43 (69m ago)   24d
nginx-test2                  1/1     Running            43 (69m ago)   24d
nginx-test3                  1/1     Running            43 (69m ago)   24d
zookeeper1-cdbb7fbc-5pgdg    1/1     Running            1 (2d2h ago)   2d2h
zookeeper2-f4944446d-2xnjd   1/1     Running            0              2d2h
zookeeper3-589f6bc7-2mnz6    1/1     Running            0              2d2h

6.2.2 Define a liveness HTTP request

An HTTP GET request can also be used as the liveness probe. Below is http-liveness.yaml, a pod that runs one container based on the liveness image.

root@k8s-ansible-client:~/yaml/20211010/05# cat http-liveness.yaml 
apiVersion: v1
kind: Pod
metadata:
  labels:
    test: liveness
  name: liveness-http
spec:
  containers:
  - name: liveness
    image: harbor.openscp.com/base/liveness:latest
    args:
    - /server
    livenessProbe:
      httpGet:
        path: /healthz
        port: 8080
        httpHeaders:
        - name: X-Custom-Header
          value: Awesome
      initialDelaySeconds: 3
      periodSeconds: 3

The configuration file defines a single container. livenessProbe tells the kubelet to run the probe every 3 seconds, and initialDelaySeconds tells it to wait 3 seconds before the first probe. The probe sends an HTTP GET request to port 8080 of the server in the container. If the handler for the server's /healthz path returns a success code, the kubelet considers the container alive and healthy; if it returns a failure code, the kubelet kills the container and restarts it.

Any return code greater than or equal to 200 and less than 400 is treated as success; every other code is treated as failure.

Three seconds after the container starts, the kubelet begins the health checks. The first checks succeed, but after 10 seconds the checks start to fail, and the kubelet kills and restarts the container.

Create a pod to test the HTTP liveness check:

root@k8s-ansible-client:~/yaml/20211010/05# kubectl apply -f http-liveness.yaml 
pod/liveness-http created

After 10 seconds, look at the pod events to confirm that the liveness probe failed and the container was restarted.

root@k8s-ansible-client:~/yaml/20211010/05# kubectl describe pod liveness-http
...
Events:
  Type     Reason     Age               From               Message
  ----     ------     ----              ----               -------
  Normal   Scheduled  41s               default-scheduler  Successfully assigned default/liveness-http to 192.168.20.253
  Normal   Pulled     40s               kubelet            Successfully pulled image "harbor.openscp.com/base/liveness:latest" in 367.087687ms
  Normal   Pulled     23s               kubelet            Successfully pulled image "harbor.openscp.com/base/liveness:latest" in 60.759744ms
  Normal   Created    5s (x3 over 40s)  kubelet            Created container liveness
  Normal   Started    5s (x3 over 40s)  kubelet            Started container liveness
  Warning  Unhealthy  5s (x6 over 29s)  kubelet            Liveness probe failed: HTTP probe failed with statuscode: 500
  Normal   Killing    5s (x2 over 23s)  kubelet            Container liveness failed liveness probe, will be restarted
  Normal   Pulling    5s (x3 over 41s)  kubelet            Pulling image "harbor.openscp.com/base/liveness:latest"
  Normal   Pulled     5s                kubelet            Successfully pulled image "harbor.openscp.com/base/liveness:latest" in 72.061843ms

After failing the checks three times, the pod is stopped (CrashLoopBackOff):

root@k8s-ansible-client:~/yaml/20211010/05# kubectl get pods
NAME                         READY   STATUS             RESTARTS        AGE
alpine-test                  1/1     Running            43 (71m ago)    24d
kube100-site                 2/2     Running            0               10d
liveness-http                0/1     CrashLoopBackOff   3 (16s ago)     88s
nginx-test-001               1/1     Running            19 (107m ago)   11d
nginx-test1                  1/1     Running            43 (81m ago)    24d
nginx-test2                  1/1     Running            43 (80m ago)    24d
nginx-test3                  1/1     Running            43 (80m ago)    24d
zookeeper1-cdbb7fbc-5pgdg    1/1     Running            1 (2d2h ago)    2d2h
zookeeper2-f4944446d-2xnjd   1/1     Running            0               2d2h
zookeeper3-589f6bc7-2mnz6    1/1     Running            0               2d2h

6.2.3 Define a liveness TCP probe

A third kind of liveness probe uses a TCP socket. With this configuration the kubelet attempts to open a socket to the container on the specified port. If the connection can be established the container is considered healthy; if not, it is considered failed.

root@k8s-ansible-client:~/yaml/20211010/05# cat tcp-liveness.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  containers:
  - name: nginx
    image: harbor.openscp.com/base/nginx:latest
    ports:
    - containerPort: 8080
    readinessProbe:
      tcpSocket:
        port: 8080
      initialDelaySeconds: 5
      periodSeconds: 10
    livenessProbe:
      tcpSocket:
        port: 8080
      initialDelaySeconds: 15
      periodSeconds: 20

The TCP check configuration is very similar to the HTTP check. This example uses both a readiness and a liveness probe. Five seconds after the container starts, the kubelet sends the first readiness probe, which attempts to connect to the nginx container on port 8080. If the probe succeeds, the pod is marked ready; the kubelet then repeats the check every 10 seconds.

In addition to the readiness probe, the configuration includes a liveness probe. Fifteen seconds after the container starts, the kubelet runs the first liveness probe. Just like the readiness probe, it tries to connect to port 8080 of the nginx container. If the liveness probe fails, the container is restarted.

root@k8s-ansible-client:~/yaml/20211010/05# kubectl apply -f tcp-liveness.yaml 
pod/nginx created

After 15 seconds, check the pod events to observe the liveness probe (this nginx image actually listens on port 80, so the probes against 8080 fail, as the events show):

root@k8s-ansible-client:~/yaml/20211010/05# kubectl describe pod nginx
...
Events:
  Type     Reason     Age                From               Message
  ----     ------     ----               ----               -------
  Normal   Scheduled  70s                default-scheduler  Successfully assigned default/nginx to 192.168.20.253
  Normal   Pulled     70s                kubelet            Successfully pulled image "harbor.openscp.com/base/nginx:latest" in 82.158332ms
  Normal   Pulling    11s (x2 over 70s)  kubelet            Pulling image "harbor.openscp.com/base/nginx:latest"
  Normal   Created    11s (x2 over 70s)  kubelet            Created container nginx
  Normal   Started    11s (x2 over 70s)  kubelet            Started container nginx
  Warning  Unhealthy  11s (x3 over 51s)  kubelet            Liveness probe failed: dial tcp 172.20.213.20:8080: connect: connection refused
  Normal   Killing    11s                kubelet            Container nginx failed liveness probe, will be restarted
  Normal   Pulled     11s                kubelet            Successfully pulled image "harbor.openscp.com/base/nginx:latest" in 83.295218ms
  Warning  Unhealthy  1s (x8 over 61s)   kubelet            Readiness probe failed: dial tcp 172.20.213.20:8080: connect: connection refused

6.2.4 Use a named port

A named ContainerPort can be used for HTTP or TCP liveness checks:

ports:
- name: liveness-port
  containerPort: 8080
  hostPort: 8080

livenessProbe:
  httpGet:
    path: /healthz
    port: liveness-port
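
Putting the pieces together, a minimal pod using a named port for the probe might look like this (a hypothetical sketch reusing the nginx image from the earlier examples, which listens on port 80):

apiVersion: v1
kind: Pod
metadata:
  name: liveness-named-port
spec:
  containers:
  - name: nginx
    image: harbor.openscp.com/base/nginx:latest
    ports:
    - name: liveness-port
      containerPort: 80
    livenessProbe:
      httpGet:
        path: /
        port: liveness-port
      initialDelaySeconds: 5
      periodSeconds: 10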

6.4 Readiness probes

Sometimes an application is temporarily unable to serve external traffic, for example while it loads a large amount of data or configuration files during startup. In that case you don't want to kill the application, but you don't want to send it requests either. Kubernetes provides readiness probes to detect and mitigate these situations. A container in a pod can report that it is not yet ready and should not receive traffic from a Kubernetes service.

The readiness probe is configured just like the liveness probe; the only difference is that you use readinessProbe instead of livenessProbe.

readinessProbe:
  exec:
    command:
    - cat
    - /tmp/healthy
  initialDelaySeconds: 5
  periodSeconds: 5

HTTP and TCP readiness probes are configured in the same way as liveness probes.

Readiness and liveness probes can be used in parallel on the same container. Using both ensures that traffic never reaches a container that is not ready, and that the container is restarted when it fails.

6.5 Configure Probes

Probes offer a number of precise and detailed configuration options that let you control liveness and readiness checks exactly (a combined example follows the list):

  • initialDelaySeconds: number of seconds after the container has started before the first probe is performed.
  • periodSeconds: how often to perform the probe. Default is 10 seconds, minimum 1.
  • timeoutSeconds: probe timeout. Default is 1 second, minimum 1.
  • successThreshold: minimum number of consecutive successes, after a failure, for the probe to be considered successful. Default is 1; must be 1 for liveness; minimum value is 1.
  • failureThreshold: minimum number of consecutive failures, after a success, for the probe to be considered failed. Default is 3; minimum value is 1.
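
For example, a probe that tolerates slow responses and a few transient failures could be tuned like this (values are illustrative):

readinessProbe:
  httpGet:
    path: /healthz
    port: 8080
  initialDelaySeconds: 10
  periodSeconds: 5
  timeoutSeconds: 2
  successThreshold: 1
  failureThreshold: 5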

For HTTP probes, additional fields can be set on httpGet:

  • host: host name to connect to; defaults to the pod's IP. You probably want to set "Host" in httpHeaders instead of using this field.
  • scheme: scheme used for the connection; defaults to HTTP.
  • path: path on the HTTP server to access.
  • httpHeaders: custom headers for the request. HTTP allows repeated headers.
  • port: name or number of the container port to access. The number must be between 1 and 65535.

For an HTTP probe, the kubelet sends an HTTP request to the specified path and port to perform the check. The kubelet sends the probe to the container's IP address, unless that address is overridden by the optional host field in httpGet. In most cases you do not want to set the host field. There is one scenario where you would: if the container listens on 127.0.0.1 and the pod's hostNetwork field is true, then host under httpGet should be set to 127.0.0.1. If your pod relies on virtual hosts, which is probably the more common case, you should not use host; instead, set the Host header in httpHeaders.
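
For the virtual-host case, that looks roughly like this (the host name is hypothetical):

livenessProbe:
  httpGet:
    path: /healthz
    port: 8080
    httpHeaders:
    - name: Host
      value: myapp.example.com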
