7. Deploying Redis with NFS-Backed Persistence
7.1 Building the Redis Image
Configuration file and startup scripts:
root@k8s-ansible-client:~/yaml/20211016/dockerfile/redis# tree
.
├── build-command.sh
├── Dockerfile
├── redis-4.0.14.tar.gz
├── redis.conf
└── run_redis.sh
root@k8s-ansible-client:~/yaml/20211016/dockerfile/redis# grep -v "^#" redis.conf |grep -v "^$"
bind 0.0.0.0
protected-mode yes
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile ""
databases 16
always-show-logo yes
save 900 1
save 5 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error no
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /data/redis-data
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
requirepass 123456
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
slave-lazy-flush no
appendonly no
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble no
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
root@k8s-ansible-client:~/yaml/20211016/dockerfile/redis# cat run_redis.sh
#!/bin/bash
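# redis.conf sets daemonize yes, so redis-server forks to the background;
# tail -f keeps PID 1 alive so the container does not exit.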
/usr/sbin/redis-server /usr/local/redis/redis.conf
tail -f /etc/hosts
root@k8s-ansible-client:~/yaml/20211016/dockerfile/redis# cat build-command.sh
#!/bin/bash
TAG=$1
docker build -t harbor.openscp.com/base/redis:${TAG} .
sleep 3
docker push harbor.openscp.com/base/redis:${TAG}
root@k8s-ansible-client:~/yaml/20211016/dockerfile/redis# cat Dockerfile
#Redis Image
FROM harbor.openscp.com/base/centos:centos7.9.2009
ADD redis-4.0.14.tar.gz /usr/local/src
RUN yum install -y vim wget tree lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop
RUN ln -sv /usr/local/src/redis-4.0.14 /usr/local/redis && cd /usr/local/redis && make && cp src/redis-cli /usr/sbin/ && cp src/redis-server /usr/sbin/ && mkdir -pv /data/redis-data
ADD redis.conf /usr/local/redis/redis.conf
ADD run_redis.sh /usr/local/redis/run_redis.sh
EXPOSE 6379
CMD ["/usr/local/redis/run_redis.sh"]
Build the image
root@k8s-ansible-client:~/yaml/20211016/dockerfile/redis# bash build-command.sh v1
...
Successfully tagged harbor.openscp.com/base/redis:v1
The push refers to repository [harbor.openscp.com/base/redis]
4b87fefa01f9: Pushed
7cf99aab4830: Pushed
498f7e20f28c: Pushed
2853d7e8868a: Pushed
4f43a69196ea: Pushed
174f56854903: Mounted from base/tomcat-app1
v1: digest: sha256:449ef47457b4161707f77f458e0e416be66f9f7fa7082d5afe71c982539d0936 size: 1581
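Before wiring the image into Kubernetes, a quick local smoke test confirms the server starts and accepts the password set in redis.conf (a minimal sketch; the container name redis-smoke is arbitrary):

docker run -d --name redis-smoke harbor.openscp.com/base/redis:v1
docker exec redis-smoke redis-cli -a 123456 ping   # expect: PONG
docker rm -f redis-smoke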
Create a PV
root@k8s-ansible-client:~/yaml/20211016/redis# cat redis-pv.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-datadir-pv-1
  namespace: pop
spec:
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteOnce
  nfs:
    path: /data/pop/redis-datadir-1
    server: 10.10.0.26
root@k8s-ansible-client:~/yaml/20211016/redis# kubectl apply -f redis-pv.yaml
persistentvolume/redis-datadir-pv-1 created
root@k8s-ansible-client:~/yaml/20211016/redis# kubectl get pv -n pop
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
redis-datadir-pv-1 5Gi RWO Retain Available 39s
Create a PVC
root@k8s-ansible-client:~/yaml/20211016/redis# cat redis-pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: redis-datadir-pvc-1
  namespace: pop
spec:
  volumeName: redis-datadir-pv-1
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
root@k8s-ansible-client:~/yaml/20211016/redis# kubectl apply -f redis-pvc.yaml
persistentvolumeclaim/redis-datadir-pvc-1 created
root@k8s-ansible-client:~/yaml/20211016/redis# kubectl get pvc -n pop
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
redis-datadir-pvc-1 Bound redis-datadir-pv-1 5Gi RWO 13s
7.2 Deploying Redis
The Redis Deployment and Service manifest
root@k8s-ansible-client:~/yaml/20211016/redis# cat redis.yaml
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: devops-redis
  name: deploy-devops-redis
  namespace: pop
spec:
  replicas: 1
  selector:
    matchLabels:
      app: devops-redis
  template:
    metadata:
      labels:
        app: devops-redis
    spec:
      containers:
      - name: redis-container
        image: harbor.openscp.com/base/redis:v1
        imagePullPolicy: Always
        volumeMounts:
        - mountPath: "/data/redis-data/"
          name: redis-datadir
      volumes:
      - name: redis-datadir
        persistentVolumeClaim:
          claimName: redis-datadir-pvc-1
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: devops-redis
  name: srv-devops-redis
  namespace: pop
spec:
  type: NodePort
  ports:
  - name: http
    port: 6379
    targetPort: 6379
    nodePort: 30379
  selector:
    app: devops-redis
  sessionAffinity: ClientIP
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 10800
Launch a Redis pod from the Deployment
root@k8s-ansible-client:~/yaml/20211016/redis# kubectl apply -f redis.yaml
deployment.apps/deploy-devops-redis created
service/srv-devops-redis created
root@k8s-ansible-client:~/yaml/20211016/redis# kubectl get pods,deploy -n pop
NAME READY STATUS RESTARTS AGE
pod/deploy-devops-redis-d9fd6594c-fvmmg 1/1 Running 0 44s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/deploy-devops-redis 1/1 1 1 44s
Verify
root@k8s-ansible-client:~/yaml/20211016/redis# kubectl exec -it pod/deploy-devops-redis-d9fd6594c-fvmmg bash -n pop
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
[root@deploy-devops-redis-d9fd6594c-fvmmg /]# df -Th
Filesystem Type Size Used Avail Use% Mounted on
overlay overlay 20G 9.9G 8.8G 53% /
tmpfs tmpfs 64M 0 64M 0% /dev
tmpfs tmpfs 2.0G 0 2.0G 0% /sys/fs/cgroup
/dev/mapper/ubuntu--vg-ubuntu--lv ext4 20G 9.9G 8.8G 53% /etc/hosts
10.10.0.26:/data/pop/redis-datadir-1 nfs4 20G 15G 4.2G 78% /data/redis-data
shm tmpfs 64M 0 64M 0% /dev/shm
tmpfs tmpfs 3.2G 12K 3.2G 1% /run/secrets/kubernetes.io/serviceaccount
tmpfs tmpfs 2.0G 0 2.0G 0% /proc/acpi
tmpfs tmpfs 2.0G 0 2.0G 0% /proc/scsi
tmpfs tmpfs 2.0G 0 2.0G 0% /sys/firmware
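Persistence can also be spot-checked end to end (a sketch; the node IP 192.168.20.253 is an assumption taken from the node list later in this post, and 30379 is the NodePort of the Service above):

redis-cli -h 192.168.20.253 -p 30379 -a 123456 set persist-test ok
redis-cli -h 192.168.20.253 -p 30379 -a 123456 bgsave
# on the NFS server, the RDB snapshot should now exist under the exported path
ls -l /data/pop/redis-datadir-1/dump.rdb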
8. Running MySQL with One Master and Multiple Slaves on a StatefulSet
8.1 StatefulSet Overview
A StatefulSet manages stateful services: it is a controller that gives each Pod a unique identity and guarantees ordered deployment and scaling.
Pod consistency: covers ordering (start/stop order) and network identity. This consistency belongs to the Pod itself, independent of which node the Pod is scheduled to.
Stable ordering: in a StatefulSet of N replicas, each Pod is assigned a unique ordinal in the range [0, N).
Stable network identity: a Pod's hostname follows the pattern (StatefulSet name)-(ordinal).
Stable storage: a PV is created for each Pod through volumeClaimTemplates; deleting Pods or scaling down does not delete the associated volumes.
If stateless services are likened to cattle or sheep, livestock that are eventually "sent off", then stateful services are like pets: pets are not "sent off" when the time comes, and people tend to care for them for life.
8.2 How StatefulSet Is Implemented
Like ReplicaSet and Deployment, StatefulSet is implemented as a controller. It is managed mainly by three cooperating components: StatefulSetController, StatefulSetControl, and StatefulPodControl. StatefulSetController receives add/update/delete events from both the PodInformer and the ReplicaSetInformer and pushes them onto a work queue.
In its Run method, StatefulSetController starts several goroutines that pull pending StatefulSet resources off the queue and synchronize them; this loop is what keeps StatefulSets in sync.
8.3 What Makes Up a StatefulSet
Headless Service: defines the Pods' network identity (DNS domain).
volumeClaimTemplates: a template for storage claims; given a PVC name and size, it creates a PVC automatically for each Pod, and the PVC must be provisioned by a storage class (or matched to pre-created PVs, as below).
StatefulSet: defines the application itself, e.g. a StatefulSet named nginx with three Pod replicas, where each Pod is deployed with its own stable domain name.
Why is a headless service needed?
With a Deployment, Pod names are random strings with no ordering, and any Pod can be replaced at will. In a StatefulSet, Pods must be ordered and individually addressable: a rebuilt Pod keeps the same name even though its IP changes, so Pods are identified by name rather than by IP, and that name must be a stable, persistent, unique identifier. The headless service provides exactly that: a unique, stable network name for each Pod.
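Concretely, each replica becomes resolvable at <pod>.<service>.<namespace>.svc.<cluster-domain> through the headless service. A quick check once the MySQL StatefulSet below is running (a sketch; pop.local is this cluster's domain, as seen in the connection addresses later in this post):

# run from any pod in the cluster
nslookup mysql-0.mysql.pop.svc.pop.local    # resolves to the mysql-0 pod IP
nslookup mysql.pop.svc.pop.local            # headless service: returns every pod IP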
Why is volumeClaimTemplates needed?
Stateful replica sets need persistent storage, and the defining property of a distributed system is that each node holds different data, so the nodes cannot share one storage volume; each node needs its own dedicated storage. A volume defined in a Deployment's Pod template is shared by all replicas, so the data would be the same everywhere, since every replica is stamped from the same template. In a StatefulSet every Pod needs its own dedicated volume, so the Pod template cannot be used to create the volumes. Instead, a StatefulSet uses volumeClaimTemplates, the storage claim template, which generates a distinct PVC for each Pod and binds it to a PV, giving every Pod dedicated storage. That is why volumeClaimTemplates exists.
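The effect is visible once the StatefulSet in 8.4 is running: volumeClaimTemplates stamps out one PVC per ordinal, named <template>-<statefulset>-<ordinal> (a sketch; the names match the data template and mysql StatefulSet defined below):

kubectl get pvc -n pop
# expect data-mysql-0, data-mysql-1, data-mysql-2, each Bound to its own PV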
8.4 Creating the MySQL Master/Slave Environment
Define the MySQL PVs
root@k8s-ansible-client:~/yaml/20211016/mysql# cat mysql-pv.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-1
  namespace: pop
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  nfs:
    path: /data/pop/mysql-datadir-1
    server: 10.10.0.26
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-2
  namespace: pop
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  nfs:
    path: /data/pop/mysql-datadir-2
    server: 10.10.0.26
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-3
  namespace: pop
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  nfs:
    path: /data/pop/mysql-datadir-3
    server: 10.10.0.26
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-4
  namespace: pop
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  nfs:
    path: /data/pop/mysql-datadir-4
    server: 10.10.0.26
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-5
  namespace: pop
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  nfs:
    path: /data/pop/mysql-datadir-5
    server: 10.10.0.26
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-6
  namespace: pop
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  nfs:
    path: /data/pop/mysql-datadir-6
    server: 10.10.0.26
Apply the YAML file
root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl apply -f mysql-pv.yaml
persistentvolume/mysql-datadir-1 created
persistentvolume/mysql-datadir-2 created
persistentvolume/mysql-datadir-3 created
persistentvolume/mysql-datadir-4 created
persistentvolume/mysql-datadir-5 created
persistentvolume/mysql-datadir-6 created
root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl get pv -n pop
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
mysql-datadir-1 10Gi RWO Retain Available 9s
mysql-datadir-2 10Gi RWO Retain Available 9s
mysql-datadir-3 10Gi RWO Retain Available 9s
mysql-datadir-4 10Gi RWO Retain Available 9s
mysql-datadir-5 10Gi RWO Retain Available 9s
mysql-datadir-6 10Gi RWO Retain Available 9s
Define the MySQL configuration in a ConfigMap
root@k8s-ansible-client:~/yaml/20211016/mysql# cat mysql-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql
  namespace: pop
  labels:
    app: mysql
data:
  master.cnf: |
    # Apply this config only on the master.
    [mysqld]
    log-bin
    log_bin_trust_function_creators=1
    lower_case_table_names=1
  slave.cnf: |
    # Apply this config only on slaves.
    [mysqld]
    super-read-only
    log_bin_trust_function_creators=1
Apply the YAML file
root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl apply -f mysql-configmap.yaml
configmap/mysql created
root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl get configMap -n pop
NAME DATA AGE
kube-root-ca.crt 1 22h
mysql 2 9s
Define the MySQL Services
root@k8s-ansible-client:~/yaml/20211016/mysql# cat mysql-svc.yaml
apiVersion: v1
kind: Service
metadata:
  namespace: pop
  name: mysql
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  clusterIP: None
  selector:
    app: mysql
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-read
  namespace: pop
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  selector:
    app: mysql
Apply the YAML file
root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl apply -f mysql-svc.yaml
service/mysql created
service/mysql-read created
root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl get svc -n pop
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
mysql ClusterIP None <none> 3306/TCP 8s
mysql-read ClusterIP 10.68.181.29 <none> 3306/TCP 8s
srv-devops-redis NodePort 10.68.251.172 <none> 6379:30379/TCP 22h
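The two Services split traffic: writes must go to the master through its stable per-Pod DNS name, while mysql-read load-balances reads across all replicas. A minimal sketch, run from any pod in the cluster (root has an empty password because the StatefulSet below sets MYSQL_ALLOW_EMPTY_PASSWORD):

mysql -h mysql-0.mysql.pop -u root -e "SELECT @@hostname"   # always mysql-0 (writes)
mysql -h mysql-read.pop -u root -e "SELECT @@hostname"      # any replica (reads)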
Define the MySQL StatefulSet
Images:
mysql: docker pull mysql:5.7.30
xtrabackup: docker pull anjia0532/google-samples.xtrabackup:1.0
root@k8s-ansible-client:~/yaml/20211016/mysql# cat mysql-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
  namespace: pop
spec:
  selector:
    matchLabels:
      app: mysql
  serviceName: mysql
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
    spec:
      initContainers:
      - name: init-mysql
        image: harbor.openscp.com/base/mysql:5.7.30
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Generate mysql server-id from pod ordinal index.
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          echo [mysqld] > /mnt/conf.d/server-id.cnf
          # Add an offset to avoid reserved server-id=0 value.
          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
          # Copy appropriate conf.d files from config-map to emptyDir.
          if [[ $ordinal -eq 0 ]]; then
            cp /mnt/config-map/master.cnf /mnt/conf.d/
          else
            cp /mnt/config-map/slave.cnf /mnt/conf.d/
          fi
        volumeMounts:
        - name: conf
          mountPath: /mnt/conf.d
        - name: config-map
          mountPath: /mnt/config-map
      - name: clone-mysql
        image: harbor.openscp.com/base/xtrabackup:1
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Skip the clone if data already exists.
          [[ -d /var/lib/mysql/mysql ]] && exit 0
          # Skip the clone on master (ordinal index 0).
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          [[ $ordinal -eq 0 ]] && exit 0
          # Clone data from previous peer.
          ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
          # Prepare the backup.
          xtrabackup --prepare --target-dir=/var/lib/mysql
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
      containers:
      - name: mysql
        image: harbor.openscp.com/base/mysql:5.7.30
        env:
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "1"
        ports:
        - name: mysql
          containerPort: 3306
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 500m
            memory: 1Gi
        livenessProbe:
          exec:
            command: ["mysqladmin", "ping"]
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
        readinessProbe:
          exec:
            # Check we can execute queries over TCP (skip-networking is off).
            command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
          initialDelaySeconds: 5
          periodSeconds: 2
          timeoutSeconds: 1
      - name: xtrabackup
        image: harbor.openscp.com/base/xtrabackup:1
        ports:
        - name: xtrabackup
          containerPort: 3307
        command:
        - bash
        - "-c"
        - |
          set -ex
          cd /var/lib/mysql
          # Determine binlog position of cloned data, if any.
          if [[ -f xtrabackup_slave_info ]]; then
            # XtraBackup already generated a partial "CHANGE MASTER TO" query
            # because we're cloning from an existing slave.
            mv xtrabackup_slave_info change_master_to.sql.in
            # Ignore xtrabackup_binlog_info in this case (it's useless).
            rm -f xtrabackup_binlog_info
          elif [[ -f xtrabackup_binlog_info ]]; then
            # We're cloning directly from master. Parse binlog position.
            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
            rm xtrabackup_binlog_info
            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
          fi
          # Check if we need to complete a clone by starting replication.
          if [[ -f change_master_to.sql.in ]]; then
            echo "Waiting for mysqld to be ready (accepting connections)"
            until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done
            echo "Initializing replication from clone position"
            # In case of container restart, attempt this at-most-once.
            mv change_master_to.sql.in change_master_to.sql.orig
            mysql -h 127.0.0.1 <<EOF
          $(<change_master_to.sql.orig),
            MASTER_HOST='mysql-0.mysql',
            MASTER_USER='root',
            MASTER_PASSWORD='',
            MASTER_CONNECT_RETRY=10;
          START SLAVE;
          EOF
          fi
          # Start a server to send backups when requested by peers.
          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      volumes:
      - name: conf
        emptyDir: {}
      - name: config-map
        configMap:
          name: mysql
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 10Gi
Apply the YAML file
root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl apply -f mysql-statefulset.yaml
statefulset.apps/mysql created
You can watch the startup progress by running:
root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl get pods -l app=mysql --watch -n pop
NAME READY STATUS RESTARTS AGE
mysql-0 2/2 Running 0 22s
mysql-1 0/2 Init:0/2 0 4s
mysql-1 0/2 Init:1/2 0 16s
mysql-1 0/2 Init:1/2 0 28s
mysql-1 0/2 PodInitializing 0 36s
mysql-1 1/2 Running 0 37s
mysql-1 2/2 Running 0 42s
mysql-2 0/2 Pending 0 0s
mysql-2 0/2 Pending 0 0s
mysql-2 0/2 Pending 0 2s
mysql-2 0/2 Init:0/2 0 2s
mysql-2 0/2 Init:1/2 0 17s
mysql-2 0/2 Init:1/2 0 30s
mysql-2 0/2 PodInitializing 0 37s
mysql-2 1/2 Error 0 38s
mysql-2 1/2 Running 1 (1s ago) 39s
mysql-2 2/2 Running 1 (6s ago) 44s
After a while, all 3 Pods should be in the Running state:
root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl get pods -l app=mysql -n pop
NAME READY STATUS RESTARTS AGE
mysql-0 2/2 Running 0 3m46s
mysql-1 2/2 Running 0 3m28s
mysql-2 2/2 Running 1 (2m8s ago) 2m46s
Verify
Create a database named test on the MySQL master (mysql-0)
root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl exec -it mysql-0 -n pop bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Defaulted container "mysql" out of: mysql, xtrabackup, init-mysql (init), clone-mysql (init)
root@mysql-0:/# mysql -u root -p
Enter password:
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 226
Server version: 5.7.30-log MySQL Community Server (GPL)
Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> CREATE DATABASE test;
Query OK, 1 row affected (0.01 sec)
mysql> show databases;
+------------------------+
| Database |
+------------------------+
| information_schema |
| mysql |
| performance_schema |
| sys |
| test |
| xtrabackup_backupfiles |
+------------------------+
6 rows in set (0.01 sec)
mysql>
On the slaves (mysql-1, mysql-2), check whether the test database exists
root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl exec -it mysql-1 -n pop bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Defaulted container "mysql" out of: mysql, xtrabackup, init-mysql (init), clone-mysql (init)
root@mysql-1:/# mysql -u root -p
Enter password:
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 313
Server version: 5.7.30 MySQL Community Server (GPL)
Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> show databases;
+------------------------+
| Database |
+------------------------+
| information_schema |
| mysql |
| performance_schema |
| sys |
| test |
| xtrabackup_backupfiles |
+------------------------+
6 rows in set (0.01 sec)
mysql>
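Replication health can also be checked directly on a slave rather than by eyeballing the databases (a sketch; the container name mysql matches the exec output above):

kubectl -n pop exec mysql-1 -c mysql -- mysql -e "SHOW SLAVE STATUS\G" | grep -E 'Master_Host|_Running'
# expect Master_Host: mysql-0.mysql and Slave_IO_Running / Slave_SQL_Running both Yes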
Check the PV bindings
root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl get pv -n pop
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
mysql-datadir-1 10Gi RWO Retain Bound pop/data-mysql-0 115m
mysql-datadir-2 10Gi RWO Retain Bound pop/data-mysql-1 115m
mysql-datadir-3 10Gi RWO Retain Available 115m
mysql-datadir-4 10Gi RWO Retain Available 115m
mysql-datadir-5 10Gi RWO Retain Bound pop/data-mysql-2 115m
mysql-datadir-6 10Gi RWO Retain Available 115m
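Three of the six pre-created PVs are Bound, so the StatefulSet has headroom to grow; scaling up simply creates new PVCs that claim the remaining Available PVs (a sketch):

kubectl -n pop scale statefulset mysql --replicas=5
# PVCs data-mysql-3 and data-mysql-4 are created and bind two of the Available PVs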
Mapped directories on the NFS server
root@k8s-ansible-client:/data/pop/mysql-datadir-1/mysql# ls
auto.cnf client-cert.pem ibdata1 ibtmp1 mysql-0-bin.000002 mysql-0-bin.index public_key.pem sys
ca-key.pem client-key.pem ib_logfile0 mysql mysql-0-bin.000003 performance_schema server-cert.pem test
ca.pem ib_buffer_pool ib_logfile1 mysql-0-bin.000001 mysql-0-bin.000004 private_key.pem server-key.pem xtrabackup_backupfiles
root@k8s-ansible-client:/data/pop/mysql-datadir-1/mysql# pwd
/data/pop/mysql-datadir-1/mysql
9. Deploying Jenkins
9.1 Building the Image
# Dockerfile
root@k8s-ansible-client:~/yaml/20211016/dockerfile/jenkins# cat Dockerfile
#Jenkins Version 2.190.1
FROM harbor.openscp.com/base/jdk-base:v8.212
ADD jenkins-2.190.1.war /apps/jenkins/
ADD run_jenkins.sh /usr/bin/
EXPOSE 8080
CMD ["/usr/bin/run_jenkins.sh"]
# startup script
root@k8s-ansible-client:~/yaml/20211016/dockerfile/jenkins# cat run_jenkins.sh
#!/bin/bash
cd /apps/jenkins && java -server -Xms1024m -Xmx1024m -Xss512k -jar jenkins-2.190.1.war --webroot=/apps/jenkins/jenkins-data --httpPort=8080
# image build script
root@k8s-ansible-client:~/yaml/20211016/dockerfile/jenkins# cat build-command.sh
#!/bin/bash
docker build -t harbor.openscp.com/base/jenkins:v2.190.1 .
echo "鏡像制作完成鉴未,即將上傳至Harbor服務器"
sleep 1
docker push harbor.openscp.com/base/jenkins:v2.190.1
echo "鏡像上傳完成"
root@k8s-ansible-client:~/yaml/20211016/dockerfile/jenkins# ls
build-command.sh Dockerfile jenkins-2.190.1.war run_jenkins.sh
Build the image
root@k8s-ansible-client:~/yaml/20211016/dockerfile/jenkins# bash build-command.sh
Sending build context to Docker daemon 78.25MB
Step 1/5 : FROM harbor.openscp.com/base/jdk-base:v8.212
---> 5ff82b3545df
Step 2/5 : ADD jenkins-2.190.1.war /apps/jenkins/
---> 0ff146588622
Step 3/5 : ADD run_jenkins.sh /usr/bin/
---> f1845218b8e9
Step 4/5 : EXPOSE 8080
---> Running in b9385da9b115
Removing intermediate container b9385da9b115
---> 423d3faa4ee3
Step 5/5 : CMD ["/usr/bin/run_jenkins.sh"]
---> Running in 517b84d483af
Removing intermediate container 517b84d483af
---> ee7f6eb14d7a
Successfully built ee7f6eb14d7a
Successfully tagged harbor.openscp.com/base/jenkins:v2.190.1
鏡像制作完成,即將上傳至Harbor服務器
The push refers to repository [harbor.openscp.com/base/jenkins]
1435dd08975d: Pushed
38926d9a4b7c: Pushed
38dbe7a8225d: Mounted from base/tomcat-app1
4cdbfe6aa3f6: Mounted from base/tomcat-app1
3aec209f0edd: Mounted from base/tomcat-app1
174f56854903: Mounted from base/redis
v2.190.1: digest: sha256:a8c1486adf54c960f7a8bd1c33c86d164973115fbadd497f3f9ca151dd150db7 size: 1576
鏡像上傳完成
9.2 Creating Jenkins with NFS-Backed Persistent Storage
Create the PVs on NFS
root@k8s-ansible-client:~/yaml/20211016/jenkins# cat jenkins-pv.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: jenkins-datadir-pv
  namespace: pop
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  nfs:
    server: 10.10.0.26
    path: /data/pop/jenkins-data
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: jenkins-root-datadir-pv
  namespace: pop
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  nfs:
    server: 10.10.0.26
    path: /data/pop/jenkins-root-data
root@k8s-ansible-client:~/yaml/20211016/jenkins# kubectl apply -f jenkins-pv.yaml
persistentvolume/jenkins-datadir-pv created
persistentvolume/jenkins-root-datadir-pv created
root@k8s-ansible-client:~/yaml/20211016/jenkins# kubectl get pv -n pop
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
jenkins-datadir-pv 10Gi RWO Retain Available 11s
jenkins-root-datadir-pv 10Gi RWO Retain Available 11s
Create the PVCs
root@k8s-ansible-client:~/yaml/20211016/jenkins# cat jenkins-pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jenkins-datadir-pvc
  namespace: pop
spec:
  volumeName: jenkins-datadir-pv
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jenkins-root-data-pvc
  namespace: pop
spec:
  volumeName: jenkins-root-datadir-pv
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
root@k8s-ansible-client:~/yaml/20211016/jenkins# kubectl apply -f jenkins-pvc.yaml
persistentvolumeclaim/jenkins-datadir-pvc created
persistentvolumeclaim/jenkins-root-data-pvc created
root@k8s-ansible-client:~/yaml/20211016/jenkins# kubectl get pvc -n pop
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
jenkins-datadir-pvc Bound jenkins-datadir-pv 10Gi RWO 5s
jenkins-root-data-pvc Bound jenkins-root-datadir-pv 10Gi RWO 5s
Create the Jenkins environment
root@k8s-ansible-client:~/yaml/20211016/jenkins# cat jenkins.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: pop-jenkins
  name: pop-jenkins-deployment
  namespace: pop
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pop-jenkins
  template:
    metadata:
      labels:
        app: pop-jenkins
    spec:
      containers:
      - name: pop-jenkins-container
        image: harbor.openscp.com/base/jenkins:v2.190.1
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http
        volumeMounts:
        - mountPath: "/apps/jenkins/jenkins-data/"
          name: jenkins-datadir-magedu
        - mountPath: "/root/.jenkins"
          name: jenkins-root-datadir
      volumes:
      - name: jenkins-datadir-magedu
        persistentVolumeClaim:
          claimName: jenkins-datadir-pvc
      - name: jenkins-root-datadir
        persistentVolumeClaim:
          claimName: jenkins-root-data-pvc
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: pop-jenkins
  name: pop-jenkins-service
  namespace: pop
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    nodePort: 31080
  selector:
    app: pop-jenkins
root@k8s-ansible-client:~/yaml/20211016/jenkins# kubectl apply -f jenkins.yaml
deployment.apps/pop-jenkins-deployment created
service/pop-jenkins-service created
# check pods and deployment info
root@k8s-ansible-client:~/yaml/20211016/jenkins# kubectl get pods,deploy -n pop
NAME READY STATUS RESTARTS AGE
pod/deploy-devops-redis-d9fd6594c-fvmmg 1/1 Running 0 2d17h
pod/mysql-0 2/2 Running 0 41h
pod/mysql-1 2/2 Running 0 41h
pod/mysql-2 2/2 Running 1 (41h ago) 41h
pod/pop-jenkins-deployment-58d59b9bf5-llcqs 1/1 Running 0 27s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/deploy-devops-redis 1/1 1 1 2d17h
deployment.apps/pop-jenkins-deployment 1/1 1 1 28s
# check svc info
root@k8s-ansible-client:~/yaml/20211016/jenkins# kubectl get svc -n pop
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
pop-jenkins-service NodePort 10.68.139.115 <none> 80:31080/TCP 37s
Verify
Retrieve the Jenkins initial admin password
root@k8s-ansible-client:~/yaml/20211016/jenkins# kubectl exec -it pod/pop-jenkins-deployment-58d59b9bf5-llcqs -n pop bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
[root@pop-jenkins-deployment-58d59b9bf5-llcqs /]# cat /root/.jenkins/secrets/initialAdminPassword
c174844ecff54595ba7ddea4e4400abf
Check the NFS-mounted volumes
# mounts inside the pod
[root@pop-jenkins-deployment-58d59b9bf5-llcqs /]# df -Th
Filesystem Type Size Used Avail Use% Mounted on
overlay overlay 20G 9.5G 9.2G 51% /
tmpfs tmpfs 64M 0 64M 0% /dev
tmpfs tmpfs 2.0G 0 2.0G 0% /sys/fs/cgroup
10.10.0.26:/data/pop/jenkins-root-data nfs4 295G 806M 279G 1% /root/.jenkins
/dev/mapper/ubuntu--vg-ubuntu--lv ext4 20G 9.5G 9.2G 51% /etc/hosts
shm tmpfs 64M 0 64M 0% /dev/shm
10.10.0.26:/data/pop/jenkins-data nfs4 295G 806M 279G 1% /apps/jenkins/jenkins-data
tmpfs tmpfs 3.2G 12K 3.2G 1% /run/secrets/kubernetes.io/serviceaccount
tmpfs tmpfs 2.0G 0 2.0G 0% /proc/acpi
tmpfs tmpfs 2.0G 0 2.0G 0% /proc/scsi
tmpfs tmpfs 2.0G 0 2.0G 0% /sys/firmware
# directory contents on the NFS server
root@k8s-ansible-client:/data/pop/jenkins-data# ls
bootstrap dc-license.txt help jsbundles LogFileOutputStream.class 'MainDialog$1$1.class' META-INF WEB-INF
ColorFormatter.class executable images 'LogFileOutputStream$1.class' 'Main$FileAndDescription.class' 'MainDialog$1.class' robots.txt winstone.jar
css favicon.ico JNLPMain.class 'LogFileOutputStream$2.class' Main.class MainDialog.class scripts
root@k8s-ansible-client:/data/pop/jenkins-data# cd ../jenkins-root-data/
root@k8s-ansible-client:/data/pop/jenkins-root-data# ls
config.xml jenkins.install.InstallUtil.installingPlugins jobs nodes secret.key.not-so-secret userContent
hudson.model.UpdateCenter.xml jenkins.install.UpgradeWizard.state logs plugins secrets users
identity.key.enc jenkins.telemetry.Correlator.xml nodeMonitors.xml secret.key updates
root@k8s-ansible-client:/data/pop/jenkins-root-data#
10. Kubernetes Case Studies
10.1 Web Site Case
Build an nginx + php + wordpress + mysql stack.
10.1.1 Building the PHP Image
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/php# ls
build-command.sh Dockerfile run_php.sh www.conf
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/php# cat Dockerfile
#PHP Base Image
FROM harbor.openscp.com/base/centos:centos7.9.2009
RUN yum install -y vim wget tree lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop && rm -rf /etc/localtime && ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && useradd www -u 2020 && useradd nginx -u 2021
RUN yum install -y https://mirrors.tuna.tsinghua.edu.cn/remi/enterprise/remi-release-7.rpm && yum install php56-php-fpm php56-php-mysql -y
ADD www.conf /opt/remi/php56/root/etc/php-fpm.d/www.conf
#RUN useradd nginx -u 2019
ADD run_php.sh /usr/local/bin/run_php.sh
EXPOSE 9000
CMD ["/usr/local/bin/run_php.sh"]
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/php# cat build-command.sh
#!/bin/bash
TAG=$1
docker build -t harbor.openscp.com/base/wordpress-php-5.6:${TAG} .
echo "鏡像制作完成铜秆,即將上傳至Harbor服務器"
sleep 1
docker push harbor.openscp.com/base/wordpress-php-5.6:${TAG}
echo "鏡像上傳完成"
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/php# cat run_php.sh
#!/bin/bash
/opt/remi/php56/root/usr/sbin/php-fpm
tail -f /etc/hosts
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/php# grep -v "^;" www.conf |grep -v "^$"
[www]
user = nginx
group = nginx
listen = 0.0.0.0:9000
pm = dynamic
pm.max_children = 50
pm.start_servers = 5
pm.min_spare_servers = 5
pm.max_spare_servers = 35
slowlog = /opt/remi/php56/root/var/log/php-fpm/www-slow.log
php_admin_value[error_log] = /opt/remi/php56/root/var/log/php-fpm/www-error.log
php_admin_flag[log_errors] = on
php_value[session.save_handler] = files
php_value[session.save_path] = /opt/remi/php56/root/var/lib/php/session
php_value[soap.wsdl_cache_dir] = /opt/remi/php56/root/var/lib/php/wsdlcache
Run the script to build the image
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/php# chmod a+x *.sh
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/php# bash build-command.sh v1
...
Successfully tagged harbor.openscp.com/base/wordpress-php-5.6:v1
鏡像制作完成,即將上傳至Harbor服務器
The push refers to repository [harbor.openscp.com/base/wordpress-php-5.6]
18125a50a52c: Pushed
017c00d221f8: Pushed
3bab9e7267e6: Pushed
7fc5345cbe01: Mounted from base/nginx-web1
174f56854903: Mounted from base/jenkins
v1: digest: sha256:d5a6abed76905f428d164520a5c728e99625d88bc1dbc6c62d2bc6fe384a6714 size: 1369
鏡像上傳完成
10.1.2 Building the Nginx Images
First build the nginx base image
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx-base# cat Dockerfile
#Nginx Base Image
FROM harbor.openscp.com/base/centos:centos7.9.2009
RUN yum install -y vim wget tree lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop && rm -rf /etc/localtime && ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && useradd www -u 2020 && useradd nginx -u 2021
ADD nginx-1.14.2.tar.gz /usr/local/src/
RUN cd /usr/local/src/nginx-1.14.2 && ./configure --prefix=/apps/nginx && make && make install && ln -sv /apps/nginx/sbin/nginx /usr/sbin/nginx &&rm -rf /usr/local/src/nginx-1.14.2.tar.gz
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx-base# cat build-command.sh
#!/bin/bash
docker build -t harbor.openscp.com/base/nginx-base-wordpress:v1.14.2 .
sleep 1
docker push harbor.openscp.com/base/nginx-base-wordpress:v1.14.2
Run the script to build the nginx base image
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx-base# bash build-command.sh
...
Successfully built b73c4576e520
Successfully tagged harbor.openscp.com/base/nginx-base-wordpress:v1.14.2
The push refers to repository [harbor.openscp.com/base/nginx-base-wordpress]
154a43bb903c: Pushed
3265817f225b: Mounted from base/nginx-web1
7fc5345cbe01: Mounted from base/wordpress-php-5.6
174f56854903: Mounted from base/wordpress-php-5.6
v1.14.2: digest: sha256:52412ed50aff876003c4834f00a8d60d7624ab770444ff8f2cac6fd21712ced3 size: 1164
Build the application nginx image
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx# ls
build-command.sh Dockerfile index.html nginx.conf run_nginx.sh
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx# cat Dockerfile
#FROM harbor.magedu.local/pub-images/nginx-base-wordpress:v1.14.2
FROM harbor.openscp.com/base/nginx-base-wordpress:v1.14.2
ADD nginx.conf /apps/nginx/conf/nginx.conf
ADD run_nginx.sh /apps/nginx/sbin/run_nginx.sh
RUN mkdir -pv /home/nginx/wordpress
RUN chown nginx.nginx /home/nginx/wordpress/ -R
EXPOSE 80 443
CMD ["/apps/nginx/sbin/run_nginx.sh"]
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx# cat index.html
nginx web1
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx# cat nginx.conf
user  nginx nginx;
worker_processes  auto;
events {
    worker_connections  1024;
}
http {
    include       mime.types;
    default_type  application/octet-stream;
    sendfile      on;
    keepalive_timeout  65;
    client_max_body_size 10M;
    client_body_buffer_size 16k;
    client_body_temp_path /apps/nginx/tmp 1 2 2;
    gzip  on;
    server {
        listen       80;
        server_name  blogs.openscp.com;
        location / {
            root  /home/nginx/wordpress;
            index index.php index.html index.htm;
        }
        location ~ \.php$ {
            root          /home/nginx/wordpress;
            fastcgi_pass  127.0.0.1:9000;
            fastcgi_index index.php;
            fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
            include       fastcgi_params;
        }
        error_page  500 502 503 504  /50x.html;
        location = /50x.html {
            root  html;
        }
    }
}
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx# cat run_nginx.sh
#!/bin/bash
/apps/nginx/sbin/nginx
tail -f /etc/hosts
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx# cat build-command.sh
#!/bin/bash
TAG=$1
docker build -t harbor.openscp.com/base/wordpress-nginx:${TAG} .
echo "鏡像制作完成,即將上傳至Harbor服務器"
sleep 1
docker push harbor.openscp.com/base/wordpress-nginx:${TAG}
echo "鏡像上傳完成"
Run the script to build the application nginx image
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx# chmod a+x *.sh
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx# bash build-command.sh v1
...
Successfully tagged harbor.openscp.com/base/wordpress-nginx:v1
鏡像制作完成,即將上傳至Harbor服務器
The push refers to repository [harbor.openscp.com/base/wordpress-nginx]
2e4717b51034: Pushed
e32627e01bb4: Pushed
7a8e3034ed91: Pushed
b79d4f07216e: Pushed
25e48aff2729: Mounted from base/nginx-base-wordpress
5b5981584815: Mounted from base/nginx-base-wordpress
c6a91dc597a0: Mounted from base/nginx-base-wordpress
174f56854903: Mounted from base/nginx-base-wordpress
v1: digest: sha256:98d50b474682ef18369a6468a1e488d0baafa3324099fded4dc9ff754a84250d size: 1992
鏡像上傳完成
10.1.3 Deploying WordPress
The YAML file, using NFS for persistent storage
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/wordpress# cat wordpress.yaml
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: wordpress-app
  name: wordpress-app-deployment
  namespace: pop
spec:
  replicas: 1
  selector:
    matchLabels:
      app: wordpress-app
  template:
    metadata:
      labels:
        app: wordpress-app
    spec:
      containers:
      - name: wordpress-app-nginx
        image: harbor.openscp.com/base/wordpress-nginx:v1
        imagePullPolicy: Always
        ports:
        - containerPort: 80
          protocol: TCP
          name: http
        - containerPort: 443
          protocol: TCP
          name: https
        volumeMounts:
        - name: wordpress
          mountPath: /home/nginx/wordpress
          readOnly: false
      - name: wordpress-app-php
        image: harbor.openscp.com/base/wordpress-php-5.6:v1
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 9000
          protocol: TCP
          name: http
        volumeMounts:
        - name: wordpress
          mountPath: /home/nginx/wordpress
          readOnly: false
      volumes:
      - name: wordpress
        nfs:
          server: 10.10.0.26
          path: /data/pop/wordpress
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: wordpress-app
  name: wordpress-app-spec
  namespace: pop
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30031
  - name: https
    port: 443
    protocol: TCP
    targetPort: 443
    nodePort: 30033
  selector:
    app: wordpress-app
Using the MySQL master/slave cluster above, create the database (wordpress) and account (user wordpress, password 123456)
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/wordpress# kubectl exec -it mysql-0 -n pop bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Defaulted container "mysql" out of: mysql, xtrabackup, init-mysql (init), clone-mysql (init)
root@mysql-0:/# mysql -u root -p
Enter password:
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 91544
Server version: 5.7.30-log MySQL Community Server (GPL)
Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> create database wordpress;
Query OK, 1 row affected (0.01 sec)
mysql>
mysql> grant all privileges on wordpress.* to wordpress@'%' identified by '123456';
Query OK, 0 rows affected, 1 warning (0.05 sec)
mysql> flush privileges;
Query OK, 0 rows affected (0.05 sec)
mysql>
Apply the YAML to bring up the WordPress environment
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/wordpress# kubectl apply -f wordpress.yaml
deployment.apps/wordpress-app-deployment created
service/wordpress-app-spec created
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/wordpress# kubectl get pods,deploy -n pop
NAME READY STATUS RESTARTS AGE
pod/deploy-devops-redis-d9fd6594c-fvmmg 1/1 Running 0 2d18h
pod/mysql-0 2/2 Running 0 42h
pod/mysql-1 2/2 Running 0 42h
pod/mysql-2 2/2 Running 1 (42h ago) 42h
pod/wordpress-app-deployment-5f98868c5b-4775l 2/2 Running 0 4s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/deploy-devops-redis 1/1 1 1 2d18h
deployment.apps/wordpress-app-deployment 1/1 1 1 4s
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/wordpress# kubectl get svc -n pop
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
mysql ClusterIP None <none> 3306/TCP 44h
mysql-read ClusterIP 10.68.181.29 <none> 3306/TCP 44h
wordpress-app-spec NodePort 10.68.208.19 <none> 80:30031/TCP,443:30033/TCP 115s
Download the WordPress package from https://cn.wordpress.org/download/
# on the NFS server
root@k8s-ansible-client:/data/pop/wordpress# wget https://cn.wordpress.org/latest-zh_CN.tar.gz
# unpack the package
root@k8s-ansible-client:/data/pop/wordpress# tar -zxvf latest-zh_CN.tar.gz
root@k8s-ansible-client:/data/pop/wordpress# ls
index.html latest-zh_CN.tar.gz wordpress
root@k8s-ansible-client:/data/pop/wordpress# rm -fr index.html latest-zh_CN.tar.gz
root@k8s-ansible-client:/data/pop/wordpress# mv wordpress/* .
root@k8s-ansible-client:/data/pop/wordpress# ls
index.php readme.html wp-activate.php wp-blog-header.php wp-config-sample.php wp-cron.php wp-links-opml.php wp-login.php wp-settings.php wp-trackback.php
license.txt wordpress wp-admin wp-comments-post.php wp-content wp-includes wp-load.php wp-mail.php wp-signup.php xmlrpc.php
root@k8s-ansible-client:/data/pop/wordpress# rm -fr wordpress/
Install WordPress
Note: if you run into permission problems, grant the nginx user ownership of the wordpress directory, as sketched below.
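A minimal sketch of that fix, run on the NFS server (uid 2021 matches the nginx user created in the image Dockerfiles above):

chown -R 2021:2021 /data/pop/wordpress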
The database name and credentials are the ones created above; the connection address is mysql-0.mysql.pop.svc.pop.local.
Log in and publish a post.
10.2 Microservice Case
Build a dubbo + zookeeper microservice environment.
10.2.1 Building the Images
consumer
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/consumer# ls
build-command.sh Dockerfile dubbo-demo-consumer-2.1.5 dubbo-demo-consumer-2.1.5-assembly.tar.gz run_java.sh
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/consumer# cat Dockerfile
#Dubbo consumer
FROM harbor.openscp.com/base/jdk-base:v8.212
RUN yum install -y vim wget tree lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop&& rm -rf /etc/localtime && ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && useradd nginx -u 2001
RUN yum install file -y
RUN mkdir -p /apps/dubbo/consumer
ADD dubbo-demo-consumer-2.1.5 /apps/dubbo/consumer
ADD run_java.sh /apps/dubbo/consumer/bin
RUN chown nginx.nginx /apps -R
RUN chmod a+x /apps/dubbo/consumer/bin/*.sh
CMD ["/apps/dubbo/consumer/bin/run_java.sh"]
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/consumer# cat run_java.sh
#!/bin/bash
su - nginx -c "/apps/dubbo/consumer/bin/start.sh"
tail -f /etc/hosts
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/consumer# cat build-command.sh
#!/bin/bash
docker build -t harbor.openscp.com/base/dubbo-demo-consumer:v1 .
sleep 3
docker push harbor.openscp.com/base/dubbo-demo-consumer:v1
# 需要配置注冊中心地址
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/consumer# grep -v "^#" dubbo-demo-consumer-2.1.5/conf/dubbo.properties
dubbo.container=log4j,spring
dubbo.application.name=demo-consumer
dubbo.application.owner=
dubbo.registry.address=zookeeper://zookeeper1.default.svc.pop.local:2181 | zookeeper://zookeeper2.default.svc.pop.local:2181 | zookeeper://zookeeper3.default.svc.pop.local:2181
dubbo.monitor.protocol=registry
dubbo.log4j.file=logs/dubbo-demo-consumer.log
dubbo.log4j.level=WARN
Run the script to build the image
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/consumer# chmod a+x *.sh
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/consumer# bash build-command.sh
...
Successfully tagged harbor.openscp.com/base/dubbo-demo-consumer:v1
The push refers to repository [harbor.openscp.com/base/dubbo-demo-consumer]
6138693121b5: Pushed
4fae946992b0: Pushed
1429d5765bc5: Pushed
d75b5de4df30: Pushed
c67ef8dd101d: Pushed
a53b249d3f92: Pushed
bc1760d32325: Pushed
38dbe7a8225d: Mounted from base/jenkins
4cdbfe6aa3f6: Mounted from base/jenkins
3aec209f0edd: Mounted from base/jenkins
174f56854903: Mounted from base/wordpress-nginx
v1: digest: sha256:413825db8a03f0eed51fd3056041059bc35fd79bd18b4fd7285a9fedbfc3913f size: 2628
dubboadmin
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/dubboadmin# ls
build-command.sh catalina.sh Dockerfile dubboadmin.war dubboadmin.zip logging.properties run_tomcat.sh server.xml
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/dubboadmin# cat Dockerfile
#Dubbo dubboadmin
FROM harbor.openscp.com/base/tomcat-base:v8.5.43
RUN yum install -y vim unzip wget tree lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop&& rm -rf /etc/localtime && ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
ADD server.xml /apps/tomcat/conf/server.xml
ADD logging.properties /apps/tomcat/conf/logging.properties
ADD catalina.sh /apps/tomcat/bin/catalina.sh
ADD run_tomcat.sh /apps/tomcat/bin/run_tomcat.sh
ADD dubboadmin.war /data/tomcat/webapps/dubboadmin.war
RUN cd /data/tomcat/webapps && unzip dubboadmin.war && rm -rf dubboadmin.war && chown -R nginx.nginx /data /apps
EXPOSE 8080 8443
CMD ["/apps/tomcat/bin/run_tomcat.sh"]
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/dubboadmin# cat build-command.sh
#!/bin/bash
TAG=$1
docker build -t harbor.openscp.com/base/dubboadmin:${TAG} .
sleep 3
docker push harbor.openscp.com/base/dubboadmin:${TAG}
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/dubboadmin# cat run_tomcat.sh
#!/bin/bash
su - nginx -c "/apps/tomcat/bin/catalina.sh start"
su - nginx -c "tail -f /etc/hosts"
# 需要配置注冊中心地址
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/dubboadmin# cat dubboadmin/WEB-INF/dubbo.properties
dubbo.registry.address=zookeeper://zookeeper1.default.svc.pop.local:2181 | zookeeper://zookeeper2.default.svc.pop.local:2181 | zookeeper://zookeeper3.default.svc.pop.local:2181
dubbo.admin.root.password=root
dubbo.admin.guest.password=guest
Run the script to build the image
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/dubboadmin# chmod a+x *.sh
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/dubboadmin# bash build-command.sh v1
...
Successfully tagged harbor.openscp.com/base/dubboadmin:v1
The push refers to repository [harbor.openscp.com/base/dubboadmin]
98e0d611e025: Pushed
6dc138ab6f03: Pushed
82e5ee6a3c84: Pushed
5ddfa7134fcc: Pushed
44868cb8c702: Pushed
aaefdcea4576: Pushed
2ff59fc7c569: Pushed
6dc737cf89d1: Pushed
d5123d987925: Mounted from base/tomcat-app1
afa3eb2a2173: Mounted from base/tomcat-app1
7136febc3401: Mounted from base/tomcat-app1
38dbe7a8225d: Mounted from base/dubbo-demo-consumer
4cdbfe6aa3f6: Mounted from base/dubbo-demo-consumer
3aec209f0edd: Mounted from base/dubbo-demo-consumer
174f56854903: Mounted from base/dubbo-demo-consumer
v1: digest: sha256:2f4e1148295e244c0188c2dd8048d673bbda3c0a11b421d1be25b42e81ca19b6 size: 3466
provider (the registry address is configured the same way)
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/provider# ls
build-command.sh Dockerfile dubbo-demo-provider-2.1.5 dubbo-demo-provider-2.1.5-assembly.tar.gz run_java.sh
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/provider# cat Dockerfile
#Dubbo provider
FROM harbor.openscp.com/base/jdk-base:v8.212
RUN yum install -y vim wget tree file nc lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop&& rm -rf /etc/localtime && ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && useradd nginx -u 2001
RUN mkdir -p /apps/dubbo/provider
ADD dubbo-demo-provider-2.1.5/ /apps/dubbo/provider
ADD run_java.sh /apps/dubbo/provider/bin
RUN chown nginx.nginx /apps -R
RUN chmod a+x /apps/dubbo/provider/bin/*.sh
CMD ["/apps/dubbo/provider/bin/run_java.sh"]
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/provider# cat build-command.sh
#!/bin/bash
docker build -t harbor.openscp.com/base/dubbo-demo-provider:v1 .
sleep 3
docker push harbor.openscp.com/base/dubbo-demo-provider:v1
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/provider# cat run_java.sh
#!/bin/bash
su - nginx -c "/apps/dubbo/provider/bin/start.sh"
tail -f /etc/hosts
# 需要配置注冊中心地址
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/provider# grep -v "^#" dubbo-demo-provider-2.1.5/conf/dubbo.properties
dubbo.container=log4j,spring
dubbo.application.name=demo-provider
dubbo.application.owner=
dubbo.registry.address=zookeeper://zookeeper1.default.svc.pop.local:2181 | zookeeper://zookeeper2.default.svc.pop.local:2181 | zookeeper://zookeeper3.default.svc.pop.local:2181
dubbo.monitor.protocol=registry
dubbo.protocol.name=dubbo
dubbo.protocol.port=20880
dubbo.log4j.file=logs/dubbo-demo-provider.log
dubbo.log4j.level=WARN
Run the script to build the image
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/provider# chmod a+x *.sh
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/provider# bash build-command.sh
...
Successfully tagged harbor.openscp.com/base/dubbo-demo-provider:v1
The push refers to repository [harbor.openscp.com/base/dubbo-demo-provider]
c32486dc8751: Pushed
719e351a9ee9: Pushed
8f08fdf8a695: Pushed
ee248e4a3744: Pushed
fad539a3eb64: Pushed
9ff991a489ff: Pushed
38dbe7a8225d: Mounted from base/dubboadmin
4cdbfe6aa3f6: Mounted from base/dubboadmin
3aec209f0edd: Mounted from base/dubboadmin
174f56854903: Mounted from base/dubboadmin
v1: digest: sha256:5eeebdbbe537f385bbed92b7f24335af11c3f6d38285741390d2151c361010e8 size: 2416
10.2.2 Deploying the Microservice Environment
The YAML files
root@k8s-ansible-client:~/yaml/20211016/dubbo# cat provider.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: pop-provider
  name: pop-provider-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pop-provider
  template:
    metadata:
      labels:
        app: pop-provider
    spec:
      containers:
      - name: pop-provider-container
        image: harbor.openscp.com/base/dubbo-demo-provider:v1
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 20880
          protocol: TCP
          name: http
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: pop-provider
  name: pop-provider-spec
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 20880
    #nodePort: 30001
  selector:
    app: pop-provider
root@k8s-ansible-client:~/yaml/20211016/dubbo# cat dubboadmin.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: pop-dubboadmin
  name: pop-dubboadmin-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pop-dubboadmin
  template:
    metadata:
      labels:
        app: pop-dubboadmin
    spec:
      containers:
      - name: pop-dubboadmin-container
        image: harbor.openscp.com/base/dubboadmin:v1
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: pop-dubboadmin
  name: pop-dubboadmin-service
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    nodePort: 30080
  selector:
    app: pop-dubboadmin
root@k8s-ansible-client:~/yaml/20211016/dubbo# cat consumer.yaml
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: pop-consumer
  name: pop-consumer-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pop-consumer
  template:
    metadata:
      labels:
        app: pop-consumer
    spec:
      containers:
      - name: pop-consumer-container
        image: harbor.openscp.com/base/dubbo-demo-consumer:v1
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 80
          protocol: TCP
          name: http
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: pop-consumer
  name: pop-consumer-server
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    #nodePort: 30001
  selector:
    app: pop-consumer
Start the provider
root@k8s-ansible-client:~/yaml/20211016/dubbo# kubectl apply -f provider.yaml
deployment.apps/pop-provider-deployment created
service/pop-provider-spec created
root@k8s-ansible-client:~/yaml/20211016/dubbo# kubectl get pods,deploy
NAME READY STATUS RESTARTS AGE
pod/alpine-test 1/1 Running 50 (53m ago) 28d
pod/kube100-site 2/2 Running 0 14d
pod/nginx 0/1 CrashLoopBackOff 1611 (3m21s ago) 4d
pod/nginx-test-001 1/1 Running 26 (90m ago) 15d
pod/nginx-test1 1/1 Running 50 (63m ago) 28d
pod/nginx-test2 1/1 Running 50 (63m ago) 28d
pod/nginx-test3 1/1 Running 50 (63m ago) 28d
pod/pop-provider-deployment-6dfd4d78db-gl4rt 1/1 Running 0 20s
pod/zookeeper1-cdbb7fbc-5pgdg 1/1 Running 1 (6d3h ago) 6d3h
pod/zookeeper2-f4944446d-2xnjd 1/1 Running 0 6d3h
pod/zookeeper3-589f6bc7-2mnz6 1/1 Running 0 6d3h
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/pop-provider-deployment 1/1 1 1 20s
deployment.apps/zookeeper1 1/1 1 1 6d3h
deployment.apps/zookeeper2 1/1 1 1 6d3h
deployment.apps/zookeeper3 1/1 1 1 6d3h
root@k8s-ansible-client:~/yaml/20211016/dubbo# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.68.0.1 <none> 443/TCP 29d
pop-provider-spec NodePort 10.68.4.34 <none> 80:31797/TCP 25s
zookeeper1 NodePort 10.68.42.189 <none> 2181:32181/TCP,2888:30923/TCP,3888:30168/TCP 6d3h
zookeeper2 NodePort 10.68.78.146 <none> 2181:32182/TCP,2888:31745/TCP,3888:30901/TCP 6d3h
zookeeper3 NodePort 10.68.199.44 <none> 2181:32183/TCP,2888:32488/TCP,3888:31621/TCP 6d3h
Verify with the Windows client ZooInspector, which browses the data stored in zookeeper.
The provider's registration can be seen inside zookeeper.
Start the consumer
root@k8s-ansible-client:~/yaml/20211016/dubbo# kubectl apply -f consumer.yaml
deployment.apps/pop-consumer-deployment created
service/pop-consumer-server created
Verify by tailing the provider logs
root@k8s-ansible-client:~/yaml/20211010/03/dockerfile# kubectl exec -it pod/pop-provider-deployment-6dfd4d78db-gl4rt bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead
[root@pop-provider-deployment-6dfd4d78db-gl4rt logs]# tail -f *.log
...
==> stdout.log <==
at org.jboss.netty.channel.socket.nio.NioServerSocketPipelineSink.handleAcceptedSocket(NioServerSocketPipelineSink.java:137)
at org.jboss.netty.channel.socket.nio.NioServerSocketPipelineSink.eventSunk(NioServerSocketPipelineSink.java:76)
at org.jboss.netty.channel.Channels.write(Channels.java:632)
at org.jboss.netty.handler.codec.oneone.OneToOneEncoder.handleDownstream(OneToOneEncoder.java:70)
at com.alibaba.dubbo.remoting.transport.netty.NettyHandler.writeRequested(NettyHandler.java:99)
at org.jboss.netty.channel.Channels.write(Channels.java:611)
at org.jboss.netty.channel.Channels.write(Channels.java:578)
at org.jboss.netty.channel.AbstractChannel.write(AbstractChannel.java:251)
at com.alibaba.dubbo.remoting.transport.netty.NettyChannel.send(NettyChannel.java:98)
... 6 more
[00:57:55] Hello world0, request from consumer: /172.20.213.27:35450
[00:57:57] Hello world1, request from consumer: /172.20.213.27:35450
[00:57:59] Hello world2, request from consumer: /172.20.213.27:35450
[00:58:01] Hello world3, request from consumer: /172.20.213.27:35450
[00:58:03] Hello world4, request from consumer: /172.20.213.27:35450
[00:58:05] Hello world5, request from consumer: /172.20.213.27:35450
[00:58:07] Hello world6, request from consumer: /172.20.213.27:35450
Start dubboadmin
root@k8s-ansible-client:~/yaml/20211016/dubbo# kubectl apply -f dubboadmin.yaml
deployment.apps/pop-dubboadmin-deployment created
service/pop-dubboadmin-service created
root@k8s-ansible-client:~/yaml/20211016/dubbo# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
alpine-test 1/1 Running 50 (76m ago) 28d 172.20.108.65 192.168.20.236 <none> <none>
kube100-site 2/2 Running 0 14d 172.20.213.6 192.168.20.253 <none> <none>
nginx 0/1 CrashLoopBackOff 1617 (4m28s ago) 4d 172.20.213.20 192.168.20.253 <none> <none>
nginx-test-001 1/1 Running 26 (113m ago) 15d 172.20.191.10 192.168.20.147 <none> <none>
nginx-test1 1/1 Running 50 (86m ago) 28d 172.20.191.2 192.168.20.147 <none> <none>
nginx-test2 1/1 Running 50 (86m ago) 28d 172.20.213.3 192.168.20.253 <none> <none>
nginx-test3 1/1 Running 50 (86m ago) 28d 172.20.191.3 192.168.20.147 <none> <none>
pop-consumer-deployment-54b54559d7-2dd2k 1/1 Running 0 10m 172.20.213.27 192.168.20.253 <none> <none>
pop-dubboadmin-deployment-75f8d75df-drcz6 1/1 Running 0 38s 172.20.108.91 192.168.20.236 <none> <none>
pop-provider-deployment-6dfd4d78db-gl4rt 1/1 Running 0 23m 172.20.191.33 192.168.20.147 <none> <none>
zookeeper1-cdbb7fbc-5pgdg 1/1 Running 1 (6d3h ago) 6d3h 172.20.191.27 192.168.20.147 <none> <none>
zookeeper2-f4944446d-2xnjd 1/1 Running 0 6d3h 172.20.108.81 192.168.20.236 <none> <none>
zookeeper3-589f6bc7-2mnz6 1/1 Running 0 6d3h 172.20.191.28 192.168.20.147 <none> <none>
root@k8s-ansible-client:~/yaml/20211016/dubbo# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.68.0.1 <none> 443/TCP 29d
pop-consumer-server NodePort 10.68.122.230 <none> 80:30926/TCP 12m
pop-dubboadmin-service NodePort 10.68.60.83 <none> 80:30080/TCP 2m23s
pop-provider-spec NodePort 10.68.4.34 <none> 80:31797/TCP 24m
zookeeper1 NodePort 10.68.42.189 <none> 2181:32181/TCP,2888:30923/TCP,3888:30168/TCP 6d3h
zookeeper2 NodePort 10.68.78.146 <none> 2181:32182/TCP,2888:31745/TCP,3888:30901/TCP 6d3h
zookeeper3 NodePort 10.68.199.44 <none> 2181:32183/TCP,2888:32488/TCP,3888:31621/TCP 6d3h
Verify: log in with account root, password root
11. Ingress
11.1 What Is Ingress
Ingress exposes HTTP and HTTPS routes from outside the cluster to Services inside the cluster. Traffic routing is controlled by the rules defined on the Ingress resource.
A simple flow diagram: (figure omitted)
An Ingress can be configured to give Services externally reachable URLs, load-balance traffic, terminate SSL/TLS, and offer name-based virtual hosting. An Ingress controller is responsible for fulfilling the Ingress, usually with a load balancer, though it may also configure an edge router or additional frontends to help handle the traffic.
Ingress does not expose arbitrary ports or protocols. Exposing services other than HTTP and HTTPS to the internet typically uses a Service of type Service.Type=NodePort or Service.Type=LoadBalancer.
11.2 Ingress Controller
The Ingress controller watches Ingress API objects, converts the rules they define into an Nginx configuration (Kubernetes' declarative API plus control loops), and then serves external traffic.
Its core is a Deployment. There are many implementations, such as nginx, Contour, HAProxy, Traefik, and Istio. The YAML to write includes a Deployment, Service, ConfigMap, and ServiceAccount (auth); the Service type can be NodePort or LoadBalancer.
The Ingress controller turns each Ingress change into a piece of Nginx configuration, writes that configuration into the Nginx Pod through the Kubernetes API, and reloads it. (Note: what gets written into nginx.conf is not the Service address but the addresses of the Service's backend Pods, which avoids an extra layer of load-balanced forwarding at the Service.)
Putting it together: an incoming request is first intercepted by the load balancer, e.g. nginx; the Ingress controller learns from the Ingress which domain maps to which Service, learns the Service's endpoint addresses from the Kubernetes API, generates a configuration from all of this, writes it into the load balancer in real time, and reloads it. This achieves service discovery, i.e. dynamic mapping.
This also explains why I like to deploy the load balancer as a DaemonSet: since every request hits the load balancer first, run one on every node, each listening on port 80 via hostPort. That removes the uncertainty about which node the load balancer runs on, and port 80 on any node then resolves requests correctly; put an nginx in front of that and you gain one more layer of load balancing.
The Ingress controller provides the proxying capability described by the Ingress objects you define. The reverse proxies commonly used in the industry, such as Nginx, HAProxy, Envoy, and Traefik, all maintain dedicated Ingress controllers for Kubernetes.
11.3 Deploying Ingress
Ingress routes to backend Services, not to Pods directly.
The Ingress controller interacts with the Kubernetes cluster and reacts to changes in etcd.
Deploy nginx-ingress-controller:
https://kubernetes.github.io/ingress-nginx/deploy/
The deploy manifest from the page above was saved locally as aa.yaml:
root@k8s-ansible-client:~/yaml/20211016/ingress# kubectl apply -f aa.yaml
namespace/ingress-nginx created
serviceaccount/ingress-nginx created
configmap/ingress-nginx-controller created
clusterrole.rbac.authorization.k8s.io/ingress-nginx created
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx created
role.rbac.authorization.k8s.io/ingress-nginx created
rolebinding.rbac.authorization.k8s.io/ingress-nginx created
service/ingress-nginx-controller-admission created
service/ingress-nginx-controller created
deployment.apps/ingress-nginx-controller created
ingressclass.networking.k8s.io/nginx created
validatingwebhookconfiguration.admissionregistration.k8s.io/ingress-nginx-admission created
serviceaccount/ingress-nginx-admission created
clusterrole.rbac.authorization.k8s.io/ingress-nginx-admission created
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
role.rbac.authorization.k8s.io/ingress-nginx-admission created
rolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
job.batch/ingress-nginx-admission-create created
job.batch/ingress-nginx-admission-patch created
root@k8s-ansible-client:~/yaml/20211016/ingress# kubectl get pods,deploy -n ingress-nginx
NAME READY STATUS RESTARTS AGE
pod/ingress-nginx-admission-create--1-n7f7b 0/1 Completed 0 21s
pod/ingress-nginx-admission-patch--1-cd57p 0/1 Completed 1 21s
pod/ingress-nginx-controller-cfb66bc7b-bflgv 1/1 Running 0 21s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/ingress-nginx-controller 1/1 1 1 21s
11.3.1 Name-based virtual hosts
root@k8s-ansible-client:~/yaml/20211016/ingress# cat ingress_single-host.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nginx-web
  namespace: pop
  annotations:
    kubernetes.io/ingress.class: "nginx" ##selects the Ingress controller type
spec:
  rules: #routing rules
  - host: www.pop.com ##host domain requested by the client
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: pop-tomcat-app1-service
            port:
              number: 80
root@k8s-ansible-client:~/yaml/20211016/ingress# kubectl apply -f ingress_single-host.yaml
ingress.networking.k8s.io/nginx-web created
root@k8s-ansible-client:~/yaml/20211016/ingress# kubectl get ingress -n pop
NAME CLASS HOSTS ADDRESS PORTS AGE
nginx-web <none> www.pop.com 192.168.20.253 80 2m44s
Verification
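Without DNS, the virtual host can be verified by pinning the Host header (a hedged sketch; 192.168.20.253 is the ADDRESS reported above):

curl -H 'Host: www.pop.com' http://192.168.20.253/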
11.3.2 URL-based routing within a domain
root@k8s-ansible-client:~/yaml/20211016/ingress# cat ingress-url.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nginx-web
  namespace: pop
  annotations:
    kubernetes.io/ingress.class: "nginx" ##selects the Ingress controller type
spec:
  rules:
  - host: www.pop.com
    http:
      paths:
      - path: /url1
        pathType: Prefix
        backend:
          service:
            name: pop-tomcat-app1-service
            port:
              number: 80
      - path: /url2
        pathType: Prefix
        backend:
          service:
            name: pop-tomcat-app2-service
            port:
              number: 80
root@k8s-ansible-client:~/yaml/20211016/ingress# kubectl apply -f ingress-url.yaml
ingress.networking.k8s.io/nginx-web created
root@k8s-ansible-client:~/yaml/20211016/ingress# kubectl get ingress -n pop
NAME CLASS HOSTS ADDRESS PORTS AGE
nginx-web <none> www.pop.com 192.168.20.253 80 9s
Verification
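Each path prefix should land on its own backend Service (a hedged sketch, same node address as above):

curl -H 'Host: www.pop.com' http://192.168.20.253/url1/
curl -H 'Host: www.pop.com' http://192.168.20.253/url2/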
11.3.3 Multi-domain HTTPS
Self-signed certificates
# domain www.pop.com
root@k8s-ansible-client:~/yaml/20211016/ingress/certs# openssl req -x509 -sha256 -newkey rsa:4096 -keyout ca.key -out ca.crt -days 3560 -nodes -subj '/CN=www.pop.com'
root@k8s-ansible-client:~/yaml/20211016/ingress/certs# openssl req -new -newkey rsa:4096 -keyout server.key -out server.csr -nodes -subj '/CN=www.pop.com'
root@k8s-ansible-client:~/yaml/20211016/ingress/certs# openssl x509 -req -sha256 -days 3650 -in server.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out server.crt
Signature ok
subject=CN = www.pop.com
Getting CA Private Key
# domain test.pop.net
root@k8s-ansible-client:~/yaml/20211016/ingress/certs# openssl req -new -newkey rsa:4096 -keyout test.key -out test.csr -nodes -subj '/CN=test.pop.net'
root@k8s-ansible-client:~/yaml/20211016/ingress/certs# openssl x509 -req -sha256 -days 3650 -in test.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out test.crt
Signature ok
subject=CN = test.pop.net
Getting CA Private Key
Upload the certificates to k8s as Secrets
root@k8s-ansible-client:~/yaml/20211016/ingress/certs# kubectl create secret generic www-tls-secret --from-file=tls.crt=server.crt --from-file=tls.key=server.key -n pop
secret/www-tls-secret created
root@k8s-ansible-client:~/yaml/20211016/ingress/certs# kubectl create secret generic test-tls-secret --from-file=tls.crt=test.crt --from-file=tls.key=test.key -n pop
secret/test-tls-secret created
root@k8s-ansible-client:~/yaml/20211016/ingress/certs# kubectl get secret -n pop
NAME TYPE DATA AGE
default-token-4nrw2 kubernetes.io/service-account-token 3 9d
test-tls-secret Opaque 2 11s
www-tls-secret Opaque 2 45s
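The TYPE column shows Opaque because kubectl create secret generic was used; the keys tls.crt/tls.key are what the controller reads, so this works, but the dedicated TLS Secret type is the more idiomatic choice (a hedged alternative using the same files as above):

kubectl create secret tls www-tls-secret --cert=server.crt --key=server.key -n pop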
root@k8s-ansible-client:~/yaml/20211016/ingress# cat ingress-https-multi.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nginx-web
  namespace: pop
  annotations:
    kubernetes.io/ingress.class: "nginx" ##selects the Ingress controller type
    nginx.ingress.kubernetes.io/ssl-redirect: 'true'
spec:
  tls:
  - hosts:
    - www.pop.com
    secretName: www-tls-secret
  - hosts:
    - test.pop.net
    secretName: test-tls-secret
  rules:
  - host: www.pop.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: pop-tomcat-app1-service
            port:
              number: 80
  - host: test.pop.net
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: pop-tomcat-app2-service
            port:
              number: 80
root@k8s-ansible-client:~/yaml/20211016/ingress# kubectl apply -f ingress-https-multi.yaml
ingress.networking.k8s.io/nginx-web created
root@k8s-ansible-client:~/yaml/20211016/ingress# kubectl get ingress -n pop
NAME CLASS HOSTS ADDRESS PORTS AGE
nginx-web <none> www.pop.com,test.pop.net 192.168.20.253 80, 443 25s
Verification
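Without DNS, --resolve pins each name to the ingress address so the SNI matches, and -k accepts the self-signed chain (a hedged sketch):

curl -kv --resolve www.pop.com:443:192.168.20.253 https://www.pop.com/
curl -kv --resolve test.pop.net:443:192.168.20.253 https://test.pop.net/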
12. Controlling Pod replica count with the HPA controller
12.1 HPA overview
HPA (Horizontal Pod Autoscaler) is a Kubernetes (k8s) resource object that dynamically scales the number of Pods in collections such as StatefulSet, ReplicationController, and ReplicaSet according to chosen metrics, giving the services running on them some ability to adapt as those metrics change.
HPA currently supports four metric types: Resource, Object, External, and Pods. The stable autoscaling/v1 version only supports scaling on the CPU metric; the beta autoscaling/v2beta2 version supports memory and custom metrics, which can also be used from autoscaling/v1 by way of annotations.
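For reference, a minimal sketch of an autoscaling/v2beta2 HPA that scales on memory utilization (the target Deployment name here is hypothetical; the example actually applied in 12.4 uses autoscaling/v1 and CPU):

apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: memory-hpa-demo
  namespace: pop
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: demo-deployment   # hypothetical target
  minReplicas: 2
  maxReplicas: 4
  metrics:
  - type: Resource
    resource:
      name: memory
      target:
        type: Utilization
        averageUtilization: 60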
12.2 How HPA dynamic scaling works
HPA is likewise driven by a controller inside k8s. The controller loops over each HPA at a fixed interval (15s by default) and checks whether the monitored metrics have hit the scaling condition. Once a condition triggers, the controller asks k8s to modify the replica-count field in the scale subobject of the scaling target (StatefulSet, ReplicationController, ReplicaSet). k8s honors the request, updates the scale structure, and refreshes the target's Pod count; the modified target then adds or removes Pods through the list/watch mechanism, achieving dynamic scaling.
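The replica target follows the standard formula from the upstream documentation:

desiredReplicas = ceil[ currentReplicas * ( currentMetricValue / desiredMetricValue ) ]

For example, with a 50% CPU target and a measured average of 100%, 2 replicas become ceil(2 * 100 / 50) = 4.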
kubectl autoscale automatically controls the number of Pods running in the k8s cluster (horizontal autoscaling); the Pod range and trigger conditions must be set in advance.
k8s added the HPA (Horizontal Pod Autoscaler) controller in version 1.1 to automatically scale Pods up and down based on Pod resource (CPU/memory) utilization. Early versions could only use CPU utilization collected by the Heapster component as the trigger, but starting with k8s 1.11 data collection (Pod CPU/memory) is handled by Metrics Server; the collected data is exposed through the Aggregated API, e.g. metrics.k8s.io, custom.metrics.k8s.io, and external.metrics.k8s.io, and is then available for the HPA controller to query, so Pods can be scaled on a chosen resource-utilization target.
The controller manager (kube-controller-manager) queries metric resource usage every 15s by default (configurable via --horizontal-pod-autoscaler-sync-period).
kube-controller-manager supports the following three metric types:
-> predefined metrics (e.g. Pod CPU), computed as a utilization ratio
-> custom Pod metrics, computed as raw values
-> custom object metrics
Two metric query paths are supported:
-> Heapster
-> a custom REST API
Multiple metrics are supported, i.e. Pod metric data can be gathered through multiple collection methods.
An operator creates an HPA controller through the master API; the HPA then scales the deployment automatically, managing the Pod count via kube-controller-manager.
When a deployment configured with an HPA finds Pod resource utilization above the configured target, Pods are created by calling the kube-controller-manager service until utilization drops below the target; the HPA obtains the resource utilization of the Pods inside the deployment it manages through the metrics API.
Workflow: Metrics Server exposes the data it collects through the API server; the HPA fetches the metric data of the deployment's Pods via the master API and compares it with its predefined target. If utilization exceeds that target, new Pods are created through kube-controller-manager until utilization falls below the value the HPA defines. kube-controller-manager queries metric resource usage every 15s by default.
12.3 Deploying Metrics Server
root@k8s-ansible-client:~/yaml/20211016/hpa# cat metrics-server-v0.4.4.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        image: harbor.openscp.com/base/metrics-server:v0.4.4
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 4443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          periodSeconds: 10
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100
Apply the YAML file
root@k8s-ansible-client:~/yaml/20211016/hpa# kubectl apply -f metrics-server-v0.4.4.yaml
serviceaccount/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
service/metrics-server created
deployment.apps/metrics-server created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
root@k8s-ansible-client:~/yaml/20211016/hpa# kubectl top node
W1030 22:13:22.276731 170084 top_node.go:119] Using json format to get metrics. Next release will switch to protocol-buffers, switch early by passing --use-protocol-buffers flag
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
192.168.20.147 229m 5% 1716Mi 53%
192.168.20.189 137m 6% 1117Mi 86%
192.168.20.201 251m 12% 1275Mi 99%
192.168.20.236 243m 6% 1555Mi 48%
192.168.20.249 309m 15% 1176Mi 91%
192.168.20.253 275m 6% 1580Mi 48%
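Once the APIService is ready, Pod-level metrics are available as well (standard command; output omitted here):

kubectl top pod -n pop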
12.4 Autoscaling Pod count with HPA
Command-line mode
kubectl autoscale deployment/nginx-deployment --min=2 --max=5 --cpu-percent=50 -n default
#Autoscale the named deployment: when Pod CPU utilization reaches 50%, grow to at most 5 Pods and shrink to no fewer than 2
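The HPA created this way is named after the deployment; its current/target utilization and replica count can be inspected with standard commands:

kubectl get hpa -n default
kubectl describe hpa nginx-deployment -n default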
YAML file
Keep the HPA's YAML file separate from the workload's YAML file.
root@k8s-ansible-client:~/yaml/20211016/hpa# cat hpa.yaml
apiVersion: autoscaling/v1 #API version
kind: HorizontalPodAutoscaler #object type
metadata: #object metadata
  namespace: pop #namespace the HPA is created in
  name: pop-tomcat-app1-podautoscaler #HPA object name
  labels:
    app: pop-tomcat-app1 #custom label
    version: v2beta1 #custom version label
spec: #object spec
  scaleTargetRef: #target of the horizontal scaling: Deployment, ReplicationController/ReplicaSet
    apiVersion: apps/v1 #API version of the target object
    #apiVersion: extensions/v1beta1
    kind: Deployment #target object type is Deployment
    name: pop-tomcat-app1-deployment #name of the target deployment
  minReplicas: 2 #minimum Pod count
  maxReplicas: 4 #maximum Pod count
  targetCPUUtilizationPercentage: 60 #target CPU utilization
Apply the YAML. The deployment starts with a single Pod by default; after the HPA is applied it is scaled up to two Pods (minReplicas). CPU-utilization-driven scaling is not exercised here.
root@k8s-ansible-client:~/yaml/20211010/03/tomcat-app1# kubectl get pods,deploy -n pop
NAME READY STATUS RESTARTS AGE
pod/deploy-devops-redis-d9fd6594c-fvmmg 1/1 Running 0 9d
pod/mysql-0 2/2 Running 0 8d
pod/mysql-1 2/2 Running 0 8d
pod/mysql-2 2/2 Running 1 (8d ago) 8d
pod/pop-tomcat-app1-deployment-54bb9d8f8c-jblz6 1/1 Running 0 4h20m
pod/pop-tomcat-app2-deployment-5676bf7c9-2sc7g 1/1 Running 0 4h21m
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/deploy-devops-redis 1/1 1 1 9d
deployment.apps/pop-tomcat-app1-deployment 1/1 1 1 4h20m
deployment.apps/pop-tomcat-app2-deployment 1/1 1 1 4h21m
root@k8s-ansible-client:~/yaml/20211016/hpa# kubectl apply -f hpa.yaml
horizontalpodautoscaler.autoscaling/pop-tomcat-app1-podautoscaler created
root@k8s-ansible-client:~/yaml/20211016/hpa# kubectl get pods,deploy -n pop
NAME READY STATUS RESTARTS AGE
pod/deploy-devops-redis-d9fd6594c-fvmmg 1/1 Running 0 9d
pod/mysql-0 2/2 Running 0 8d
pod/mysql-1 2/2 Running 0 8d
pod/mysql-2 2/2 Running 1 (8d ago) 8d
pod/pop-tomcat-app1-deployment-54bb9d8f8c-jblz6 1/1 Running 0 4h23m
pod/pop-tomcat-app1-deployment-54bb9d8f8c-psrr4 1/1 Running 0 7s
pod/pop-tomcat-app2-deployment-5676bf7c9-2sc7g 1/1 Running 0 4h24m
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/deploy-devops-redis 1/1 1 1 9d
deployment.apps/pop-tomcat-app1-deployment 2/2 2 2 4h23m
deployment.apps/pop-tomcat-app2-deployment 1/1 1 1 4h24m
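Since the CPU-driven scale-up is not exercised above, a minimal sketch of how it could be done (assumes the target Pods declare CPU requests, without which utilization cannot be computed, and that pop-tomcat-app1-service resolves in-cluster; the busybox image and request loop are illustrative only):

kubectl get hpa -n pop -w   # watch TARGETS and REPLICAS while the load runs
kubectl run load-gen -n pop --rm -it --restart=Never --image=busybox -- /bin/sh -c 'while true; do wget -q -O- http://pop-tomcat-app1-service/; done'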