6.1. Best use case for K8s: microservice architecture
6.2. K8s core components and architecture
6.3. Preparation before installing and deploying K8s
The lab setup uses three virtual machines:
master 192.168.18.121 etcd/apiserver/controller-manager/scheduler/kubelet/kube-proxy
node1 192.168.18.122 kubelet/kube-proxy/docker
node2 192.168.18.123 kubelet/kube-proxy/docker
Baseline tuning of the virtual machines
#Disable SELinux (see the sketch further below)
#Stop and disable the firewalld service (see the sketch further below)
#Stop and disable NetworkManager.service
[root@master ~]# systemctl stop NetworkManager.service
[root@master ~]# systemctl disable NetworkManager.service
Removed symlink /etc/systemd/system/multi-user.target.wants/NetworkManager.service.
Removed symlink /etc/systemd/system/dbus-org.freedesktop.nm-dispatcher.service.
Removed symlink /etc/systemd/system/network-online.target.wants/NetworkManager-wait-online.service.
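The SELinux and firewalld items from the checklist above can be handled as follows (a minimal sketch, to be run on all three machines; the SELinux config change only takes full effect after a reboot):
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
systemctl stop firewalld.service
systemctl disable firewalld.service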
#Configure the Aliyun yum repository (one possible way is sketched below)
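A common way to switch to the Aliyun mirror (an assumption on my part, not shown in the original session; the URL is Aliyun's published CentOS 7 repo file):
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
yum clean all && yum makecache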
#Install some utilities
[root@master ~]# yum install bash-completion.noarch -y
[root@master ~]# yum install -y net-tools vim lrzsz wget tree screen lsof tcpdump
#Stop and disable the postfix.service
[root@master ~]# systemctl stop postfix.service
[root@master ~]# systemctl disable postfix.service
Removed symlink /etc/systemd/system/multi-user.target.wants/postfix.service.
#Configure /etc/hosts name resolution (required on every machine)
[root@master ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.18.121 master
192.168.18.122 node1
192.168.18.123 node2
#Push the file to the other nodes
[root@master ~]# scp -rp /etc/hosts 192.168.18.122:/etc/hosts
The authenticity of host '192.168.18.122 (192.168.18.122)' can't be established.
ECDSA key fingerprint is SHA256:+BtfrhBtnaZlfOcA+jp7GC9MN32UwcX9l9qMSpa25uw.
ECDSA key fingerprint is MD5:e7:19:3d:34:57:53:e4:5b:88:0f:cb:1f:d1:81:b8:9d.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.18.122' (ECDSA) to the list of known hosts.
root@192.168.18.122's password:
hosts
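The same hosts file also has to reach node2. Setting up key-based SSH first avoids the password prompt each time (a sketch, assuming the default key path and root logins):
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
ssh-copy-id root@192.168.18.122
ssh-copy-id root@192.168.18.123
scp -rp /etc/hosts 192.168.18.123:/etc/hosts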
6.4. Installing services on the master host
6.4.1. Installing the etcd service
[root@master ~]# yum install -y etcd
#Edit the etcd configuration file
[root@master ~]# vim /etc/etcd/etcd.conf
6 ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
21 ETCD_ADVERTISE_CLIENT_URLS="http://192.168.18.121:2379"
#Check the effective etcd configuration
[root@master ~]# grep -Ev "^$|^#" /etc/etcd/etcd.conf
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_NAME="default"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.18.121:2379"
#Start etcd and enable it at boot
[root@master ~]# systemctl start etcd.service
[root@master ~]# systemctl enable etcd.service
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /usr/lib/systemd/system/etcd.service.
#Verify that ports 2379 and 2380 are listening
#2379 serves external clients; 2380 is used for communication inside an etcd cluster
[root@master ~]# netstat -lntup
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 127.0.0.1:2380 0.0.0.0:* LISTEN 1864/etcd
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 773/sshd
tcp6 0 0 :::2379 :::* LISTEN 1864/etcd
tcp6 0 0 :::22 :::* LISTEN 773/sshd
[root@master ~]#
Test
[root@master ~]# etcdctl set testdir/testkey0 0
0
[root@master ~]# etcdctl get testdir/testkey0
0
[root@master ~]# etcdctl -C http://192.168.18.121:2379 cluster-health
member 8e9e05c52164694d is healthy: got healthy result from http://192.168.18.121:2379
cluster is healthy
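The test key can be cleaned up afterwards if desired (etcdctl v2 syntax, an optional step not in the original session):
etcdctl rm testdir/testkey0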
6.4.2. Installing the master components
#Search the yum repos for the kubernetes-master package
[root@master ~]# yum search kubernetes-master
Loaded plugins: fastestmirror
Repository base is listed more than once in the configuration
Repository updates is listed more than once in the configuration
Repository extras is listed more than once in the configuration
Repository centosplus is listed more than once in the configuration
Repository contrib is listed more than once in the configuration
Loading mirror speeds from cached hostfile
* base: mirrors.aliyun.com
* extras: mirrors.aliyun.com
* updates: mirrors.aliyun.com
=================================================== N/S matched: kubernetes-master ====================================================
kubernetes-master.x86_64 : Kubernetes services for master host
Name and summary matches only, use "search all" for everything.
#Install it
[root@master ~]# yum install -y kubernetes-master.x86_64
Configure the apiserver
[root@master ~]# vim /etc/kubernetes/apiserver
8 KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
#Binding to 127.0.0.1 allows only local access; binding to 0.0.0.0 allows access from any machine
10 # The port on the local server to listen on.
11 KUBE_API_PORT="--port=8080"
#Listen on port 8080
13 # Port minions listen on
14 KUBELET_PORT="--kubelet-port=10250"
#Use minion (kubelet) port 10250
16 # Comma separated list of nodes in the etcd cluster
17 KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.18.121:2379"
#Review the apiserver configuration
[root@master ~]# grep -Ev "^#|^$" /etc/kubernetes/apiserver
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBELET_PORT="--kubelet-port=10250"
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.18.121:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
KUBE_API_ARGS=""
Configure the controller-manager and scheduler (they share one file)
[root@master ~]# vim /etc/kubernetes/config
21 # How the controller-manager, scheduler, and proxy find the apiserver
22 KUBE_MASTER="--master=http://192.168.18.121:8080"
[root@master ~]# grep -Ev "^#|^$" /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.18.121:8080"
With the configuration done, start the services and enable them at boot
[root@master ~]# systemctl start kube-apiserver.service
[root@master ~]# systemctl start kube-controller-manager.service
[root@master ~]# systemctl start kube-scheduler.service
[root@master ~]# systemctl enable kube-scheduler.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
[root@master ~]# systemctl enable kube-controller-manager.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
[root@master ~]# systemctl enable kube-apiserver.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
Check the health of the K8s components
[root@master ~]# kubectl get componentstatus
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
etcd-0 Healthy {"health":"true"}
controller-manager Healthy ok
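As an extra sanity check (not part of the original session), the insecure API port can be queried directly; /version is a standard kube-apiserver endpoint:
curl -s http://192.168.18.121:8080/version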
6.5. Installing the node components
[root@node1 ~]# yum install -y kubernetes-node.x86_64
6.5.1. Configuring the node components on the master
#/etc/kubernetes/config was already configured earlier and needs no changes
#/etc/kubernetes/kubelet
[root@master ~]# vim /etc/kubernetes/kubelet
4 # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
5 KUBELET_ADDRESS="--address=192.168.18.121"
6
7 # The port for the info server to serve on
8 KUBELET_PORT="--port=10250"
10 # You may leave this blank to use the actual hostname
11 KUBELET_HOSTNAME="--hostname-override=master"
13 # location of the api-server
14 KUBELET_API_SERVER="--api-servers=http://192.168.18.121:8080"
#Review the configuration
[root@master ~]# grep -Ev "^#|^$" /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=192.168.18.121"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=master"
KUBELET_API_SERVER="--api-servers=http://192.168.18.121:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""
#Start the services and enable them at boot; kubelet starts docker automatically
[root@master ~]# systemctl status docker
● docker.service - Docker Application Container Engine
Loaded: loaded (/usr/lib/systemd/system/docker.service; disabled; vendor preset: disabled)
Active: inactive (dead)
Docs: http://docs.docker.com
[root@master ~]# systemctl start kubelet.service
[root@master ~]# systemctl start kube-proxy.service
[root@master ~]# systemctl enable kubelet.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@master ~]# systemctl enable kube-proxy.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
[root@master ~]# systemctl status docker
● docker.service - Docker Application Container Engine
Loaded: loaded (/usr/lib/systemd/system/docker.service; disabled; vendor preset: disabled)
Active: active (running) since Thu 2022-09-15 00:58:28 CST; 55s ago
Docs: http://docs.docker.com
Main PID: 2734 (dockerd-current)
CGroup: /system.slice/docker.service
├─2734 /usr/bin/dockerd-current --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current --default-runtime=docke...
└─2740 /usr/bin/docker-containerd-current -l unix:///var/run/docker/libcontainerd/docker-containerd.sock --metrics-interv...
Sep 15 00:58:28 master dockerd-current[2734]: time="2022-09-15T00:58:28.241532749+08:00" level=warning msg="Docker could not e...ystem"
Sep 15 00:58:28 master dockerd-current[2734]: time="2022-09-15T00:58:28.280003502+08:00" level=info msg="Graph migration to co...conds"
Sep 15 00:58:28 master dockerd-current[2734]: time="2022-09-15T00:58:28.280865641+08:00" level=info msg="Loading containers: start."
Sep 15 00:58:28 master dockerd-current[2734]: time="2022-09-15T00:58:28.326211940+08:00" level=info msg="Firewalld running: false"
Sep 15 00:58:28 master dockerd-current[2734]: time="2022-09-15T00:58:28.414513143+08:00" level=info msg="Default bridge (docke...dress"
Sep 15 00:58:28 master dockerd-current[2734]: time="2022-09-15T00:58:28.454367659+08:00" level=info msg="Loading containers: done."
Sep 15 00:58:28 master dockerd-current[2734]: time="2022-09-15T00:58:28.472704447+08:00" level=info msg="Daemon has completed ...ation"
Sep 15 00:58:28 master dockerd-current[2734]: time="2022-09-15T00:58:28.472734973+08:00" level=info msg="Docker daemon" commit...1.13.1
Sep 15 00:58:28 master dockerd-current[2734]: time="2022-09-15T00:58:28.483495766+08:00" level=info msg="API listen on /var/ru....sock"
Sep 15 00:58:28 master systemd[1]: Started Docker Application Container Engine.
Hint: Some lines were ellipsized, use -l to show in full.
Verify that the node has registered itself with the master
[root@master ~]# kubectl get node
NAME STATUS AGE
master Ready 1m
Configure the remaining node machines
[root@node1 ~]# vim /etc/kubernetes/config
21 # How the controller-manager, scheduler, and proxy find the apiserver
22 KUBE_MASTER="--master=http://192.168.18.121:8080"
[root@node1 ~]# vim /etc/kubernetes/kubelet
4 # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
5 KUBELET_ADDRESS="--address=192.168.18.122"
7 # The port for the info server to serve on
8 KUBELET_PORT="--port=10250"
10 # You may leave this blank to use the actual hostname
11 KUBELET_HOSTNAME="--hostname-override=node1"
13 # location of the api-server
14 KUBELET_API_SERVER="--api-servers=http://192.168.18.121:8080"
[root@node2 ~]# grep -Ev "^$|^#" /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=192.168.18.123"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=node2"
KUBELET_API_SERVER="--api-servers=http://192.168.18.121:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""
#After changing the configuration, remember to start the services and enable them at boot (see the sketch below)
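On node1 and node2 that amounts to the following (a minimal sketch, same services as on the master):
systemctl start kubelet.service kube-proxy.service
systemctl enable kubelet.service kube-proxy.service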
Verify the nodes (on the master)
[root@master ~]# kubectl get node
NAME STATUS AGE
master Ready 10m
node1 Ready 2m
node2 Ready 27s
#Command to delete a node (node-name is only a placeholder here)
[root@master ~]# kubectl delete node node-name
6.6. Configuring the flannel network on all nodes
This enables communication between containers running on different nodes.
6.6.1. Configuring flannel on the master
[root@master ~]# yum install -y flannel
3 # etcd url location. Point this to the server where etcd runs
4 FLANNEL_ETCD_ENDPOINTS="http://192.168.18.121:2379"
6 # etcd config key. This is the configuration key that flannel queries
7 # For address range assignment
8 FLANNEL_ETCD_PREFIX="/atomic.io/network"
#Create the corresponding key in etcd
[root@master ~]# grep -Ev "^$|^#" /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://192.168.18.121:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
[root@master ~]# etcdctl set /atomic.io/network/config '{ "Network":"172.16.0.0/16" }'
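To confirm the key was written (etcdctl v2 syntax, an extra check not in the original session):
etcdctl get /atomic.io/network/config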
Restart the flannel and docker services
[root@master ~]# systemctl restart docker
[root@master ~]# systemctl start flanneld.service
[root@master ~]# systemctl enable flanneld.service
Created symlink from /etc/systemd/system/multi-user.target.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.
Created symlink from /etc/systemd/system/docker.service.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.
#The new flannel interface is used for communication between containers
[root@master ~]# ifconfig
docker0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 172.16.49.1 netmask 255.255.255.0 broadcast 0.0.0.0
ether 02:42:b3:03:1a:7b txqueuelen 0 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
flannel0: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST> mtu 1472
inet 172.16.49.0 netmask 255.255.0.0 destination 172.16.49.0
inet6 fe80::75dd:f199:d48d:e8c prefixlen 64 scopeid 0x20<link>
unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00 txqueuelen 500 (UNSPEC)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 3 bytes 144 (144.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@master ~]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: enp0s3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 08:00:27:f8:64:fc brd ff:ff:ff:ff:ff:ff
inet 192.168.18.121/24 brd 192.168.18.255 scope global enp0s3
valid_lft forever preferred_lft forever
inet6 fe80::a00:27ff:fef8:64fc/64 scope link
valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:b3:03:1a:7b brd ff:ff:ff:ff:ff:ff
inet 172.16.49.1/24 scope global docker0
valid_lft forever preferred_lft forever
4: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
link/none
inet 172.16.49.0/16 scope global flannel0
valid_lft forever preferred_lft forever
inet6 fe80::75dd:f199:d48d:e8c/64 scope link flags 800
valid_lft forever preferred_lft forever
6.6.2. Configuring flannel on the nodes
If the configuration is not changed to point at the master's etcd, flanneld hangs and cannot start:
[root@node1 ~]# tail -f /var/log/messages
Sep 15 11:26:09 node1 flanneld-start: E0915 11:26:09.441937 1447 network.go:102] failed to retrieve network config: client: etcd cluster is unavailable or misconfigured; error #0: dial tcp 127.0.0.1:2379: getsockopt: connection refused
Sep 15 11:26:10 node1 flanneld-start: E0915 11:26:10.443056 1447 network.go:102] failed to retrieve network config: client: etcd cluster is unavailable or misconfigured; error #0: dial tcp 127.0.0.1:2379: getsockopt: connection refused
Sep 15 11:26:11 node1 flanneld-start: E0915 11:26:11.443681 1447 network.go:102] failed to retrieve network config: client: etcd cluster is unavailable or misconfigured; error #0: dial tcp 127.0.0.1:2379: getsockopt: connection refused
Sep 15 11:26:12 node1 flanneld-start: E0915 11:26:12.444417 1447 network.go:102] failed to retrieve network config: client: etcd cluster is unavailable or misconfigured; error #0: dial tcp 127.0.0.1:2379: getsockopt: connection refused
Edit the configuration file and restart the services
[root@node1 ~]# vi /etc/sysconfig/flanneld
[root@node2 ~]# grep -Ev "^#|^$" /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://192.168.18.121:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
[root@node1 ~]# systemctl start flanneld
[root@node1 ~]# systemctl enable flanneld
Created symlink from /etc/systemd/system/multi-user.target.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.
Created symlink from /etc/systemd/system/docker.service.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.
[root@node1 ~]# systemctl restart docker
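Each host should now have been assigned its own flannel subnet; one quick way to check (an extra step, assuming flannel's default subnet file location):
cat /run/flannel/subnet.env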
Test container communication between different hosts
#Pull the busybox image
[root@master ~]# docker pull busybox
#Or load it from a compressed tar archive
[root@master ~]# docker load -i docker_busybox.tar.gz
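If a node has no internet access, a tarball like the one loaded above can be produced on a machine that does and copied over (a sketch; the filename matches the one used here, the destination path is an assumption):
docker save busybox | gzip > docker_busybox.tar.gz
scp docker_busybox.tar.gz 192.168.18.122:/root/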
#Start a container on each host and note its IP
#Container on master
[root@master ~]# docker run -it busybox
/ # ip add
5: eth0@if6: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1472 qdisc noqueue
link/ether 02:42:ac:10:31:02 brd ff:ff:ff:ff:ff:ff
inet 172.16.49.2/24 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::42:acff:fe10:3102/64 scope link
valid_lft forever preferred_lft forever
#Container on node1
[root@node1 ~]# docker run -it busybox
/ # ip add
5: eth0@if6: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1472 qdisc noqueue
link/ether 02:42:ac:10:5f:02 brd ff:ff:ff:ff:ff:ff
inet 172.16.95.2/24 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::42:acff:fe10:5f02/64 scope link
valid_lft forever preferred_lft forever
#Container on node2
[root@node2 ~]# docker run -it busybox
/ # ip add
5: eth0@if6: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1472 qdisc noqueue
link/ether 02:42:ac:10:13:02 brd ff:ff:ff:ff:ff:ff
inet 172.16.19.2/24 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::42:acff:fe10:1302/64 scope link
valid_lft forever preferred_lft forever
##############################
#From the container on master, ping the containers on the other nodes
/ # ping 172.16.95.2
PING 172.16.95.2 (172.16.95.2): 56 data bytes
64 bytes from 172.16.95.2: seq=0 ttl=60 time=2.165 ms
64 bytes from 172.16.95.2: seq=1 ttl=60 time=0.607 ms
^C
--- 172.16.95.2 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.607/1.386/2.165 ms
/ # ping 172.16.19.2
PING 172.16.19.2 (172.16.19.2): 56 data bytes
64 bytes from 172.16.19.2: seq=0 ttl=60 time=1.187 ms
64 bytes from 172.16.19.2: seq=1 ttl=60 time=1.475 ms
64 bytes from 172.16.19.2: seq=2 ttl=60 time=1.321 ms
^C
--- 172.16.19.2 ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 1.187/1.327/1.475 ms
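If the pings fail, one quick thing to confirm (an extra suggestion, not in the original notes) is that each host has routes toward the other flannel subnets; the 172.16 prefix matches the network written to etcd above:
ip route | grep 172.16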
If the containers cannot communicate, check the iptables rules
[root@master ~]# iptables -L -n
Chain INPUT (policy ACCEPT)
target prot opt source destination
KUBE-FIREWALL all -- 0.0.0.0/0 0.0.0.0/0
#The FORWARD chain policy below must be ACCEPT
Chain FORWARD (policy ACCEPT)
target prot opt source destination
DOCKER-ISOLATION all -- 0.0.0.0/0 0.0.0.0/0
DOCKER all -- 0.0.0.0/0 0.0.0.0/0
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 ctstate RELATED,ESTABLISHED
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-FIREWALL all -- 0.0.0.0/0 0.0.0.0/0
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain DOCKER (1 references)
target prot opt source destination
Chain DOCKER-ISOLATION (1 references)
target prot opt source destination
RETURN all -- 0.0.0.0/0 0.0.0.0/0
Chain KUBE-FIREWALL (2 references)
target prot opt source destination
DROP all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes firewall for dropping marked packets */ mark match 0x8000/0x8000
Chain KUBE-SERVICES (1 references)
target prot opt source destination
#Command to fix it
[root@master ~]# iptables -P FORWARD ACCEPT
#Add this iptables setting to the docker unit file so it persists across restarts
[root@master ~]# vim /usr/lib/systemd/system/docker.service
18 ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT
#Reload the systemd configuration, then restart the docker service
[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl restart docker.service
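To confirm the policy stays ACCEPT after the restart (an extra check, not in the original session):
iptables -L FORWARD -n | head -1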