[root@kubem2 ~]# kubectl get nodes
Unable to connect to the server: EOF
This error appears because kubelet was not enabled to start at boot.
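A quick way to check and fix this on each master once kubelet is installed (a small sketch, not part of the original walk-through):
systemctl is-enabled kubelet     # should print "enabled"
systemctl enable kubelet && systemctl start kubelet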
1陈哑、配置主機(jī)hosts
# append, so the existing localhost entries are kept
cat >> /etc/hosts <<EOF
192.168.0.191 kubem1.godufo.com
192.168.0.192 kubem2.godufo.com
192.168.0.193 kubem3.godufo.com
EOF
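kubeadm registers each node under its host name, so set the matching host name on every machine as well (an extra step assumed here, not in the original):
hostnamectl set-hostname kubem1.godufo.com   # on 192.168.0.191
hostnamectl set-hostname kubem2.godufo.com   # on 192.168.0.192
hostnamectl set-hostname kubem3.godufo.com   # on 192.168.0.193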
2容诬、關(guān)閉Selinux/firewalld
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
3. Disable swap
swapoff -a
yes | cp /etc/fstab /etc/fstab_bak
cat /etc/fstab_bak |grep -v swap > /etc/fstab
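To confirm swap is really off (an optional check):
swapon -s                  # should print nothing
free -h | grep -i swap     # swap total should be 0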
4碟渺、設(shè)置網(wǎng)橋包經(jīng)IPTables,core文件生成路徑
echo """
vm.swappiness = 0
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
""" > /etc/sysctl.conf
sysctl -p
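Verify the settings took effect (optional; to keep br_netfilter loaded across reboots it can also be listed under /etc/modules-load.d/):
lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward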
5突诬、同步時(shí)間
yum install -y ntpdate
ntpdate -u ntp.api.bz
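ntpdate only syncs once; to keep the clocks aligned, a periodic cron entry like this can help (an optional sketch, any reachable NTP server works):
(crontab -l 2>/dev/null; echo '*/30 * * * * /usr/sbin/ntpdate -u ntp.api.bz >/dev/null 2>&1') | crontab -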
6苫拍、升級內(nèi)核是否大于4.14,否則請調(diào)整默認(rèn)啟動參數(shù)
grub2-editenv list
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
yum --enablerepo=elrepo-kernel install kernel-ml-devel kernel-ml -y
sed -i 's/GRUB_DEFAULT=saved/GRUB_DEFAULT=0/' /etc/default/grub
grub2-mkconfig -o /boot/grub2/grub.cfg
init 6
7旺隙、確認(rèn)內(nèi)核版本后绒极,開啟IPVS
uname -r
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
  /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
  if [ \$? -eq 0 ]; then
    /sbin/modprobe \${kernel_module}
  fi
done
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
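kube-proxy in IPVS mode also needs the ipset userland tool (ipvsadm is handy for debugging); installing them here is an extra step not in the original:
yum install -y ipset ipvsadm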
8. Install Docker
yum install -y yum-utils device-mapper-persistent-data lvm2
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
yum makecache fast
yum install -y docker-ce-18.09.7-3.el7.x86_64
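The package does not start the daemon on its own, so enable and start it before continuing:
systemctl enable docker
systemctl start docker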
9蔬捷、配置docker加速器
cat > /etc/docker/daemon.json <<EOF
{
"registry-mirrors": ["https://ftoplrr2.mirror.aliyuncs.com"]
}
EOF
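Docker only reads daemon.json at startup, so restart it and confirm the mirror is picked up:
systemctl restart docker
docker info | grep -A1 'Registry Mirrors'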
10集峦、所有主機(jī)配置kubenetes倉庫
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
enabled=1
EOF
# Install the pinned kubelet/kubectl/kubeadm versions on the masters
yum install -y kubelet-1.14.0-0.x86_64 kubectl-1.14.0-0.x86_64 kubeadm-1.14.0-0.x86_64
Enable the components to start at boot, especially kubelet (it will keep restarting until kubeadm init runs, which is expected):
systemctl start kubelet
systemctl enable kubelet
# Ignore swap when starting kubelet
echo 'KUBELET_EXTRA_ARGS="--fail-swap-on=false"' > /etc/sysconfig/kubelet
二抠刺、配置haproxy代理和keepalived
1. Pull the haproxy image
docker pull haproxy:1.7.8-alpine
mkdir /etc/haproxy
cat >/etc/haproxy/haproxy.cfg<<EOF
global
    log 127.0.0.1 local0 err
    maxconn 50000
    uid 99
    gid 99
    #daemon
    nbproc 1
    pidfile haproxy.pid

defaults
    mode http
    log 127.0.0.1 local0 err
    maxconn 50000
    retries 3
    timeout connect 5s
    timeout client 30s
    timeout server 30s
    timeout check 2s

listen admin_stats
    mode http
    bind 0.0.0.0:1080
    log 127.0.0.1 local0 err
    stats refresh 30s
    stats uri /haproxy-status
    stats realm Haproxy\ Statistics
    stats auth will:will
    stats hide-version
    stats admin if TRUE

frontend k8s-https
    bind 0.0.0.0:8443
    mode tcp
    #maxconn 50000
    default_backend k8s-https

backend k8s-https
    mode tcp
    balance roundrobin
    server kubem1.godufo.com 192.168.0.191:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
    server kubem2.godufo.com 192.168.0.192:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
    server kubem3.godufo.com 192.168.0.193:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
EOF
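Before starting the container you can let haproxy itself validate the file (same image; -c only checks the configuration and exits):
docker run --rm -v /etc/haproxy:/usr/local/etc/haproxy:ro haproxy:1.7.8-alpine haproxy -c -f /usr/local/etc/haproxy/haproxy.cfg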
2摘昌、啟動haproxy
docker run -d --name my-haproxy \
-v /etc/haproxy:/usr/local/etc/haproxy:ro \
-p 8443:8443 \
-p 1080:1080 \
--restart always \
haproxy:1.7.8-alpine
3速妖、查看日志
docker logs my-haproxy
4. Check the stats page in a browser (credentials will:will)
http://192.168.0.191:1080/haproxy-status
http://192.168.0.192:1080/haproxy-status
http://192.168.0.193:1080/haproxy-status
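The same page can also be checked from the command line on a headless host:
curl -s -u will:will http://192.168.0.191:1080/haproxy-status | head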
三聪黎、部署keepalived
1罕容、拉取keepalived鏡像
docker pull osixia/keepalived:1.4.4
2. Start keepalived
# Note: ens33 is the NIC on the 192.168.0.0/24 network used in this lab; adjust KEEPALIVED_INTERFACE to match your environment
docker run --net=host --cap-add=NET_ADMIN \
-e KEEPALIVED_INTERFACE=ens33 \
-e KEEPALIVED_VIRTUAL_IPS="#PYTHON2BASH:['192.168.0.199']" \
-e KEEPALIVED_UNICAST_PEERS="#PYTHON2BASH:['192.168.0.191','192.168.0.192','192.168.0.193']" \
-e KEEPALIVED_PASSWORD=hello \
--name k8s-keepalived \
--restart always \
-d osixia/keepalived:1.4.4
3稿饰、查看日志
會看到兩個(gè)成為backup 一個(gè)成為master
docker logs k8s-keepalived
此時(shí)會配置 192.168.105.99 到其中一臺機(jī)器
ping測試
ping -c4 192.168.105.99
如果失敗后清理后锦秒,重新實(shí)驗(yàn)
docker rm -f k8s-keepalived
ip a del 192.168.0.199/32 dev ens33
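To see which node currently holds the VIP, run this on each master:
ip addr show ens33 | grep 192.168.0.199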
IV. Deploy the kubeadm cluster
1喉镰、 修改初始化配置
注意需要修改advertiseAddress旅择、controlPlaneEndpoint、imageRepository侣姆、serviceSubnet
其中advertiseAddress為master01的ip生真,controlPlaneEndpoint為VIP+8443端口沉噩,imageRepository修改為阿里的源,serviceSubnet找網(wǎng)絡(luò)組要一段沒人使用的IP段
cat > kubeadm-init.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta1
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.0.191
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: kubem1.godufo.com
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta1
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.0.199:8443"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: "10.96.0.0/16"
scheduler: {}
EOF
2柱蟀、預(yù)下載鏡像
[root@kubem1 ~]$ kubeadm config images pull --config kubeadm-init.yaml
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.14.0
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.14.0
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.14.0
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.14.0
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.10
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.3.1
3川蒙、初始化
[root@kubem1 ~]# kubeadm init --config kubeadm-init.yaml
[init] Using Kubernetes version: v1.14.0
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubem1.godufo.com kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.209.0.1 192.168.0.191 192.168.0.199]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [kubem1.godufo.com localhost] and IPs [192.168.0.191 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [kubem1.godufo.com localhost] and IPs [192.168.0.191 127.0.0.1 ::1]
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 16.502487 seconds
[upload-config] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.14" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --experimental-upload-certs
[mark-control-plane] Marking the node kubem1.godufo.com as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node kubem1.godufo.com as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join 192.168.0.199:8443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:886bebaf75920eb5b66cf2b7b37807108bf76adbb368ad68fd889e7b67f78b8d \
--experimental-control-plane
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.0.199:8443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:886bebaf75920eb5b66cf2b7b37807108bf76adbb368ad68fd889e7b67f78b8d
kubeadm init performs the following main steps:
[init]: initialize using the specified version.
[preflight]: run pre-init checks and pull the required Docker images.
[kubelet-start]: generate the kubelet configuration file /var/lib/kubelet/config.yaml; without this file kubelet cannot start, which is why kubelet kept failing before init.
[certificates]: generate the certificates used by Kubernetes, stored under /etc/kubernetes/pki.
[kubeconfig]: generate the kubeconfig files under /etc/kubernetes; the components use them to talk to each other.
[control-plane]: install the master components from the YAML files under /etc/kubernetes/manifests.
[etcd]: install etcd from /etc/kubernetes/manifests/etcd.yaml.
[wait-control-plane]: wait for the control-plane components started as static Pods to come up.
[apiclient]: check the health of the master components.
[uploadconfig]: upload the kubeadm configuration to the cluster.
[kubelet]: configure the kubelets via a ConfigMap.
[patchnode]: record CNI information on the Node via annotations.
[mark-control-plane]: label the current node as master and taint it unschedulable, so Pods are not scheduled on master nodes by default.
[bootstrap-token]: generate the bootstrap token; note it down, it is needed later when joining nodes with kubeadm join.
[addons]: install the CoreDNS and kube-proxy add-ons.
4. Prepare the kubeconfig file for kubectl
By default kubectl looks for a config file under .kube in the home directory of the user running it, so copy the admin.conf generated in the [kubeconfig] step of init to .kube/config.
[root@master01 ~]# mkdir -p $HOME/.kube
[root@master01 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
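A quick sanity check with the new kubeconfig (optional):
kubectl get cs
kubectl get nodes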
5. Deploy the other masters (run the following on master01, i.e. kubem1.godufo.com)
Distribute the SSH public key:
ssh-keygen
ssh-copy-id 192.168.0.192
ssh-copy-id 192.168.0.193
Copy the certificate files from kubem1.godufo.com to the kubem2.godufo.com and kubem3.godufo.com nodes:
USER=root
CONTROL_PLANE_IPS="kubem2.godufo.com kubem3.godufo.com"
for host in ${CONTROL_PLANE_IPS}; do
  ssh "${USER}"@$host "mkdir -p /etc/kubernetes/pki/etcd"
  scp /etc/kubernetes/pki/ca.* "${USER}"@$host:/etc/kubernetes/pki/
  scp /etc/kubernetes/pki/sa.* "${USER}"@$host:/etc/kubernetes/pki/
  scp /etc/kubernetes/pki/front-proxy-ca.* "${USER}"@$host:/etc/kubernetes/pki/
  scp /etc/kubernetes/pki/etcd/ca.* "${USER}"@$host:/etc/kubernetes/pki/etcd/
  scp /etc/kubernetes/admin.conf "${USER}"@$host:/etc/kubernetes/
done
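A quick check that the files landed on the other masters (not in the original):
ssh kubem2.godufo.com 'ls /etc/kubernetes/pki /etc/kubernetes/pki/etcd'
ssh kubem3.godufo.com 'ls /etc/kubernetes/pki /etc/kubernetes/pki/etcd'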
6谓松、在另外兩臺主上執(zhí)行,注意注意--experimental-control-plane參數(shù)
kubeadm join 192.168.0.199:8443 --token abcdef.0123456789abcdef \ --discovery-token-ca-cert-hash sha256:886bebaf75920eb5b66cf2b7b37807108bf76adbb368ad68fd889e7b67f78b8d \ --experimental-control-plane
(In the first attempt below the trailing backslash was lost, so --experimental-control-plane was not part of the command; kubeadm treated it as a worker join and the pre-copied ca.crt triggered a preflight error.)
[root@kubem2 ~]# kubeadm join 192.168.0.199:8443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:886bebaf75920eb5b66cf2b7b37807108bf76adbb368ad68fd889e7b67f78b8d
--experimental-control-plane
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR FileAvailable--etc-kubernetes-pki-ca.crt]: /etc/kubernetes/pki/ca.crt already exists
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
[root@kubem2 ~]# kubeadm join 192.168.0.199:8443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:886bebaf75920eb5b66cf2b7b37807108bf76adbb368ad68fd889e7b67f78b8d ^C
[root@kubem2 ~]# kubeadm join 192.168.0.199:8443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:886bebaf75920eb5b66cf2b7b37807108bf76adbb368ad68fd889e7b67f78b8d \
> --experimental-control-plane
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[preflight] Running pre-flight checks before initializing the new control plane instance
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubem2.godufo.com kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.209.0.1 192.168.0.192 192.168.0.199]
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [kubem2.godufo.com localhost] and IPs [192.168.0.192 127.0.0.1 ::1]
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [kubem2.godufo.com localhost] and IPs [192.168.0.192 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[certs] Using the existing "sa" key
[kubeconfig] Generating kubeconfig files
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/admin.conf"
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[check-etcd] Checking that the etcd cluster is healthy
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.14" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[etcd] Announced new etcd member joining to the existing etcd cluster
[etcd] Wrote Static Pod manifest for a local etcd member to "/etc/kubernetes/manifests/etcd.yaml"
[etcd] Waiting for the new etcd member to join the cluster. This can take up to 40s
[upload-config] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[mark-control-plane] Marking the node kubem2.godufo.com as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node kubem2.godufo.com as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
This node has joined the cluster and a new control plane instance was created:
* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane (master) label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
* A new etcd member was added to the local/stacked etcd cluster.
To start administering your cluster from this node, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Run 'kubectl get nodes' to see this node join the cluster.
Note: tokens have a limited lifetime. If the old token has expired, create a new one with kubeadm token create --print-join-command.
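For reference, listing the existing tokens and printing a fresh join command (standard kubeadm subcommands):
kubeadm token list
kubeadm token create --print-join-command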
After the other two masters have joined, run the following on each of them:
mkdir -p $HOME/.kube && cp -i /etc/kubernetes/admin.conf $HOME/.kube/config && chown $(id -u):$(id -g) $HOME/.kube/config
7逊脯、查看集群
[root@kubem2 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kubem1.godufo.com NotReady master 58m v1.14.0
kubem2.godufo.com NotReady master 49m v1.14.0
kubem3.godufo.com NotReady master 47m v1.14.0
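The nodes stay NotReady until a pod network add-on is deployed. With podSubnet 10.244.0.0/16 the usual choice is flannel; a typical command at the time of writing (run once on any master; the URL may have moved since) is:
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml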