VM: VMware Workstation Pro 15
Linux release: CentOS Linux release 7.8.2003 (Core)
Docker version: 19.03.11
kubectl version: v1.18.2
OS: CentOS 7.6 or later works. Per-host spec: 4 CPU cores, 4 GB RAM, one 50 GB disk
================================== Basic configuration, run on ALL hosts ============================================
systemctl stop firewalld && systemctl disable firewalld
yum -y install wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack ntpdate
yum install iptables-services -y
service iptables stop && systemctl disable iptables
# Time synchronization
ntpdate cn.pool.ntp.org
1) crontab -e
0 */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org
2) Restart the crond service:
service crond restart
Disable SELinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/sysconfig/selinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0   # disable SELinux for the current session
Disable swap
swapoff -a     # disable swap for the current boot
# To disable swap permanently, comment out the swap line in /etc/fstab:
sed -i 's/.*swap.*/#&/' /etc/fstab
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
# Set the hostname (run the matching command on the corresponding host)
hostnamectl set-hostname master1
hostnamectl set-hostname master2
hostnamectl set-hostname master3
hostnamectl set-hostname node1
cat >> /etc/hosts << EOF
192.168.180.135 master1
192.168.180.136 master2
192.168.180.137 master3
192.168.180.138 node1
EOF
reboot -f
# Configure passwordless SSH from master1 to node1, and from master1 to master2 and master3
Run on master1:
ssh-keygen -t rsa
# Press Enter through all the prompts
ssh-copy-id -i .ssh/id_rsa.pub root@master2
# Answer yes when prompted, then enter the root password of master2
ssh-copy-id -i .ssh/id_rsa.pub root@master3
# Answer yes when prompted, then enter the root password of master3
ssh-copy-id -i .ssh/id_rsa.pub root@node1
# Answer yes when prompted, then enter the root password of node1
echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
echo 1 > /proc/sys/net/bridge/bridge-nf-call-ip6tables
echo """
vm.swappiness = 0
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
""" > /etc/sysctl.conf
sysctl -p
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
  /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
  if [ \$? -eq 0 ]; then
    /sbin/modprobe \${kernel_module}
  fi
done
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
1. Install Docker on all machines
=========================================== Install Docker ===============================================
sudo yum update
curl -sSL https://get.docker.com/ | sh
sudo service docker start
docker run hello-world
docker version
yum -y install epel-release
yum -y install python-pip
sudo pip install --upgrade pip
pip -V
sudo pip install docker-compose
If the following error appears during installation:
Cannot uninstall 'requests'. It is a distutils installed project and thus we cannot accurately determine which files belong to it, which would lead to only a partial uninstall.
Workaround: pip install docker-compose --ignore-installed requests
sudo docker-compose --version
######## Workaround for subprocess32 build errors
yum -y install gcc gcc-c++
yum install python-devel
sudo pip install subprocess32 -U
sudo pip install docker-compose
2. Install the k8s components (kubelet kubeadm kubectl) on all hosts
=========================================== Install k8s ===============================================
2.1 Run the following in order
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
Pin a specific version (this installation uses this method):
yum install -y kubelet-1.18.2 kubectl-1.18.2 kubeadm-1.18.2
Or install the latest version instead:
yum makecache fast
yum install -y kubelet kubeadm kubectl
# If the GPG key check fails, install with the check skipped:
yum install kubectl-1.18.2-0.x86_64 --nogpgcheck
yum install kubelet-1.18.2-0.x86_64 --nogpgcheck
yum install kubeadm-1.18.2-0.x86_64 --nogpgcheck
systemctl enable kubelet.service
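To confirm the pinned versions actually landed, a quick check (all three should report v1.18.2):
kubeadm version -o short
kubectl version --client --short
rpm -q kubelet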
2.2 Create a directory (mkdir kubernetes) and upload the "all" image bundle to it (images pulled from the official registry)
Load the images with the following commands:
docker load -i 1-18-kube-apiserver.tar.gz
docker load -i 1-18-kube-scheduler.tar.gz
docker load -i 1-18-kube-controller-manager.tar.gz
docker load -i 1-18-pause.tar.gz
docker load -i 1-18-cordns.tar.gz
docker load -i 1-18-etcd.tar.gz
docker load -i 1-18-kube-proxy.tar.gz
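To confirm the images were imported, list them; the exact repository names depend on how the tarballs were built, but the official 1.18 builds are tagged under k8s.gcr.io:
docker images | grep -E 'kube-|etcd|coredns|pause'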
2.3 Deploy keepalived+LVS to make the master nodes highly available (HA for the apiserver)
2.3.1 Deploy keepalived+LVS; run on each master node:
yum install -y socat keepalived ipvsadm conntrack
2.3.2 Edit the configuration
The edited keepalived.conf on <master1>:
global_defs {
   router_id LVS_DEVEL
}
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens33
    virtual_router_id 80
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        192.168.180.200
    }
}
virtual_server 192.168.180.200 6443 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    real_server 192.168.180.135 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.180.136 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.180.137 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
The edited keepalived.conf on <master2>:
global_defs {
   router_id LVS_DEVEL
}
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens33
    virtual_router_id 80
    priority 50
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        192.168.180.200
    }
}
virtual_server 192.168.180.200 6443 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    real_server 192.168.180.135 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.180.136 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.180.137 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
The edited keepalived.conf on <master3>:
global_defs {
   router_id LVS_DEVEL
}
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens33
    virtual_router_id 80
    priority 30
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        192.168.180.200
    }
}
virtual_server 192.168.180.200 6443 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    real_server 192.168.180.135 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.180.136 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.180.137 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
2.3.3 Important: read this, or you will hit a serious pitfall in production
keepalived must run in BACKUP state with non-preemptive mode (nopreempt). Suppose master1 goes down: after it boots again, the VIP will not automatically fail back to master1. This keeps the cluster healthy, because right after master1 comes back up, the apiserver and the other control-plane components are not yet running; if the VIP floated back to master1 at that moment, the whole cluster would go down. That is why non-preemptive mode is required.
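To observe this behavior during a failover test, a quick loop showing which master currently holds the VIP (a sketch assuming the passwordless SSH configured earlier and the ens33/VIP values used above):
for h in master1 master2 master3; do
  echo -n "$h: "
  ssh root@$h "ip -4 addr show ens33 | grep -q 192.168.180.200 && echo 'has VIP' || echo '-'"
done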
2.3.4 Start in the order master1 -> master2 -> master3; run the following on each in turn:
systemctl enable keepalived && systemctl start keepalived && systemctl status keepalived
2.3.5 Run ip addr and check whether 192.168.180.200 appears under the ens33 interface. If only master1 holds the VIP, everything is normal. If every master node holds the VIP, the cause is most likely blocked VRRP multicast; in that case handle it as follows:
yum install tcpdump
sudo tcpdump -i ens33 vrrp -n
firewall-cmd --direct --permanent --add-rule ipv4 filter INPUT 0 --in-interface ens33 --destination 224.0.0.18 --protocol vrrp -j ACCEPT
# Reload the firewall
firewall-cmd --reload
2.4 Create the kubeadm-config.yaml initialization file; run the following on master1
cat kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.18.2
controlPlaneEndpoint: 192.168.180.200:6443
apiServer:
  certSANs:
  - 192.168.180.135
  - 192.168.180.136
  - 192.168.180.137
  - 192.168.180.138
  - 192.168.180.200
networking:
  podSubnet: 10.244.0.0/16
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
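Optionally, the file can be sanity-checked before touching the host; kubeadm init supports a dry run that only prints what it would do:
kubeadm init --config kubeadm-config.yaml --dry-run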
The initialization command:
kubeadm init --config kubeadm-config.yaml
When the command succeeds, kubeadm prints the join commands for additional nodes; that output means initialization worked.
Run the following on master1 so kubectl has permission to operate on cluster resources:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get nodes
The master1 node shows NotReady
kubectl get pods -n kube-system
coredns is also stuck in Pending
Installing the network plugin fixes both:
docker load -i cni.tar.gz
docker load -i calico-node.tar.gz
Clone or download the repository below, unpack it, and copy the files to master1:
https://github.com/luckylucky421/kubernetes1.17.3/tree/master
Git URL: https://github.com/luckylucky421/kubernetes1.17.3.git
kubectl apply -f calico.yaml
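The calico and coredns pods can take a minute or two to become Running; watch progress and re-check node status with:
kubectl get pods -n kube-system -w
kubectl get nodes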
Copy the certificates from master1 to master2 and master3
(1) Create the certificate directories on master2 and master3:
cd /root && mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube/
(2) Copy the certificates from master1 to master2 and master3; run on master1:
scp /etc/kubernetes/pki/ca.crt root@master2:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/ca.key root@master2:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.key root@master2:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.pub root@master2:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.crt root@master2:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.key root@master2:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/etcd/ca.crt root@master2:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/pki/etcd/ca.key root@master2:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/pki/ca.crt master3:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/ca.key master3:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.key master3:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.pub master3:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.crt master3:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.key master3:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/etcd/ca.crt master3:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/pki/etcd/ca.key master3:/etc/kubernetes/pki/etcd/
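The same copy can be scripted; a minimal sketch assuming the passwordless SSH configured earlier and the file list above:
for host in master2 master3; do
  for f in ca.crt ca.key sa.key sa.pub front-proxy-ca.crt front-proxy-ca.key; do
    scp /etc/kubernetes/pki/$f root@$host:/etc/kubernetes/pki/
  done
  scp /etc/kubernetes/pki/etcd/ca.crt /etc/kubernetes/pki/etcd/ca.key root@$host:/etc/kubernetes/pki/etcd/
done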
Run on master2 and master3:
kubeadm join 192.168.180.200:6443 --token lazpeh.w7ofsulkv2ccsz52 \
    --discovery-token-ca-cert-hash sha256:a5ac0c822dcac9f74339768a2173f0aac460dffd04364d4eaad70f77573d5d18 \
    --control-plane
Note: the --control-plane flag tells kubeadm to join the node as a master (control-plane) node.
Run on master2 and master3:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Run on node1 (no certificates need to be copied):
kubeadm join 192.168.180.200:6443 --token lazpeh.w7ofsulkv2ccsz52 \
    --discovery-token-ca-cert-hash sha256:a5ac0c822dcac9f74339768a2173f0aac460dffd04364d4eaad70f77573d5d18
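Note: the bootstrap token in these join commands expires after 24 hours by default. If it has expired, print a fresh join command on master1 (append --control-plane for master nodes):
kubeadm token create --print-join-command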
Install traefik
Run on every node:
docker load -i traefik_1_7_9.tar.gz
1) Generate the traefik certificate; run on master1:
mkdir ~/ikube/tls/ -p
echo """
[req]
distinguished_name = req_distinguished_name
prompt = yes
[ req_distinguished_name ]
countryName                     = Country Name (2 letter code)
countryName_value               = CN
stateOrProvinceName             = State or Province Name (full name)
stateOrProvinceName_value       = Beijing
localityName                    = Locality Name (eg, city)
localityName_value              = Haidian
organizationName                = Organization Name (eg, company)
organizationName_value          = Channelsoft
organizationalUnitName          = Organizational Unit Name (eg, section)
organizationalUnitName_value    = R & D Department
commonName                      = Common Name (eg, your name or your server\'s hostname)
commonName_value                = *.multi.io
emailAddress                    = Email Address
emailAddress_value              = lentil1016@gmail.com
""" > ~/ikube/tls/openssl.cnf
openssl req -newkey rsa:4096 -nodes -config ~/ikube/tls/openssl.cnf -days 3650 -x509 -out ~/ikube/tls/tls.crt -keyout ~/ikube/tls/tls.key
kubectl create -n kube-system secret tls ssl --cert ~/ikube/tls/tls.crt --key ~/ikube/tls/tls.key
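To sanity-check the certificate and the secret just created (the subject should show *.multi.io and the dates should span ten years):
openssl x509 -in ~/ikube/tls/tls.crt -noout -subject -dates
kubectl get secret ssl -n kube-system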
2) Apply the yaml file (downloaded from the GitHub repository above):
kubectl apply -f traefik.yaml
3) Check whether traefik deployed successfully:
kubectl get pods -n kube-system
Install kubernetes-dashboard 2.0 (the Kubernetes web UI)
Run on every node:
docker load -i dashboard_2_0_0.tar.gz
docker load -i metrics-scrapter-1-0-1.tar.gz
Run on master1 only:
kubectl apply -f kubernetes-dashboard.yaml
Check whether the dashboard installed successfully:
kubectl get pods -n kubernetes-dashboard
Check the dashboard's front-end service:
kubectl get svc -n kubernetes-dashboard
Change the service type to NodePort:
kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
Change type: ClusterIP to type: NodePort, then save and quit.
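Equivalently, the type can be changed non-interactively with a patch, avoiding the manual edit:
kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'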
kubectl get svc -n kubernetes-dashboard
Open the VIP on the NodePort shown (31102 in this environment):
https://192.168.180.200:31102/
Log in to the dashboard with the default token specified in the yaml file
1) List the secrets in the kubernetes-dashboard namespace:
kubectl get secret -n kubernetes-dashboard
kubectl describe secret kubernetes-dashboard-token-lrpsj -n kubernetes-dashboard
Create an admin token with permissions over every namespace:
kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard
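With the binding in place, the dashboard service account's token now carries cluster-admin rights; it can be extracted without knowing the secret's random suffix:
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep kubernetes-dashboard-token | awk '{print $1}')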
Install the metrics monitoring add-ons
docker load -i metrics-server-amd64_0_3_1.tar.gz
docker load -i addon.tar.gz
Run on master1 only:
kubectl apply -f metrics.yaml
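Once the metrics-server pod is Running, resource metrics start flowing after a minute or so; verify with:
kubectl get pods -n kube-system | grep metrics
kubectl top nodes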