I. Server preparation
Create three virtual machines with VMware, running CentOS 7.6:
192.168.11.138 master
192.168.11.145 node1
192.168.11.146 node2
1. Hostname resolution
[root@localhost ~]# vi /etc/hosts
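The entries to add mirror the host list above; a minimal sketch (the hostnamectl step is an addition implied by the master/node prompts that appear later):
# Append to /etc/hosts on every node
192.168.11.138 master
192.168.11.145 node1
192.168.11.146 node2
# Optionally set each machine's hostname to match, e.g. on the master:
[root@localhost ~]# hostnamectl set-hostname master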
2. System initialization
k8s requires the clocks on all nodes to agree exactly, so first synchronize time on every node.
# Install the time synchronization service
[root@localhost ~]# yum install -y chrony
# Start time synchronization and enable it at boot
[root@localhost ~]# systemctl start chronyd
[root@localhost ~]# systemctl enable chronyd
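To confirm synchronization is actually working, chronyc can be queried (standard chrony tooling, not part of the original steps):
# List the time sources chronyd is using
[root@localhost ~]# chronyc sources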
# Disable the firewall
[root@localhost ~]# systemctl stop firewalld
[root@localhost ~]# systemctl disable firewalld
# Adjust the SELinux configuration
[root@localhost ~]# vi /etc/selinux/config
# Find the parameter and change it to SELINUX=disabled
# Disable swap
[root@localhost ~]# vi /etc/fstab
# Comment out the following line
/dev/mapper/centos-swap swap swap defaults 0 0
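Editing fstab only takes effect after a reboot; to turn swap off immediately as well, swapoff can be used (a standard command, added here as a convenience):
# Disable all swap right away
[root@localhost ~]# swapoff -a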
# Configure bridge networking
[root@localhost ~]# vi /etc/sysctl.d/kubernetes.conf
# Create the file kubernetes.conf with the following settings
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
net.ipv4.ip_forward=1
# Reload the configuration (plain sysctl -p only reads /etc/sysctl.conf, so point it at the new file)
[root@localhost ~]# sysctl -p /etc/sysctl.d/kubernetes.conf
# Load the bridge netfilter module (the bridge sysctls above only take effect once it is loaded)
[root@localhost ~]# modprobe br_netfilter
# Verify it loaded
[root@localhost ~]# lsmod |grep br_netfilter
br_netfilter 22256 0
bridge 151336 1 br_netfilter
# Services in k8s support two proxy modes, one based on iptables and one based on ipvs.
# ipvs performs noticeably better, but using it requires loading the ipvs kernel modules manually.
# Install the management tools (the package name is ipvsadm)
[root@localhost ~]# yum install ipset ipvsadm -y
# Write the modules to load into a script file; the suffix must be .modules
[root@localhost ~]# cat <<EOF > /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# Make it executable
[root@localhost ~]# chmod +x /etc/sysconfig/modules/ipvs.modules
# Run the script
[root@localhost ~]# /bin/bash /etc/sysconfig/modules/ipvs.modules
# Verify the modules loaded
[root@localhost ~]# lsmod | grep -e ip_vs -e nf_conntrack_ipv4
nf_conntrack_ipv4 15053 0
nf_defrag_ipv4 12729 1 nf_conntrack_ipv4
ip_vs_sh 12688 0
ip_vs_wrr 12697 0
ip_vs_rr 12600 0
ip_vs 145497 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 133095 2 ip_vs,nf_conntrack_ipv4
libcrc32c 12644 3 xfs,ip_vs,nf_conntrack
3. Reboot the servers
[root@localhost ~]# reboot
# Check that SELinux is disabled
[root@master ~]# getenforce
Disabled
# Check that the swap partition is off
[root@master ~]# free -m
total used free shared buff/cache available
Mem: 4061 113 3751 11 196 3708
Swap: 0 0 0
II. Install the k8s cluster
1. docker
# Switch to the Aliyun mirror repository
[root@master ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
# List the available versions
[root@master ~]# yum list docker-ce --showduplicates
# Install a specific version; --setopt=obsoletes=0 stops yum from pulling the latest instead
[root@master ~]# yum install --setopt=obsoletes=0 docker-ce-18.06.3.ce-3.el7 -y
# Adjust the configuration
[root@master ~]# mkdir /etc/docker
[root@master ~]# cat <<EOF > /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://kn0t2bca.mirror.aliyuncs.com"]
}
EOF
# Restart docker and enable it at boot
[root@master ~]# systemctl restart docker
[root@master ~]# systemctl enable docker
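To verify the cgroup driver change took effect, docker info can be checked; its output contains a "Cgroup Driver" line:
# Should print: Cgroup Driver: systemd
[root@master ~]# docker info | grep -i "cgroup driver"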
2. kubernetes
# Add the repository
[root@master ~]# vi /etc/yum.repos.d/kubernetes.repo
# with the following content
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
# Install
[root@master ~]# yum install --setopt=obsoletes=0 kubeadm-1.17.4-0 kubelet-1.17.4-0 kubectl-1.17.4-0 -y
# Adjust the configuration to set the cgroup driver
[root@master ~]# vi /etc/sysconfig/kubelet
KUBELET_CGROUP_ARGS="--cgroup-driver=systemd"
KUBE_PROXY_MODE="ipvs"
# Delete the line KUBELET_EXTRA_ARGS=
# Enable at boot; there is no need to start it yet, since it starts when the cluster is initialized
[root@master ~]# systemctl enable kubelet
3. Pull the required images
# Check the image versions; 1.17.17 is the version recommended for this environment
[root@master ~]# kubeadm config images list
I0804 13:49:19.439685 22165 version.go:251] remote version is much newer: v1.21.3; falling back to: stable-1.17
W0804 13:49:22.247896 22165 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0804 13:49:22.247928 22165 validation.go:28] Cannot validate kubelet config - no validator is available
k8s.gcr.io/kube-apiserver:v1.17.17
k8s.gcr.io/kube-controller-manager:v1.17.17
k8s.gcr.io/kube-scheduler:v1.17.17
k8s.gcr.io/kube-proxy:v1.17.17
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.4.3-0
k8s.gcr.io/coredns:1.6.5
# The images are hosted abroad; without a VPN, pull them from a domestic mirror and retag instead
[root@master ~]# images=(
kube-apiserver:v1.17.17
kube-controller-manager:v1.17.17
kube-scheduler:v1.17.17
kube-proxy:v1.17.17
pause:3.1
etcd:3.4.3-0
coredns:1.6.5
)
[root@master ~]# for imageName in ${images[@]} ; do
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
done
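A quick sanity check that all seven images were retagged (assuming the loop above completed without errors):
# All images should now be listed under k8s.gcr.io
[root@master ~]# docker images | grep k8s.gcr.io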
4. Initialize the cluster
Run on the master:
[root@master ~]# kubeadm init \
--kubernetes-version=v1.17.17 \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12 \
--apiserver-advertise-address=192.168.11.138
# On success you will see output like the following; run the commands it suggests
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.11.138:6443 --token rtdb7l.og3lwhxbqisq0jbi \
--discovery-token-ca-cert-hash sha256:987755122093887613b08c2b24c8593fb72c5d696459b266822c88c42bc34f28
# As prompted, run:
[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
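With the kubeconfig in place, a quick check (standard kubectl command) confirms the API server is reachable:
[root@master ~]# kubectl cluster-info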
a. Run on the nodes
# Take the join command from the master's output and run it on each node to join the cluster
kubeadm join 192.168.11.138:6443 --token rtdb7l.og3lwhxbqisq0jbi \
--discovery-token-ca-cert-hash sha256:987755122093887613b08c2b24c8593fb72c5d696459b266822c88c42bc34f28
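The bootstrap token expires after 24 hours by default; if a node joins later, a fresh join command can be generated on the master (standard kubeadm subcommand):
[root@master ~]# kubeadm token create --print-join-command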
b. Check the cluster status on the master
# List the current nodes; since the network add-on is not installed yet, they all show NotReady
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master NotReady master 12m v1.17.4
node1 NotReady <none> 2m6s v1.17.4
node2 NotReady <none> 7s v1.17.4
c. Install the network add-on
Get the flannel manifest from:
https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
The images it references are hosted abroad; without a VPN, replace the registry quay.io with quay-mirror.qiniu.com.
Upload the yml file to /home/anson on the master, then run the commands below.
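A one-line sketch of the registry replacement just described, assuming the file sits at /home/anson/kube-flannel.yml:
[root@master ~]# sed -i 's#quay.io#quay-mirror.qiniu.com#g' /home/anson/kube-flannel.yml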
[root@master ~]# kubectl apply -f /home/anson/kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
# Once it succeeds, the nodes all become Ready
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready master 56m v1.17.4
node1 Ready <none> 45m v1.17.4
node2 Ready <none> 43m v1.17.4
III. Test k8s
Create an nginx deployment with k8s and access it from a browser.
# Create the deployment (this pulls the nginx image)
[root@master ~]# kubectl create deployment nginx --image=nginx:1.14-alpine
deployment.apps/nginx created
# Expose port 80
[root@master ~]# kubectl expose deployment nginx --port=80 --type=NodePort
service/nginx exposed
# Check the pod
[root@master ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-6867cdf567-kshn8 1/1 Running 0 93s
# Check the service
[root@master ~]# kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 64m
nginx NodePort 10.109.61.48 <none> 80:31091/TCP 64s
From the output above, the mapping 80:31091/TCP shows that port 80 inside the cluster network is exposed on node port 31091, so the service can now be reached in a browser at {IP}:{PORT}.
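A quick check from the shell, using the master IP and the node port shown above:
[root@master ~]# curl http://192.168.11.138:31091
# An nginx welcome page in the HTML output means the service is reachable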
Installing the Dashboard
1. Apply the official manifest
[root@master ~]# kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml
This failed with connection refused, most likely DNS pollution; pinning the hostname directly to an IP fixes it.
[root@master ~]# vi /etc/hosts
# Add the following entry
199.232.68.133 raw.githubusercontent.com
# Run it again and it succeeds
[root@master ~]# kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
Running kubectl get pods -A now shows two new pods:
kubernetes-dashboard dashboard-metrics-scraper-894c58c65-8nd5g 1/1 Running 0 16m
kubernetes-dashboard kubernetes-dashboard-555f8cc76f-kpsd4 1/1 Running 0 16m
2. Create an admin user
[root@master home]# mkdir dashboard && cd dashboard
[root@master dashboard]# vi dashboard-admin.yaml
The yml file content is as follows:
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
Apply it; the service account is created and bound to the role:
[root@master dashboard]# kubectl apply -f dashboard-admin.yaml
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
Get the admin token with the following command:
[root@master dashboard]# kubectl get secret -n kubernetes-dashboard $(kubectl get serviceaccount admin-user -n kubernetes-dashboard -o jsonpath="{.secrets[0].name}") -o jsonpath="{.data.token}" | base64 --decode
eyJhbGciOiJSUzI1NiIsImtpZCI6InE5ZVNYV1ROdzNhcHcxaEhmQWx4WFBmR3U5dEFPY2Nmckh4OUNJVi13OFUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXZ0Y3RrIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI2ZWU5YzJhMS1kNTE4LTQ2NmEtOWY4Ni1iZDlhZmM4NzZhZjYiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.dHQ44tyzpBmnvJ6mEB5fV3GmGKy--WP0QyFegTv7oO2fIaVTySqebmSfY1JRnLAQnJeYAzT5gbVhWW5iZ7m71Lq3q-YUof8F18MSG_CfMd71YGqFmMsvqlSsXK4BgdyRWDT8Zz1eMacebabGGxqg0Y4wt_lkUZy8s0xXSUuAKPS3dNZNxnF61ow2By5ve2iLhNg67ncVqy0tBHb9wdDsV98j_vMmxM2uDt_ui_Oevf2n8H3i2T_TBTtxR8diYBEYcWL_Gh5vcynk9nF4N2aF62MYoYUaF2I4vk2CgYnFe-Qi18pdtQshhHy0x276ShNLhrbDnY1rypTe3uFm06nHvQ
3. Start a proxy to access the dashboard
[root@master dashboard]# kubectl proxy
Starting to serve on 127.0.0.1:8001
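With the proxy running, the dashboard is normally served under the API proxy path below (the standard path for a service named kubernetes-dashboard; adjust if yours differs):
http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/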
4. Pitfalls
After the previous step, opening {IP:port} in a browser should in theory bring up the dashboard login page, but it refused to load, returning Forbidden. kubectl get pod -A showed the pods running normally; it took a while to find the fix.
a. Edit the kubernetes-dashboard service
[root@master dashboard]# kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
In the service in the kubernetes-dashboard namespace, replace type: ClusterIP with type: NodePort.
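Instead of editing interactively, the same change can be applied with a one-line patch (equivalent kubectl usage, not part of the original steps):
[root@master dashboard]# kubectl patch svc kubernetes-dashboard -n kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'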
After saving, this command shows the dashboard's external port, here 30310:
[root@master dashboard]# kubectl get svc -A
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 23h
default nginx NodePort 10.109.61.48 <none> 80:31091/TCP 22h
kube-system kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 23h
kubernetes-dashboard dashboard-metrics-scraper ClusterIP 10.98.97.30 <none> 8000/TCP 4h6m
kubernetes-dashboard kubernetes-dashboard NodePort 10.99.81.212 <none> 443:30310/TCP 4h6m
At this point, opening https://192.168.11.138:30310 in a browser still fails, mainly because of a certificate problem.
b. Generate a new certificate
# List the existing secrets and find kubernetes-dashboard-certs
[root@master dashboard]# kubectl get secret -A
NAMESPACE NAME TYPE DATA AGE
default default-token-g6c27
...
kubernetes-dashboard kubernetes-dashboard-certs Opaque 0 4h11m
kubernetes-dashboard kubernetes-dashboard-csrf Opaque 1 4h11m
kubernetes-dashboard kubernetes-dashboard-key-holder Opaque 2 4h11m
kubernetes-dashboard kubernetes-dashboard-token-4wmkq kubernetes.io/service-account-token 3 4h11m
# Delete the kubernetes-dashboard-certs secret in the kubernetes-dashboard namespace
[root@master dashboard]# kubectl delete secret -n kubernetes-dashboard kubernetes-dashboard-certs
secret "kubernetes-dashboard-certs" deleted
# Recreate the secret from the cluster certificates
[root@master dashboard]# kubectl create secret generic kubernetes-dashboard-certs --from-file=/etc/kubernetes/pki -n kubernetes-dashboard
secret/kubernetes-dashboard-certs created
c. Restart the pods
Deleting the pods makes k8s recreate them automatically, so just run:
[root@master dashboard]# kubectl delete pod -n kubernetes-dashboard --all
pod "dashboard-metrics-scraper-894c58c65-8nd5g" deleted
pod "kubernetes-dashboard-555f8cc76f-kpsd4" deleted
# Check the restart progress; the first pod is done and the second is still starting
[root@master dashboard]# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
default nginx-6867cdf567-kshn8 1/1
...
kubernetes-dashboard dashboard-metrics-scraper-894c58c65-4xl9g 1/1 Running 0 26s
kubernetes-dashboard kubernetes-dashboard-555f8cc76f-t7l56 0/1 ContainerCreating 0 26s
d. Start the proxy
[root@master dashboard]# kubectl proxy --address='0.0.0.0' --accept-hosts='^.*$' &
Open the address in a browser and the login page appears; choose token login and paste the admin token obtained in step 2.
Installing ingress
1. Apply the official manifests
[root@master ingress-controller]# kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.30.0/deploy/static/mandatory.yaml
namespace/ingress-nginx created
configmap/nginx-configuration created
configmap/tcp-services created
configmap/udp-services created
serviceaccount/nginx-ingress-serviceaccount created
clusterrole.rbac.authorization.k8s.io/nginx-ingress-clusterrole created
role.rbac.authorization.k8s.io/nginx-ingress-role created
rolebinding.rbac.authorization.k8s.io/nginx-ingress-role-nisa-binding created
clusterrolebinding.rbac.authorization.k8s.io/nginx-ingress-clusterrole-nisa-binding created
deployment.apps/nginx-ingress-controller created
limitrange/ingress-nginx created
[root@master ingress-controller]# kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.30.0/deploy/static/provider/baremetal/service-nodeport.yaml
service/ingress-nginx created
# Check whether the pod and service are up
[root@master ingress-controller]# kubectl get pod,svc -n ingress-nginx
NAME READY STATUS RESTARTS AGE
pod/nginx-ingress-controller-7f74f657bd-s9dqk 0/1 ContainerCreating 0 4m8s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/ingress-nginx NodePort 10.103.251.197 <none> 80:31492/TCP,443:32075/TCP 52s
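To exercise the controller end to end, a minimal Ingress routing to the nginx service from part III could look like the sketch below (networking.k8s.io/v1beta1 matches k8s 1.17; the host nginx.example.com is a hypothetical example):
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: nginx-ingress
  namespace: default
spec:
  rules:
  - host: nginx.example.com
    http:
      paths:
      - path: /
        backend:
          serviceName: nginx
          servicePort: 80
Save it as a yml file, apply it with kubectl apply -f, map nginx.example.com to a node IP (e.g. in /etc/hosts), and the site should answer on the ingress-nginx node port 31492 shown above.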