Introduction
Environment: CentOS 7 virtual machines, built on a local VMware setup
Versions: Kubernetes 1.14.1 (kubelet, kubeadm, kubectl)
Node list:
Node IP | Role | Hostname |
---|---|---|
192.168.88.14 | Master | master |
192.168.88.15 | Node | node1 |
192.168.88.16 | Node | node2 |
Set the hostname
hostnamectl set-hostname master
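Run the matching command on the other two nodes:
hostnamectl set-hostname node1   # on 192.168.88.15
hostnamectl set-hostname node2   # on 192.168.88.16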
Edit the hosts file
Add entries for all nodes to /etc/hosts:
192.168.88.14 master
192.168.88.15 node1
192.168.88.16 node2
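A quick check that the names resolve from every node:
for h in master node1 node2; do ping -c 1 $h >/dev/null && echo "$h ok"; done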
Disable the firewall and SELinux
setenforce 0
sed -i "s/^SELINUX\=enforcing/SELINUX\=disabled/g" /etc/selinux/config
systemctl disable firewalld
systemctl stop firewalld
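Optional check that both are really off:
getenforce                      # Permissive now, Disabled after the next reboot
systemctl is-active firewalld   # inactive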
Enable IP forwarding
Write the following into /etc/sysctl.d/k8s.conf:
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
Run these commands to apply the settings:
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf
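The br_netfilter module may not be loaded automatically after the reboot that follows the kernel upgrade; an optional sketch to persist it via the standard modules-load.d mechanism:
cat <<EOF > /etc/modules-load.d/k8s.conf
br_netfilter
EOF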
Disable swap
swapoff -a
sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
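Verify swap is gone:
free -h   # the Swap line should show 0B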
Configure a domestic (Aliyun) yum repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
Upgrade the kernel
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
yum --enablerepo=elrepo-kernel install -y kernel-ml
awk -F\' '$1=="menuentry " {print i++ " @ " $2}' /etc/grub2.cfg
grub2-set-default 0
Reboot all nodes.
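After the reboot, confirm the new kernel is active:
uname -r   # should report the kernel-ml version installed above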
Install Docker
yum install -y docker
Configure the DaoCloud registry mirror
curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io
# Note: the script leaves a trailing comma in /etc/docker/daemon.json, which must be removed:
{"registry-mirrors": ["http://f1361db2.m.daocloud.io"],}
Run the script below to pull the required images: pull them from the Aliyun mirror first, then retag them with the k8s.gcr.io names.
echo ""
echo "=========================================================="
echo "Pull Kubernetes v1.14.1 Images from aliyuncs.com ......"
echo "=========================================================="
echo ""
MY_REGISTRY=registry.cn-hangzhou.aliyuncs.com/openthings
## Pull the images
docker pull ${MY_REGISTRY}/k8s-gcr-io-kube-apiserver:v1.14.1
docker pull ${MY_REGISTRY}/k8s-gcr-io-kube-controller-manager:v1.14.1
docker pull ${MY_REGISTRY}/k8s-gcr-io-kube-scheduler:v1.14.1
docker pull ${MY_REGISTRY}/k8s-gcr-io-kube-proxy:v1.14.1
docker pull ${MY_REGISTRY}/k8s-gcr-io-etcd:3.3.10
docker pull ${MY_REGISTRY}/k8s-gcr-io-pause:3.1
docker pull ${MY_REGISTRY}/k8s-gcr-io-coredns:1.3.1
## Retag them
docker tag ${MY_REGISTRY}/k8s-gcr-io-kube-apiserver:v1.14.1 k8s.gcr.io/kube-apiserver:v1.14.1
docker tag ${MY_REGISTRY}/k8s-gcr-io-kube-scheduler:v1.14.1 k8s.gcr.io/kube-scheduler:v1.14.1
docker tag ${MY_REGISTRY}/k8s-gcr-io-kube-controller-manager:v1.14.1 k8s.gcr.io/kube-controller-manager:v1.14.1
docker tag ${MY_REGISTRY}/k8s-gcr-io-kube-proxy:v1.14.1 k8s.gcr.io/kube-proxy:v1.14.1
docker tag ${MY_REGISTRY}/k8s-gcr-io-etcd:3.3.10 k8s.gcr.io/etcd:3.3.10
docker tag ${MY_REGISTRY}/k8s-gcr-io-pause:3.1 k8s.gcr.io/pause:3.1
docker tag ${MY_REGISTRY}/k8s-gcr-io-coredns:1.3.1 k8s.gcr.io/coredns:1.3.1
echo ""
echo "=========================================================="
echo "Pull Kubernetes v1.14.1 Images FINISHED."
echo "into registry.cn-hangzhou.aliyuncs.com/openthings, "
echo " by openthings@https://my.oschina.net/u/2306127."
echo "=========================================================="
echo ""
Install the etcd cluster
yum install -y etcd
Edit /etc/etcd/etcd.conf on every node; the values below are for the master (etcd01):
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="http://192.168.88.14:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.88.14:2379,http://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
ETCD_NAME="etcd01"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.88.14:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.88.14:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="etcd01=http://192.168.88.14:2380,etcd02=http://192.168.88.15:2380,etcd03=http://192.168.88.16:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
...
One pitfall: the default data directory is /var/lib/etcd/default.etcd, so it has to exist and be owned by the etcd user:
mkdir -p /var/lib/etcd/default.etcd
chown -R etcd:etcd /var/lib/etcd/default.etcd
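On node1 and node2 the same file is edited; only the member name and the IP-based URLs differ. For example, on node1 (etcd02) the changed lines would be:
ETCD_LISTEN_PEER_URLS="http://192.168.88.15:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.88.15:2379,http://127.0.0.1:2379"
ETCD_NAME="etcd02"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.88.15:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.88.15:2379"

The service itself is managed with systemd:
systemctl enable etcd
systemctl start etcd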
After all nodes are configured, start etcd on the master first, then on the two worker nodes, and check the cluster status:
etcdctl cluster-health
member 1691cc4b81e3405a is healthy: got healthy result from http://192.168.88.14:2379
member 4c2e8055bc8aaba8 is healthy: got healthy result from http://192.168.88.15:2379
member a64df75e95660ceb is healthy: got healthy result from http://192.168.88.16:2379
Install kubelet, kubectl and kubeadm
# Configure the yum repo first
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
# Then test that the repo is reachable
curl https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
# If it is not reachable, you may need to configure a proxy (export http_proxy / https_proxy)
# Then install the packages
yum install -y kubelet kubectl kubeadm
# This errored out for me; the following packages were required:
conntrack-tools-1.4.4-4.el7.x86_64.rpm
libnetfilter_conntrack-1.0.6-1.el7_3.x86_64.rpm
libnetfilter_cthelper-1.0.0-9.el7.x86_64.rpm
libnetfilter_cttimeout-1.0.0-6.el7.x86_64.rpm
libnetfilter_queue-1.0.2-2.el7_2.x86_64.rpm
socat-1.7.3.2-2.el7.x86_64.rpm
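If the CentOS base/extras repos are reachable, these can normally be installed straight from yum (a sketch; the libnetfilter_* packages come in as dependencies). It is also worth enabling kubelet now, which avoids the "kubelet service is not enabled" preflight warning during kubeadm init:
yum install -y conntrack-tools socat
systemctl enable kubelet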
Configure kubeadm
cat > kubeadmin-config.yaml <<EOF
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: "192.168.88.14"
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
etcd:
  external:
    endpoints:
    - "http://192.168.88.14:2379"
    - "http://192.168.88.15:2379"
    - "http://192.168.88.16:2379"
networking:
  podSubnet: "10.244.0.0/16"
apiServer:
  certSANs:
  - "192.168.88.14"
  - "192.168.88.15"
  - "192.168.88.16"
kubernetesVersion: "v1.14.1"
EOF
kubeadm's config file format has changed significantly across releases; see the official documentation for the current fields.
Create the cluster with kubeadm init
[root@master ~]# kubeadm init --config=kubeadmin-config.yaml
[etcd.external.endpoints: Invalid value: "192.168.88.14:2379": URL parse error: parse 192.168.88.14:2379: first path segment in URL cannot contain colon, etcd.external.endpoints: Invalid value: "192.168.88.15:2379": URL parse error: parse 192.168.88.15:2379: first path segment in URL cannot contain colon, etcd.external.endpoints: Invalid value: "192.168.88.16:2379": URL parse error: parse 192.168.88.16:2379: first path segment in URL cannot contain colon]
The errors above mean the etcd endpoints were missing the http:// scheme (the YAML shown earlier already includes the fix); edit the file and rerun:
[root@master ~]# vim kubeadmin-config.yaml
[root@master ~]# kubeadm init --config=kubeadmin-config.yaml
[init] Using Kubernetes version: v1.14.1
[preflight] Running pre-flight checks
[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] External etcd mode: Skipping etcd/ca certificate authority generation
[certs] External etcd mode: Skipping etcd/server certificate authority generation
[certs] External etcd mode: Skipping etcd/peer certificate authority generation
[certs] External etcd mode: Skipping apiserver-etcd-client certificate authority generation
[certs] External etcd mode: Skipping etcd/healthcheck-client certificate authority generation
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.88.14 192.168.88.14 192.168.88.15 192.168.88.16]
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 17.516089 seconds
[upload-config] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.14" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --experimental-upload-certs
[mark-control-plane] Marking the node master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: s2tqxm.i4tzr950bmvi2cjz
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.88.14:6443 --token s2tqxm.i4tzr950bmvi2cjz \
--discovery-token-ca-cert-hash sha256:85c1875f91b2b5b27438d39da789bec4c67824cdec4a778bf61f4082fd4ee205
Save this output locally, then run the kubeadm join command above on the other two nodes to join them to the cluster.
Configure kubectl
mkdir ~/.kube/
cp /etc/kubernetes/admin.conf ~/.kube/config
Run kubectl
[root@master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
master NotReady master 3h4m v1.14.1
node1 NotReady <none> 3h3m v1.14.1
node2 NotReady <none> 3h3m v1.14.1
All nodes show up as NotReady. Don't worry; one step remains: wiring up the pod network. Let's deploy Calico.
Deploy Calico
Again, see the official documentation for details.
First, download calico.yaml.
Because our etcd cluster does not use certificates, the certificate-related settings in the YAML can be left as they are; I mainly changed the following:
- name: CALICO_IPV4POOL_IPIP
  value: "Off"
- name: CALICO_IPV4POOL_CIDR
  value: "10.244.0.0/16"
Then apply the YAML:
kubectl apply -f calico.yaml
Finally, check the result with kubectl get node:
[root@master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
master Ready master 3h4m v1.14.1
node1 Ready <none> 3h3m v1.14.1
node2 Ready <none> 3h3m v1.14.1
Get the api-server token
kubectl get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='default')].data.token}"|base64 --decode
Access the api-server
export TOKEN=`kubectl get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='default')].data.token}"|base64 --decode`
curl -X GET https://192.168.88.14:6443/apis/extensions/v1beta1/namespaces/kube-system/ingresses -H "Authorization: Bearer $TOKEN" -k
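A simpler smoke test against the API server is the /version endpoint, reusing the same token:
curl -k -H "Authorization: Bearer $TOKEN" https://192.168.88.14:6443/version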
Create a busybox Deployment to verify the cluster
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    k8s-app: busybox
  name: busybox
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: busybox
  template:
    metadata:
      labels:
        k8s-app: busybox
    spec:
      containers:
      - image: busybox:latest
        name: busybox
        imagePullPolicy: IfNotPresent
        command:
        - sleep
        - "3600"
That's all.