目錄
node組件部署
node需要的組件從master節點的kube-server文件包裏面複製出來
kubelet kube-proxy
1、部署環境準備
#master節點上分配權限kubelet
[root@master ssl]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
#master節點上拷貝kubelet、kube-proxy到node節點,拷貝TLS兩個文件到node節點
[root@manage01 123]# scp -r kubernetes/server/bin/kubectl kubernetes/server/bin/kubelet kubernetes/server/bin/kube-proxy [email protected]:/opt/kubernetes/bin/
[root@manage01 123]# scp -r kubernetes/server/bin/kubectl kubernetes/server/bin/kubelet kubernetes/server/bin/kube-proxy [email protected]:/opt/kubernetes/bin/
[root@manage01 ssl]# scp bootstrap.kubeconfig kube-proxy.kubeconfig token.csv [email protected]:/opt/kubernetes/cfg/
[root@manage01 ssl]# scp bootstrap.kubeconfig kube-proxy.kubeconfig token.csv [email protected]:/opt/kubernetes/cfg/
#node節點環境變量kubectl
[root@node01 ~]# chmod +x /opt/kubernetes/bin/*
[root@node01 bin]# cp /opt/kubernetes/bin/kubectl /usr/bin/kubectl
2、部署kubelet
FAQ:
[root@node01 cfg]# journalctl -xefu kubelet
#--allow-privileged=true
#很多文章配置有上述參數,kubelet啓動會報錯的!!
2月 29 18:53:00 node01 kubelet[46573]: F0229 18:53:00.113351 46573 server.go:156] unknown flag: --allow-privileged
2月 29 18:53:00 node01 systemd[1]: kubelet.service holdoff time over, scheduling restart.
2月 29 18:53:00 node01 systemd[1]: start request repeated too quickly for kubelet.service
2月 29 18:53:00 node01 systemd[1]: Failed to start Kubernetes Kubelet.
2月 29 18:53:00 node01 systemd[1]: Unit kubelet.service entered failed state.
2月 29 18:53:00 node01 systemd[1]: kubelet.service failed.
對應方法
本身allow-privileged已經是deprecated選項了,在1.15.0被刪除了。所以在kubelet的啓動參數中刪除即可
[root@node02 cfg]# vi kubelet.sh && chmod 755 kubelet.sh
#!/bin/bash
# Generate the kubelet options file and its systemd unit, then (re)start kubelet.
# Usage: ./kubelet.sh [NODE_ADDRESS] [DNS_SERVER_IP]
#   NODE_ADDRESS  - this node's IP (default 192.168.192.130)
#   DNS_SERVER_IP - cluster DNS service IP (default 10.10.10.2)
# Must run as root: writes /opt/kubernetes/cfg and /usr/lib/systemd/system.
set -e

NODE_ADDRESS=${1:-"192.168.192.130"}
DNS_SERVER_IP=${2:-"10.10.10.2"}

# Write kubelet runtime options. The \\ sequences put literal backslash
# line-continuations into the generated file so it stays readable.
# NOTE: do NOT add --allow-privileged here — the flag was removed (kubelet
# fails to start with "unknown flag", see the FAQ above in this document).
# NOTE(review): --experimental-bootstrap-kubeconfig is deprecated; newer
# releases use --bootstrap-kubeconfig — confirm against the installed version.
cat <<EOF >/opt/kubernetes/cfg/kubelet
KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--address=${NODE_ADDRESS} \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--cert-dir=/opt/kubernetes/ssl \\
--cluster-dns=${DNS_SERVER_IP} \\
--cluster-domain=cluster.local \\
--fail-swap-on=false \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF

# systemd unit: kubelet depends on docker and restarts on failure.
# \$KUBELET_OPTS is escaped so systemd (not this script) expands it.
cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
3、部署kube-proxy
#!/bin/bash
# Generate the kube-proxy options file and its systemd unit, then (re)start kube-proxy.
# Usage: ./kube-proxy.sh [NODE_ADDRESS]
#   NODE_ADDRESS - this node's IP (default 192.168.192.130)
# Must run as root: writes /opt/kubernetes/cfg and /usr/lib/systemd/system.
set -e

NODE_ADDRESS=${1:-"192.168.192.130"}

# Use \\ (not a single \): inside an unquoted here-doc a bare trailing
# backslash is consumed by the shell as a line continuation, collapsing the
# generated file onto one line. Escaping it writes a literal backslash,
# keeping the config multi-line and consistent with the kubelet config.
cat <<EOF >/opt/kubernetes/cfg/kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
EOF

# systemd unit: \$KUBE_PROXY_OPTS is escaped so systemd expands it at start.
cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy
4、執行部署腳本,查看進程
[root@node02 cfg]# ./kubelet.sh 192.168.192.130 10.10.10.2
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@node02 cfg]# ./kube-proxy.sh 192.168.192.130
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
[root@node02 cfg]# ps -ef | grep kube
root 1396 1 5 10:42 ? 00:31:53 /opt/kubernetes/bin/etcd --name=etcd03 --data-dir=/var/lib/etcd/default.etcd --listen-peer-urls=https://192.168.192.130:2380 --listen-client-urls=https://192.168.192.130:2379,http://127.0.0.1:2379 --advertise-client-urls=https://192.168.192.130:2379 --initial-advertise-peer-urls=https://192.168.192.130:2380 --initial-cluster=etcd01=https://192.168.192.128:2380,etcd02=https://192.168.192.129:2380,etcd03=https://192.168.192.130:2380 --initial-cluster-token=etcd01=https://192.168.192.128:2380,etcd02=https://192.168.192.129:2380,etcd03=https://192.168.192.130:2380 --initial-cluster-state=new --cert-file=/opt/kubernetes/ssl/server.pem --key-file=/opt/kubernetes/ssl/server-key.pem --peer-cert-file=/opt/kubernetes/ssl/server.pem --peer-key-file=/opt/kubernetes/ssl/server-key.pem --trusted-ca-file=/opt/kubernetes/ssl/ca.pem --peer-trusted-ca-file=/opt/kubernetes/ssl/ca.pem
root 1655 1 0 10:58 ? 00:01:45 /opt/kubernetes/bin/flanneld --ip-masq --etcd-endpoints=https://192.168.192.128:2379,https://192.168.192.129:2379,https://192.168.192.130:2379 -etcd-cafile=/opt/kubernetes/ssl/ca.pem -etcd-certfile=/opt/kubernetes/ssl/server.pem -etcd-keyfile=/opt/kubernetes/ssl/server-key.pem
root 48503 1 0 19:38 ? 00:00:00 /opt/kubernetes/bin/kube-proxy --logtostderr=true --v=4 --hostname-override=192.168.192.130 --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig
root 48651 39421 0 19:39 pts/0 00:00:00 journalctl -xefu kubelet
root 49204 1 2 19:42 ? 00:00:06 /opt/kubernetes/bin/kubelet --logtostderr=true --v=4 --address=192.168.192.130 --hostname-override=192.168.192.130 --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig --experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig --cert-dir=/opt/kubernetes/ssl --cluster-dns=10.10.10.2 --cluster-domain=cluster.local --fail-swap-on=false --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0
root 50237 48735 0 19:46 pts/1 00:00:00 grep --color=auto kube
5、master節點配置允許證書
#查看所有節點證書請求
[root@manage01 ssl]# kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-EM2C9BtGq_R4NG259ItFhZzjj0ceSAS4VtKx2IFQO50 34m kubelet-bootstrap Approved,Issued
node-csr-Ln2uUgzUdHVfbQ9M3DtMxHoPpAb7wGMTkTjg4Vo4hN8 84s kubelet-bootstrap Pending
#允許證書
[root@manage01 ssl]# kubectl certificate approve node-csr-Ln2uUgzUdHVfbQ9M3DtMxHoPpAb7wGMTkTjg4Vo4hN8
certificatesigningrequest.certificates.k8s.io/node-csr-Ln2uUgzUdHVfbQ9M3DtMxHoPpAb7wGMTkTjg4Vo4hN8 approved
#查看節點請求狀態
[root@manage01 ssl]# kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.192.129 Ready <none> 28m v1.17.3
192.168.192.130 Ready <none> 11s v1.17.3
6、部署一個nginx測試k8s集羣狀態
#服務正常
[root@manage01 ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-1 Healthy {"health": "true"}
etcd-2 Healthy {"health": "true"}
etcd-0 Healthy {"health": "true"}
#節點正常
[root@manage01 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.192.129 Ready <none> 21h v1.17.3
192.168.192.130 Ready <none> 21h v1.17.3
#創建nginx容器
[root@manage01 ~]# kubectl run nginx --image=nginx --replicas=3
deployment.apps/nginx created
#查看創建進度,全部running爲完畢
[root@manage01 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-6db489d4b7-cz6ft 1/1 Running 0 77s
nginx-6db489d4b7-rbvvj 1/1 Running 0 77s
nginx-6db489d4b7-xfllp 1/1 Running 0 77s
#暴露端口分發給節點
[root@manage01 ~]# kubectl expose deployment nginx --port=88 --target-port=80 --type=NodePort
service/nginx exposed
#查看訪問端口,88爲VIP端口,31318爲節點端口
[root@manage01 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.10.10.1 <none> 443/TCP 26h
nginx NodePort 10.10.10.147 <none> 88:31318/TCP 42s
#節點訪問
[root@node01 ~]# curl 10.10.10.147:88
[root@node01 ~]# curl 192.168.192.129:31318
[root@node01 ~]# curl 192.168.192.130:31318