Building a Highly Available K8S Cluster

Set the system hostname on each node and configure mutual name resolution in the hosts file
hostnamectl set-hostname k8s-master01
hostnamectl set-hostname k8s-master02
hostnamectl set-hostname k8s-master03


vi /etc/hosts
10.10.21.8 k8s-master01
10.10.21.28 k8s-master02
10.10.21.38 k8s-master03
10.10.21.100 k8s-vip
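
The same /etc/hosts entries are needed on every node. A quick way to push the file from master01 to the other two masters (a sketch, assuming root SSH access between the nodes):
[root@k8s-master01 ~]# scp /etc/hosts root@10.10.21.28:/etc/hosts
[root@k8s-master01 ~]# scp /etc/hosts root@10.10.21.38:/etc/hosts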



Install dependency packages
yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget net-tools git

Switch the firewall to iptables and set empty rules
systemctl stop firewalld && systemctl disable firewalld
yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save

Disable swap and SELinux
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

Tune kernel parameters for K8S
[root@k8s-master ~]# pwd
/root
[root@k8s-master ~]# vi kubernetes.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1 # the two lines above let bridged traffic pass through netfilter; both are required
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0 # do not use swap space unless the system is out of memory
vm.overcommit_memory=1 # do not check whether enough physical memory is available
vm.panic_on_oom=0 # do not panic on OOM; let the OOM killer handle it
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1 # disable IPv6; this step is also required
net.netfilter.nf_conntrack_max=2310720

Make the settings take effect at boot
[root@k8s-master ~]# cp kubernetes.conf /etc/sysctl.d/kubernetes.conf

[root@k8s-master ~]# sysctl -p /etc/sysctl.d/kubernetes.conf # apply immediately

Adjust the system time zone
Run on every node and adapt to your environment; if the time zone is already CST you can skip this step.
# Set the system time zone to Asia/Shanghai
[root@k8s-master ~]# timedatectl set-timezone Asia/Shanghai
# Write the current UTC time to the hardware clock
[root@k8s-master ~]# timedatectl set-local-rtc 0
# Restart the services that depend on the system time
[root@k8s-master ~]# systemctl restart rsyslog
[root@k8s-master ~]# systemctl restart crond






Stop services the system does not need
Run on every node; this disables the mail service.
[root@k8s-master ~]# systemctl stop postfix && systemctl disable postfix

Set up rsyslogd and systemd journald
Run on every node. Because CentOS 7 boots with systemd, there are two logging systems on CentOS 7; here we configure systemd journald as the one to use.
[root@k8s-master ~]# mkdir /var/log/journal # directory for persistent logs
[root@k8s-master ~]# mkdir /etc/systemd/journald.conf.d
[root@k8s-master ~]# vi /etc/systemd/journald.conf.d/99-prophet.conf
[Journal]
# Persist logs to disk
Storage=persistent






# Compress historical logs
Compress=yes

SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000

# Maximum disk space used: 10G
SystemMaxUse=10G

# Maximum size of a single log file: 200M
SystemMaxFileSize=200M

# Keep logs for 2 weeks
MaxRetentionSec=2week

# Do not forward logs to syslog
ForwardToSyslog=no

[root@k8s-master ~]# systemctl restart systemd-journald

Upgrade the kernel to 4.4
The 3.10.x kernel that ships with CentOS 7.x has bugs that make Docker and Kubernetes unstable, so install a newer long-term kernel from ELRepo:
[root@k8s-master ~]# rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# After installation, check that the menuentry for the new kernel in /boot/grub2/grub.cfg contains an initrd16 line; if not, install it again!
[root@k8s-master ~]# yum --enablerepo=elrepo-kernel install -y kernel-lt
# Set the machine to boot from the new kernel
[root@k8s-master ~]# grub2-set-default 'CentOS Linux (4.4.189-1.el7.elrepo.x86_64) 7 (Core)'
# After rebooting, install the kernel source/devel files
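The reboot and the devel-package installation are not shown above; a sketch of what they would look like (package names assume the elrepo kernel-lt line installed above):
[root@k8s-master ~]# reboot
# after the reboot, confirm the new kernel is running
[root@k8s-master ~]# uname -r
# then install the matching source/devel packages
[root@k8s-master ~]# yum --enablerepo=elrepo-kernel install -y kernel-lt-devel kernel-lt-headers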






Disable NUMA
[root@k8s-master ~]# cp /etc/default/grub{,.bak}
[root@k8s-master ~]# vi /etc/default/grub
# Add the numa=off parameter to the GRUB_CMDLINE_LINUX line, as shown below:
GRUB_CMDLINE_LINUX="crashkernel=auto rd.lvm.lv=centos/root rhgb quiet numa=off"



[root@k8s-master ~]# cp /boot/grub2/grub.cfg{,.bak}
[root@k8s-master ~]# grub2-mkconfig -o /boot/grub2/grub.cfg
[root@k8s-master ~]# reboot

Prerequisites for kube-proxy to use IPVS (required on all nodes)
[root@k8s-master ~]# modprobe br_netfilter

[root@k8s-master ~]# vi /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4




[root@k8s-master ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
nf_conntrack_ipv4 20480 0
nf_defrag_ipv4 16384 1 nf_conntrack_ipv4
ip_vs_sh 16384 0
ip_vs_wrr 16384 0
ip_vs_rr 16384 0
ip_vs 147456 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 114688 2 ip_vs,nf_conntrack_ipv4
libcrc32c 16384 2 xfs,ip_vs







Install Docker (required on all nodes)
[root@k8s-master ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@k8s-master ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

docker-ce can be installed here even without running yum update (running yum update -y on several nodes at the same time may be treated as a malicious ***; you can run the update at another time)
[root@k8s-master ~]# yum update -y && yum install -y docker-ce
[root@k8s-master ~]# reboot

## Create the /etc/docker directory
[root@k8s-master ~]# mkdir /etc/docker

Configure daemon.json
vi /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  }
}







[root@k8s-master ~]# mkdir -p /etc/systemd/system/docker.service.d
#Restart the Docker service
[root@k8s-master ~]# systemctl daemon-reload && systemctl restart docker && systemctl enable docker
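
Optionally confirm that Docker picked up the systemd cgroup driver from daemon.json (the output line below is illustrative):
[root@k8s-master ~]# docker info | grep -i cgroup
Cgroup Driver: systemd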

Start the HAProxy and Keepalived containers on the first master
Import the images and scripts > run them > check the available nodes
[root@k8s-master ~]# mkdir -p /usr/local/kubernetes/install
Copy the required files into /usr/local/kubernetes/install
[root@k8s-master ~]# cd /usr/local/kubernetes/install
[root@k8s-master install]# ls
haproxy.tar keepalived.tar kubeadm-basic.images.tar.gz load-images.sh start.keep.tar.gz
[root@k8s-master install]# docker load -i haproxy.tar
[root@k8s-master install]# docker load -i keepalived.tar
[root@k8s-master install]# tar -zxvf kubeadm-basic.images.tar.gz
[root@k8s-master install]# vi load-images.sh
#!/bin/bash
# Load every kubeadm base image that was unpacked into the images directory
cd /usr/local/kubernetes/install/kubeadm-basic.images
ls /usr/local/kubernetes/install/kubeadm-basic.images | grep -v load-images.sh > /tmp/k8s-images.txt
for i in $( cat /tmp/k8s-images.txt )
do
    docker load -i $i
done
rm -rf /tmp/k8s-images.txt



[root@k8s-master install]# chmod +x load-images.sh
[root@k8s-master install]# ./load-images.sh
[root@k8s-master install]# tar -zxvf start.keep.tar.gz
[root@k8s-master install]# mv data/ /
[root@k8s-master install]# cd /data
[root@k8s-master data]# cd lb
[root@k8s-master lb]# ls
etc kubeadm-config.yaml start-haproxy.sh start-keepalived.sh






[root@k8s-master lb]# vi etc/haproxy.cfg
server rancher01 10.10.21.8:6443
(Configure the backend nodes one at a time here; otherwise starting HAProxy may fail because the other masters are not up yet)
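
Only the backend server line is shown above. For reference, the relevant part of etc/haproxy.cfg looks roughly like the sketch below (listener and backend names, balance mode and the check option are illustrative; only the server lines need editing):
frontend kube-apiserver
    bind *:6444
    mode tcp
    default_backend kube-masters

backend kube-masters
    mode tcp
    balance roundrobin
    server rancher01 10.10.21.8:6443 check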

[root@k8s-master lb]# vi start-haproxy.sh
MasterIP1=10.10.21.8
MasterIP2=10.10.21.28
MasterIP3=10.10.21.38


[root@k8s-master lb]# ./start-haproxy.sh
aee0cf634eadad7b73c58f7c56e2bf6bc62d4cb489f3c156bb8c0650910d58f6

[root@k8s-master lb]# netstat -naltp | grep 6444
tcp6 0 0 :::6444 :::* LISTEN 2340/docker-proxy

[root@k8s-master lb]# vi start-keepalived.sh
#!/bin/bash
VIRTUAL_IP=10.10.21.100
INTERFACE=eth0
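
Only the variables are shown above; the rest of start-keepalived.sh essentially runs the wise2c/keepalived-k8s container with those values. A rough sketch of what such a script does (the environment variable names passed to the image are assumptions):
#!/bin/bash
VIRTUAL_IP=10.10.21.100
INTERFACE=eth0

# run keepalived on the host network so it can bind the VIP to eth0
docker run -d --name=Keepalived-K8S \
    --net=host --cap-add=NET_ADMIN \
    -e VIRTUAL_IP=$VIRTUAL_IP \
    -e INTERFACE=$INTERFACE \
    --restart=always \
    wise2c/keepalived-k8s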


[root@k8s-master lb]# ./start-keepalived.sh
a280dbbb7bd9d4e0b724e111d1dff308880e6931f84a28eaf3015ff1b42fc25d

[root@k8s-master lb]# ip add
eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:50:56:92:70:dd brd ff:ff:ff:ff:ff:ff
inet 10.10.21.28/24 brd 10.10.21.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet 10.10.21.100/24 scope global secondary eth0
valid_lft forever preferred_lft forever





Install kubeadm (master and worker setup) (required on all nodes)
[root@k8s-master lb]# vi /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg








[root@k8s-master lb]# yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
[root@k8s-master lb]# systemctl enable kubelet.service

Initialize the first master node (everything above must be done on all nodes; the steps below are only for the first master)
[root@k8s-master lb]# cd /usr/local/kubernetes/install/
[root@k8s-master install]# kubeadm config print init-defaults > kubeadm-config.yaml

[root@k8s-master install]# vi kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.10.21.8
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "10.10.21.100:6444"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}


---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
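
Once the cluster is up (after the kubeadm init below), you can optionally confirm that kube-proxy is really using IPVS; the virtual servers listed are illustrative:
[root@k8s-master01 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.96.0.1:443 rr
  -> 10.10.21.8:6443              Masq    1      0          0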



[root@k8s-master install]# kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

kubeadm join 10.10.21.100:6444 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:5e75bb6da2837ca318cc79fdb74e149a5ac185005f89cca31deba5e5fb962df6 \
--control-plane --certificate-key f9a01415dfb5909b920f2a853b1161e3f05cc9a992c922a94a21398bf22c60d3

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.10.21.100:6444 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:5e75bb6da2837ca318cc79fdb74e149a5ac185005f89cca31deba5e5fb962df6

[root@k8s-master install]# mkdir -p $HOME/.kube
[root@k8s-master install]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master install]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

[root@k8s-master01 install]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 NotReady master 2m44s v1.15.1

Next, start the HAProxy and Keepalived containers on the other master nodes
[root@k8s-master lb]# vi /data/lb/etc/haproxy.cfg (configure only one backend node for now)
server rancher01 10.10.21.8:6443

[root@k8s-master lb]# vi start-haproxy.sh
MasterIP1=10.10.21.8
MasterIP2=10.10.21.28
MasterIP3=10.10.21.38


[root@k8s-master lb]# cd /data/lb/
[root@k8s-master lb]# ./start-haproxy.sh
aee0cf634eadad7b73c58f7c56e2bf6bc62d4cb489f3c156bb8c0650910d58f6

[root@k8s-master lb]# netstat -naltp | grep 6444
tcp6 0 0 :::6444 :::* LISTEN 2340/docker-proxy

[root@k8s-master lb]# vi start-keepalived.sh
#!/bin/bash
VIRTUAL_IP=10.10.21.100
INTERFACE=eth0


[root@k8s-master lb]# ./start-keepalived.sh
a280dbbb7bd9d4e0b724e111d1dff308880e6931f84a28eaf3015ff1b42fc25d

[root@k8s-master lb]# docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
4c4551e52852 wise2c/keepalived-k8s "/usr/bin/keepalived…" 35 seconds ago Up 33 seconds Keepalived-K8S
910875d10340 wise2c/haproxy-k8s "/docker-entrypoint.…" 28 minutes ago Up 28 minutes 0.0.0.0:6444->6444/tcp HAProxy-K8S


Use the join command from the output of initializing the first master above to add the other master nodes to the cluster
[root@k8s-master lb]# kubeadm join 10.10.21.100:6444 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:5e75bb6da2837ca318cc79fdb74e149a5ac185005f89cca31deba5e5fb962df6 \
--control-plane --certificate-key f9a01415dfb5909b920f2a853b1161e3f05cc9a992c922a94a21398bf22c60d3


Error:
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
error execution phase preflight: unable to fetch the kubeadm-config ConfigMap: failed to get config map: Unauthorized

Note: the token generated by kubeadm init is only valid for one day. If a node hits the error above when running kubeadm join,

check on the master whether the token you are using is still valid with kubeadm token list:
[root@k8s-master01 install]# kubeadm token list
TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS
abcdef.0123456789abcdef <invalid> 2020-12-19T18:46:54+08:00 authentication,signing <none> system:bootstrappers:kubeadm:default-node-token
rf5y2f.bdtdm9ojmr86lyhp <invalid> 2020-12-18T20:46:54+08:00 <none> Proxy for managing TTL for the kubeadm-certs secret <none>



Generate a token that does not expire
[root@k8s-master01 install]# kubeadm token create --ttl 0 --print-join-command
kubeadm join 10.10.21.100:6444 --token 86lgxf.0xifzlgrxxj7ta6d --discovery-token-ca-cert-hash sha256:5e75bb6da2837ca318cc79fdb74e149a5ac185005f89cca31deba5e5fb962df6

Join the master node to the cluster again
[root@k8s-master lb]# kubeadm join 10.10.21.100:6444 --token 86lgxf.0xifzlgrxxj7ta6d \
--discovery-token-ca-cert-hash sha256:5e75bb6da2837ca318cc79fdb74e149a5ac185005f89cca31deba5e5fb962df6 \
--control-plane --certificate-key f9a01415dfb5909b920f2a853b1161e3f05cc9a992c922a94a21398bf22c60d3


Success:
This node has joined the cluster and a new control plane instance was created:

  • Certificate signing request was sent to apiserver and approval was received.
  • The Kubelet was informed of the new secure connection details.
  • Control plane (master) label and taint were applied to the new node.
  • The Kubernetes control plane instances scaled up.
  • A new etcd member was added to the local/stacked etcd cluster.

To start administering your cluster from this node, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Run 'kubectl get nodes' to see this node join the cluster.

[root@k8s-master02 lb]# mkdir -p $HOME/.kube
[root@k8s-master02 lb]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master02 lb]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8s-master02 ~]# cat .kube/config
Confirm that it shows server: https://10.10.21.100:6444



On all nodes, update haproxy.cfg with all three masters and restart the container
[root@k8s-master lb]# vi /data/lb/etc/haproxy.cfg
server rancher01 10.10.21.8:6443
server rancher02 10.10.21.28:6443
server rancher03 10.10.21.38:6443



[root@k8s-master01 install]# docker ps -a | grep haproxy
84c2c97f1100 wise2c/haproxy-k8s "/docker-entrypoint.…" 2 days ago Up 2 days 0.0.0.0:6444->6444/tcp HAProxy-K8S

[root@k8s-master01 install]# docker rm -f HAProxy-K8S && /data/lb/start-haproxy.sh
HAProxy-K8S
411fb977585726ff43f14af910f91db128e5a466eb32082636dbe3a64060864f

[root@k8s-master01 install]# docker ps -a | grep haproxy
411fb9775857 wise2c/haproxy-k8s "/docker-entrypoint.…" 41 seconds ago Up 39 seconds 0.0.0.0:6444->6444/tcp HAProxy-K8S

Check from the first master:
[root@k8s-master01 install]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 NotReady master 2d18h v1.15.1
k8s-master02 NotReady master 36m v1.15.1
k8s-master03 NotReady master 29m v1.15.1




[root@k8s-master01 install]# pwd
/usr/local/kubernetes/install

Deploy the pod network from the first master
[root@k8s-master01 install]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
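
Apply the downloaded manifest to deploy flannel (this step is implied by the flannel pods shown running below):
[root@k8s-master01 install]# kubectl apply -f kube-flannel.yml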

[root@k8s-master01 install]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-5c98db65d4-8zmw7 1/1 Running 0 2d18h
coredns-5c98db65d4-gwjrx 1/1 Running 0 2d18h
etcd-k8s-master01 1/1 Running 0 2d18h
etcd-k8s-master02 1/1 Running 0 61m
etcd-k8s-master03 1/1 Running 0 25m
kube-apiserver-k8s-master01 1/1 Running 0 2d18h
kube-apiserver-k8s-master02 1/1 Running 0 61m
kube-apiserver-k8s-master03 1/1 Running 0 25m
kube-controller-manager-k8s-master01 1/1 Running 1 2d18h
kube-controller-manager-k8s-master02 1/1 Running 0 61m
kube-controller-manager-k8s-master03 1/1 Running 0 25m
kube-flannel-ds-6fbjn 1/1 Running 0 72s
kube-flannel-ds-tdv9w 1/1 Running 0 72s
kube-flannel-ds-zqj7x 1/1 Running 0 72s
kube-proxy-hj8qm 1/1 Running 0 2d18h
kube-proxy-k2p4m 1/1 Running 0 54m
kube-proxy-txqkl 1/1 Running 0 61m
kube-scheduler-k8s-master01 1/1 Running 1 2d18h
kube-scheduler-k8s-master02 1/1 Running 0 61m
kube-scheduler-k8s-master03 1/1 Running 0 25m



[root@k8s-master01 install]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready master 2d18h v1.15.1
k8s-master02 Ready master 61m v1.15.1
k8s-master03 Ready master 54m v1.15.1



Shut down the first master (to test failover)
[root@k8s-master01 install]# shutdown -h now

Operate on the other nodes
[root@k8s-master02 ~]# kubectl get node
Unable to connect to the server: net/http: TLS handshake timeout

[root@k8s-master02 ~]# pwd
/root

[root@k8s-master02 ~]# vi .kube/config (change the server address to the local node's IP, as below)
server: https://10.10.21.28:6444

[root@k8s-master02 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 NotReady master 2d19h v1.15.1
k8s-master02 Ready master 84m v1.15.1
k8s-master03 Ready master 77m v1.15.1



[root@k8s-master03 ~]# vi .kube/config (change the server address to the local node's IP, as below)
server: https://10.10.21.38:6444

Start the first master back up
Check the etcd cluster status
[root@k8s-master01 ~]# kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-master03_55d9202f-0962-4efb-a512-f21959001268","leaseDurationSeconds":15,"acquireTime":"2020-12-21T05:27:47Z","renewTime":"2020-12-21T05:55:47Z","leaderTransitions":2}'
  creationTimestamp: "2020-12-18T10:46:52Z"
  name: kube-controller-manager
  namespace: kube-system
  resourceVersion: "298045"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-controller-manager
  uid: 0dbbd8ad-8fda-4d24-8e16-19e772030559


[root@k8s-master01 ~]# kubectl get endpoints kube-scheduler --namespace=kube-system -o yaml
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-master02_4567a03f-a1a3-4c16-aa47-e7efe6553d26","leaseDurationSeconds":15,"acquireTime":"2020-12-21T05:27:46Z","renewTime":"2020-12-21T05:56:05Z","leaderTransitions":2}'
  creationTimestamp: "2020-12-18T10:46:53Z"
  name: kube-scheduler
  namespace: kube-system
  resourceVersion: "298073"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-scheduler
  uid: 2bcc5145-baea-40bf-b3e9-0e7f057c7fd8


[root@k8s-master01 ~]# kubectl -n kube-system exec etcd-k8s-master01 -- etcdctl --endpoints=https://10.10.21.8:2379 --ca-file=/etc/kubernetes/pki/etcd/ca.crt --cert-file=/etc/kubernetes/pki/etcd/server.crt --key-file=/etc/kubernetes/pki/etcd/server.key cluster-health
member 1f716c5cc789f0ad is healthy: got healthy result from https://10.10.21.28:2379
member 40869e813511be0d is healthy: got healthy result from https://10.10.21.38:2379
member 7557ff4ca558021b is healthy: got healthy result from https://10.10.21.8:2379
cluster is healthy
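
The command above uses the etcdctl v2 API. A roughly equivalent check with the v3 API, run from inside the same etcd pod (a sketch; certificate paths as above):
[root@k8s-master01 ~]# kubectl -n kube-system exec etcd-k8s-master01 -- sh -c "ETCDCTL_API=3 etcdctl --endpoints=https://10.10.21.8:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key endpoint health"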



Join additional master nodes and the remaining worker nodes (just run the join commands recorded in the installation log)
[root@k8s-master01 ~]# cat /usr/local/kubernetes/install/kubeadm-init.log
