1. Environment Overview
IP | Role | OS | Main Components
192.168.122.23 | k8s-master01 | CentOS Linux release 7.7.1908 (Core) | kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, kube-proxy, kube-flannel, etcd
192.168.122.173 | k8s-master02 | CentOS Linux release 7.7.1908 (Core) | kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, kube-proxy, kube-flannel, etcd
192.168.122.253 | k8s-master03 | CentOS Linux release 7.7.1908 (Core) | kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, kube-proxy, kube-flannel, etcd
192.168.122.100 | VIP | |
192.168.122.102 | node1 | CentOS Linux release 7.7.1908 (Core) | kubelet, kube-proxy, kube-flannel
(1) Adjust kernel parameters:
[root@k8s-master01 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches=89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
net.bridge.bridge-nf-call-arptables = 1
# prefer physical memory as much as possible; fall back to swap only when needed
vm.swappiness = 0
vm.overcommit_memory=1
vm.panic_on_oom=0
EOF
[root@k8s-master01 ~]# sysctl --system
* Applying /usr/lib/sysctl.d/00-system.conf ...
net.bridge.bridge-nf-call-ip6tables = 0
net.bridge.bridge-nf-call-iptables = 0
net.bridge.bridge-nf-call-arptables = 0
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches = 89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
net.bridge.bridge-nf-call-arptables = 1
vm.swappiness = 0
vm.overcommit_memory = 1
vm.panic_on_oom = 0
* Applying /etc/sysctl.conf ...
(2) Disable swap
Since Kubernetes 1.8, swap must be disabled; with swap enabled, the kubelet refuses to start under its default configuration.
# disable swap for the running system
swapoff -a
# disable swap permanently
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
(3) Enable IPVS
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
# verify that the modules are loaded
lsmod | grep ip_vs
# load the modules automatically at boot
cat <<EOF>> /etc/rc.local
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod +x /etc/rc.d/rc.local
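On a systemd-based system such as CentOS 7, a drop-in for systemd-modules-load is an alternative to rc.local (a minimal sketch; the file name ipvs.conf is arbitrary):
cat <<EOF > /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF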
(4) Disable firewalld and SELinux
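For example (typical commands on CentOS 7; adapt to your own security policy):
systemctl stop firewalld && systemctl disable firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config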
(5) Time synchronization. Keep the clocks of all nodes in sync; TLS certificate validation and etcd are sensitive to clock skew.
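For example, with chrony, which ships with CentOS 7 (substitute whatever NTP source fits your environment):
yum install -y chrony
systemctl enable chronyd && systemctl start chronyd
chronyc sources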
2. Install Docker, kubeadm, and kubelet
Docker, kubeadm, and kubelet must be installed on all nodes.
For the Docker installation, refer to:
https://docs.docker.com/install/linux/docker-ce/centos/
Version 18.09.1 is used here:
yum install docker-ce-18.09.1 docker-ce-cli-18.09.1 containerd.io
In addition, it is recommended to change Docker's Cgroup Driver to systemd. See:
https://kubernetes.io/docs/setup/production-environment/container-runtimes/
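A minimal /etc/docker/daemon.json along the lines of that guide (a sketch; restart docker afterwards so the change takes effect):
cat <<EOF > /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl restart docker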
[root@k8s-master01 images]# docker info
Containers: 17
Running: 16
Paused: 0
Stopped: 1
Images: 8
Server Version: 18.09.1
Storage Driver: overlay2
Backing Filesystem: xfs
Supports d_type: true
Native Overlay Diff: true
Logging Driver: json-file
Cgroup Driver: systemd
Plugins:
Volume: local
Network: bridge host macvlan null overlay
Log: awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog
Swarm: inactive
Runtimes: runc
Default Runtime: runc
Init Binary: docker-init
containerd version: b34a5c8af56e510852c35414db4c1f4fa6172339
runc version: 3e425f80a8c931f88e6d94a8c831b9d5aa481657
init version: fec3683
Security Options:
seccomp
Profile: default
Kernel Version: 3.10.0-1062.4.3.el7.x86_64
Operating System: CentOS Linux 7 (Core)
OSType: linux
Architecture: x86_64
CPUs: 2
Total Memory: 1.795GiB
Name: k8s-master01
ID: RFA7:NDVW:TIWI:CTVM:PISW:LL5O:K2U6:WGVF:PS7S:RX3Q:RJNN:PJBD
Docker Root Dir: /var/lib/docker
Debug Mode (client): false
Debug Mode (server): false
Registry: https://index.docker.io/v1/
Labels:
Experimental: false
Insecure Registries:
127.0.0.0/8
Live Restore Enabled: false
Product License: Community Engine
For kubeadm, kubectl, and kubelet, version 1.16.0 is used:
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubeadm-1.16.0-0.x86_64 kubectl-1.16.0-0.x86_64 kubelet-1.16.0-0.x86_64
Run on all nodes:
systemctl enable docker
systemctl start docker
systemctl enable kubelet
3. Install and configure keepalived and haproxy
These steps must be performed on all three master nodes.
yum install -y socat keepalived haproxy ipvsadm
systemctl enable haproxy
systemctl enable keepalived
(1) Configure haproxy.
[root@k8s-master01 ~]# cat /etc/haproxy/haproxy.cfg
global
    log         127.0.0.1 local3
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     32768
    user        haproxy
    group       haproxy
    daemon
    nbproc      1
    stats socket /var/lib/haproxy/stats

defaults
    mode            tcp
    log             global
    option          tcplog
    option          dontlognull
    option          redispatch
    retries         3
    timeout queue   1m
    timeout connect 10s
    timeout client  1m
    timeout server  1m
    timeout check   10s

listen stats
    mode  http
    bind  :8888
    stats enable
    stats uri /admin?stats
    stats auth admin:admin
    stats admin if TRUE

frontend k8s_https *:8443
    mode    tcp
    maxconn 2000
    default_backend https_sri

backend https_sri
    balance roundrobin
    server master1-api 192.168.122.23:6443  check inter 10000 fall 2 rise 2 weight 1
    server master2-api 192.168.122.173:6443 check inter 10000 fall 2 rise 2 weight 1
    server master3-api 192.168.122.253:6443 check inter 10000 fall 2 rise 2 weight 1
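Before starting haproxy, the configuration can be checked for syntax errors:
haproxy -c -f /etc/haproxy/haproxy.cfg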
(2) Configure keepalived
[root@k8s-master01 ~]# cat /etc/keepalived/keepalived.conf
global_defs {
    router_id master01
}

vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 3
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 80
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.122.100
    }
    track_script {
        check_haproxy
    }
}
[root@k8s-master02 ~]# cat /etc/keepalived/keepalived.conf
global_defs {
    router_id master02
}

vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 3
}

vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 80
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.122.100
    }
    track_script {
        check_haproxy
    }
}
[root@k8s-master03 ~]# cat /etc/keepalived/keepalived.conf
global_defs {
    router_id master03
}

vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 3
}

vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 80
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.122.100
    }
    track_script {
        check_haproxy
    }
}
The health-check script is as follows:
[root@k8s-master01 ~]# cat /etc/keepalived/check_haproxy.sh
#!/bin/bash
# if haproxy is no longer running, stop keepalived so that the VIP fails over
NUM=$(ps -C haproxy --no-header | wc -l)
if [ "$NUM" -eq 0 ]; then
    systemctl stop keepalived
fi
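The script must be executable and present on all three masters, e.g.:
chmod +x /etc/keepalived/check_haproxy.sh
scp /etc/keepalived/check_haproxy.sh 192.168.122.173:/etc/keepalived/
scp /etc/keepalived/check_haproxy.sh 192.168.122.253:/etc/keepalived/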
(3) Start haproxy and keepalived
systemctl start haproxy
systemctl start keepalived
[root@k8s-master01 ~]# systemctl status haproxy.service
● haproxy.service - HAProxy Load Balancer
Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
Active: active (running) since 日 2019-11-17 10:34:03 CST; 3h 47min ago
Main PID: 17158 (haproxy-systemd)
Tasks: 3
Memory: 5.8M
CGroup: /system.slice/haproxy.service
├─17158 /usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid
├─17159 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
└─17160 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
11月 17 10:34:03 master1 systemd[1]: Started HAProxy Load Balancer.
11月 17 10:34:03 master1 haproxy-systemd-wrapper[17158]: haproxy-systemd-wrapper: executing /usr/sbin/haproxy -f ... -Ds
Hint: Some lines were ellipsized, use -l to show in full.
[root@k8s-master01 ~]# systemctl status keepalived.service
● keepalived.service - LVS and VRRP High Availability Monitor
Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
Active: active (running) since 日 2019-11-17 10:33:57 CST; 3h 47min ago
Main PID: 17132 (keepalived)
Tasks: 3
Memory: 2.2M
CGroup: /system.slice/keepalived.service
├─17132 /usr/sbin/keepalived -D
├─17133 /usr/sbin/keepalived -D
└─17134 /usr/sbin/keepalived -D
11月 17 10:33:59 master1 Keepalived_vrrp[17134]: Sending gratuitous ARP on eth0 for 192.168.122.100
11月 17 10:33:59 master1 Keepalived_vrrp[17134]: Sending gratuitous ARP on eth0 for 192.168.122.100
11月 17 10:33:59 master1 Keepalived_vrrp[17134]: Sending gratuitous ARP on eth0 for 192.168.122.100
11月 17 10:33:59 master1 Keepalived_vrrp[17134]: Sending gratuitous ARP on eth0 for 192.168.122.100
11月 17 10:34:04 master1 Keepalived_vrrp[17134]: Sending gratuitous ARP on eth0 for 192.168.122.100
11月 17 10:34:04 master1 Keepalived_vrrp[17134]: VRRP_Instance(VI_1) Sending/queueing gratuitous ARPs on eth0 fo...2.100
11月 17 10:34:04 master1 Keepalived_vrrp[17134]: Sending gratuitous ARP on eth0 for 192.168.122.100
11月 17 10:34:04 master1 Keepalived_vrrp[17134]: Sending gratuitous ARP on eth0 for 192.168.122.100
11月 17 10:34:04 master1 Keepalived_vrrp[17134]: Sending gratuitous ARP on eth0 for 192.168.122.100
11月 17 10:34:04 master1 Keepalived_vrrp[17134]: Sending gratuitous ARP on eth0 for 192.168.122.100
Hint: Some lines were ellipsized, use -l to show in full.
4. Install the master nodes
A default configuration file can be generated with kubeadm config print init-defaults > kubeadm.conf.
[root@k8s-master01 ~]# cat kubeadm.conf
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 1.2.3.4
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master1
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.16.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
scheduler: {}
# list the required images
kubeadm config images list --config kubeadm.conf
[root@k8s-master01 ~]# kubeadm config images list --config kubeadm.conf
k8s.gcr.io/kube-apiserver:v1.16.0
k8s.gcr.io/kube-controller-manager:v1.16.0
k8s.gcr.io/kube-scheduler:v1.16.0
k8s.gcr.io/kube-proxy:v1.16.0
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.3.15-0
k8s.gcr.io/coredns:1.6.2
Make sure the images listed above are available on the master nodes.
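Since the per-node configs below switch imageRepository to registry.aliyuncs.com/google_containers, one way to pre-pull everything is (assuming kubeadm_master01.conf from the next step already exists):
kubeadm config images pull --config kubeadm_master01.conf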
(1) Node k8s-master01:
Configuration file kubeadm_master01.conf:
[root@k8s-master01 ~]# cat kubeadm_master01.conf
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.122.23
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.16.0
controlPlaneEndpoint: "192.168.122.100:8443"
imageRepository: registry.aliyuncs.com/google_containers
apiServer:
  certSANs:
  - "k8s-master01"
  - "k8s-master02"
  - "k8s-master03"
  - 192.168.122.23
  - 192.168.122.173
  - 192.168.122.253
  - 192.168.122.100
networking:
  podSubnet: "10.244.0.0/16"
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://192.168.122.23:2379"
      advertise-client-urls: "https://192.168.122.23:2379"
      listen-peer-urls: "https://192.168.122.23:2380"
      initial-advertise-peer-urls: "https://192.168.122.23:2380"
      initial-cluster: "k8s-master01=https://192.168.122.23:2380"
      initial-cluster-state: new
    serverCertSANs:
    - k8s-master01
    - 192.168.122.23
    peerCertSANs:
    - k8s-master01
    - 192.168.122.23
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
# initialize master01
kubeadm init --config kubeadm_master01.conf
[root@k8s-master01 ~]# kubeadm init --config kubeadm_master01.conf
[init] Using Kubernetes version: v1.16.0
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local k8s-master01 k8s-master02 k8s-master03] and IPs [10.96.0.1 192.168.122.23 192.168.122.100 192.168.122.23 192.168.122.173 192.168.122.253 192.168.122.100]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master01 localhost k8s-master01] and IPs [192.168.122.23 127.0.0.1 ::1 192.168.122.23]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master01 localhost k8s-master01] and IPs [192.168.122.23 127.0.0.1 ::1 192.168.122.23]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 24.514221 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.16" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: v6d021.dxax8e4vu6749njt
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join 192.168.122.100:8443 --token v6d021.dxax8e4vu6749njt \
--discovery-token-ca-cert-hash sha256:a86ecb2f31b0bfee32ab415e61ced3415249fe2becd81821c3d57404f2c5caca \
--control-plane
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.122.100:8443 --token v6d021.dxax8e4vu6749njt \
--discovery-token-ca-cert-hash sha256:a86ecb2f31b0bfee32ab415e61ced3415249fe2becd81821c3d57404f2c5caca
Following the prompt, run:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
If you list the current pods, you will see that only the coredns pods are in the Pending state, because the network plugin has not been installed yet.
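To confirm, something like:
kubectl get pod -n kube-system
# the coredns-* pods stay Pending until a network plugin is deployed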
# install the Flannel network plugin
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# apply the manifest (note: run this only on k8s-master01)
kubectl apply -f kube-flannel.yml
Next, distribute the certificates to k8s-master02 and k8s-master03:
#!/bin/bash
for index in 173 253; do
    ip=192.168.122.${index}
    ssh $ip "mkdir -p /etc/kubernetes/pki/etcd; mkdir -p ~/.kube/"
    scp /etc/kubernetes/pki/ca.crt $ip:/etc/kubernetes/pki/ca.crt
    scp /etc/kubernetes/pki/ca.key $ip:/etc/kubernetes/pki/ca.key
    scp /etc/kubernetes/pki/sa.key $ip:/etc/kubernetes/pki/sa.key
    scp /etc/kubernetes/pki/sa.pub $ip:/etc/kubernetes/pki/sa.pub
    scp /etc/kubernetes/pki/front-proxy-ca.crt $ip:/etc/kubernetes/pki/front-proxy-ca.crt
    scp /etc/kubernetes/pki/front-proxy-ca.key $ip:/etc/kubernetes/pki/front-proxy-ca.key
    scp /etc/kubernetes/pki/etcd/ca.crt $ip:/etc/kubernetes/pki/etcd/ca.crt
    scp /etc/kubernetes/pki/etcd/ca.key $ip:/etc/kubernetes/pki/etcd/ca.key
    scp /etc/kubernetes/admin.conf $ip:/etc/kubernetes/admin.conf
    scp /etc/kubernetes/admin.conf $ip:~/.kube/config
done
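The script assumes passwordless root SSH from k8s-master01 to the other two masters; if that is not set up yet, something along these lines works:
ssh-keygen -t rsa
ssh-copy-id root@192.168.122.173
ssh-copy-id root@192.168.122.253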
(2) Node k8s-master02
Configuration file kubeadm_master02.conf:
[root@k8s-master02 ~]# cat kubeadm_master02.conf
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.122.173
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.16.0
controlPlaneEndpoint: "192.168.122.100:8443"
imageRepository: registry.aliyuncs.com/google_containers
apiServer:
  certSANs:
  - "k8s-master01"
  - "k8s-master02"
  - "k8s-master03"
  - 192.168.122.23
  - 192.168.122.173
  - 192.168.122.253
  - 192.168.122.100
networking:
  podSubnet: "10.244.0.0/16"
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://192.168.122.173:2379"
      advertise-client-urls: "https://192.168.122.173:2379"
      listen-peer-urls: "https://192.168.122.173:2380"
      initial-advertise-peer-urls: "https://192.168.122.173:2380"
      initial-cluster: "k8s-master01=https://192.168.122.23:2380,k8s-master02=https://192.168.122.173:2380"
      initial-cluster-state: existing
    serverCertSANs:
    - k8s-master02
    - 192.168.122.173
    peerCertSANs:
    - k8s-master02
    - 192.168.122.173
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
# generate certificates
kubeadm init phase certs all --config kubeadm_master02.conf
[root@k8s-master02 ~]# kubeadm init phase certs all --config kubeadm_master02.conf
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Using existing ca certificate authority
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master02 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local k8s-master01 k8s-master02 k8s-master03] and IPs [10.96.0.1 192.168.122.173 192.168.122.100 192.168.122.23 192.168.122.173 192.168.122.253 192.168.122.100]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Using existing front-proxy-ca certificate authority
[certs] Generating "front-proxy-client" certificate and key
[certs] Using existing etcd/ca certificate authority
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master02 localhost k8s-master02] and IPs [192.168.122.173 127.0.0.1 ::1 192.168.122.173]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master02 localhost k8s-master02] and IPs [192.168.122.173 127.0.0.1 ::1 192.168.122.173]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Using the existing "sa" key
# set up etcd
kubeadm init phase etcd local --config kubeadm_master02.conf
[root@k8s-master02 ~]# kubeadm init phase etcd local --config kubeadm_master02.conf
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
# generate the kubelet kubeconfig
kubeadm init phase kubeconfig kubelet --config kubeadm_master02.conf
[root@k8s-master02 ~]# kubeadm init phase kubeconfig kubelet --config kubeadm_master02.conf
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "kubelet.conf" kubeconfig file
# start the kubelet
kubeadm init phase kubelet-start --config kubeadm_master02.conf
[root@k8s-master02 ~]# kubeadm init phase kubelet-start --config kubeadm_master02.conf
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
# add k8s-master02's etcd member to the cluster
[root@k8s-master02 ~]# kubectl exec -n kube-system etcd-k8s-master01 -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://192.168.122.23:2379 member add master2 https://192.168.122.173:2380
Added member named master2 with ID 82b4a2c0cf7a107b to cluster
ETCD_NAME="master2"
ETCD_INITIAL_CLUSTER="master2=https://192.168.122.173:2380,k8s-master01=https://192.168.122.23:2380"
ETCD_INITIAL_CLUSTER_STATE="existing"
# start kube-apiserver, kube-controller-manager, and kube-scheduler
[root@k8s-master02 ~]# kubeadm init phase kubeconfig all --config kubeadm_master02.conf
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/admin.conf"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/kubelet.conf"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[root@k8s-master02 ~]# kubeadm init phase control-plane all --config kubeadm_master02.conf
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
# check node status
[root@k8s-master02 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready master 60m v1.16.0
k8s-master02 Ready <none> 11m v1.16.0
# mark the node as a master
[root@k8s-master02 ~]# kubeadm init phase mark-control-plane --config kubeadm_master02.conf
[mark-control-plane] Marking the node k8s-master02 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master02 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
# check again
[root@k8s-master02 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready master 61m v1.16.0
k8s-master02 Ready master 12m v1.16.0
(3) Node k8s-master03
Configuration file kubeadm_master03.conf:
[root@k8s-master03 ~]# cat kubeadm_master03.conf
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.122.253
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.16.0
controlPlaneEndpoint: "192.168.122.100:8443"
imageRepository: registry.aliyuncs.com/google_containers
apiServer:
  certSANs:
  - "k8s-master01"
  - "k8s-master02"
  - "k8s-master03"
  - 192.168.122.23
  - 192.168.122.173
  - 192.168.122.253
  - 192.168.122.100
networking:
  podSubnet: "10.244.0.0/16"
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://192.168.122.253:2379"
      advertise-client-urls: "https://192.168.122.253:2379"
      listen-peer-urls: "https://192.168.122.253:2380"
      initial-advertise-peer-urls: "https://192.168.122.253:2380"
      initial-cluster: "k8s-master01=https://192.168.122.23:2380,k8s-master02=https://192.168.122.173:2380,k8s-master03=https://192.168.122.253:2380"
      initial-cluster-state: existing
    serverCertSANs:
    - k8s-master03
    - 192.168.122.253
    peerCertSANs:
    - k8s-master03
    - 192.168.122.253
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
# generate certificates
[root@k8s-master03 ~]# kubeadm init phase certs all --config kubeadm_master03.conf
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Using existing ca certificate authority
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master03 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local k8s-master01 k8s-master02 k8s-master03] and IPs [10.96.0.1 192.168.122.253 192.168.122.100 192.168.122.23 192.168.122.173 192.168.122.253 192.168.122.100]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Using existing front-proxy-ca certificate authority
[certs] Generating "front-proxy-client" certificate and key
[certs] Using existing etcd/ca certificate authority
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master03 localhost k8s-master03] and IPs [192.168.122.253 127.0.0.1 ::1 192.168.122.253]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master03 localhost k8s-master03] and IPs [192.168.122.253 127.0.0.1 ::1 192.168.122.253]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Using the existing "sa" key
# set up etcd
[root@k8s-master03 ~]# kubeadm init phase etcd local --config kubeadm_master03.conf
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
# generate the kubelet kubeconfig
[root@k8s-master03 ~]# kubeadm init phase kubeconfig kubelet --config kubeadm_master03.conf
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "kubelet.conf" kubeconfig file
# start the kubelet
[root@k8s-master03 ~]# kubeadm init phase kubelet-start --config kubeadm_master03.conf
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
# add k8s-master03's etcd member to the cluster
[root@k8s-master03 ~]# kubectl exec -n kube-system etcd-k8s-master01 -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://192.168.122.23:2379 member add master3 https://192.168.122.253:2380
Added member named master3 with ID bbe616bcc23e7c5a to cluster
ETCD_NAME="master3"
ETCD_INITIAL_CLUSTER="k8s-master02=https://192.168.122.173:2380,master3=https://192.168.122.253:2380,k8s-master01=https://192.168.122.23:2380"
ETCD_INITIAL_CLUSTER_STATE="existing"
# start kube-apiserver, kube-controller-manager, and kube-scheduler
[root@k8s-master03 ~]# kubeadm init phase kubeconfig all --config kubeadm_master03.conf
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/admin.conf"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/kubelet.conf"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[root@k8s-master03 ~]# kubeadm init phase control-plane all --config kubeadm_master03.conf
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
# mark the node as a master
[root@k8s-master03 ~]# kubeadm init phase mark-control-plane --config kubeadm_master03.conf
[mark-control-plane] Marking the node k8s-master03 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master03 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
5. Install the worker node
Initializing k8s-master01 printed the following hint:
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.122.100:8443 --token v6d021.dxax8e4vu6749njt \
--discovery-token-ca-cert-hash sha256:a86ecb2f31b0bfee32ab415e61ced3415249fe2becd81821c3d57404f2c5caca
[root@node1 ~]# kubeadm join 192.168.122.100:8443 --token v6d021.dxax8e4vu6749njt --discovery-token-ca-cert-hash sha256:a86ecb2f31b0bfee32ab415e61ced3415249fe2becd81821c3d57404f2c5caca
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.16" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
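If the bootstrap token has expired by the time a node joins (the default TTL is 24h), a fresh join command can be generated on any master:
kubeadm token create --print-join-command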
6. Test the cluster
# check node information
[root@k8s-master01 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready master 4h14m v1.16.0
k8s-master02 Ready master 3h25m v1.16.0
k8s-master03 Ready master 3h4m v1.16.0
node1 Ready <none> 172m v1.16.0
# check cluster information
[root@k8s-master01 ~]# kubectl cluster-info
Kubernetes master is running at https://192.168.122.100:8443
KubeDNS is running at https://192.168.122.100:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
# list the etcd cluster members
[root@k8s-master01 images]# kubectl exec -n kube-system etcd-k8s-master01 -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://192.168.122.23:2379 member list
82b4a2c0cf7a107b: name=k8s-master02 peerURLs=https://192.168.122.173:2380 clientURLs=https://192.168.122.173:2379 isLeader=false
bbe616bcc23e7c5a: name=k8s-master03 peerURLs=https://192.168.122.253:2380 clientURLs=https://192.168.122.253:2379 isLeader=false
f9d8f2568838dc77: name=k8s-master01 peerURLs=https://192.168.122.23:2380 clientURLs=https://192.168.122.23:2379 isLeader=true
# verify that IPVS is in use
[root@k8s-master01 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.96.0.1:443 rr
-> 192.168.122.23:6443 Masq 1 3 0
-> 192.168.122.173:6443 Masq 1 0 0
-> 192.168.122.253:6443 Masq 1 0 0
TCP 10.96.0.10:53 rr
-> 10.244.0.2:53 Masq 1 0 0
-> 10.244.0.3:53 Masq 1 0 0
TCP 10.96.0.10:9153 rr
-> 10.244.0.2:9153 Masq 1 0 0
-> 10.244.0.3:9153 Masq 1 0 0
UDP 10.96.0.10:53 rr
-> 10.244.0.2:53 Masq 1 0 0
-> 10.244.0.3:53 Masq 1 0 0
# finally, the pod status looks like this:
[root@k8s-master01 ~]# kubectl get pod -o wide --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-system coredns-58cc8c89f4-9jsrp 1/1 Running 0 4h16m 10.244.0.3 k8s-master01 <none> <none>
kube-system coredns-58cc8c89f4-d92jk 1/1 Running 0 4h16m 10.244.0.2 k8s-master01 <none> <none>
kube-system etcd-k8s-master01 1/1 Running 1 4h15m 192.168.122.23 k8s-master01 <none> <none>
kube-system etcd-k8s-master02 1/1 Running 6 3h28m 192.168.122.173 k8s-master02 <none> <none>
kube-system etcd-k8s-master03 1/1 Running 5 3h7m 192.168.122.253 k8s-master03 <none> <none>
kube-system kube-apiserver-k8s-master01 1/1 Running 3 4h16m 192.168.122.23 k8s-master01 <none> <none>
kube-system kube-apiserver-k8s-master02 1/1 Running 0 3h16m 192.168.122.173 k8s-master02 <none> <none>
kube-system kube-apiserver-k8s-master03 1/1 Running 0 166m 192.168.122.253 k8s-master03 <none> <none>
kube-system kube-controller-manager-k8s-master01 1/1 Running 1 4h16m 192.168.122.23 k8s-master01 <none> <none>
kube-system kube-controller-manager-k8s-master02 1/1 Running 0 3h16m 192.168.122.173 k8s-master02 <none> <none>
kube-system kube-controller-manager-k8s-master03 1/1 Running 0 166m 192.168.122.253 k8s-master03 <none> <none>
kube-system kube-flannel-ds-amd64-7w7hq 1/1 Running 0 3h7m 192.168.122.253 k8s-master03 <none> <none>
kube-system kube-flannel-ds-amd64-8lj9m 1/1 Running 0 4h12m 192.168.122.23 k8s-master01 <none> <none>
kube-system kube-flannel-ds-amd64-f9nkw 1/1 Running 0 174m 192.168.122.102 node1 <none> <none>
kube-system kube-flannel-ds-amd64-xvmhv 1/1 Running 0 3h28m 192.168.122.173 k8s-master02 <none> <none>
kube-system kube-proxy-8cnr7 1/1 Running 0 4h16m 192.168.122.23 k8s-master01 <none> <none>
kube-system kube-proxy-8g5rj 1/1 Running 0 3h28m 192.168.122.173 k8s-master02 <none> <none>
kube-system kube-proxy-h82tt 1/1 Running 0 3h7m 192.168.122.253 k8s-master03 <none> <none>
kube-system kube-proxy-lxxjb 1/1 Running 0 174m 192.168.122.102 node1 <none> <none>
kube-system kube-scheduler-k8s-master01 1/1 Running 1 4h15m 192.168.122.23 k8s-master01 <none> <none>
kube-system kube-scheduler-k8s-master02 1/1 Running 0 3h16m 192.168.122.173 k8s-master02 <none> <none>
kube-system kube-scheduler-k8s-master03 1/1 Running 0 166m 192.168.122.253 k8s-master03 <none> <none>
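As a final check of the HA setup, a simple failover test (a sketch using the hosts above): stopping haproxy on the VIP holder should make check_haproxy.sh stop keepalived there, moving the VIP to the next master while kubectl keeps working through https://192.168.122.100:8443.
# on k8s-master01 (current VIP holder)
systemctl stop haproxy
# on k8s-master02: the VIP should now be bound to eth0
ip addr show eth0 | grep 192.168.122.100
kubectl get nodes
# afterwards, restore k8s-master01:
# systemctl start haproxy && systemctl start keepalived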