Deploying a Kubernetes 1.17.4 high-availability cluster with kubeadm

I. Operating system parameter settings

Three master nodes:

HOST IP
master1 192.168.0.11
master2 192.168.0.12
master3 192.168.0.13
VIP 192.168.0.14
1. Set the system hostname and the hosts file
hostnamectl set-hostname k8s-master01
Edit /etc/hosts on every node and add:
192.168.0.11 master01
192.168.0.12 master02
192.168.0.13 master03
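
For example, the entries can be appended in one step with a heredoc (run on all three masters):
cat >> /etc/hosts <<EOF
192.168.0.11 master01
192.168.0.12 master02
192.168.0.13 master03
EOF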

2. Install required tools
yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git

3. Switch the firewall to iptables and flush the rules
systemctl stop firewalld && systemctl disable firewalld
yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save

4. Disable swap and SELinux
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

5. Tune kernel parameters
Adjust the kernel parameters for Kubernetes:
cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# Disallow use of swap; only swap when the system is out of memory
vm.swappiness=0
# Do not check whether enough physical memory is available before committing
vm.overcommit_memory=1
# Do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
 
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf

modprobe br_netfilter
modprobe nf_conntrack 

sysctl -p /etc/sysctl.d/kubernetes.conf

6. Upgrade the system kernel to the 4.4.x series
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
 
# After installation, check that the kernel's menuentry in /boot/grub2/grub.cfg contains an initrd16 line; if not, install again
yum --enablerepo=elrepo-kernel install -y kernel-lt
# Set the new kernel as the default boot entry (the menuentry title must match the one in /boot/grub2/grub.cfg)
grub2-set-default "CentOS Linux (4.4.182-1.el7.elrepo.x86_64) 7 (Core)"
Reboot:
reboot
After the reboot, check the kernel version:
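For example, the running kernel can be confirmed with:
uname -r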

7. Set up IPVS
Prerequisites for enabling IPVS mode in kube-proxy:
modprobe br_netfilter
 
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
 
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

8. Install Docker
yum install -y yum-utils device-mapper-persistent-data lvm2
 
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
 
yum list docker-ce --showduplicates | sort -r

Decide whether to run yum update -y; this step is optional.

yum install -y docker-ce
 
# Create the /etc/docker directory
mkdir /etc/docker
 
# Configure the daemon
cat > /etc/docker/daemon.json <<EOF
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "log-driver": "json-file",
    "log-opts": {
       "max-size": "100m"
    }
}
EOF
 
mkdir -p /etc/systemd/system/docker.service.d
 
 
# Restart the Docker service
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
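
To confirm the daemon picked up the systemd cgroup driver, for example:
docker info | grep -i 'cgroup driver'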
 
9. Enable IPVS
Load the full set of IPVS modules (this rewrites the /etc/sysconfig/modules/ipvs.modules file created in step 7):
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
 /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
 if [ $? -eq 0 ]; then
 /sbin/modprobe \${kernel_module}
 fi
done
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
 
10. Install kubeadm
Install kubeadm, kubelet, and kubectl (on every node, masters and workers alike):
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
 

yum -y install kubeadm-1.17.4 kubectl-1.17.4 kubelet-1.17.4
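
Optionally, verify the installed versions and enable kubelet to start on boot (kubeadm init/join will start it later; see also the note in Part IV, step 4):
kubeadm version
kubelet --version
systemctl enable kubelet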

II. Configure the HAProxy proxy

Pull the HAProxy image:
docker pull haproxy:1.7.8-alpine

mkdir /etc/haproxy
cat >/etc/haproxy/haproxy.cfg<<EOF
global
  log 127.0.0.1 local0 err
  maxconn 50000
  uid 99
  gid 99
  #daemon
  nbproc 1
  pidfile haproxy.pid
 
defaults
  mode http
  log 127.0.0.1 local0 err
  maxconn 50000
  retries 3
  timeout connect 5s
  timeout client 30s
  timeout server 30s
  timeout check 2s
 
listen admin_stats
  mode http
  bind 0.0.0.0:1080
  log 127.0.0.1 local0 err
  stats refresh 30s
  stats uri     /haproxy-status
  stats realm   Haproxy\ Statistics
  stats auth    test:test
  stats hide-version
  stats admin if TRUE
 
frontend k8s-https
  bind 0.0.0.0:8443
  mode tcp
  #maxconn 50000
  default_backend k8s-https
 
backend k8s-https
  mode tcp
  balance roundrobin
  server master01 192.168.0.11:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
  server master02 192.168.0.12:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
  server master03 192.168.0.13:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
EOF
 
Start HAProxy:
docker run -d --name my-haproxy \
-v /etc/haproxy:/usr/local/etc/haproxy:ro \
-p 8443:8443 \
-p 1080:1080 \
--restart always \
haproxy:1.7.8-alpine
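
To verify that HAProxy came up and is listening on ports 8443 and 1080 (netstat is available from the net-tools package installed in Part I, step 2):
docker logs my-haproxy
netstat -lntp | grep -E '8443|1080'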
 
The login credentials are the stats auth values from the configuration file (test:test). The stats page is available on each master:
http://192.168.0.11:1080/haproxy-status
http://192.168.0.12:1080/haproxy-status
http://192.168.0.13:1080/haproxy-status

III. Deploy keepalived

1. Pull the keepalived image
docker pull osixia/keepalived:1.4.4
 
2. Start keepalived
# Note: eth0 must be the interface on the 192.168.0.0/24 network used in this setup
KEEPALIVED_VIRTUAL_IPS is the VIP
KEEPALIVED_UNICAST_PEERS lists the real IPs of all nodes
 
 docker run --net=host --cap-add=NET_ADMIN \
-e KEEPALIVED_INTERFACE=eth0 \
-e KEEPALIVED_VIRTUAL_IPS="#PYTHON2BASH:['192.168.0.14']" \
-e KEEPALIVED_UNICAST_PEERS="#PYTHON2BASH:['192.168.0.11','192.168.0.12','192.168.0.13']" \
-e KEEPALIVED_PASSWORD=hello \
--name k8s-keepalived \
--restart always \
-d osixia/keepalived:1.4.4
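
To check that the VIP has been assigned on the active node (eth0 is assumed, as above):
ip addr show eth0 | grep 192.168.0.14
ping -c 3 192.168.0.14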
 
3. Cleanup if the deployment fails
docker rm -f k8s-keepalived
ip a del 192.168.0.14/32 dev eth0

IV. Deploy the cluster with kubeadm

1. Initialize the first master node

kubeadm config print init-defaults > kubeadm-config.yaml

vi kubeadm-config.yaml

apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.0.11
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.0.14:8443
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.17.4
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: "10.244.0.0/16"
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
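
Optionally, pre-pull the control-plane images before running init (this assumes the node can reach the imageRepository configured above):
kubeadm config images pull --config=kubeadm-config.yaml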

kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log

2. Prepare the kubeconfig file for kubectl

By default, kubectl looks for a config file in the .kube directory under the home directory of the user running it. Here we copy the admin.conf generated during the [kubeconfig] step of initialization to .kube/config.
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

kubectl get nodes
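
Until the flannel network from step 5 is applied, the node will typically show NotReady; the control-plane pods can be inspected with:
kubectl get pods -n kube-system -o wide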

3. Copy the certificate files to the other master nodes

Distribute the SSH public key:

ssh-keygen
ssh-copy-id 192.168.0.12
ssh-copy-id 192.168.0.13
USER=root
CONTROL_PLANE_IPS="192.168.0.12 192.168.0.13"
for host in ${CONTROL_PLANE_IPS}; do
    ssh "${USER}"@$host "mkdir -p /etc/kubernetes/pki/etcd"
    scp /etc/kubernetes/pki/ca.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/front-proxy-ca.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/etcd/ca.* "${USER}"@$host:/etc/kubernetes/pki/etcd/
    scp /etc/kubernetes/admin.conf "${USER}"@$host:/etc/kubernetes/
done

4. Join the additional master nodes

On master02 and master03, run the control-plane join command printed in kubeadm-init.log (the endpoint is the VIP configured as controlPlaneEndpoint), e.g.:

kubeadm join 192.168.0.14:8443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:005bf2fc71dddd3b1s5adddb8bf0a02ccd4ab507819434913fe9e9a9eb762ee --control-plane

After the join completes, run systemctl enable kubelet on each node so that kubelet starts automatically; otherwise the node will not recover after a reboot.
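
Since admin.conf was copied to the other masters in step 3, kubectl can also be set up on them the same way as on master01:
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
kubectl get nodes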

5. Deploy flannel

wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl create -f kube-flannel.yml
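
Once the flannel pods are running, all joined masters should report Ready:
kubectl get pods --all-namespaces | grep flannel
kubectl get nodes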