k8s high-availability cluster binary installation

OS version: CentOS 7.6

Kubernetes version: v1.13.4

Docker version: 18.06

k8s-vip  192.168.2.240
k8s-m1   192.168.2.241  etcd kubectl apiserver scheduler controller-manager kubelet kube-proxy haproxy keepalived
k8s-m2   192.168.2.242  etcd kubectl apiserver scheduler controller-manager kubelet kube-proxy haproxy keepalived
k8s-m3   192.168.2.243  etcd kubectl apiserver scheduler controller-manager kubelet kube-proxy haproxy keepalived
k8s-n1   192.168.2.244  kubelet kube-proxy
k8s-n2   192.168.2.245  kubelet kube-proxy
k8s-n3   192.168.2.246  kubelet kube-proxy


#Hostname configuration (/etc/hosts, identical on all nodes)
cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.2.241 k8s-m1
192.168.2.242 k8s-m2
192.168.2.243 k8s-m3
192.168.2.244 k8s-n1
192.168.2.245 k8s-n2
192.168.2.246 k8s-n3

For convenience, set up passwordless SSH (key-based login) from k8s-m1 to all nodes.
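#A minimal sketch of the key setup (assumption: password-based root SSH is still allowed, so ssh-copy-id can push the key):
ssh-keygen -t rsa -b 2048 -N "" -f /root/.ssh/id_rsa
for node in k8s-{m,n}{1,2,3};do ssh-copy-id root@$node;done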

#The following steps are executed on k8s-m1
cat /root/init_env.sh
#!/bin/bash
#Disable firewalld and SELinux
systemctl stop firewalld && systemctl disable firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
#Disable swap
swapoff -a && sysctl -w vm.swappiness=0
sed -i 's/.*swap.*/#&/g' /etc/fstab
#Kernel parameters required by Docker and Kubernetes
cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf
#Load the ip_vs kernel modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
#Install Docker 18.06
yum -y install yum-utils device-mapper-persistent-data lvm2 wget epel-release ipvsadm vim ntpdate
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install -y docker-ce-18.06.1.ce-3.el7
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://gco4rcsp.mirror.aliyuncs.com"],
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m",
    "max-file": "3"
  }
}
EOF
systemctl enable docker && systemctl daemon-reload && systemctl restart docker


#Copy the init script to every node and run it
chmod +x /root/init_env.sh
for node in k8s-{m,n}{1,2,3};do scp /root/init_env.sh root@$node:/root/;done
for node in k8s-{m,n}{1,2,3};do ssh root@$node /root/init_env.sh >> /dev/null;echo -e "$node install complete";done


#Time-synchronization cron entry for all nodes
0 * * * * ntpdate 202.112.10.36
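#One way to install the entry on every node (a sketch; ntpdate was installed by the init script and the NTP server above is just an example):
for node in k8s-{m,n}{1,2,3};do ssh root@$node 'echo "0 * * * * /usr/sbin/ntpdate 202.112.10.36" >> /var/spool/cron/root';done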


#Environment variables used in the following steps
export SOFT_DIR=/root/k8s_soft
export ETCD_version=v3.3.12
export ETCD_SSL_DIR=/etc/etcd/ssl
export K8S_DIR=/etc/kubernetes
export APISERVER_IP=192.168.2.240
export KUBE_APISERVER=https://192.168.2.240:8443
export SYSTEM_SERVICE_DIR=/usr/lib/systemd/system


#Download the Kubernetes binaries and copy them to the appropriate nodes
mkdir -p ${SOFT_DIR} && cd ${SOFT_DIR} && wget  https://storage.googleapis.com/kubernetes-release/release/v1.13.4/kubernetes-server-linux-amd64.tar.gz && tar -zxvf kubernetes-server-linux-amd64.tar.gz && cd kubernetes/server/bin/
for node in k8s-m{1,2,3};do scp kubectl kube-apiserver kube-scheduler kube-controller-manager kubelet kube-proxy root@$node:/usr/local/bin/;done
for node in k8s-n{1,2,3};do scp kubelet kube-proxy root@$node:/usr/local/bin/;done


#Download cfssl
cd ${SOFT_DIR} && wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/local/bin/cfssljson
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson


#CNI plugins
export CNI_URL="https://github.com/containernetworking/plugins/releases/download"
export CNI_VERSION=v0.7.4
cd ${SOFT_DIR} && wget  "${CNI_URL}/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz"
mkdir -p /opt/cni/bin
tar -zxf cni-plugins-amd64-${CNI_VERSION}.tgz -C /opt/cni/bin
#Copy the plugins to the other nodes
for node in k8s-m{2,3} k8s-n{1,2,3};do ssh root@$node mkdir -p /opt/cni/bin/;scp /opt/cni/bin/* root@$node:/opt/cni/bin;done


#Download etcd
cd ${SOFT_DIR} && wget https://github.com/etcd-io/etcd/releases/download/${ETCD_version}/etcd-${ETCD_version}-linux-amd64.tar.gz
tar -zxvf etcd-${ETCD_version}-linux-amd64.tar.gz && cd etcd-${ETCD_version}-linux-amd64
#Copy the binaries to the master nodes
for node in k8s-m{1,2,3};do scp etcd* root@$node:/usr/local/bin/;done


#Generate the etcd CA and etcd certificates
mkdir -p ${ETCD_SSL_DIR} && cd ${ETCD_SSL_DIR}
cat > ca-config.json <<EOF
{"signing":{"default":{"expiry":"87600h"},"profiles":{"kubernetes":{"usages":["signing","key encipherment","server auth","client auth"],"expiry":"87600h"}}}}
EOF
cat > etcd-ca-csr.json <<EOF 
{"CN":"etcd","key":{"algo":"rsa","size":2048},"names":[{"C":"CN","ST":"BeiJing","L":"BeiJing","O":"etcd","OU":"etcd"}]}
EOF
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare etcd-ca
cfssl gencert -ca=etcd-ca.pem -ca-key=etcd-ca-key.pem -config=ca-config.json -hostname=127.0.0.1,192.168.2.241,192.168.2.242,192.168.2.243 -profile=kubernetes etcd-ca-csr.json | cfssljson -bare etcd
rm -rf *.json *.csr
for node in k8s-m{2,3};do ssh root@$node mkdir -p ${ETCD_SSL_DIR} /var/lib/etcd;scp * root@$node:${ETCD_SSL_DIR};done


#etcd configuration file (this instance is etcd01 on k8s-m1)
cat /etc/etcd/config 
#[Member]
ETCD_NAME="etcd01"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.2.241:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.2.241:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.2.241:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.2.241:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.2.241:2380,etcd02=https://192.168.2.242:2380,etcd03=https://192.168.2.243:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
cat ${SYSTEM_SERVICE_DIR}/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/etc/etcd/config
ExecStart=/usr/local/bin/etcd \
--name=${ETCD_NAME} \
--data-dir=${ETCD_DATA_DIR} \
--listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem \
--peer-cert-file=/etc/etcd/ssl/etcd.pem \
--peer-key-file=/etc/etcd/ssl/etcd-key.pem \
--trusted-ca-file=/etc/etcd/ssl/etcd-ca.pem \
--peer-trusted-ca-file=/etc/etcd/ssl/etcd-ca.pem
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target


#Copy the config file and unit file to the other master nodes
for node in k8s-m{2,3};do scp /etc/etcd/config root@$node:/etc/etcd/;scp ${SYSTEM_SERVICE_DIR}/etcd.service root@$node:${SYSTEM_SERVICE_DIR};done


#After adjusting the per-node values in /etc/etcd/config on each master, start etcd on all of them
systemctl enable --now etcd
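#For reference, on k8s-m2 the per-node values in /etc/etcd/config end up as follows (etcd03 on k8s-m3 is analogous):
ETCD_NAME="etcd02"
ETCD_LISTEN_PEER_URLS="https://192.168.2.242:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.2.242:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.2.242:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.2.242:2379"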


#Check etcd cluster health
etcdctl \
--ca-file=/etc/etcd/ssl/etcd-ca.pem \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.2.241:2379,\
https://192.168.2.242:2379,\
https://192.168.2.243:2379" cluster-health


Certificates and configuration files for each component

The following steps are executed on k8s-m1

#Cluster CA
mkdir -p ${K8S_DIR}/pki && cd ${K8S_DIR}/pki
cat > ca-config.json << EOF
{"signing":{"default":{"expiry":"87600h"},"profiles":{"kubernetes":{"usages":["signing","key encipherment","server auth","client auth"],"expiry":"87600h"}}}}
EOF
cat > ca-csr.json << EOF 
{"CN": "kubernetes","key": {"algo": "rsa","size": 2048},"names":[{"C": "CN","ST": "BeiJing","L": "BeiJing","O": "kubernetes","OU": "k8s"}]}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca


#kube-apiserver serving certificate
cat > apiserver-csr.json <<EOF 
{"CN":"apiserver","key":{"algo":"rsa","size":2048},"names":[{"C":"CN","ST":"BeiJing","L":"BeiJing","O":"kubernetes","OU":"k8s"}]}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -hostname=10.96.0.1,192.168.2.240,127.0.0.1,kubernetes.default -profile=kubernetes apiserver-csr.json | cfssljson -bare apiserver


#Client certificate used by kube-apiserver when talking to kubelets over HTTPS

cat > api-kubelet-client-csr.json << EOF
{"CN":"apiserver-kubelet-client","key":{"algo":"rsa","size":2048},"names":[{"C":"CN","ST":"BeiJing","L":"BeiJing","O":"system:masters","OU":"k8s"}]}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes api-kubelet-client-csr.json | cfssljson -bare api-kubelet-client


#controller-manager certificate and kubeconfig
cat > manager-csr.json << EOF
{"CN":"system:kube-controller-manager","key":{"algo":"rsa","size":2048},"names":[{"C":"CN","ST":"BeiJing","L":"BeiJing","O":"system:kube-controller-manager","OU":"k8s"}]}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -hostname=127.0.0.1,192.168.2.241,192.168.2.242,192.168.2.243 -profile=kubernetes   manager-csr.json | cfssljson -bare controller-manager
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=${K8S_DIR}/controller-manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager --client-certificate=controller-manager.pem --client-key=controller-manager-key.pem --embed-certs=true --kubeconfig=${K8S_DIR}/controller-manager.kubeconfig
kubectl config set-context system:kube-controller-manager@kubernetes --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=${K8S_DIR}/controller-manager.kubeconfig
kubectl config use-context system:kube-controller-manager@kubernetes --kubeconfig=${K8S_DIR}/controller-manager.kubeconfig


#scheduler certificate and kubeconfig
cat > scheduler-csr.json << EOF
{"CN":"system:kube-scheduler","key":{"algo":"rsa","size":2048},"names":[{"C":"CN","ST":"BeiJing","L":"BeiJing","O":"system:kube-scheduler","OU":"k8s"}]}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -hostname=127.0.0.1,192.168.2.241,192.168.2.242,192.168.2.243 -profile=kubernetes   scheduler-csr.json | cfssljson -bare scheduler
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=${K8S_DIR}/scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler --client-certificate=scheduler.pem --client-key=scheduler-key.pem --embed-certs=true --kubeconfig=${K8S_DIR}/scheduler.kubeconfig
kubectl config set-context system:kube-scheduler@kubernetes --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=${K8S_DIR}/scheduler.kubeconfig
kubectl config use-context system:kube-scheduler@kubernetes --kubeconfig=${K8S_DIR}/scheduler.kubeconfig


#admin certificate (used by kubectl) and kubeconfig
cat > admin-csr.json << EOF
{"CN":"admin","key":{"algo":"rsa","size":2048},"names":[{"C":"CN","ST":"BeiJing","L":"BeiJing","O":"system:masters","OU":"k8s"}]}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=${K8S_DIR}/admin.kubeconfig
kubectl config set-credentials kubernetes-admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=${K8S_DIR}/admin.kubeconfig
kubectl config set-context kubernetes-admin@kubernetes --cluster=kubernetes --user=kubernetes-admin --kubeconfig=${K8S_DIR}/admin.kubeconfig
kubectl config use-context kubernetes-admin@kubernetes --kubeconfig=${K8S_DIR}/admin.kubeconfig


#Service account key pair
#kube-apiserver verifies ServiceAccount tokens with the public key (--service-account-key-file); kube-controller-manager signs them with the private key (--service-account-private-key-file), so the two files must be a matching pair
openssl genrsa -out sa.key 2048
openssl rsa -in sa.key -pubout -out sa.pub


#Remove the certificate request files
rm -rf *.json *.csr


Service configuration files

#API server high availability: HAProxy + Keepalived
for node in k8s-m{1,2,3};do ssh root@$node yum -y install haproxy keepalived;done
#Keepalived configuration; on the other masters change state to BACKUP and use a priority lower than the master's; check the network interface name (ens192 here) and adjust it if necessary
cat > /etc/keepalived/keepalived.conf << EOF 
vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 3
}
vrrp_instance VI_1 {
    state MASTER
    interface ens192
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
    ${APISERVER_IP}
    }
     track_script {
        check_haproxy
     }
}
EOF
cat > /etc/keepalived/check_haproxy.sh <<EOF
#!/bin/bash
systemctl status haproxy > /dev/null
if [[ \$? != 0 ]];then
        echo "haproxy is down,close the keepalived"
        systemctl stop keepalived
fi
EOF
cat > /etc/haproxy/haproxy.cfg << EOF 
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
#---------------------------------------------------------------------
frontend  k8s-api 
   bind *:8443
   mode tcp
   default_backend             apiserver
#---------------------------------------------------------------------
backend apiserver
    balance     roundrobin
    mode tcp
    server  k8s-m1 192.168.2.241:6443 check inter 2000 rise 2 fall 3 weight 1 maxconn 2000
    server  k8s-m2 192.168.2.242:6443 check inter 2000 rise 2 fall 3 weight 1 maxconn 2000
    server  k8s-m3 192.168.2.243:6443 check inter 2000 rise 2 fall 3 weight 1 maxconn 2000
EOF


#Copy the config files to the other master nodes
for node in k8s-m{2,3};do scp /etc/keepalived/* root@$node:/etc/keepalived;scp /etc/haproxy/haproxy.cfg root@$node:/etc/haproxy;done


#Adjust keepalived.conf on the other masters, then start the services on all of them
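#The original omits the modification steps; a sketch (assumptions: the interface name is ens192 on every master and priorities 90/80 are chosen for m2/m3; the health-check script is also made executable):
for node in k8s-m{1,2,3};do ssh root@$node chmod +x /etc/keepalived/check_haproxy.sh;done
ssh root@k8s-m2 "sed -i -e 's/state MASTER/state BACKUP/' -e 's/priority 100/priority 90/' /etc/keepalived/keepalived.conf"
ssh root@k8s-m3 "sed -i -e 's/state MASTER/state BACKUP/' -e 's/priority 100/priority 80/' /etc/keepalived/keepalived.conf"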
for node in k8s-m{1,2,3};do ssh root@$node systemctl enable --now keepalived haproxy;done
#Check that the VIP responds
ping ${APISERVER_IP} -c 3


#kube-apiserver systemd unit (this one is for k8s-m1)
cat > ${SYSTEM_SERVICE_DIR}/kube-apiserver.service << EOF 
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
  --authorization-mode=Node,RBAC \
  --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeClaimResize,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,Priority,PodPreset \
  --advertise-address=192.168.2.241 \
  --bind-address=192.168.2.241  \
  --insecure-port=0 \
  --secure-port=6443 \
  --allow-privileged=true \
  --apiserver-count=3 \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/var/log/audit.log \
  --enable-swagger-ui=true \
  --storage-backend=etcd3 \
  --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
  --etcd-certfile=/etc/etcd/ssl/etcd.pem \
  --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
  --etcd-servers=https://192.168.2.241:2379,https://192.168.2.242:2379,https://192.168.2.243:2379 \
  --event-ttl=1h \
  --enable-bootstrap-token-auth \
  --client-ca-file=/etc/kubernetes/pki/ca.pem \
  --kubelet-https=true \
  --kubelet-client-certificate=/etc/kubernetes/pki/api-kubelet-client.pem \
  --kubelet-client-key=/etc/kubernetes/pki/api-kubelet-client-key.pem \
  --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
  --runtime-config=api/all,settings.k8s.io/v1alpha1=true \
  --service-cluster-ip-range=10.96.0.0/12 \
  --service-node-port-range=30000-32767 \
  --service-account-key-file=/etc/kubernetes/pki/sa.pub \
  --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
  --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
  --feature-gates=PodShareProcessNamespace=true \
  --v=4
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
EOF


#kube-scheduler systemd unit
cat > ${SYSTEM_SERVICE_DIR}/kube-scheduler.service <<EOF 
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-scheduler \
  --leader-elect=true \
  --kubeconfig=/etc/kubernetes/scheduler.kubeconfig \
  --address=127.0.0.1 \
  --v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF


#kube-controller-manager systemd unit
cat > ${SYSTEM_SERVICE_DIR}/kube-controller-manager.service << EOF 
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
  --allocate-node-cidrs=true \
  --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
  --authentication-kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
  --authorization-kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
  --client-ca-file=/etc/kubernetes/pki/ca.pem \
  --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
  --bind-address=127.0.0.1 \
  --leader-elect=true \
  --cluster-cidr=10.244.0.0/16 \
  --service-cluster-ip-range=10.96.0.0/12 \
  --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
  --root-ca-file=/etc/kubernetes/pki/ca.pem \
  --use-service-account-credentials=true \
  --controllers=*,bootstrapsigner,tokencleaner \
  --experimental-cluster-signing-duration=86700h \
  --feature-gates=RotateKubeletClientCertificate=true \
  --v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF


#kube-proxy systemd unit and configuration
cat > ${SYSTEM_SERVICE_DIR}/kube-proxy.service << EOF 
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy.conf \
  --v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
cat > /etc/kubernetes/kube-proxy.conf << EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
    acceptContentTypes: ""
    burst: 10
    contentType: application/vnd.kubernetes.protobuf
    kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
    qps: 5
clusterCIDR: "10.244.0.0/16"
configSyncPeriod: 15m0s
conntrack:
    max: null
    maxPerCore: 32768
    min: 131072
    tcpCloseWaitTimeout: 1h0m0s
    tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
    masqueradeAll: true
    masqueradeBit: 14
    minSyncPeriod: 0s
    syncPeriod: 30s
ipvs:
    excludeCIDRs: null
    minSyncPeriod: 0s
    scheduler: ""
    syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
resourceContainer: /kube-proxy
udpIdleTimeout: 250ms
EOF


#kubelet systemd unit and configuration
cat > ${SYSTEM_SERVICE_DIR}/kubelet.service << EOF 
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service
[Service]
ExecStart=/usr/local/bin/kubelet \
  --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --config=/etc/kubernetes/kubelet-conf.yml \
  --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 \
  --allow-privileged=true \
  --network-plugin=cni \
  --cni-conf-dir=/etc/cni/net.d \
  --cni-bin-dir=/opt/cni/bin \
  --cert-dir=/etc/kubernetes/pki \
  --v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
cat > /etc/kubernetes/kubelet-conf.yml << EOF 
address: 0.0.0.0
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
configMapAndSecretChangeDetectionStrategy: Watch
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuCFSQuotaPeriod: 100ms
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kind: KubeletConfiguration
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeLeaseDurationSeconds: 40
nodeStatusReportFrequency: 1m0s
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
port: 10250
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF


#Distribute certificates and configuration files to the appropriate nodes
#The kube-apiserver unit copied to k8s-m2/k8s-m3 must be adjusted: --advertise-address and --bind-address have to point at each node's own IP (see the sketch below)
for node in k8s-m{2,3} k8s-n{1,2,3};do ssh root@$node mkdir -p ${K8S_DIR}/pki;done
for node in k8s-m{2,3};do scp -r ${K8S_DIR}/* root@$node:${K8S_DIR};done
for node in k8s-m{2,3};do scp ${SYSTEM_SERVICE_DIR}/{kube-apiserver.service,kube-scheduler.service,kube-controller-manager.service,kubelet.service,kube-proxy.service} root@$node:${SYSTEM_SERVICE_DIR};done
for node in k8s-n{1,2,3};do scp ${K8S_DIR}/pki/ca.pem root@$node:${K8S_DIR}/pki;done
for node in k8s-m{1,2,3};do ssh root@$node mkdir -p /root/.kube ${K8S_DIR}/manifests;ssh root@$node cp ${K8S_DIR}/admin.kubeconfig /root/.kube/config;done
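#A sketch of the kube-apiserver unit fix on the other masters (only --advertise-address and --bind-address differ per node):
ssh root@k8s-m2 "sed -i -e 's/--advertise-address=192.168.2.241/--advertise-address=192.168.2.242/' -e 's/--bind-address=192.168.2.241/--bind-address=192.168.2.242/' ${SYSTEM_SERVICE_DIR}/kube-apiserver.service"
ssh root@k8s-m3 "sed -i -e 's/--advertise-address=192.168.2.241/--advertise-address=192.168.2.243/' -e 's/--bind-address=192.168.2.241/--bind-address=192.168.2.243/' ${SYSTEM_SERVICE_DIR}/kube-apiserver.service"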


#Start the control-plane services
for node in k8s-m{1,2,3};do ssh root@$node systemctl enable --now kube-apiserver kube-controller-manager kube-scheduler;done


#Verify that the components are healthy
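#For example, with the admin kubeconfig in place as above (etcd, scheduler and controller-manager should all report Healthy):
kubectl get cs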




#This installation enables TLS authentication, so every node's kubelet needs a certificate issued by the kube-apiserver CA before it can talk to kube-apiserver. Manually signing a certificate for every node is tedious, and it only gets harder to manage as nodes are added. TLS bootstrapping solves this: the kubelet first connects to kube-apiserver as a predefined low-privilege user and submits a certificate signing request; when the bootstrap token matches, kube-apiserver dynamically signs and issues the node's kubelet certificate. See the official "TLS Bootstrapping" and "Authenticating with Bootstrap Tokens" documentation for details.

#kubelet certificate and kubeconfig (via TLS bootstrapping)
cd ${K8S_DIR}/pki
#Create the TLS bootstrap secret used for automatic certificate signing
TOKEN_PUB=$(openssl rand -hex 3)
TOKEN_SECRET=$(openssl rand -hex 8)
BOOTSTRAP_TOKEN="${TOKEN_PUB}.${TOKEN_SECRET}"
kubectl -n kube-system create secret generic bootstrap-token-${TOKEN_PUB} \
        --type 'bootstrap.kubernetes.io/token' \
        --from-literal description="cluster bootstrap token" \
        --from-literal token-id=${TOKEN_PUB} \
        --from-literal token-secret=${TOKEN_SECRET} \
        --from-literal usage-bootstrap-authentication=true \
        --from-literal usage-bootstrap-signing=true
#Create the bootstrap kubeconfig
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=${K8S_DIR}/bootstrap-kubelet.kubeconfig
kubectl config set-credentials tls-bootstrap-token-user --token=${BOOTSTRAP_TOKEN} --kubeconfig=${K8S_DIR}/bootstrap-kubelet.kubeconfig
kubectl config set-context tls-bootstrap-token-user@kubernetes --cluster=kubernetes --user=tls-bootstrap-token-user --kubeconfig=${K8S_DIR}/bootstrap-kubelet.kubeconfig
kubectl config use-context tls-bootstrap-token-user@kubernetes --kubeconfig=${K8S_DIR}/bootstrap-kubelet.kubeconfig
#Allow bootstrapping kubelets to create CSRs
kubectl create clusterrolebinding kubeadm:kubelet-bootstrap --clusterrole system:node-bootstrapper --group system:bootstrappers
#Auto-approve all CSRs from the system:bootstrappers group
cat <<EOF | kubectl apply -f -
# Approve all CSRs for the group "system:bootstrappers"
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: auto-approve-csrs-for-group
subjects:
- kind: Group
  name: system:bootstrappers
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
  apiGroup: rbac.authorization.k8s.io
EOF
#Allow kubelets to renew their own certificates
cat <<EOF | kubectl apply -f -
# Approve renewal CSRs for the group "system:nodes"
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: auto-approve-renewals-for-nodes
subjects:
- kind: Group
  name: system:nodes
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
  apiGroup: rbac.authorization.k8s.io
EOF
#Create the required ClusterRoles
cat <<EOF | kubectl apply -f -
# A ClusterRole which instructs the CSR approver to approve a user requesting
# node client credentials.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/nodeclient"]
  verbs: ["create"]
---
# A ClusterRole which instructs the CSR approver to approve a node renewing its
# own client credentials.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/selfnodeclient"]
  verbs: ["create"]
EOF


#kube-proxy certificate and kubeconfig
cat > ca-config.json << EOF
{"signing":{"default":{"expiry":"87600h"},"profiles":{"kubernetes":{"usages":["signing","key encipherment","server auth","client auth"],"expiry":"87600h"}}}}
EOF
cat > kube-proxy-csr.json << EOF
{"CN":"system:kube-proxy","key":{"algo": "rsa","size":2048},"names":[{"C":"CN","L":"BeiJing","ST":"BeiJing","O":"kubernetes","OU":"k8s"}]}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=${KUBE_APISERVER} --kubeconfig=${K8S_DIR}/kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=${K8S_DIR}/kube-proxy.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=${K8S_DIR}/kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=${K8S_DIR}/kube-proxy.kubeconfig
rm -rf *.json *.csr


#Copy the config files and unit files to the appropriate nodes
for node in k8s-m{2,3} k8s-n{1,2,3};do scp ${K8S_DIR}/{kube-proxy.kubeconfig,kube-proxy.conf,bootstrap-kubelet.kubeconfig,kubelet-conf.yml} root@$node:${K8S_DIR};done
for node in k8s-m{2,3} k8s-n{1,2,3};do scp ${SYSTEM_SERVICE_DIR}/{kubelet.service,kube-proxy.service} root@$node:${SYSTEM_SERVICE_DIR};done


#Start kubelet and kube-proxy
for node in k8s-{m,n}{1,2,3};do ssh root@$node systemctl enable --now kubelet kube-proxy;done


#Check node status
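#For example (the bootstrap CSRs should be auto-approved; nodes stay NotReady until the CNI network is deployed):
kubectl get csr
kubectl get nodes -o wide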



#Confirm that kube-proxy is running in IPVS mode
curl localhost:10249/proxyMode
ipvs


#Deploy flannel (adjust --iface to match the host network interface)
cd ${SOFT_DIR} && mkdir flannel && cd flannel
cat > kube-flannel.yml << EOF 
---
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups: ['extensions']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: ['psp.flannel.unprivileged']
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.11.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        - --iface=ens192
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
             add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
EOF

kubectl apply -f kube-flannel.yml
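#Once the flannel DaemonSet pods are running, all nodes should turn Ready; a quick check, for example:
kubectl -n kube-system get pods -l app=flannel -o wide
kubectl get nodes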




#Deploy CoreDNS
yum -y install epel-release && yum -y install jq
cd ${SOFT_DIR} && mkdir coredns && cd coredns
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/deploy.sh
chmod +x deploy.sh
./deploy.sh -i 10.96.0.10 > coredns.yml
kubectl apply -f coredns.yml


#Verify that DNS works
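#A quick check (a sketch; busybox:1.28 is used because nslookup is broken in newer busybox images):
kubectl run -it --rm busybox --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default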



#Taint the master nodes so regular workloads are not scheduled on them
kubectl taint nodes k8s-m1 k8s-m2 k8s-m3 node-role.kubernetes.io/master="":NoSchedule
#Set role labels
kubectl label node k8s-m1 k8s-m2 k8s-m3  node-role.kubernetes.io/master=master
kubectl label node k8s-n1 k8s-n2 k8s-n3  node-role.kubernetes.io/node=node
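#The taints and labels can be checked with, for example:
kubectl get nodes --show-labels
kubectl describe nodes k8s-m1 k8s-m2 k8s-m3 | grep Taints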



References:

https://blog.csdn.net/weixin_34238642/article/details/87387323

https://www.kubernetes.org.cn/4963.html

https://www.kubernetes.org.cn/5163.html

https://www.cnblogs.com/xiaoqshuo/articles/10195143.html 

https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/

