設置 yum repository
# Install prerequisites for managing the Docker yum repository
# (yum-utils provides yum-config-manager; the storage packages are
# needed by Docker's devicemapper/lvm storage drivers).
yum install -y yum-utils device-mapper-persistent-data lvm2
配置 docker 官方 yum 源
# Register Docker's official CE repository with yum.
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
安裝並啓動 docker
在 master 節點和 node 節點都要執行
# Install Docker CE and containerd, then enable and start the daemon
# (run on master and worker nodes). The original fused three commands
# onto one line, so "sudo", "systemctl", "enable", "docker", "start"
# were passed to yum as bogus package names and Docker was never started.
yum install -y docker-ce docker-ce-cli containerd.io
sudo systemctl enable docker
sudo systemctl start docker
安裝 nfs-utils
在 master 節點和 node 節點都要執行
安裝 nfs-utils 才能掛載 nfs 網絡存儲
# nfs-utils is required to mount NFS network storage volumes
# (run on master and worker nodes).
yum install -y nfs-utils
kubernetes基本配置
配置K8S的yum源
#在 master 節點和 node 節點都要執行
# Write the Aliyun mirror of the Kubernetes yum repo (run on every node).
# Fix: the second gpgkey URL is a yum continuation line and must be
# indented with leading whitespace, otherwise the .repo file is malformed.
# (gpgcheck/repo_gpgcheck are 0, so the keys are informational here.)
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
關閉防火牆、SeLinux、Swap
在 master 節點和 node 節點都要執行
關閉防火牆
# Stop the firewall immediately and keep it off across reboots so
# cluster traffic is not blocked.
# NOTE(review): for production, prefer opening the required k8s ports
# instead of disabling firewalld entirely.
systemctl stop firewalld
systemctl disable firewalld
關閉Selinux
# Switch SELinux to permissive for the running system; ignore the error
# returned when SELinux is already disabled.
setenforce 0 || true
# Persistently disable SELinux. Match whatever the current mode is
# (enforcing OR permissive) — the original pattern only replaced
# "SELINUX=enforcing" and silently did nothing on permissive systems.
# (^SELINUX= does not match the SELINUXTYPE= line.)
sed -i "s/^SELINUX=.*/SELINUX=disabled/" /etc/selinux/config
關閉Swap
# Turn off swap now and on boot — kubelet refuses to start with swap enabled.
swapoff -a
# Back up fstab, then rewrite it without the swap entries
# (grep reads the backup directly; piping cat into grep was redundant).
yes | cp /etc/fstab /etc/fstab.bak
grep -v swap /etc/fstab.bak > /etc/fstab
配置sysctl.conf
在 master 節點和 node 節點都要執行
echo "net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1"
>>/etc/sysctl.conf
sysctl -p
安裝kubelet、kubeadm、kubectl
在 master 節點和 node 節點都要執行
# Install kubelet, kubeadm and kubectl from the repo configured above.
# NOTE(review): versions are unpinned — yum pulls the latest available,
# which may not match the v1.15.1 requested in kubeadm-config.yaml;
# consider "yum install -y kubelet-1.15.1 kubeadm-1.15.1 kubectl-1.15.1".
yum install -y kubelet kubeadm kubectl
添加 --exec-opt native.cgroupdriver=systemd
在 docker.service 的 ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock 這一行末尾添加 --exec-opt native.cgroupdriver=systemd
cat /usr/lib/systemd/system/docker.service|grep -v "#"|grep -v "^$"
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket
[Service]
Type=notify
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
替換國內Docker鏡像
# Configure a China-local Docker registry mirror via DaoCloud's helper.
# NOTE(review): this pipes a remote script straight into sh and passes a
# plain-HTTP mirror URL — unauthenticated and unverified; prefer
# downloading and inspecting the script, or editing
# /etc/docker/daemon.json ("registry-mirrors") directly.
curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io
重啓 docker,並啓動 kubelet
# Reload unit files (docker.service was edited above), restart Docker to
# pick up the systemd cgroup driver and mirror, then enable and start
# kubelet so it survives reboots.
systemctl daemon-reload
systemctl restart docker
systemctl enable kubelet
systemctl start kubelet
初始化 master 節點
在 master 節點執行
配置域名
echo "172.16.88.210 master.luca.com" >> /etc/hosts
創建 ./kubeadm-config.yaml
在 master 節點執行
# Generate the kubeadm cluster configuration (master only).
# Fixes: podSubnet must be indented under "networking:" to be valid YAML,
# and a subnet must have its host bits zeroed — 10.100.0.1/20 is a host
# address, the network is 10.100.0.0/20.
cat <<EOF > ./kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
controlPlaneEndpoint: "master.luca.com:6443"
networking:
  podSubnet: "10.100.0.0/20"
EOF
初始化 apiserver
在 master 節點執行
# Bootstrap the control plane. --upload-certs stores the control-plane
# certificates in the cluster (encrypted) so additional masters can join
# within the next two hours using the printed --certificate-key.
kubeadm init --config=kubeadm-config.yaml --upload-certs
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join master.luca.com:6443 --token 1u23ed.kgjq2lsrvcl3l48d \
--discovery-token-ca-cert-hash sha256:76477643fc2029b684f040cbf8778789c5fb8f7db6eed42317835f4e29aacc26 \
--control-plane --certificate-key 615cb3929f93073c31d15568bda5f8f8659e579df423c6c025cd30ea20f23829
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join master.luca.com:6443 --token 1u23ed.kgjq2lsrvcl3l48d \
--discovery-token-ca-cert-hash sha256:76477643fc2029b684f040cbf8778789c5fb8f7db6eed42317835f4e29aacc26
安裝 calico網絡
在 master 節點執行
# Install kubectl credentials for root — note this wipes any existing
# /root/.kube/ first — then deploy the Calico CNI so nodes become Ready.
rm -rf /root/.kube/
mkdir /root/.kube/
cp -i /etc/kubernetes/admin.conf /root/.kube/config
kubectl apply -f https://docs.projectcalico.org/v3.6/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
確認容器組狀態
在 master 節點執行
直到所有的容器組處於 Running 狀態才完成
# Poll the system pods until every one reports Running (Ctrl-C to exit).
watch kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-7b4657785d-87ppv 1/1 Running 0 33m
calico-node-7t2vm 1/1 Running 0 23m
calico-node-dxgzs 1/1 Running 0 23m
calico-node-jr67v 1/1 Running 0 23m
calico-node-spzmj 1/1 Running 0 23m
calico-node-ss5px 1/1 Running 0 33m
coredns-6967fb4995-6xwkn 1/1 Running 0 39m
coredns-6967fb4995-m8w29 1/1 Running 0 39m
etcd-k8s-master 1/1 Running 0 40m
kube-apiserver-k8s-master 1/1 Running 0 40m
kube-controller-manager-k8s-master 1/1 Running 0 40m
kube-proxy-d6rlz 1/1 Running 0 23m
kube-proxy-gl649 1/1 Running 0 23m
kube-proxy-l94wg 1/1 Running 0 39m
kube-proxy-rlldt 1/1 Running 0 23m
kube-proxy-vpzq5 1/1 Running 0 23m
kube-scheduler-k8s-master 1/1 Running 0 40m
確認master 初始化結果
kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master Ready master 9m29s v1.15.3
初始化 node節點
只在 master 節點執行,獲取加入節點參數
# Mint a fresh bootstrap token and print the complete "kubeadm join"
# command to run on worker nodes (master only).
kubeadm token create --print-join-command
kubeadm join master.luca.com:6443 --token jgr7ab.4mzdsrq3rj1y0b2u --discovery-token-ca-cert-hash sha256:76477643fc2029b684f040cbf8778789c5fb8f7db6eed42317835f4e29aacc26
加入節點
所有node服務器執行
echo "172.16.88.210 master.luca.com" >> /etc/hosts
kubeadm join master.luca.com:6443 --token jgr7ab.4mzdsrq3rj1y0b2u --discovery-token-ca-cert-hash sha256:76477643fc2029b684f040cbf8778789c5fb8f7db6eed42317835f4e29aacc26
在 master 節點執行,獲取加入信息
kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master Ready master 17m v1.15.3
k8s-salve-1 NotReady <none> 19s v1.15.3
k8s-salve-2 NotReady <none> 18s v1.15.3
k8s-salve-3 NotReady <none> 15s v1.15.3
k8s-salve-4 NotReady <none> 14s v1.15.3
安裝 Ingress Controller
只在 master 節點執行
# Deploy the NGINX ingress controller manifest (master only).
kubectl apply -f https://raw.githubusercontent.com/eip-work/eip-monitor-repository/master/dashboard/nginx-ingress.yaml