參考:
https://blog.51cto.com/xinsir/2406118
--環境
k8s 192.168.3.5 centos7.5 master節點
k8s1 192.168.3.6 centos7.5 node節點
--修改主機名
hostnamectl set-hostname k8s
hostnamectl set-hostname k8s1
--配置兩臺主機間的hosts解析與ssh免密登錄
在兩臺主機的/etc/hosts中都添加:
192.168.3.5 k8s
192.168.3.6 k8s1
節點1 192.168.3.5:
ssh-keygen -t rsa #生成rsa
ssh-copy-id [email protected] #複製3.5的公鑰到3.6上
節點2 192.168.3.6:
ssh-keygen -t rsa #生成rsa
ssh-copy-id [email protected] #複製3.6的公鑰到3.5上
--關閉防火牆
systemctl stop firewalld
systemctl disable firewalld
--關閉selinux
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
setenforce 0
--關閉swap分區,開機不掛載
swapoff -a
vi /etc/fstab
#/dev/mapper/centos-swap swap swap defaults 0 0
--修改iptables轉發規則
vi /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
--使生效
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf
--安裝docker依賴工具
sudo yum install -y yum-utils \
device-mapper-persistent-data \
lvm2
--添加docker repo文件
sudo yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
--配置aliyun repo源(防止安裝docker依賴檢查不通過)
cd /etc/yum.repos.d
wget http://mirrors.aliyun.com/repo/Centos-7.repo
mv CentOS-Base.repo CentOS-Base.repo.bak
mv Centos-7.repo CentOS-Base.repo
--查看可以安裝版本(可選)
yum list docker-ce --showduplicates | sort -r
--安裝Docker
sudo yum install docker-ce-18.03.0.ce-1.el7.centos
--啓動docker並設置開機啓動
systemctl start docker
systemctl enable docker
##上面的操作在兩個節點master和node都要配置##
--鏡像下載
--在master節點上執行下面命令,你也可以在寫到一個shell腳本里面,直接運行shell腳本,因爲下載鏡像很容易搞暈。
docker pull cnych/kube-apiserver-amd64:v1.10.0
docker pull cnych/kube-scheduler-amd64:v1.10.0
docker pull cnych/kube-controller-manager-amd64:v1.10.0
docker pull cnych/kube-proxy-amd64:v1.10.0
docker pull cnych/k8s-dns-kube-dns-amd64:1.14.8
docker pull cnych/k8s-dns-dnsmasq-nanny-amd64:1.14.8
docker pull cnych/k8s-dns-sidecar-amd64:1.14.8
docker pull cnych/etcd-amd64:3.1.12
docker pull cnych/flannel:v0.10.0-amd64
docker pull cnych/pause-amd64:3.1
docker tag cnych/kube-apiserver-amd64:v1.10.0 k8s.gcr.io/kube-apiserver-amd64:v1.10.0
docker tag cnych/kube-scheduler-amd64:v1.10.0 k8s.gcr.io/kube-scheduler-amd64:v1.10.0
docker tag cnych/kube-controller-manager-amd64:v1.10.0 k8s.gcr.io/kube-controller-manager-amd64:v1.10.0
docker tag cnych/kube-proxy-amd64:v1.10.0 k8s.gcr.io/kube-proxy-amd64:v1.10.0
docker tag cnych/k8s-dns-kube-dns-amd64:1.14.8 k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
docker tag cnych/k8s-dns-dnsmasq-nanny-amd64:1.14.8 k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
docker tag cnych/k8s-dns-sidecar-amd64:1.14.8 k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
docker tag cnych/etcd-amd64:3.1.12 k8s.gcr.io/etcd-amd64:3.1.12
docker tag cnych/flannel:v0.10.0-amd64 quay.io/coreos/flannel:v0.10.0-amd64
docker tag cnych/pause-amd64:3.1 k8s.gcr.io/pause-amd64:3.1
docker rmi cnych/kube-apiserver-amd64:v1.10.0
docker rmi cnych/kube-scheduler-amd64:v1.10.0
docker rmi cnych/kube-controller-manager-amd64:v1.10.0
docker rmi cnych/kube-proxy-amd64:v1.10.0
docker rmi cnych/k8s-dns-kube-dns-amd64:1.14.8
docker rmi cnych/k8s-dns-dnsmasq-nanny-amd64:1.14.8
docker rmi cnych/k8s-dns-sidecar-amd64:1.14.8
docker rmi cnych/etcd-amd64:3.1.12
docker rmi cnych/flannel:v0.10.0-amd64
docker rmi cnych/pause-amd64:3.1
--查看master節點image
[root@k8s ~]# docker images -a
REPOSITORY TAG IMAGE ID CREATED SIZE
k8s.gcr.io/kube-proxy-amd64 v1.10.0 bfc21aadc7d3 18 months ago 97MB
k8s.gcr.io/kube-apiserver-amd64 v1.10.0 af20925d51a3 18 months ago 225MB
k8s.gcr.io/kube-controller-manager-amd64 v1.10.0 ad86dbed1555 18 months ago 148MB
k8s.gcr.io/kube-scheduler-amd64 v1.10.0 704ba848e69a 18 months ago 50.4MB
k8s.gcr.io/etcd-amd64 3.1.12 52920ad46f5b 18 months ago 193MB
quay.io/coreos/flannel v0.10.0-amd64 f0fad859c909 20 months ago 44.6MB
k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64 1.14.8 c2ce1ffb51ed 20 months ago 41MB
k8s.gcr.io/k8s-dns-sidecar-amd64 1.14.8 6f7f2dc7fab5 20 months ago 42.2MB
k8s.gcr.io/k8s-dns-kube-dns-amd64 1.14.8 80cc5ea4b547 20 months ago 50.5MB
k8s.gcr.io/pause-amd64 3.1 da86e6ba6ca1 21 months ago 742kB
--在node節點上執行下面命令,同理你也可以寫到一個腳本里面
docker pull cnych/kube-proxy-amd64:v1.10.0
docker pull cnych/flannel:v0.10.0-amd64
docker pull cnych/pause-amd64:3.1
docker pull cnych/kubernetes-dashboard-amd64:v1.8.3
docker pull cnych/heapster-influxdb-amd64:v1.3.3
docker pull cnych/heapster-grafana-amd64:v4.4.3
docker pull cnych/heapster-amd64:v1.4.2
docker pull cnych/k8s-dns-kube-dns-amd64:1.14.8
docker pull cnych/k8s-dns-dnsmasq-nanny-amd64:1.14.8
docker pull cnych/k8s-dns-sidecar-amd64:1.14.8
docker tag cnych/flannel:v0.10.0-amd64 quay.io/coreos/flannel:v0.10.0-amd64
docker tag cnych/pause-amd64:3.1 k8s.gcr.io/pause-amd64:3.1
docker tag cnych/kube-proxy-amd64:v1.10.0 k8s.gcr.io/kube-proxy-amd64:v1.10.0
docker tag cnych/k8s-dns-kube-dns-amd64:1.14.8 k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
docker tag cnych/k8s-dns-dnsmasq-nanny-amd64:1.14.8 k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
docker tag cnych/k8s-dns-sidecar-amd64:1.14.8 k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
docker tag cnych/kubernetes-dashboard-amd64:v1.8.3 k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
docker tag cnych/heapster-influxdb-amd64:v1.3.3 k8s.gcr.io/heapster-influxdb-amd64:v1.3.3
docker tag cnych/heapster-grafana-amd64:v4.4.3 k8s.gcr.io/heapster-grafana-amd64:v4.4.3
docker tag cnych/heapster-amd64:v1.4.2 k8s.gcr.io/heapster-amd64:v1.4.2
docker rmi cnych/kube-proxy-amd64:v1.10.0
docker rmi cnych/flannel:v0.10.0-amd64
docker rmi cnych/pause-amd64:3.1
docker rmi cnych/kubernetes-dashboard-amd64:v1.8.3
docker rmi cnych/heapster-influxdb-amd64:v1.3.3
docker rmi cnych/heapster-grafana-amd64:v4.4.3
docker rmi cnych/heapster-amd64:v1.4.2
docker rmi cnych/k8s-dns-kube-dns-amd64:1.14.8
docker rmi cnych/k8s-dns-dnsmasq-nanny-amd64:1.14.8
docker rmi cnych/k8s-dns-sidecar-amd64:1.14.8
--查看node節點image
[root@k8s1 ~]# docker images -a
REPOSITORY TAG IMAGE ID CREATED SIZE
k8s.gcr.io/kube-proxy-amd64 v1.10.0 bfc21aadc7d3 18 months ago 97MB
k8s.gcr.io/kubernetes-dashboard-amd64 v1.8.3 0c60bcf89900 19 months ago 102MB
quay.io/coreos/flannel v0.10.0-amd64 f0fad859c909 20 months ago 44.6MB
k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64 1.14.8 c2ce1ffb51ed 20 months ago 41MB
k8s.gcr.io/k8s-dns-sidecar-amd64 1.14.8 6f7f2dc7fab5 20 months ago 42.2MB
k8s.gcr.io/k8s-dns-kube-dns-amd64 1.14.8 80cc5ea4b547 20 months ago 50.5MB
k8s.gcr.io/pause-amd64 3.1 da86e6ba6ca1 21 months ago 742kB
k8s.gcr.io/heapster-influxdb-amd64 v1.3.3 577260d221db 2 years ago 12.5MB
k8s.gcr.io/heapster-grafana-amd64 v4.4.3 8cb3de219af7 2 years ago 152MB
k8s.gcr.io/heapster-amd64 v1.4.2 d4e02f5922ca 2 years ago 73.4MB
--在master和node節點都需要執行以下操作
--安裝kubelet、kubeadm、kubectl、kubernetes-cni
--添加yum源
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
--安裝
yum makecache fast
yum install -y kubelet-1.10.0-0
yum install -y kubeadm-1.10.0-0 kubectl-1.10.0-0 kubernetes-cni-0.6.0-0
--查看docker驅動
docker info |grep Cgroup
Cgroup Driver: cgroupfs
注:kubelet默認使用的cgroup驅動是cgroupfs,但通過yum安裝時,配置文件裏被寫成了systemd,與docker的cgroupfs不一致會導致kubelet啓動失敗。所以我們需要手動把kubelet的cgroup驅動改回cgroupfs。
--修改kubelet的cgroup驅動
vi /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
# 把這個文件中的
Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=systemd"
替換成
Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs"
--重新加載systemd配置,使kubelet的參數變更生效
systemctl daemon-reload
--在master執行以下命令
kubeadm init --kubernetes-version=v1.10.0 --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.3.5
解釋:
--kubernetes-version k8s版本
--pod-network-cidr 產生的pod之間的網絡段
--apiserver-advertise-address master節點地址
特別注意:如果初始化k8s集羣成功,最後會顯示一行kubeadm join命令,如下,一定要保存好;若丟失,可在master上執行 kubeadm token create --print-join-command 重新生成。
# 這條命令是其它node節點加入k8s集羣的命令。
kubeadm join 192.168.3.5:6443 --token 5a1jym.n08j8fqxmci7jqqu --discovery-token-ca-cert-hash sha256:fa9461e22aa591aa6584dc0deffbc1717a7ab50a26bf5a4c7532d0990c662f82
--master節點根據提示修改kubectl的配置
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
--在node節點上執行上一步生成的join命令
kubeadm join 192.168.3.5:6443 --token 5a1jym.n08j8fqxmci7jqqu --discovery-token-ca-cert-hash sha256:fa9461e22aa591aa6584dc0deffbc1717a7ab50a26bf5a4c7532d0990c662f82
--在master節點上查看集羣狀態
# 此時輸出的node節點可能顯示爲notready狀態,沒關係我們把pod通訊的網絡插件安裝上就行了。
kubectl get node
--在master節點安裝flannel插件
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml
--查看pod的啓動狀態
[root@k8s ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system etcd-k8s 1/1 Running 0 4m
kube-system kube-apiserver-k8s 1/1 Running 0 4m
kube-system kube-controller-manager-k8s 1/1 Running 0 4m
kube-system kube-dns-86f4d74b45-pbztb 0/3 Pending 0 4m
kube-system kube-flannel-ds-amd64-8h9nk 0/1 Init:0/1 0 17s
kube-system kube-flannel-ds-amd64-96mpq 0/1 Init:0/1 0 17s
kube-system kube-proxy-pf7rx 1/1 Running 0 4m
kube-system kube-proxy-znt7d 1/1 Running 0 2m
kube-system kube-scheduler-k8s 1/1 Running 0 4m
--至此k8s集羣安裝完畢