参考:
https://blog.51cto.com/xinsir/2406118
--Environment
k8s    192.168.3.5    CentOS 7.5    master node
k8s1   192.168.3.6    CentOS 7.5    worker node
--Set the hostnames
hostnamectl set-hostname k8s    # on 192.168.3.5
hostnamectl set-hostname k8s1   # on 192.168.3.6
--Configure /etc/hosts entries and passwordless SSH between the two hosts
Node 1 (192.168.3.5):
ssh-keygen -t rsa             # generate an RSA key pair
ssh-copy-id [email protected]   # copy 3.5's public key to 3.6
Node 2 (192.168.3.6):
ssh-keygen -t rsa             # generate an RSA key pair
ssh-copy-id [email protected]   # copy 3.6's public key to 3.5
--Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
--Disable SELinux
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
setenforce 0
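A quick check that SELinux is out of enforcing mode:
getenforce   # prints Permissive now, Disabled after the next reboot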
--Turn off swap and keep it from mounting at boot
swapoff -a
vi /etc/fstab   # comment out the swap line, e.g.:
#/dev/mapper/centos-swap swap                    swap    defaults        0 0
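Alternatively, a non-interactive one-liner (a sketch; it assumes the fstab entry has a swap field as in the example above):
sed -i '/\sswap\s/ s/^/#/' /etc/fstab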
--Set the kernel parameters for bridged traffic and IP forwarding
vi /etc/sysctl.d/k8s.conf   # add:
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
--Apply the settings (load br_netfilter first, otherwise the bridge keys do not exist)
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf
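To verify the module is loaded and the keys took effect, and optionally to make the module load survive reboots:
lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf   # optional: load at boot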
--Install Docker's dependencies
sudo yum install -y yum-utils \
device-mapper-persistent-data \
lvm2
--Add the Docker repo file
sudo yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
--Configure the Aliyun base repo (so Docker's dependency checks do not fail)
wget -O /tmp/Centos-7.repo http://mirrors.aliyun.com/repo/Centos-7.repo
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
mv /tmp/Centos-7.repo /etc/yum.repos.d/CentOS-Base.repo
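Then refresh the yum metadata so the new mirror takes effect:
yum clean all
yum makecache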
--List the installable versions (optional)
yum list docker-ce --showduplicates | sort -r
--Install Docker
sudo yum install docker-ce-18.03.0.ce-1.el7.centos
--Start Docker and enable it at boot
systemctl start docker
systemctl enable docker
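A quick sanity check that Docker is running:
systemctl is-active docker   # should print: active
docker version               # should show both client and server sections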
## All of the steps above must be performed on both nodes, master and worker ##
--Pull the images
--Run the following commands on the master node. You can also put them in a shell script and run that instead, since pulling this many images by hand is easy to get wrong (see the loop sketch after the command list).
docker pull cnych/kube-apiserver-amd64:v1.10.0
docker pull cnych/kube-scheduler-amd64:v1.10.0
docker pull cnych/kube-controller-manager-amd64:v1.10.0
docker pull cnych/kube-proxy-amd64:v1.10.0
docker pull cnych/k8s-dns-kube-dns-amd64:1.14.8
docker pull cnych/k8s-dns-dnsmasq-nanny-amd64:1.14.8
docker pull cnych/k8s-dns-sidecar-amd64:1.14.8
docker pull cnych/etcd-amd64:3.1.12
docker pull cnych/flannel:v0.10.0-amd64
docker pull cnych/pause-amd64:3.1
docker tag cnych/kube-apiserver-amd64:v1.10.0 k8s.gcr.io/kube-apiserver-amd64:v1.10.0
docker tag cnych/kube-scheduler-amd64:v1.10.0 k8s.gcr.io/kube-scheduler-amd64:v1.10.0
docker tag cnych/kube-controller-manager-amd64:v1.10.0 k8s.gcr.io/kube-controller-manager-amd64:v1.10.0
docker tag cnych/kube-proxy-amd64:v1.10.0 k8s.gcr.io/kube-proxy-amd64:v1.10.0
docker tag cnych/k8s-dns-kube-dns-amd64:1.14.8 k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
docker tag cnych/k8s-dns-dnsmasq-nanny-amd64:1.14.8 k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
docker tag cnych/k8s-dns-sidecar-amd64:1.14.8 k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
docker tag cnych/etcd-amd64:3.1.12 k8s.gcr.io/etcd-amd64:3.1.12
docker tag cnych/flannel:v0.10.0-amd64 quay.io/coreos/flannel:v0.10.0-amd64
docker tag cnych/pause-amd64:3.1 k8s.gcr.io/pause-amd64:3.1
docker rmi cnych/kube-apiserver-amd64:v1.10.0
docker rmi cnych/kube-scheduler-amd64:v1.10.0
docker rmi cnych/kube-controller-manager-amd64:v1.10.0
docker rmi cnych/kube-proxy-amd64:v1.10.0
docker rmi cnych/k8s-dns-kube-dns-amd64:1.14.8
docker rmi cnych/k8s-dns-dnsmasq-nanny-amd64:1.14.8
docker rmi cnych/k8s-dns-sidecar-amd64:1.14.8
docker rmi cnych/etcd-amd64:3.1.12
docker rmi cnych/flannel:v0.10.0-amd64
docker rmi cnych/pause-amd64:3.1
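A minimal sketch of that script for the master, built from the exact image list above (the node's version differs only in its image array):

#!/bin/bash
# Pull each image from the cnych mirror, retag it under the registry
# name kubeadm expects, then drop the mirror tag.
set -e
images=(
  kube-apiserver-amd64:v1.10.0
  kube-scheduler-amd64:v1.10.0
  kube-controller-manager-amd64:v1.10.0
  kube-proxy-amd64:v1.10.0
  k8s-dns-kube-dns-amd64:1.14.8
  k8s-dns-dnsmasq-nanny-amd64:1.14.8
  k8s-dns-sidecar-amd64:1.14.8
  etcd-amd64:3.1.12
  pause-amd64:3.1
)
for img in "${images[@]}"; do
  docker pull "cnych/${img}"
  docker tag  "cnych/${img}" "k8s.gcr.io/${img}"
  docker rmi  "cnych/${img}"
done
# flannel is the one exception: it is retagged under quay.io/coreos
docker pull cnych/flannel:v0.10.0-amd64
docker tag  cnych/flannel:v0.10.0-amd64 quay.io/coreos/flannel:v0.10.0-amd64
docker rmi  cnych/flannel:v0.10.0-amd64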
--Check the images on the master node
[root@k8s ~]# docker images -a
REPOSITORY                                 TAG            IMAGE ID      CREATED        SIZE
k8s.gcr.io/kube-proxy-amd64                v1.10.0        bfc21aadc7d3  18 months ago  97MB
k8s.gcr.io/kube-apiserver-amd64            v1.10.0        af20925d51a3  18 months ago  225MB
k8s.gcr.io/kube-controller-manager-amd64   v1.10.0        ad86dbed1555  18 months ago  148MB
k8s.gcr.io/kube-scheduler-amd64            v1.10.0        704ba848e69a  18 months ago  50.4MB
k8s.gcr.io/etcd-amd64                      3.1.12         52920ad46f5b  18 months ago  193MB
quay.io/coreos/flannel                     v0.10.0-amd64  f0fad859c909  20 months ago  44.6MB
k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64     1.14.8         c2ce1ffb51ed  20 months ago  41MB
k8s.gcr.io/k8s-dns-sidecar-amd64           1.14.8         6f7f2dc7fab5  20 months ago  42.2MB
k8s.gcr.io/k8s-dns-kube-dns-amd64          1.14.8         80cc5ea4b547  20 months ago  50.5MB
k8s.gcr.io/pause-amd64                     3.1            da86e6ba6ca1  21 months ago  742kB
--Run the following commands on the node; as before, they can go into a script (the loop sketch above works here too, with this image list).
docker pull cnych/kube-proxy-amd64:v1.10.0
docker pull cnych/flannel:v0.10.0-amd64
docker pull cnych/pause-amd64:3.1
docker pull cnych/kubernetes-dashboard-amd64:v1.8.3
docker pull cnych/heapster-influxdb-amd64:v1.3.3
docker pull cnych/heapster-grafana-amd64:v4.4.3
docker pull cnych/heapster-amd64:v1.4.2
docker pull cnych/k8s-dns-kube-dns-amd64:1.14.8
docker pull cnych/k8s-dns-dnsmasq-nanny-amd64:1.14.8
docker pull cnych/k8s-dns-sidecar-amd64:1.14.8
docker tag cnych/flannel:v0.10.0-amd64 quay.io/coreos/flannel:v0.10.0-amd64
docker tag cnych/pause-amd64:3.1 k8s.gcr.io/pause-amd64:3.1
docker tag cnych/kube-proxy-amd64:v1.10.0 k8s.gcr.io/kube-proxy-amd64:v1.10.0
docker tag cnych/k8s-dns-kube-dns-amd64:1.14.8 k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
docker tag cnych/k8s-dns-dnsmasq-nanny-amd64:1.14.8 k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
docker tag cnych/k8s-dns-sidecar-amd64:1.14.8 k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
docker tag cnych/kubernetes-dashboard-amd64:v1.8.3 k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
docker tag cnych/heapster-influxdb-amd64:v1.3.3 k8s.gcr.io/heapster-influxdb-amd64:v1.3.3
docker tag cnych/heapster-grafana-amd64:v4.4.3 k8s.gcr.io/heapster-grafana-amd64:v4.4.3
docker tag cnych/heapster-amd64:v1.4.2 k8s.gcr.io/heapster-amd64:v1.4.2
docker rmi cnych/kube-proxy-amd64:v1.10.0
docker rmi cnych/flannel:v0.10.0-amd64
docker rmi cnych/pause-amd64:3.1
docker rmi cnych/kubernetes-dashboard-amd64:v1.8.3
docker rmi cnych/heapster-influxdb-amd64:v1.3.3
docker rmi cnych/heapster-grafana-amd64:v4.4.3
docker rmi cnych/heapster-amd64:v1.4.2
docker rmi cnych/k8s-dns-kube-dns-amd64:1.14.8
docker rmi cnych/k8s-dns-dnsmasq-nanny-amd64:1.14.8
docker rmi cnych/k8s-dns-sidecar-amd64:1.14.8
--Check the images on the node
[root@k8s1 ~]# docker images -a
REPOSITORY                               TAG            IMAGE ID      CREATED        SIZE
k8s.gcr.io/kube-proxy-amd64              v1.10.0        bfc21aadc7d3  18 months ago  97MB
k8s.gcr.io/kubernetes-dashboard-amd64    v1.8.3         0c60bcf89900  19 months ago  102MB
quay.io/coreos/flannel                   v0.10.0-amd64  f0fad859c909  20 months ago  44.6MB
k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64   1.14.8         c2ce1ffb51ed  20 months ago  41MB
k8s.gcr.io/k8s-dns-sidecar-amd64         1.14.8         6f7f2dc7fab5  20 months ago  42.2MB
k8s.gcr.io/k8s-dns-kube-dns-amd64        1.14.8         80cc5ea4b547  20 months ago  50.5MB
k8s.gcr.io/pause-amd64                   3.1            da86e6ba6ca1  21 months ago  742kB
k8s.gcr.io/heapster-influxdb-amd64       v1.3.3         577260d221db  2 years ago    12.5MB
k8s.gcr.io/heapster-grafana-amd64        v4.4.3         8cb3de219af7  2 years ago    152MB
k8s.gcr.io/heapster-amd64                v1.4.2         d4e02f5922ca  2 years ago    73.4MB
--The following steps must be performed on both the master and the node
--Install kubelet, kubeadm, kubectl, and kubernetes-cni
--Add the Kubernetes yum repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
--Install
yum makecache fast
yum install -y kubelet-1.10.0-0
yum install -y kubeadm-1.10.0-0 kubectl-1.10.0-0 kubernetes-cni-0.6.0-0
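Confirm what was installed, and enable the kubelet service so it starts at boot (kubeadm expects this; the kubelet will not run cleanly until kubeadm init gives it a config, which is normal):
rpm -qa | grep -E 'kube(let|adm|ctl)|kubernetes-cni'
systemctl enable kubelet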
--Check Docker's cgroup driver
docker info |grep Cgroup
Cgroup Driver: cgroupfs
Note: Docker here reports the cgroupfs driver, but the kubelet package installed via yum is configured for systemd. The two must match, so change the kubelet's driver to cgroupfs.
--Change the kubelet's cgroup driver
vi /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
# In this file, replace
Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=systemd"
# with
Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs"
--Reload the systemd daemon configuration
systemctl daemon-reload
--Run the following command on the master
kubeadm init --kubernetes-version=v1.10.0 --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.3.5
Where:
--kubernetes-version           the k8s version
--pod-network-cidr             the network range used between pods
--apiserver-advertise-address  the master node's address
Important: if the cluster initializes successfully, the output ends with a command like the one below. Be sure to save it; the init output is not shown again.
# This is the command other nodes use to join the k8s cluster.
kubeadm join 192.168.3.5:6443 --token 5a1jym.n08j8fqxmci7jqqu --discovery-token-ca-cert-hash sha256:fa9461e22aa591aa6584dc0deffbc1717a7ab50a26bf5a4c7532d0990c662f82
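If the join command is ever lost, it can usually be regenerated on the master (kubeadm 1.9+ supports this flag):
kubeadm token create --print-join-command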
--On the master, set up the kubectl config as the init output instructs
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
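kubectl should now reach the cluster:
kubectl cluster-info
kubectl get componentstatuses   # scheduler, controller-manager, etcd should report Healthy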
--On the node, run the join command generated in the previous step
kubeadm join 192.168.3.5:6443 --token 5a1jym.n08j8fqxmci7jqqu --discovery-token-ca-cert-hash sha256:fa9461e22aa591aa6584dc0deffbc1717a7ab50a26bf5a4c7532d0990c662f82
--Check the cluster state from the master
# At this point the node may show as NotReady; that is fine, it becomes Ready once the pod network add-on is installed.
kubectl get node
--Install the flannel add-on on the master
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
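Note that this master-branch manifest may reference a newer flannel image than the v0.10.0 one pre-pulled earlier; if so, pin it before applying (a sketch, assuming the manifest's image reference matches this pattern):
sed -i 's#quay.io/coreos/flannel:v[0-9.]*-amd64#quay.io/coreos/flannel:v0.10.0-amd64#' kube-flannel.yml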
kubectl apply -f kube-flannel.yml
--Check pod startup status
[root@k8s ~]# kubectl get pods --all-namespaces
NAMESPACE     NAME                          READY  STATUS    RESTARTS  AGE
kube-system   etcd-k8s                      1/1    Running   0         4m
kube-system   kube-apiserver-k8s            1/1    Running   0         4m
kube-system   kube-controller-manager-k8s   1/1    Running   0         4m
kube-system   kube-dns-86f4d74b45-pbztb     0/3    Pending   0         4m
kube-system   kube-flannel-ds-amd64-8h9nk   0/1    Init:0/1  0         17s
kube-system   kube-flannel-ds-amd64-96mpq   0/1    Init:0/1  0         17s
kube-system   kube-proxy-pf7rx              1/1    Running   0         4m
kube-system   kube-proxy-znt7d              1/1    Running   0         2m
kube-system   kube-scheduler-k8s            1/1    Running   0         4m
--The k8s cluster installation is now complete