主機列表:
ip | 主機名 | 節點 | cpu | 內存 |
192.168.23.100 | k8smaster | master | 2核 | 2G |
192.168.23.101 | k8snode01 | node | 2核 | 2G |
192.168.23.102 | k8snode02 | node | 2核 | 2G |
1、配置本地yum源
yum源包:
鏈接:https://pan.baidu.com/s/1KAYWlw5Ky2ESUEZVsphQ0Q
配置本地yum源,將yum.repo拷貝到/etc/yum.repos.d/目錄。
[root@k8smaster yum.repos.d]# more yum.repo
[soft]
name=base
baseurl=http://192.168.23.100/yum
gpgcheck=0
[root@k8smaster yum.repos.d]# scp yum.repo 192.168.23.102:/etc/yum.repos.d/
[email protected]'s password:
yum.repo 100% 63 0.1KB/s 00:00
[root@k8smaster yum.repos.d]# scp yum.repo 192.168.23.101:/etc/yum.repos.d/
[email protected]'s password:
yum.repo
2、修改/etc/hosts
[root@k8smaster yum.repos.d]# cat >> /etc/hosts << EOF
> 192.168.23.100 k8smaster
> 192.168.23.101 k8snode01
> 192.168.23.102 k8snode02
> EOF
[root@k8smaster yum.repos.d]#
3、安裝依賴
yum install -y conntrack ntpdate ntp ipvsadm ipset iptables curl sysstat libseccomp wget vim net-tools git iproute lrzsz bash-completion tree bridge-utils unzip bind-utils gcc
4、關閉selinux
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
5、關閉防火牆,設置防火牆爲iptables並設置空規則
#關閉firewalld並取消自啓動
systemctl stop firewalld && systemctl disable firewalld
#安裝iptables,啓動iptables,設置開機自啓,清空iptables規則,保存當前規則到默認規則
yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save
6、關閉swap分區
#關閉swap分區【虛擬內存】並且永久關閉虛擬內存。
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
7、配置內核參數,對於k8s
# Kernel tuning required by Kubernetes. The heredoc body is written
# verbatim into kubernetes.conf — its zh-CN "#" lines become comments in
# the generated sysctl file, so they are intentionally left untouched.
# NOTE(review): the net.bridge.* keys only exist after br_netfilter is
# loaded (done in section 12 of this document); until then `sysctl -p`
# may report them as unknown keys.
cat > kubernetes.conf <<EOF
#開啓網橋模式【重要】
net.bridge.bridge-nf-call-iptables=1
#開啓網橋模式【重要】
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
#禁止使用swap空間,只有當系統OOM時才允許使用它
vm.swappiness=0
#不檢查物理內存是否夠用
vm.overcommit_memory=1
#開啓OOM
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
#關閉ipv6【重要】
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
#Copy the tuning file into /etc/sysctl.d/ so it is re-applied on every boot
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
#Apply the settings immediately, without waiting for a reboot
sysctl -p /etc/sysctl.d/kubernetes.conf
8、調整系統時區
#設置系統時區爲中國/上海
timedatectl set-timezone Asia/Shanghai
#將當前的 UTC 時間寫入硬件時鐘
timedatectl set-local-rtc 0
#重啓依賴於系統時間的服務
systemctl restart rsyslog
systemctl restart crond
9、關閉系統不需要的服務
#關閉及禁用郵件服務
systemctl stop postfix && systemctl disable postfix
10、設置日誌的保存方式
在Centos7以後,因爲引導方式改爲了systemd,所以有兩個日誌系統同時在工作,默認的是rsyslogd,以及systemd journald
使用systemd journald更好一些,因此我們更改默認爲systemd journald,只保留一個日誌的保存方式。
1).創建保存日誌的目錄
mkdir /var/log/journal
2).創建配置文件存放目錄
mkdir /etc/systemd/journald.conf.d
3).創建配置文件
# systemd-journald drop-in configuration. The heredoc body (including
# its zh-CN "#" lines) is written verbatim to 99-prophet.conf, where
# "#" is journald's own comment character, so the body must not change.
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
#持久化保存到磁盤
Storage=persistent
#壓縮歷史日誌
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
#最大佔用空間10G
SystemMaxUse=10G
#單日誌文件最大200M
SystemMaxFileSize=200M
#日誌保存時間2周
MaxRetentionSec=2week
#不將日誌轉發到syslog
ForwardToSyslog=no
EOF
4).重啓systemd journald的配置
systemctl restart systemd-journald
11、打開文件數調整
echo "* soft nofile 65536" >> /etc/security/limits.conf
echo "* hard nofile 65536" >> /etc/security/limits.conf
12、升級Linux內核爲4.4版本(kernel-lt 長期支持版,實際安裝 4.4.213)
[root@k8smaster yum.repos.d]# yum install kernel-lt.x86_64 -y (4.4.213-1.el7.elrepo)
[root@k8smaster yum.repos.d]# awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg
CentOS Linux (4.4.213-1.el7.elrepo.x86_64) 7 (Core)
CentOS Linux, with Linux 3.10.0-123.el7.x86_64
CentOS Linux, with Linux 0-rescue-b7478dd50b1d41a5836a6a670b5cd8c1
[root@k8smaster yum.repos.d]#grub2-set-default 'CentOS Linux (4.4.213-1.el7.elrepo.x86_64) 7 (Core)'
[root@k8snode01 ~]# uname -a
Linux k8snode01 4.4.213-1.el7.elrepo.x86_64 #1 SMP Wed Feb 5 10:44:50 EST 2020 x86_64 x86_64 x86_64 GNU/Linux
12、kube-proxy開啓ipvs的前置條件
# br_netfilter must be loaded so the net.bridge.bridge-nf-call-* sysctls exist
modprobe br_netfilter #load the netfilter bridge module
# Write a module-load script; the heredoc body is the script's content
# and is written verbatim (the EOF delimiter is unquoted, but the body
# contains no $ expansions, so nothing is substituted).
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
# Make it executable (scripts under /etc/sysconfig/modules/ are run at boot
# on RHEL/CentOS), then run it once now and verify the modules are loaded.
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack_ipv4 #check that the modules were loaded
13、安裝docker
依賴 yum install yum-utils device-mapper-persistent-data lvm2 -y
yum install -y docker-ce #安裝docker
創建/etc/docker目錄
[ ! -d /etc/docker ] && mkdir /etc/docker
配置daemon
# Docker daemon configuration: use the systemd cgroup driver (so docker
# and kubelet agree on cgroup management) and cap json-file logs at 100m.
# FIX: the original heredoc was never terminated — without the closing
# EOF line, bash would swallow the rest of the script into daemon.json.
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
}
}
EOF
修改docker.service文件
/usr/lib/systemd/system/docker.service
# 警告:未加 TLS 的 tcp://0.0.0.0:2375 會把 Docker API 無認證地暴露給所有主機,--insecure-registry 0.0.0.0/0 信任任意倉庫;僅限隔離的實驗環境使用,生產環境必須啓用 TLS 並收窄 insecure-registry 範圍。
ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry 0.0.0.0/0 -H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375 --containerd=/run/containerd/containerd.sock
# 重啓docker服務
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
14、安裝倉庫和鏡像初始化
docker run -d -p 5000:5000 --restart=always --name private-docker-registry --privileged=true -v /data/registry:/var/lib/registry 192.168.23.100:5000/registry:v1
flannel網絡鏡像包
鏈接:https://pan.baidu.com/s/1-DYxDoU2X85aobaGFclKfA
提取碼:nson
k8s基礎鏡像包
鏈接:https://pan.baidu.com/s/17uV90VPXqoaezwccpTj2GQ
提取碼:13t3
導入鏡像
[root@k8smaster k8s_image]# more load_image.sh
#!/bin/bash
# Load every docker image archive in the image directory into the local
# docker daemon. Files whose names contain "load" (this script itself)
# are skipped — same filter as the original `ls | grep -v load`.
# FIXES vs. original: no parsing of `ls` output, no word-splitting on
# unquoted filenames, no predictable temp file in /tmp, and a failed
# `cd` now aborts instead of loading from the wrong directory.
set -euo pipefail

image_dir=/home/zhaiky/k8s_image

cd "$image_dir" || exit 1
for f in *; do
  # skip the loader script itself and any other *load* file
  [[ "$f" == *load* ]] && continue
  # skip anything that is not a regular file (e.g. subdirectories)
  [[ -f "$f" ]] || continue
  docker load -i "$f"
done
上傳鏡像到私有倉庫
docker push 192.168.23.100:5000/kube-apiserver:v1.15.1
docker push 192.168.23.100:5000/kube-proxy:v1.15.1
docker push 192.168.23.100:5000/kube-controller-manager:v1.15.1
docker push 192.168.23.100:5000/kube-scheduler:v1.15.1
docker push 192.168.23.100:5000/registry:v1
docker push 192.168.23.100:5000/coreos/flannel:v0.11.0-s390x
docker push 192.168.23.100:5000/coreos/flannel:v0.11.0-ppc64le
docker push 192.168.23.100:5000/coreos/flannel:v0.11.0-arm64
docker push 192.168.23.100:5000/coreos/flannel:v0.11.0-arm
docker push 192.168.23.100:5000/coreos/flannel:v0.11.0-amd64
docker push 192.168.23.100:5000/coredns:1.3.1
docker push 192.168.23.100:5000/etcd:3.3.10
docker push 192.168.23.100:5000/pause:3.1
15、安裝kubeadm、kubelet、kubectl
yum install -y kubeadm-1.15.1 kubelet-1.15.1 kubectl-1.15.1
systemctl enable kubelet && systemctl start kubelet
16、啓用kubectl命令的自動補全功能
# 安裝並配置bash-completion
yum install -y bash-completion
echo 'source /usr/share/bash-completion/bash_completion' >> /etc/profile
source /etc/profile
echo "source <(kubectl completion bash)" >> ~/.bashrc
source ~/.bashrc
17、初始化Master
配置文件包,包括kubeadm-config.yaml和kube-flannel.yml都在裏面
鏈接:https://pan.baidu.com/s/1g0G7Ion0n6lERpluNjh_9A
提取碼:6pxt
[root@k8smaster ~]# cp /home/zhaiky/kubeadm-config.yaml .
kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
關鍵日誌記錄
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.23.100:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:78c3f1e110ed1f954665ba55a689397c2dc4d35243dc4516dd00b0bac97172f6
18、安裝flannel網絡插件
[root@k8smaster ~]# cp /home/zhaiky/kube-flannel.yml .
[root@k8smaster ~]# kubectl create -f kube-flannel.yml
19、將k8s子節點加入到k8s主節點
kubeadm join 192.168.23.100:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:78c3f1e110ed1f954665ba55a689397c2dc4d35243dc4516dd00b0bac97172f6
[root@k8smaster zhaiky]# kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health":"true"}
[root@k8smaster ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8smaster Ready master 4m58s v1.15.1
k8snode01 NotReady <none> 21s v1.15.1
k8snode02 NotReady <none> 16s v1.15.1
[root@k8smaster ~]#
20、簡單操作
使用k8s運行一個nginx實例
[root@k8smaster ~]# kubectl run nginx --image=192.168.23.100:5000/nginx:v1 --port=80 --replicas=1
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created
[root@k8smaster ~]#
[root@k8smaster ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-5bbb49fb76-xzj6x 1/1 Running 0 59s 10.244.1.2 k8snode01 <none> <none>
[root@k8smaster ~]#
[root@k8smaster ~]# kubectl get deployment
NAME READY UP-TO-DATE AVAILABLE AGE
nginx 1/1 1 1 2m15s
[root@k8smaster ~]#
[root@k8smaster ~]# curl "http://10.244.1.2"
<title>Welcome to nginx!</title>
[root@k8smaster ~]# kubectl expose deployment nginx --port=80 --type=LoadBalancer
service/nginx exposed
[root@k8smaster ~]# kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 14h
nginx LoadBalancer 10.99.225.215 <pending> 80:32461/TCP 13s
[root@k8smaster ~]#
[root@k8smaster ~]# curl "http://192.168.23.101:32461"
<title>Welcome to nginx!</title>
[root@k8smaster ~]# curl "http://10.99.225.215"
<title>Welcome to nginx!</title>