Deploying Kubernetes on CentOS 7

1.1 初始配置

1.1.1 Disable SELinux and firewalld: initialization script

sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
grep SELINUX=disabled /etc/selinux/config
setenforce 0
getenforce

systemctl stop firewalld
systemctl disable firewalld
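
A quick sanity check that both changes took effect (a minimal sketch):

#firewalld should report inactive and disabled
systemctl is-active firewalld
systemctl is-enabled firewalld
#SELinux reports Permissive now, and Disabled after a reboot
sestatus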

1.1.2 Disable the swap partition (this can also be left enabled)

#swapoff -a
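
Note that swapoff -a only disables swap until the next reboot. To make it permanent, also comment out the swap entry in /etc/fstab; a sketch (verify the result before rebooting):

#Comment out every active swap line in /etc/fstab
sed -ri 's/^([^#].*[[:space:]]swap[[:space:]].*)$/#\1/' /etc/fstab
grep swap /etc/fstab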

1.1.3 Install a time synchronization service

#Optional; can be skipped
#yum install chrony -y
#systemctl start chronyd
#systemctl enable chronyd

1.1.4 Enable the ipvs kernel modules

#Quote EOF so the variables below expand when the script runs, not when this file is written
cat >/etc/sysconfig/modules/ipvs.modules<<'EOF'
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
for mod in $(ls $ipvs_mods_dir |grep -o "^[^.]*");do
    /sbin/modinfo -F filename $mod &>/dev/null
    if [ $? -eq 0 ];then
        /sbin/modprobe $mod
    fi
done
EOF

1.1.5 Make the module script executable

chmod +x /etc/sysconfig/modules/ipvs.modules
#Load the modules manually
bash /etc/sysconfig/modules/ipvs.modules
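
To confirm the ipvs modules actually loaded (a quick check):

#ip_vs and its scheduler modules (ip_vs_rr, ip_vs_wrr, ip_vs_sh, ...) should be listed
lsmod | grep -E '^ip_vs'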

1.2 Docker deployment and related configuration

1.2.1 Select three machines
    Master: 11.0.0.11
    Node01: 11.0.0.21
    Node02: 11.0.0.22
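
The scp commands later in this document address the other machines as node01 and node02, so those names must resolve on every host. A minimal sketch, assuming the hostnames shown in the shell prompts below (adjust to your own naming):

cat >>/etc/hosts<<EOF
11.0.0.11 m01
11.0.0.21 node01
11.0.0.22 node02
EOF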

1.2.2 Configure the Docker repository and install Docker

#Depends on your situation: if another Docker repo is already configured, skip the download, since the official Docker repo is very slow
#cd /etc/yum.repos.d
#wget http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install docker-ce -y

1.2.3 Start Docker on all nodes

1.2.3.1 Points to note when deploying Docker

Since Docker 1.13, the daemon automatically sets the default policy of the iptables FORWARD chain to DROP, which can break the packet forwarding that a Kubernetes cluster relies on. So after the docker service starts, set the FORWARD chain back to ACCEPT by editing /usr/lib/systemd/system/docker.service.

1.2.3.2 Set the default forwarding policy

sed -i '14aExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT' /usr/lib/systemd/system/docker.service
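
Inserting at a fixed line number is fragile across Docker versions, since the unit file layout can change. An equivalent alternative is a systemd drop-in, sketched below (the drop-in file name is arbitrary):

mkdir -p /etc/systemd/system/docker.service.d
cat >/etc/systemd/system/docker.service.d/forward-accept.conf<<EOF
[Service]
ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT
EOF
systemctl daemon-reload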

1.2.3.3 Kernel configuration checks

iptables -vnL

sysctl -a|grep bridge

cat >>/etc/sysctl.d/k8s.conf<<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

sysctl -p /etc/sysctl.d/k8s.conf
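
The two net.bridge.* keys only exist while the br_netfilter module is loaded; if sysctl -p fails with "No such file or directory", load the module first:

#Load br_netfilter so the net.bridge.* keys are available
modprobe br_netfilter
lsmod | grep br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf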

1.2.3.4 Distribute the prepared kernel config and unit file to the other nodes

scp /etc/sysctl.d/k8s.conf node01:/etc/sysctl.d/
scp /etc/sysctl.d/k8s.conf node02:/etc/sysctl.d/

scp /usr/lib/systemd/system/docker.service node01:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/docker.service node02:/usr/lib/systemd/system/
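
The same distribution as a loop, including a reload on each node so the edited unit file takes effect (a sketch, assuming ssh access with sufficient privileges):

for n in node01 node02; do
    scp /etc/sysctl.d/k8s.conf $n:/etc/sysctl.d/
    scp /usr/lib/systemd/system/docker.service $n:/usr/lib/systemd/system/
    ssh $n "sysctl -p /etc/sysctl.d/k8s.conf && systemctl daemon-reload"
done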

1.2.4 Start Docker on all nodes (it will also be started automatically by the deployment steps below)

#systemctl daemon-reload
#systemctl start docker
#systemctl enable docker

1.3 Kubernetes deployment and related configuration

1.3.1 Configure the Kubernetes repository

#CentOS 7 ships with its own repos; configure this one as needed
cat >>/etc/yum.repos.d/kubernetes.repo<<EOF
[kubernetes]
name=Kubernetes Repository
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
   http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
EOF
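
To confirm the repo is usable before installing anything (a quick check):

yum clean all
yum repolist enabled | grep -i kubernetes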

1.3.2 Configure kubelet on all nodes; this is only needed because swap is left enabled, for fault tolerance

#sed appends the flag to the end of the line
#No need to configure this in a production environment
#sed -i 's/$/&"--fail-swap-on=false"/g' /etc/sysconfig/kubelet

1.3.3 Preparation for the production Kubernetes deployment

sudo yum -y install epel-release
sudo yum update

1.3.4 Install the Kubernetes packages

#Configure the repo
sudo vim /etc/yum.repos.d/virt7-docker-common-release.repo
[virt7-docker-common-release]
name=virt7-docker-common-release
baseurl=http://cbs.centos.org/repos/virt7-docker-common-release/x86_64/os/
gpgcheck=0
#Install on the master node
sudo yum install -y --enablerepo=virt7-docker-common-release kubernetes etcd kubernetes-master ntp flannel
#Install on the node (slave) machines
sudo yum install -y --enablerepo=virt7-docker-common-release kubernetes kubernetes-node ntp flannel docker
#Synchronize the clock on all nodes
#sudo systemctl start ntpd
#sudo systemctl enable ntpd
#sudo ntpdate ntp1.aliyun.com
#sudo hwclock -w

1.3.5 Master: etcd configuration

[web@m01 yum.repos.d]$  grep -v '^#' /etc/etcd/etcd.conf
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://11.0.0.11:2379,http://127.0.0.1:2379"
ETCD_NAME="default"    #I keep the default etcd cluster member name; you can define your own
ETCD_ADVERTISE_CLIENT_URLS="http://11.0.0.11:2379"
#Start the etcd service
sudo systemctl start etcd
sudo systemctl enable etcd
#Check whether the etcd cluster is healthy
etcdctl cluster-health
#List the cluster members
[web@m01 yum.repos.d]$ etcdctl member list   #0.0.0.0 means it is listening on all interfaces
    8e9e05c52164694d: name=default peerURLs=http://localhost:2380 clientURLs=http://0.0.0.0:2379 isLeader=true
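
etcd's v2 HTTP API also exposes a health endpoint, handy when etcdctl is not installed on the machine you are checking from (a sketch):

#Expect {"health":"true"}
curl http://11.0.0.11:2379/health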

1.3.6 Master: Kubernetes common config

[web@m01 yum.repos.d]$ grep -v '^#' /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://11.0.0.11:8080"

1.3.7 Master: kube-apiserver configuration

[web@m01 yum.repos.d]$ grep -v '^#' /etc/kubernetes/apiserver

KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBELET_PORT="--kubelet-port=10250"
KUBE_ETCD_SERVERS="--etcd-servers=http://11.0.0.11:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"
KUBE_API_ARGS=""

1.3.8 Master: kube-controller-manager configuration

[web@m01 yum.repos.d]$  grep -v '^#' /etc/kubernetes/controller-manager

KUBE_CONTROLLER_MANAGER_ARGS=""

1.3.9 Master: kube-scheduler configuration

[web@m01 yum.repos.d]$ grep -v '^#' /etc/kubernetes/scheduler

KUBE_SCHEDULER_ARGS="--address=0.0.0.0"

1.3.10 Master: start the services

for i in  etcd kube-apiserver kube-controller-manager kube-scheduler flanneld;
do 
sudo systemctl restart $i; 
sudo systemctl enable $i;
done
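
Once the services are up, the control plane can be verified from the master (a quick check; -s points kubectl at the insecure API port configured above):

kubectl -s http://11.0.0.11:8080 get componentstatuses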

1.3.11 Master: network configuration

#Write the flannel network configuration into etcd (run once on the master, before starting flanneld on the nodes)
#etcdctl set /kube-centos/network/config '{"Network": "172.16.0.0/16"}'
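
flanneld on each node reads this key at startup (the prefix matches FLANNEL_ETCD_PREFIX in section 1.3.12). After uncommenting and running the command above once, verify the key:

etcdctl get /kube-centos/network/config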

1.3.12 Node01: network configuration

[web@s01 ~]$ grep -v '^#' /etc/sysconfig/flanneld

FLANNEL_ETCD_ENDPOINTS="http://11.0.0.11:2379"
FLANNEL_ETCD_PREFIX="/kube-centos/network"   #etcd path of the network config written on the master; it can be inspected there
#On the master, list the subnet entries flannel has registered in etcd
[web@m01 ~]$ etcdctl ls /kube-centos/network/subnets
/kube-centos/network/subnets/172.30.52.0-24
/kube-centos/network/subnets/172.30.53.0-24
[web@m01 ~]$ etcdctl get /kube-centos/network/subnets/172.30.52.0-24
{"PublicIP":"11.0.0.21","BackendType":"vxlan","BackendData":{"VtepMAC":"ea:bb:6e:be:bb:7e"}}
[web@m01 ~]$ etcdctl get /kube-centos/network/subnets/172.30.53.0-24
{"PublicIP":"11.0.0.11","BackendType":"vxlan","BackendData":{"VtepMAC":"ae:63:3a:83:34:31"}}

1.3.13 Node01: Kubernetes common config

[web@s01 ~]$ grep -v '^#' /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://11.0.0.11:8080"

1.3.14 Node01: kube-proxy configuration

[web@s01 ~]$  grep -v '^#' /etc/kubernetes/proxy
KUBE_PROXY_ARGS="--bind-address=0.0.0.0" #listen on all interfaces, reachable from any machine on the internal network

1.3.15 Node01: kubelet configuration

[web@s01 ~]$ grep -v '^#' /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=11.0.0.21"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=11.0.0.21"
KUBELET_API_SERVER="--api-servers=http://11.0.0.11:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS="--logtostderr=false --v=0 --log-dir=/data/logs/kubernetes"
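
KUBELET_ARGS points --log-dir at /data/logs/kubernetes, and kubelet will not create that directory itself; create it on each node before starting the service:

sudo mkdir -p /data/logs/kubernetes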

1.3.16 Node01: start the services

for i in flanneld kube-proxy kubelet docker;
do sudo systemctl restart $i;
sudo systemctl enable $i;
sudo systemctl status $i ;
done
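
After the node services start, confirm the node registered with the master (run on the master):

#node01 should appear with STATUS Ready
kubectl -s http://11.0.0.11:8080 get nodes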

1.4 Node02 follows the same pattern; its configuration is essentially identical

1.4.1 Configure the cluster endpoint and user (on all slave nodes)

kubectl config set-cluster default-cluster --server=http://11.0.0.11:8080
kubectl config set-context default-context --cluster=default-cluster --user=default-admin
kubectl config use-context default-context

1.4.2 View the pods

kubectl get pod

1.4.3 Configure an nginx pod

#I created a directory in advance for the Kubernetes deployment manifests
[web@m01 k8s]$ pwd
/devops/k8s

#Deploy the nginx pod
[web@m01 k8s]$ cat nginx-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 80
  restartPolicy: Always

#Create the nginx pod
[web@m01 k8s]$ kubectl create -f nginx-pod.yaml

[web@m01 k8s]$ cat nginx-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  type: NodePort
  sessionAffinity: ClientIP
  selector:
    app: nginx
  ports:
  - port: 80
    nodePort: 30080

#Create the nginx service
[web@m01 k8s]$ kubectl create -f nginx-svc.yaml
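
Once the service exists, the NodePort can be tested from any machine that can reach a node (a sketch, using Node01's address and the nodePort from the manifest):

kubectl get svc nginx-service
#Expect the nginx welcome page
curl http://11.0.0.21:30080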

1.5 Check that the pod is healthy

1.5.1 On either the master or a slave node, run the command below; if STATUS is Running the pod is working normally, while any other status means something went wrong during creation

[web@m01 k8s]$ kubectl get pod
NAME      READY     STATUS    RESTARTS   AGE
nginx     1/1       Running   0          6h

1.5.2 Troubleshooting

Many people find the pod stuck in ContainerCreating after creation. This means the certificate /etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt is a symlink to /etc/rhsm/ca/redhat-uep.pem whose target is missing, so the pod-infrastructure image cannot be pulled.

#The most direct fix: download the rpm that carries the certificate and extract it
wget http://mirror.centos.org/centos/7/os/x86_64/Packages/python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm

rpm2cpio python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm | cpio -iv --to-stdout ./etc/rhsm/ca/redhat-uep.pem | sudo tee /etc/rhsm/ca/redhat-uep.pem

#After these two steps the symlink resolves correctly; in plain terms, this just puts the CA certificate in place
[web@m01 k8s]$ ll /etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt
lrwxrwxrwx. 1 root root 27 Mar 14 07:43 /etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt -> /etc/rhsm/ca/redhat-uep.pem

#Now restart the relevant services and the pod status will change to Running