Manually installing Kubernetes 1.11 on CentOS 7

Original article: http://www.maogx.win/posts/35/

Introduction

This article describes how to install and configure a Kubernetes cluster using the officially provided binary packages.

Lab environment

Lab architecture

lab1: master 11.11.11.111
lab2: node 11.11.11.112
lab3: node 11.11.11.113

Vagrantfile used for the lab

# -*- mode: ruby -*-
# vi: set ft=ruby :

ENV["LC_ALL"] = "en_US.UTF-8"

Vagrant.configure("2") do |config|
   (1..3).each do |i|
     config.vm.define "lab#{i}" do |node|
       node.vm.box = "centos-7.4-docker-17"
       node.ssh.insert_key = false
       node.vm.hostname = "lab#{i}"
       node.vm.network "private_network", ip: "11.11.11.11#{i}"
       node.vm.provision "shell",
         inline: "echo hello from node #{i}"
       node.vm.provider "virtualbox" do |v|
         v.cpus = 2
         v.customize ["modifyvm", :id, "--name", "lab#{i}", "--memory", "2048"]
       end
     end
   end
end

Installation

Configure system parameters

Run the following on all nodes

# Temporarily disable SELinux
# To disable it permanently, edit the setting in /etc/sysconfig/selinux
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/sysconfig/selinux
setenforce 0

# Temporarily disable swap
# To disable it permanently, comment out the swap lines in /etc/fstab
swapoff -a

# Enable forwarding
# Docker changed its default firewall rules starting with 1.13:
# the FORWARD chain of the iptables filter table now defaults to DROP,
# which breaks pod communication across nodes in a Kubernetes cluster

iptables -P FORWARD ACCEPT

# Configure forwarding-related kernel parameters; without them errors may occur later
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
EOF
sysctl --system

# Load the IPVS kernel modules
# They must be reloaded after every reboot (see the persistence sketch after this block)
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4
lsmod | grep ip_vs
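
The modules above do not persist across reboots; a minimal sketch to load them automatically at boot via systemd-modules-load (the file name ipvs.conf is an arbitrary choice, not part of the original guide):

# Assumed persistence setup, optional
cat >/etc/modules-load.d/ipvs.conf<<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF
systemctl restart systemd-modules-load
lsmod | grep ip_vs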

Configure hosts resolution

Run the following on all nodes

cat >>/etc/hosts<<EOF
11.11.11.111 lab1
11.11.11.112 lab2
11.11.11.113 lab3
EOF
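
A quick optional check that every hostname resolves (assumes ping is available, as it is on the Vagrant boxes used here):

for h in lab1 lab2 lab3; do ping -c 1 $h; done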

Install and configure Docker

Kubernetes v1.11.0 recommends Docker v17.03; Docker v1.11, v1.12, and v1.13 also work, but newer Docker releases may not.
Testing showed that 17.09 does not work correctly: resource limits (memory/CPU) cannot be applied.

Run the following on all nodes

Install Docker
# Remove any existing docker-ce, then install the pinned version
yum remove -y docker-ce docker-ce-selinux container-selinux
yum install -y --setopt=obsoletes=0 \
docker-ce-17.03.1.ce-1.el7.centos \
docker-ce-selinux-17.03.1.ce-1.el7.centos
Start Docker
systemctl enable docker && systemctl restart docker
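
To confirm the pinned version is running and to check the cgroup driver (which should match the kubelet's cgroupDriver setting, cgroupfs, configured later in this guide):

docker version --format '{{.Server.Version}}'
docker info 2>/dev/null | grep -i 'cgroup driver'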

Install CFSSL

Run on the lab1 node only

# Download
# Baidu Cloud mirror: https://pan.baidu.com/s/1kgV40nwHy1IKnnLD6zH4cQ password: alyj
mkdir -pv /server/software/k8s
cd /server/software/k8s
yum install -y wget
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

# Install
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
chmod +x /usr/local/bin/cfssl*
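
A quick check that the binary landed on the PATH and is executable:

cfssl version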

Configure the CA

Run on the lab1 node only

This CA configuration is reused later when configuring both etcd and Kubernetes.

mkdir -pv $HOME/ssl && cd $HOME/ssl
cat >ca-config.json<<EOF
{
 "signing": {
   "default": {
     "expiry": "87600h"
   },
   "profiles": {
     "kubernetes": {
       "usages": [
           "signing",
           "key encipherment",
           "server auth",
           "client auth"
       ],
       "expiry": "87600h"
     }
   }
 }
}
EOF

Configure the etcd cluster

Generate the etcd CA and certificates

Run on the lab1 node only

# Write the CSR config
cat >etcd-ca-csr.json<<EOF
{
 "CN": "etcd",
 "key": {
   "algo": "rsa",
   "size": 2048
 },
 "names": [
   {
     "C": "CN",
     "ST": "BeiJing",
     "L": "BeiJing",
     "O": "etcd",
     "OU": "Etcd Security"
   }
 ]
}
EOF

# Generate the etcd root CA
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare etcd-ca

cat >etcd-csr.json<<EOF
{
   "CN": "etcd",
   "hosts": [
     "127.0.0.1",
     "11.11.11.111",
     "11.11.11.112",
     "11.11.11.113"
   ],
   "key": {
       "algo": "rsa",
       "size": 2048
   },
   "names": [
       {
           "C": "CN",
           "ST": "BeiJing",
           "L": "BeiJing",
           "O": "etcd",
           "OU": "Etcd Security"
       }
   ]
}
EOF

# Generate the etcd server/peer certificate
cfssl gencert -ca=etcd-ca.pem -ca-key=etcd-ca-key.pem -config=ca-config.json \
-profile=kubernetes etcd-csr.json | cfssljson -bare etcd
mkdir -pv /etc/etcd/ssl
cp etcd*.pem /etc/etcd/ssl
ls /etc/etcd/ssl/etcd*.pem

# Copy the certificates to the other nodes
cd /etc/etcd && tar cvzf etcd-ssl.tgz ssl/
scp /etc/etcd/etcd-ssl.tgz lab2:~/
scp /etc/etcd/etcd-ssl.tgz lab3:~/
ssh lab2 'mkdir -pv /etc/etcd && tar xf etcd-ssl.tgz -C /etc/etcd && ls -l /etc/etcd/ssl'
ssh lab3 'mkdir -pv /etc/etcd && tar xf etcd-ssl.tgz -C /etc/etcd && ls -l /etc/etcd/ssl'
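
Before starting etcd it is worth confirming that the certificate's SANs cover every node IP; a sketch using the cfssl-certinfo binary installed earlier (the grep window is just a convenience):

cfssl-certinfo -cert /etc/etcd/ssl/etcd.pem | grep -A 6 '"sans"'
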
Install and start etcd

Run the following on all nodes

# Install
# Baidu Cloud mirror: https://pan.baidu.com/s/1IVHyMqiJrlq9gmbF49Ly3Q password: w5nx
mkdir -pv /server/software/k8s
cd /server/software/k8s
yum install -y wget
wget https://github.com/coreos/etcd/releases/download/v3.2.18/etcd-v3.2.18-linux-amd64.tar.gz
tar xf etcd-v3.2.18-linux-amd64.tar.gz
mv etcd-v3.2.18-linux-amd64 /usr/local/etcd-v3.2.18
ln -sv /usr/local/etcd-v3.2.18 /usr/local/etcd
cd /usr/local/etcd && mkdir bin && mv etcd etcdctl bin
/usr/local/etcd/bin/etcd --version
cd $HOME

# Configure the systemd unit
export ETCD_NAME=$(hostname)
export INTERNAL_IP=$(hostname -i | awk '{print $NF}')
export ETCD_CLUSTER='lab1=https://11.11.11.111:2380,lab2=https://11.11.11.112:2380,lab3=https://11.11.11.113:2380'
mkdir -pv /data/etcd
cat > /etc/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/data/etcd
EnvironmentFile=-/etc/etcd/etcd.conf
ExecStart=/usr/local/etcd/bin/etcd \\
 --name ${ETCD_NAME} \\
 --cert-file=/etc/etcd/ssl/etcd.pem \\
 --key-file=/etc/etcd/ssl/etcd-key.pem \\
 --peer-cert-file=/etc/etcd/ssl/etcd.pem \\
 --peer-key-file=/etc/etcd/ssl/etcd-key.pem \\
 --trusted-ca-file=/etc/etcd/ssl/etcd-ca.pem \\
 --peer-trusted-ca-file=/etc/etcd/ssl/etcd-ca.pem \\
 --initial-advertise-peer-urls https://${INTERNAL_IP}:2380 \\
 --listen-peer-urls https://${INTERNAL_IP}:2380 \\
 --listen-client-urls https://${INTERNAL_IP}:2379,https://127.0.0.1:2379 \\
 --advertise-client-urls https://${INTERNAL_IP}:2379 \\
 --initial-cluster-token my-etcd-token \\
 --initial-cluster $ETCD_CLUSTER \\
 --initial-cluster-state new \\
 --data-dir=/data/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# Start and enable on boot
systemctl daemon-reload
systemctl start etcd
systemctl enable etcd
Check etcd cluster health
/usr/local/etcd/bin/etcdctl --endpoints "https://127.0.0.1:2379" \
 --ca-file=/etc/etcd/ssl/etcd-ca.pem \
 --cert-file=/etc/etcd/ssl/etcd.pem \
 --key-file=/etc/etcd/ssl/etcd-key.pem \
 cluster-health
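
The same TLS flags also work for listing the members, to confirm all three peers joined the cluster:

/usr/local/etcd/bin/etcdctl --endpoints "https://127.0.0.1:2379" \
 --ca-file=/etc/etcd/ssl/etcd-ca.pem \
 --cert-file=/etc/etcd/ssl/etcd.pem \
 --key-file=/etc/etcd/ssl/etcd-key.pem \
 member list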

Generate the Kubernetes CA and component certificates

# Change into the working directory
cd $HOME/ssl

# Configure the root CA
cat >ca-csr.json<<EOF
{
 "CN": "kubernetes",
 "key": {
   "algo": "rsa",
   "size": 2048
 },
 "names": [
   {
     "C": "CN",
     "ST": "BeiJing",
     "L": "BeiJing",
     "O": "k8s",
     "OU": "System"
   }
 ],
 "ca": {
    "expiry": "87600h"
 }
}
EOF

# Generate the root CA
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
ls ca*.pem

# Configure the kube-apiserver certificate
# 10.96.0.1 is the first IP in the service-cluster-ip-range passed to kube-apiserver
cat >kube-apiserver-csr.json<<EOF
{
   "CN": "kube-apiserver",
   "hosts": [
     "127.0.0.1",
     "11.11.11.111",
     "11.11.11.112",
     "11.11.11.113",
     "10.96.0.1",
     "kubernetes",
     "kubernetes.default",
     "kubernetes.default.svc",
     "kubernetes.default.svc.cluster",
     "kubernetes.default.svc.cluster.local"
   ],
   "key": {
       "algo": "rsa",
       "size": 2048
   },
   "names": [
       {
           "C": "CN",
           "ST": "BeiJing",
           "L": "BeiJing",
           "O": "k8s",
           "OU": "System"
       }
   ]
}
EOF

# Generate the kube-apiserver certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
-profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver
ls kube-apiserver*.pem

# Configure the kube-controller-manager certificate
cat >kube-controller-manager-csr.json<<EOF
{
   "CN": "system:kube-controller-manager",
   "hosts": [
     "127.0.0.1",
     "11.11.11.111",
     "11.11.11.112",
     "11.11.11.113"
   ],
   "key": {
       "algo": "rsa",
       "size": 2048
   },
   "names": [
       {
           "C": "CN",
           "ST": "BeiJing",
           "L": "BeiJing",
           "O": "system:kube-controller-manager",
           "OU": "System"
       }
   ]
}
EOF

# Generate the kube-controller-manager certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
-profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
ls kube-controller-manager*.pem

# Configure the kube-scheduler certificate
cat >kube-scheduler-csr.json<<EOF
{
   "CN": "system:kube-scheduler",
   "hosts": [
     "127.0.0.1",
     "11.11.11.111",
     "11.11.11.112",
     "11.11.11.113"
   ],
   "key": {
       "algo": "rsa",
       "size": 2048
   },
   "names": [
       {
           "C": "CN",
           "ST": "BeiJing",
           "L": "BeiJing",
           "O": "system:kube-scheduler",
           "OU": "System"
       }
   ]
}
EOF

# Generate the kube-scheduler certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
-profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
ls kube-scheduler*.pem

# Configure the kube-proxy certificate
cat >kube-proxy-csr.json<<EOF
{
   "CN": "system:kube-proxy",
   "key": {
       "algo": "rsa",
       "size": 2048
   },
   "names": [
       {
           "C": "CN",
           "ST": "BeiJing",
           "L": "BeiJing",
           "O": "system:kube-proxy",
           "OU": "System"
       }
   ]
}
EOF

# Generate the kube-proxy certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
-profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
ls kube-proxy*.pem

# Configure the admin certificate
cat >admin-csr.json<<EOF
{
   "CN": "admin",
   "key": {
       "algo": "rsa",
       "size": 2048
   },
   "names": [
       {
           "C": "CN",
           "ST": "BeiJing",
           "L": "BeiJing",
           "O": "system:masters",
           "OU": "System"
       }
   ]
}
EOF

# Generate the admin certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
-profile=kubernetes admin-csr.json | cfssljson -bare admin
ls admin*.pem

# Copy the generated certificates
mkdir -pv /etc/kubernetes/pki
cp ca*.pem admin*.pem kube-proxy*.pem kube-scheduler*.pem kube-controller-manager*.pem kube-apiserver*.pem /etc/kubernetes/pki
cd /etc/kubernetes && tar cvzf pki.tgz pki/
scp /etc/kubernetes/pki.tgz lab2:~/
scp /etc/kubernetes/pki.tgz lab3:~/
ssh lab2 'mkdir -pv /etc/kubernetes && tar xf pki.tgz -C /etc/kubernetes && ls -l /etc/kubernetes/pki'
ssh lab3 'mkdir -pv /etc/kubernetes && tar xf pki.tgz -C /etc/kubernetes && ls -l /etc/kubernetes/pki'
cd $HOME
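
An optional double-check that the kube-apiserver certificate covers the service IP and the cluster DNS names (plain openssl, nothing specific to this guide):

openssl x509 -in /etc/kubernetes/pki/kube-apiserver.pem -noout -text \
 | grep -A 1 'Subject Alternative Name'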

Install the Kubernetes binaries

# Download
# Downloading from dl.k8s.io may require a proxy; if that is not an option, use the mirror below
# Baidu Cloud mirror: https://pan.baidu.com/s/1OI9Q4BRp7jNJUmsA8IAkbA password: tnx5
cd /server/software/k8s
wget https://dl.k8s.io/v1.11.0/kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin
mkdir -pv /usr/local/kubernetes-v1.11.0/bin
cp kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubelet kubectl /usr/local/kubernetes-v1.11.0/bin
ln -sv /usr/local/kubernetes-v1.11.0 /usr/local/kubernetes
cp /usr/local/kubernetes/bin/kubectl /usr/local/bin/kubectl
kubectl version --client
cd $HOME

Generate the kubeconfig files

# Use TLS bootstrapping
export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /etc/kubernetes/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

# Create the kubelet bootstrapping kubeconfig
cd /etc/kubernetes
export KUBE_APISERVER="https://11.11.11.111:6443"
kubectl config set-cluster kubernetes \
 --certificate-authority=/etc/kubernetes/pki/ca.pem \
 --embed-certs=true \
 --server=${KUBE_APISERVER} \
 --kubeconfig=kubelet-bootstrap.conf
kubectl config set-credentials kubelet-bootstrap \
 --token=${BOOTSTRAP_TOKEN} \
 --kubeconfig=kubelet-bootstrap.conf
kubectl config set-context default \
 --cluster=kubernetes \
 --user=kubelet-bootstrap \
 --kubeconfig=kubelet-bootstrap.conf
kubectl config use-context default --kubeconfig=kubelet-bootstrap.conf

# Create the kube-controller-manager kubeconfig
export KUBE_APISERVER="https://11.11.11.111:6443"
kubectl config set-cluster kubernetes \
 --certificate-authority=/etc/kubernetes/pki/ca.pem \
 --embed-certs=true \
 --server=${KUBE_APISERVER} \
 --kubeconfig=kube-controller-manager.conf
kubectl config set-credentials kube-controller-manager \
 --client-certificate=/etc/kubernetes/pki/kube-controller-manager.pem \
 --client-key=/etc/kubernetes/pki/kube-controller-manager-key.pem \
 --embed-certs=true \
 --kubeconfig=kube-controller-manager.conf
kubectl config set-context default \
 --cluster=kubernetes \
 --user=kube-controller-manager \
 --kubeconfig=kube-controller-manager.conf
kubectl config use-context default --kubeconfig=kube-controller-manager.conf

# Create the kube-scheduler kubeconfig
export KUBE_APISERVER="https://11.11.11.111:6443"
kubectl config set-cluster kubernetes \
 --certificate-authority=/etc/kubernetes/pki/ca.pem \
 --embed-certs=true \
 --server=${KUBE_APISERVER} \
 --kubeconfig=kube-scheduler.conf
kubectl config set-credentials kube-scheduler \
 --client-certificate=/etc/kubernetes/pki/kube-scheduler.pem \
 --client-key=/etc/kubernetes/pki/kube-scheduler-key.pem \
 --embed-certs=true \
 --kubeconfig=kube-scheduler.conf
kubectl config set-context default \
 --cluster=kubernetes \
 --user=kube-scheduler \
 --kubeconfig=kube-scheduler.conf
kubectl config use-context default --kubeconfig=kube-scheduler.conf

# Create the kube-proxy kubeconfig
export KUBE_APISERVER="https://11.11.11.111:6443"
kubectl config set-cluster kubernetes \
 --certificate-authority=/etc/kubernetes/pki/ca.pem \
 --embed-certs=true \
 --server=${KUBE_APISERVER} \
 --kubeconfig=kube-proxy.conf
kubectl config set-credentials kube-proxy \
 --client-certificate=/etc/kubernetes/pki/kube-proxy.pem \
 --client-key=/etc/kubernetes/pki/kube-proxy-key.pem \
 --embed-certs=true \
 --kubeconfig=kube-proxy.conf
kubectl config set-context default \
 --cluster=kubernetes \
 --user=kube-proxy \
 --kubeconfig=kube-proxy.conf
kubectl config use-context default --kubeconfig=kube-proxy.conf

# Create the admin kubeconfig
export KUBE_APISERVER="https://11.11.11.111:6443"
kubectl config set-cluster kubernetes \
 --certificate-authority=/etc/kubernetes/pki/ca.pem \
 --embed-certs=true \
 --server=${KUBE_APISERVER} \
 --kubeconfig=admin.conf
kubectl config set-credentials admin \
 --client-certificate=/etc/kubernetes/pki/admin.pem \
 --client-key=/etc/kubernetes/pki/admin-key.pem \
 --embed-certs=true \
 --kubeconfig=admin.conf
kubectl config set-context default \
 --cluster=kubernetes \
 --user=admin \
 --kubeconfig=admin.conf
kubectl config use-context default --kubeconfig=admin.conf

# Copy kubelet-bootstrap.conf and kube-proxy.conf to the other nodes
scp kubelet-bootstrap.conf kube-proxy.conf lab2:/etc/kubernetes
scp kubelet-bootstrap.conf kube-proxy.conf lab3:/etc/kubernetes
cd $HOME
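
To sanity-check a generated kubeconfig (the credentials are embedded, so each file is self-contained); shown for admin.conf as an example:

kubectl config view --kubeconfig=/etc/kubernetes/admin.conf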

Configure the master components

Run on the lab1 node only

Configure and start kube-apiserver
# Copy the etcd certificates
mkdir -pv /etc/kubernetes/pki/etcd
cd /etc/etcd/ssl
cp etcd-ca.pem etcd-key.pem etcd.pem /etc/kubernetes/pki/etcd

# Generate the service account key pair
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
ls /etc/kubernetes/pki/sa.*
cd $HOME

# systemd unit file
cat >/etc/systemd/system/kube-apiserver.service<<EOF
[Unit]
Description=Kubernetes API Service
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/apiserver
ExecStart=/usr/local/kubernetes/bin/kube-apiserver \\
   \$KUBE_LOGTOSTDERR \\
   \$KUBE_LOG_LEVEL \\
   \$KUBE_ETCD_ARGS \\
   \$KUBE_API_ADDRESS \\
   \$KUBE_SERVICE_ADDRESSES \\
   \$KUBE_ADMISSION_CONTROL \\
   \$KUBE_APISERVER_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# This config file is shared by kube-apiserver, kube-controller-manager,
# kube-scheduler, kubelet, and kube-proxy
cat >/etc/kubernetes/config<<EOF
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=2"
EOF

cat >/etc/kubernetes/apiserver<<EOF
KUBE_API_ADDRESS="--advertise-address=11.11.11.111"
KUBE_ETCD_ARGS="--etcd-servers=https://11.11.11.111:2379,https://11.11.11.112:2379,https://11.11.11.113:2379 --etcd-cafile=/etc/kubernetes/pki/etcd/etcd-ca.pem --etcd-certfile=/etc/kubernetes/pki/etcd/etcd.pem --etcd-keyfile=/etc/kubernetes/pki/etcd/etcd-key.pem"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.96.0.0/12"
KUBE_ADMISSION_CONTROL="--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
KUBE_APISERVER_ARGS="--allow-privileged=true --authorization-mode=Node,RBAC --enable-bootstrap-token-auth=true --token-auth-file=/etc/kubernetes/token.csv --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/pki/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/pki/kube-apiserver-key.pem --client-ca-file=/etc/kubernetes/pki/ca.pem --service-account-key-file=/etc/kubernetes/pki/sa.pub --enable-swagger-ui=true --secure-port=6443 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --anonymous-auth=false --kubelet-client-certificate=/etc/kubernetes/pki/admin.pem --kubelet-client-key=/etc/kubernetes/pki/admin-key.pem"
EOF

# Start
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver

# Test access from a browser
https://11.11.11.111:6443/swaggerapi
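
Since --anonymous-auth=false is set above, an unauthenticated browser request will be rejected with 401; a sketch of an authenticated health check using the admin client certificate instead:

curl --cacert /etc/kubernetes/pki/ca.pem \
 --cert /etc/kubernetes/pki/admin.pem \
 --key /etc/kubernetes/pki/admin-key.pem \
 https://11.11.11.111:6443/healthz
# Expected output: ok
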
Configure and start kube-controller-manager
# systemd unit file
cat >/etc/systemd/system/kube-controller-manager.service<<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/controller-manager
ExecStart=/usr/local/kubernetes/bin/kube-controller-manager \\
   \$KUBE_LOGTOSTDERR \\
   \$KUBE_LOG_LEVEL \\
   \$KUBECONFIG \\
   \$KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

cat >/etc/kubernetes/controller-manager<<EOF
KUBECONFIG="--kubeconfig=/etc/kubernetes/kube-controller-manager.conf"
KUBE_CONTROLLER_MANAGER_ARGS="--address=127.0.0.1 --cluster-cidr=10.244.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem --service-account-private-key-file=/etc/kubernetes/pki/sa.key --root-ca-file=/etc/kubernetes/pki/ca.pem --leader-elect=true --use-service-account-credentials=true --node-monitor-grace-period=10s --pod-eviction-timeout=10s --allocate-node-cidrs=true --controllers=*,bootstrapsigner,tokencleaner"
EOF

# Start
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager
Configure and start kube-scheduler
cat >/etc/systemd/system/kube-scheduler.service<<EOF
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/scheduler
ExecStart=/usr/local/kubernetes/bin/kube-scheduler \\
           \$KUBE_LOGTOSTDERR \\
           \$KUBE_LOG_LEVEL \\
           \$KUBECONFIG \\
           \$KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

cat >/etc/kubernetes/scheduler<<EOF
KUBECONFIG="--kubeconfig=/etc/kubernetes/kube-scheduler.conf"
KUBE_SCHEDULER_ARGS="--leader-elect=true --address=127.0.0.1"
EOF

# Start
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler
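
Both components bind an insecure status port on 127.0.0.1 in this release (10252 for the controller manager, 10251 for the scheduler), so a quick local health check is:

curl http://127.0.0.1:10252/healthz   # kube-controller-manager
curl http://127.0.0.1:10251/healthz   # kube-scheduler
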
Configure kubectl
rm -rf $HOME/.kube
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get no
Check component status
kubectl get componentstatuses
Configure kubelet TLS bootstrapping
# Grant the kubelet-bootstrap user from the bootstrap token file the system:node-bootstrapper cluster role
kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap

Configure the node components

Run the following on all nodes

Install the CNI plugins
# Install the CNI plugins
# Baidu Cloud mirror: https://pan.baidu.com/s/1-PputObLs5jouXLnuBCI6Q password: tzqm
cd /server/software/k8s
wget https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz
mkdir -pv /opt/cni/bin
tar xf cni-plugins-amd64-v0.7.1.tgz -C /opt/cni/bin
ls -l /opt/cni/bin
cd $HOME
Configure and start kubelet
# systemd unit file
mkdir -pv /data/kubelet
cat >/etc/systemd/system/kubelet.service<<EOF
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/data/kubelet
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/local/kubernetes/bin/kubelet \\
           \$KUBE_LOGTOSTDERR \\
           \$KUBE_LOG_LEVEL \\
           \$KUBELET_CONFIG \\
           \$KUBELET_HOSTNAME \\
           \$KUBELET_POD_INFRA_CONTAINER \\
           \$KUBELET_ARGS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

cat >/etc/kubernetes/config<<EOF
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=2"
EOF

# Note: change the IP for each node
cat >/etc/kubernetes/kubelet<<EOF
KUBELET_HOSTNAME="--hostname-override=11.11.11.111"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1"
KUBELET_CONFIG="--config=/etc/kubernetes/kubelet-config.yml"
KUBELET_ARGS="--bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.conf --kubeconfig=/etc/kubernetes/kubelet.conf --cert-dir=/etc/kubernetes/pki --network-plugin=cni --cni-bin-dir=/opt/cni/bin --cni-conf-dir=/etc/cni/net.d"
EOF

# Note: change the IP for each node
# lab1, lab2, and lab3 each use their own IP
cat >/etc/kubernetes/kubelet-config.yml<<EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 11.11.11.111
port: 10250
cgroupDriver: cgroupfs
clusterDNS:
 - 10.96.0.10
clusterDomain: cluster.local.
hairpinMode: promiscuous-bridge
serializeImagePulls: false
authentication:
 x509:
   clientCAFile: /etc/kubernetes/pki/ca.pem
EOF

# Start
systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl status kubelet
Approve the certificate requests
# Run the following on a node where kubectl is configured

# List pending CSRs
kubectl get csr

# Approve (the CSR name below is an example; use the names shown by kubectl get csr)
kubectl certificate approve node-csr-Yiiv675wUCvQl3HH11jDr0cC9p3kbrXWrxvG3EjWGoE

# List the nodes
# At this point the nodes are NotReady
kubectl get nodes

# On the node, check the generated files
ls -l /etc/kubernetes/kubelet.conf
ls -l /etc/kubernetes/pki/kubelet*
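
With several nodes it can be convenient to approve every pending request in one go (a sketch using standard kubectl output piping):

kubectl get csr -o name | xargs kubectl certificate approve
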
Configure and start kube-proxy
# Install dependencies
yum install -y conntrack-tools

# systemd unit file
cat >/etc/systemd/system/kube-proxy.service<<EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/proxy
ExecStart=/usr/local/kubernetes/bin/kube-proxy \\
   \$KUBE_LOGTOSTDERR \\
   \$KUBE_LOG_LEVEL \\
   \$KUBECONFIG \\
   \$KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# Note: change the IPs for each node
# lab1, lab2, and lab3 each use their own IP
# kube-proxy 1.11.0 has a bug that breaks ipvs mode on CentOS 7,
# so this lab uses iptables mode;
# ipvs mode can be used with later versions
cat >/etc/kubernetes/proxy<<EOF
KUBECONFIG="--kubeconfig=/etc/kubernetes/kube-proxy.conf"
KUBE_PROXY_ARGS="--bind-address=11.11.11.111 --proxy-mode=iptables --hostname-override=11.11.11.111 --cluster-cidr=10.244.0.0/16"
EOF

# Start
systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl status kube-proxy
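
In iptables mode kube-proxy programs a KUBE-SERVICES chain in the nat table; a quick check that it exists:

iptables -t nat -L KUBE-SERVICES -n | head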

Set cluster roles

# Label lab1 as master
kubectl label nodes 11.11.11.111 node-role.kubernetes.io/master=

# Label lab2 and lab3 as nodes
kubectl label nodes 11.11.11.112 node-role.kubernetes.io/node=
kubectl label nodes 11.11.11.113 node-role.kubernetes.io/node=

# Taint the master so it normally does not accept workloads
kubectl taint nodes 11.11.11.111 node-role.kubernetes.io/master=true:NoSchedule

# List the nodes
# At this point the nodes are still NotReady
kubectl get no

Configure the flannel network

Run on lab1

# Download the manifest
mkdir flannel && cd flannel
wget https://raw.githubusercontent.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml

# Edit the config
# The Network value must match the --cluster-cidr (10.244.0.0/16) configured above for kube-controller-manager and kube-proxy
 net-conf.json: |
   {
     "Network": "10.244.0.0/16",
     "Backend": {
       "Type": "vxlan"
     }
   }

# Change the image to a mirror
image: registry.cn-shanghai.aliyuncs.com/gcr-k8s/flannel:v0.10.0-amd64

# If a node has multiple network interfaces, see kubernetes issue 39701:
# https://github.com/kubernetes/kubernetes/issues/39701
# You currently need to pass --iface in kube-flannel.yml to name the cluster's internal NIC,
# otherwise DNS may fail to resolve and containers may be unable to communicate.
# Download kube-flannel.yml locally and add --iface=<iface-name> to the flanneld arguments
   containers:
     - name: kube-flannel
       image: registry.cn-shanghai.aliyuncs.com/gcr-k8s/flannel:v0.10.0-amd64
       command:
       - /opt/bin/flanneld
       args:
       - --ip-masq
       - --kube-subnet-mgr
       - --iface=eth1

# Apply
kubectl apply -f kube-flannel.yml

# Check
kubectl get pods -n kube-system
kubectl get svc -n kube-system

# Check node status
# Once all flannel pods are running, the nodes become Ready
kubectl get no

Configure CoreDNS

Run on lab1

# Install
# 10.96.0.10 is the cluster DNS address set in the kubelet config
cd $HOME && mkdir coredns && cd coredns
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/deploy.sh
chmod +x deploy.sh
./deploy.sh -i 10.96.0.10 > coredns.yml
kubectl apply -f coredns.yml

# 查看
kubectl get pods -n kube-system
kubectl get svc -n kube-system

Testing

Start

kubectl run nginx --replicas=2 --image=nginx:alpine --port=80
kubectl expose deployment nginx --type=NodePort --name=example-service-nodeport
kubectl expose deployment nginx --name=example-service
kubectl scale --replicas=3 deployment/nginx

Check status

kubectl get deploy -o wide
kubectl get pods -o wide
kubectl get svc -o wide
kubectl describe svc example-service

DNS resolution

kubectl run curl --image=radial/busyboxplus:curl -i --tty
nslookup kubernetes
nslookup example-service
curl example-service

Access test

# 10.107.91.153 is the cluster IP reported by kubectl get svc (yours will differ)
curl "10.107.91.153:80"

# 32223 is the NodePort reported by kubectl get svc (yours will differ)
http://11.11.11.111:32223/
http://11.11.11.112:32223/
http://11.11.11.113:32223/

Cleanup

1
2
kubectl delete svc example-service example-service-nodeport
kubectl delete deploy nginx curl
