Binary Installation of a K8s Cluster (unfinished: the proxy functionality and the front-end UI have not been verified)

1. Environment

Operating system

CentOS 7.4
Host information
kb-001  192.168.0.11
kb-002  192.168.0.12
kb-003  192.168.0.13

Set up SSH key authentication from kube-master to the kube-nodes
ssh-keygen
ssh-copy-id  hostname
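
# For this environment that means, for example (assuming root logins to the nodes are allowed):
ssh-copy-id root@192.168.0.12
ssh-copy-id root@192.168.0.13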

Disable SELinux
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
setenforce 0 

Disable the swap partition
swapoff -a
vi /etc/fstab
Comment out the swap entry:
#/dev/mapper/cl-swap     swap                    swap    defaults        0 0
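
# Commenting the entry can also be done non-interactively; a small sketch (it simply prefixes any line containing " swap " with #):
sed -i '/ swap / s/^/#/' /etc/fstab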

Disable the system firewall
systemctl stop firewalld
systemctl disable firewalld 

Add kernel parameters via the /etc/sysctl.d/k8s.conf file
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.conf.all.forwarding = 1
vm.swappiness = 0
EOF
sysctl -p /etc/sysctl.d/k8s.conf

Or:
echo "net.ipv4.conf.all.forwarding = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
sysctl -p

# Possible issue
Running sysctl -p may report:
sysctl -p
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
Solution:
modprobe br_netfilter
ls /proc/sys/net/bridge
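
# To keep br_netfilter loaded after a reboot, a standard CentOS 7 approach is a modules-load.d entry (a sketch):
cat > /etc/modules-load.d/k8s.conf <<EOF
br_netfilter
EOF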

Create the certificates

Install cfssl
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
chmod +x cfssl_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x cfssljson_linux-amd64
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl-certinfo_linux-amd64
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
# The config.json file
cat > config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
EOF
# The csr.json file
cat > csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShenZhen",
      "L": "ShenZhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
Generate the CA certificate and private key
cfssl gencert -initca csr.json | cfssljson -bare ca
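
# Optionally inspect the generated CA with the cfssl-certinfo tool installed above:
cfssl-certinfo -cert ca.pem
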
Distribute the certificates
# Create the certificate directory
mkdir -p /etc/kubernetes/ssl
# Copy all the generated files into it
cp *.pem /etc/kubernetes/ssl
cp ca.csr /etc/kubernetes/ssl
# Copy the files to all the other k8s hosts as well
scp -r /etc/kubernetes/ssl 192.168.0.12:/etc/kubernetes/
scp -r /etc/kubernetes/ssl 192.168.0.13:/etc/kubernetes/
Create the etcd certificates
Three etcd nodes are configured by default here. If you may need to add more etcd nodes later, reserve a few extra IPs in the hosts list so new members pass certificate verification without re-issuing the certificate (see the example after the JSON below).
# The etcd-csr.json file
cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.0.11",
    "192.168.0.12",
    "192.168.0.13"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShenZhen",
      "L": "ShenZhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
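
For example, to leave room for one extra etcd member later, a spare address (192.168.0.14 is only a placeholder here) could be appended to the hosts list:
  "hosts": [
    "127.0.0.1",
    "192.168.0.11",
    "192.168.0.12",
    "192.168.0.13",
    "192.168.0.14"
  ],
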
Generate the etcd certificate and private key
cfssl gencert -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=config.json \
  -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
# Copy to the etcd servers
# kb-001 
cp etcd*.pem /etc/kubernetes/ssl/
# kb-002
scp etcd*.pem 192.168.0.12:/etc/kubernetes/ssl/
# kb-003
scp etcd*.pem 192.168.0.13:/etc/kubernetes/ssl/
# If etcd runs as a non-root user, it will complain about missing permission when reading the key without this
chmod 644 /etc/kubernetes/ssl/etcd-key.pem

kubectl talks to kube-apiserver over the secure port, which requires a TLS certificate and key for that connection.
# The admin-csr.json file
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShenZhen",
      "L": "ShenZhen",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
Generate the admin certificate and private key
cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=config.json \
  -profile=kubernetes admin-csr.json | cfssljson -bare admin
cp admin*.pem /etc/kubernetes/ssl/
scp admin*.pem 192.168.0.12:/etc/kubernetes/ssl/
scp admin*.pem 192.168.0.13:/etc/kubernetes/ssl/

Create the kubernetes certificate
# The kubernetes-csr.json file
cat > kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.0.11",
    "192.168.0.12",
    "192.168.0.13",
    "10.254.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShenZhen",
      "L": "ShenZhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
## In the hosts field above, 127.0.0.1 is the local host and 192.168.0.11-192.168.0.13 are the Master IPs (list one entry per Master). 10.254.0.1 is the kubernetes service VIP, normally the first IP of the service network; once the cluster is up you can confirm it with kubectl get svc.
Generate the kubernetes certificate and private key
cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=config.json \
  -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
# Copy into the directory
cp kubernetes*.pem /etc/kubernetes/ssl/
scp kubernetes*.pem 192.168.0.12:/etc/kubernetes/ssl/
scp kubernetes*.pem 192.168.0.13:/etc/kubernetes/ssl/
Create the kube-proxy certificate
# The kube-proxy-csr.json file
cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShenZhen",
      "L": "ShenZhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

Generate the kube-proxy certificate and private key
cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=config.json \
  -profile=kubernetes  kube-proxy-csr.json | cfssljson -bare kube-proxy

# Check the generated files
ls kube-proxy*
kube-proxy.csr  kube-proxy-csr.json  kube-proxy-key.pem  kube-proxy.pem
# Copy into the directory
cp kube-proxy* /etc/kubernetes/ssl/
scp kube-proxy* 192.168.0.12:/etc/kubernetes/ssl/
scp kube-proxy* 192.168.0.13:/etc/kubernetes/ssl/

Add the generated certificates to the host's CA trust bundle
cp /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem_bak
chmod 644 /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
cat /etc/kubernetes/ssl/etcd.pem >> /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
cat /etc/kubernetes/ssl/admin.pem >> /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
cat /etc/kubernetes/ssl/ca.pem >> /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
cat /etc/kubernetes/ssl/kube-proxy.pem >> /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
cat /etc/kubernetes/ssl/kubernetes.pem >> /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
chmod 444 /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem

2. K8s Component Overview

A highly-available k8s cluster has two critical pieces: the apiserver (master) and etcd.
apiserver (master): (must be HA) the core of the cluster, the cluster API endpoint and the hub through which all components communicate; it also enforces cluster security.
etcd: (must be HA) the cluster's data store, holding all configuration and state. It is extremely important; if the data is lost the cluster cannot be recovered, so an HA deployment starts with an HA etcd cluster.
kube-scheduler: the scheduler (elects a leader internally), the scheduling center for the cluster's Pods. With a default kubeadm install, --leader-elect is already set to true, ensuring only one kube-scheduler in the master cluster is active.
kube-controller-manager: the controller (elects a leader internally), the cluster state manager. When the cluster state differs from the desired state, kcm works to restore it; for example, when a pod dies, kcm creates a new one to restore the desired replica set. With a default kubeadm install, --leader-elect is already set to true, ensuring only one kube-controller-manager in the master cluster is active.
kubelet: the node agent; registers the node with the apiserver.
kube-proxy: one per node; forwards traffic from service VIPs to endpoint pods. Older versions mainly did this with iptables rules; the new 1.9 version can do it with lvs (ipvs) in kube-proxy.
The kube-router network plugin is also supported, making route control, publishing, and security-policy management easier.

3. Install the etcd Cluster

1. Install etcd
yum install -y etcd

2. Modify the service file (adjust the node name and IP addresses on each host; the example below is for kb-001)

cp /usr/lib/systemd/system/etcd.service /usr/lib/systemd/system/etcd.service_bak
cat > /usr/lib/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=root
# set GOMAXPROCS to number of processors
ExecStart=/usr/bin/etcd \
  --name=kb-001 \
  --cert-file=/etc/kubernetes/ssl/etcd.pem \
  --key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
  --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-advertise-peer-urls=https://192.168.0.11:2380 \
  --listen-peer-urls=https://192.168.0.11:2380 \
  --listen-client-urls=https://192.168.0.11:2379,http://127.0.0.1:2379 \
  --advertise-client-urls=https://192.168.0.11:2379 \
  --initial-cluster-token=k8s-etcd-cluster \
  --initial-cluster=kb-001=https://192.168.0.11:2380,kb-002=https://192.168.0.12:2380,kb-003=https://192.168.0.13:2380 \
  --initial-cluster-state=new \
  --data-dir=/data/etcd/
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

3. Start the service
service etcd start
systemctl enable etcd

4. Check the cluster status
etcdctl cluster-health
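
# If this fails because the client ports are TLS-only, pass the etcd certificates explicitly (same flags as used for flannel below):
etcdctl --endpoints=https://192.168.0.11:2379,https://192.168.0.12:2379,https://192.168.0.13:2379 \
        --ca-file=/etc/kubernetes/ssl/ca.pem \
        --cert-file=/etc/kubernetes/ssl/etcd.pem \
        --key-file=/etc/kubernetes/ssl/etcd-key.pem \
        cluster-health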

4. Install Flanneld

1. Install flanneld on every node
yum -y install flannel

2. Copy the docker drop-in configuration file
cp /usr/lib/systemd/system/docker.service.d/flannel.conf /etc/systemd/system/docker.service.d

3. Write the flannel network configuration into etcd
etcdctl --endpoints=https://192.168.0.11:2379,https://192.168.0.12:2379,https://192.168.0.13:2379 \
        --cert-file=/etc/kubernetes/ssl/etcd.pem \
        --ca-file=/etc/kubernetes/ssl/ca.pem \
        --key-file=/etc/kubernetes/ssl/etcd-key.pem \
        set /flannel/network/config '{"Network":"10.254.64.0/18","SubnetLen":24,"Backend":{"Type":"host-gw"}}'

# (Without etcd TLS the short form would simply be
#  etcdctl set /flannel/network/config '{ "Network": "10.1.0.0/16" }'
#  but the Network value must match the --cluster-cidr used later, 10.254.64.0/18.)

4. Edit the configuration file /etc/sysconfig/flanneld
# Flanneld configuration options
# etcd endpoints
FLANNEL_ETCD_ENDPOINTS="https://192.168.0.11:2379,https://192.168.0.12:2379,https://192.168.0.13:2379"
# must match the prefix written above: /flannel/network
FLANNEL_ETCD_PREFIX="/flannel/network"
# other options (see flanneld --help); etcd ssl authentication is added here, and -iface should be the host's interface name
FLANNEL_OPTIONS="-ip-masq=true -etcd-cafile=/etc/kubernetes/ssl/ca.pem -etcd-certfile=/etc/kubernetes/ssl/etcd.pem -etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem -iface=em1"

5. Modify the docker configuration
In the unit file (vim /usr/lib/systemd/system/docker.service), add the line:
EnvironmentFile=-/run/flannel/docker

In the drop-in file (vim /etc/systemd/system/docker.service.d/docker.conf),
add the parameter $DOCKER_NETWORK_OPTIONS to the dockerd command line.

dockerd invokes other docker commands at runtime (for example docker-proxy), so the directory containing the docker binaries must be on PATH;
when flanneld starts it writes the network settings into the DOCKER_NETWORK_OPTIONS variable in /run/flannel/docker, and passing that variable on the dockerd command line sets the docker0 bridge parameters;
if multiple EnvironmentFile options are specified, /run/flannel/docker must come last (so that docker0 uses the bip generated by flanneld);
do not disable the --iptables and --ip-masq options, which are enabled by default;
if the kernel is reasonably new, the overlay storage driver is recommended;
starting with docker 1.13 the default policy of the iptables FORWARD chain may be set to DROP, which makes pinging Pod IPs on other Nodes fail; if this happens, set the policy to ACCEPT manually:
$ sudo iptables -P FORWARD ACCEPT

Also write the following into /etc/rc.local so the FORWARD chain's default policy does not revert to DROP after a node reboot:
sleep 60 && /sbin/iptables -P FORWARD ACCEPT
To speed up image pulls you can use a domestic registry mirror and increase the download concurrency (if dockerd is already running, restart it for the change to take effect):
  $ cat /etc/docker/daemon.json
  {
    "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn", "hub-mirror.c.163.com"],
    "max-concurrent-downloads": 10
  }

firewalld (CentOS 7) / ufw (Ubuntu 16.04) must be disabled, otherwise duplicate iptables rules may be created;
it is also best to clean out old iptables rules and chains:
sudo systemctl daemon-reload
sudo systemctl stop firewalld
sudo systemctl disable firewalld
sudo iptables -F && sudo iptables -X && sudo iptables -F -t nat && sudo iptables -X -t nat

6. Start flanneld
systemctl daemon-reload
systemctl enable flanneld
systemctl start flanneld
systemctl status flanneld
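
# Optional check: flanneld should by now have written its subnet allocation and the docker options file referenced by EnvironmentFile above:
cat /run/flannel/subnet.env
cat /run/flannel/docker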

7. Restart docker
systemctl daemon-reload
systemctl restart docker

8. Restart kubelet (once it has been installed)

systemctl daemon-reload
systemctl restart kubelet
systemctl status kubelet

5. Configure the Kubernetes Cluster

1. Copy the binaries onto each of the three hosts
kubectl is installed on every machine you will operate from. The Master also needs the kube-apiserver, kube-scheduler and kube-controller-manager components. kube-scheduler decides which node each pod is placed on; in short, resource scheduling. kube-controller-manager runs the control loops (deployment controller, replication controller, endpoints controller, namespace controller, serviceaccounts controller, and so on) and interacts with kube-apiserver.

cp -r server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} /usr/local/bin/

2. Configure the kubectl kubeconfig file
The generated configuration is stored in the /root/.kube directory.
# Configure the kubernetes cluster entry
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443
# Configure the client credentials
kubectl config set-credentials admin \
  --client-certificate=/etc/kubernetes/ssl/admin.pem \
  --embed-certs=true \
  --client-key=/etc/kubernetes/ssl/admin-key.pem

kubectl config set-context kubernetes \
  --cluster=kubernetes \
  --user=admin

kubectl config use-context kubernetes

Configure kube-apiserver

When kubelet first starts it sends a TLS Bootstrapping request to kube-apiserver, which verifies that the token in the request matches its own configured token; if it matches, a certificate and key are generated for the kubelet automatically.
# Generate a token
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
df3b158fbdc425ae2ac70bbef0688921
# Create the token.csv file
cd /root/ssl
vi token.csv
df3b158fbdc425ae2ac70bbef0688921,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
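
# The same thing as a one-step sketch (BOOTSTRAP_TOKEN is only a local shell variable introduced here; whichever token ends up in token.csv must also be used later in bootstrap.kubeconfig):
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
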
# Copy it to the other masters
cp token.csv /etc/kubernetes/
scp token.csv 192.168.0.12:/etc/kubernetes/
scp token.csv 192.168.0.13:/etc/kubernetes/

# Create the audit policy file
cat > /etc/kubernetes/audit-policy.yaml <<EOF
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
- level: Metadata
EOF
# Copy
scp audit-policy.yaml 192.168.0.12:/etc/kubernetes/
scp audit-policy.yaml 192.168.0.13:/etc/kubernetes/

Create the kube-apiserver.service file
# Custom systemd service files usually live under /etc/systemd/system/
# Use each host's own local IP (the example below is for kb-001)
cat > /etc/systemd/system/kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
User=root
ExecStart=/usr/local/bin/kube-apiserver \
  --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
  --advertise-address=192.168.0.11 \
  --allow-privileged=true \
  --apiserver-count=3 \
  --audit-policy-file=/etc/kubernetes/audit-policy.yaml \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/var/log/kubernetes/audit.log \
  --authorization-mode=Node,RBAC \
  --bind-address=0.0.0.0 \
  --secure-port=6443 \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --enable-swagger-ui=true \
  --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  --etcd-certfile=/etc/kubernetes/ssl/etcd.pem \
  --etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem \
  --etcd-servers=https://192.168.0.11:2379,https://192.168.0.12:2379,https://192.168.0.13:2379 \
  --event-ttl=1h \
  --kubelet-https=true \
  --insecure-bind-address=127.0.0.1 \
  --insecure-port=8080 \
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-cluster-ip-range=10.254.0.0/18 \
  --service-node-port-range=1024-32000 \
  --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --enable-bootstrap-token-auth \
  --token-auth-file=/etc/kubernetes/token.csv \
  --v=1
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# Starting with k8s 1.8, --authorization-mode=Node must be added
# Starting with k8s 1.8, --admission-control=NodeRestriction must be added
# Starting with k8s 1.8, --audit-policy-file=/etc/kubernetes/audit-policy.yaml must be added

# Also pay attention to --service-node-port-range=1024-32000 (the value used above):
# this is the port range used when exposing services externally; randomly assigned NodePorts fall inside it, and explicitly specified ports must be inside it as well, as in the example below.
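
For illustration only, a hypothetical NodePort service manifest (the service name, selector label and ports are all assumptions, not part of this guide):
apiVersion: v1
kind: Service
metadata:
  name: nginx-nodeport
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
  - port: 80
    targetPort: 80
    nodePort: 30080    # must lie inside --service-node-port-range (1024-32000)
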
Start kube-apiserver
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver

Configure kube-controller-manager

The --cluster-signing-cert-file and --cluster-signing-key-file flags will be removed, so they are not configured here.
# Create the kube-controller-manager.service file
cat > /etc/systemd/system/kube-controller-manager.service <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
  --address=0.0.0.0 \
  --master=http://127.0.0.1:8080 \
  --allocate-node-cidrs=true \
  --service-cluster-ip-range=10.254.0.0/18 \
  --cluster-cidr=10.254.64.0/18 \
  --cluster-name=kubernetes \
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \
  --leader-elect=true \
  --v=1
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

Start kube-controller-manager
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager

Configure kube-scheduler

# Create the kube-scheduler.service file
cat > /etc/systemd/system/kube-scheduler.service <<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-scheduler \
  --address=0.0.0.0 \
  --master=http://127.0.0.1:8080 \
  --leader-elect=true \
  --v=1
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

Start kube-scheduler
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler

Verify the master components
kubectl get componentstatuses
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok                   
scheduler            Healthy   ok                   
etcd-1               Healthy   {"health": "true"}   
etcd-0               Healthy   {"health": "true"}   
etcd-2               Healthy   {"health": "true"}   

Configure kubelet

When kubelet starts it sends a TLS bootstrapping request to kube-apiserver. The kubelet-bootstrap user from the bootstrap token file must first be bound to the system:node-bootstrapper role; only then does kubelet have permission to create certificate signing requests (certificatesigningrequests).
# Create the role binding first
# --user=kubelet-bootstrap is the user name specified in /etc/kubernetes/token.csv, and it is also written into /etc/kubernetes/bootstrap.kubeconfig
# This only needs to be created once
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
Create the kubelet bootstrapping kubeconfig file
# Configure the cluster entry
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=bootstrap.kubeconfig
# Configure the client credentials
kubectl config set-credentials kubelet-bootstrap \
  --token=df3b158fbdc425ae2ac70bbef0688921 \
  --kubeconfig=bootstrap.kubeconfig
# Configure the context
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig
# Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
# Move the generated bootstrap.kubeconfig file into place
mv bootstrap.kubeconfig /etc/kubernetes/
--embed-certs set to true means the certificate-authority certificate is embedded into the generated bootstrap.kubeconfig file;
no key or certificate is specified when setting the kubelet client credentials; kube-apiserver generates them automatically later.

Create the kubelet.service file
# Create the kubelet working directory

mkdir /var/lib/kubelet
cat > /etc/systemd/system/kubelet.service <<EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
  --cgroup-driver=cgroupfs \
  --address=192.168.0.11 \
  --hostname-override=kb-001 \
  --pod-infra-container-image=registry-op.xxxxxxxxx.com/library/pause-amd64:3.0 \
  --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --cert-dir=/etc/kubernetes/ssl \
  --cluster-dns=10.254.0.2 \
  --cluster-domain=cluster.local. \
  --hairpin-mode promiscuous-bridge \
  --allow-privileged=true \
  --fail-swap-on=false \
  --serialize-image-pulls=false \
  --logtostderr=true \
  --max-pods=512 \
  --v=3

[Install]
WantedBy=multi-user.target
EOF

# Notes on the configuration above:
kb-001            the local hostname
10.254.0.2        the pre-allocated DNS address
cluster.local.    the domain of the kubernetes cluster
pause-amd64:3.0 is the pod infrastructure image, i.e. gcr.io's gcr.io/google_containers/pause-amd64:3.0; pulling it and pushing it into your own registry (as configured above) is faster.
--address must not be set to 127.0.0.1, otherwise Pods calling the kubelet API later will fail, because 127.0.0.1 inside a Pod points to the Pod itself rather than to the kubelet;
if --hostname-override is set, kube-proxy must set it as well, otherwise the Node may not be found;
--experimental-bootstrap-kubeconfig points to the bootstrap kubeconfig file; kubelet uses the user name and token in that file to send the TLS Bootstrapping request to kube-apiserver;
after the administrator approves the CSR, kubelet automatically creates the certificate and private key (kubelet-client.crt and kubelet-client.key) in the --cert-dir directory and writes them into the file specified by --kubeconfig (creating that file automatically);
it is recommended to put the kube-apiserver address in the --kubeconfig file; if --api-servers is not given, --require-kubeconfig must be set so the kube-apiserver address is read from the config file, otherwise kubelet will not find the kube-apiserver after starting (the log says the API Server was not found) and kubectl get nodes will not show the Node;
--cluster-dns specifies the kubedns Service IP (it can be allocated now and used later when the kubedns service is created) and --cluster-domain specifies the domain suffix; both must be set for either to take effect;
kubelet's cAdvisor listens on port 4194 on all interfaces by default, which is unsafe for machines reachable from the internet; the iptables rule specified by ExecStartPost restricts access to port 4194 to the internal network;
Note: if kubelet reports certificate-related errors on startup, one trick is to copy the master's ~/.kube/config file (generated automatically when kubectl was configured) to /etc/kubernetes/kubelet.kubeconfig on the node; the CSR step is then skipped and the kubelet joins the cluster as soon as it starts, as shown below.
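
# The copy mentioned in the note above, as commands run on the master (node IPs from this environment):
scp ~/.kube/config 192.168.0.12:/etc/kubernetes/kubelet.kubeconfig
scp ~/.kube/config 192.168.0.13:/etc/kubernetes/kubelet.kubeconfig
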
Start kubelet

systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl status kubelet

# If there are errors, use
journalctl -f -t kubelet and journalctl -u kubelet to locate the problem

Configure TLS authentication (approve the CSRs)
# List the csr names
kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-Pu4QYp3NAwlC6o8AG8iwdCl52CiqhjiSyrso3335JTs   1m        kubelet-bootstrap   Pending
node-csr-poycCHd7B8YPxc12EBgI3Rwe0wnDJah5uIGvQHzghVY   2m        kubelet-bootstrap   Pending
# Approve them
kubectl get csr | grep Pending | awk '{print $1}' | xargs kubectl certificate approve

Verify the nodes
kubectl get nodes
NAME            STATUS    ROLES     AGE       VERSION
kubernetes-64   Ready     <none>    12s       v1.9.1
kubernetes   Ready     <none>    17s       v1.9.1

# After approval the kubeconfig file and keys are generated automatically

# The kubeconfig file

ls /etc/kubernetes/kubelet.kubeconfig   
/etc/kubernetes/kubelet.kubeconfig

# The key files; note that if the csr is deleted, delete the files below and restart the kubelet service

ls /etc/kubernetes/ssl/kubelet*
/etc/kubernetes/ssl/kubelet-client.crt  /etc/kubernetes/ssl/kubelet.crt
/etc/kubernetes/ssl/kubelet-client.key  /etc/kubernetes/ssl/kubelet.key

Configure kube-proxy

# Configure the cluster entry
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=kube-proxy.kubeconfig

# Configure the client credentials

kubectl config set-credentials kube-proxy \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

# Configure the context
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

# Set the default context
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

# Copy the file to the nodes that need it
cp kube-proxy.kubeconfig  /etc/kubernetes/
scp kube-proxy.kubeconfig 192.168.0.12:/etc/kubernetes/
scp kube-proxy.kubeconfig 192.168.0.13:/etc/kubernetes/
Create the kube-proxy.service file
In 1.9 ipvs is officially beta, so we try enabling it here. --feature-gates=SupportIPVSProxyMode=false is the upstream default, so it must be switched to --feature-gates=SupportIPVSProxyMode=true, and --masquerade-all must also be added, otherwise no ipvs rules are created when a svc is created.

Enabling ipvs requires the ipvsadm tooling; install it on the nodes (the ipvs kernel modules are loaded as shown below):
yum install ipvsadm ipset -y
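
# ipvs mode also needs the ipvs kernel modules; loading them by hand (a sketch, the module names cover the rr scheduler used below):
for m in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4; do modprobe $m; done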

# Create the kube-proxy working directory
mkdir -p /var/lib/kube-proxy
cat > /etc/systemd/system/kube-proxy.service <<EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
  --bind-address=192.168.0.11 \
  --hostname-override=kb-001 \
  --cluster-cidr=10.254.64.0/18 \
  --masquerade-all \
  --feature-gates=SupportIPVSProxyMode=true \
  --proxy-mode=ipvs \
  --ipvs-min-sync-period=5s \
  --ipvs-sync-period=5s \
  --ipvs-scheduler=rr \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
  --logtostderr=true \
  --v=1
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

Start kube-proxy

systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl status kube-proxy

# Check ipvs

[root@kubernetes-65 ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.254.0.1:443 rr persistent 10800
  -> 172.16.1.64:6443             Masq    1      0          0         
  -> 172.16.1.65:6443             Masq    1      0          0  

# If there are errors, use
journalctl -f -t kube-proxy and journalctl -u kube-proxy to locate the problem

If Pods cannot reach external networks, add an iptables rule as follows:
iptables -t nat -I POSTROUTING -s 10.254.64.0/18 -j MASQUERADE
service iptables save
service iptables reload

At this point the installation of the Master and of the Master-and-Node components is complete.

Configure CoreDNS

Official site: https://coredns.io
Download the image

The official image:
coredns/coredns:1.0.6

Download the yaml file

wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed

mv coredns.yaml.sed coredns.yaml

# vim coredns.yaml

...
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local 10.254.0.0/18 {
          pods insecure
          upstream /etc/resolv.conf
          fallthrough in-addr.arpa ip6.arpa
        }
...        
        image: jicki/coredns:1.0.6
...        
  clusterIP: 10.254.0.2

# Configuration notes

# Here "kubernetes cluster.local" is followed by the service (svc) IP range

kubernetes cluster.local 10.254.0.0/18 

# clusterIP is the IP chosen for the DNS service

clusterIP: 10.254.0.2

Import the yaml file

# Import

kubectl apply -f coredns.yaml 

serviceaccount "coredns" created
clusterrole "system:coredns" created
clusterrolebinding "system:coredns" created
configmap "coredns" created
deployment "coredns" created
service "coredns" created

Check the DNS service

kubectl get pod,svc -n kube-system

NAME                          READY     STATUS    RESTARTS   AGE
po/coredns-6bd7d5dbb5-jh4fj   1/1       Running   0          19s

NAME          TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)         AGE
svc/coredns   ClusterIP   10.254.0.2   <none>        53/UDP,53/TCP   19s
Check the logs

kubectl logs -n kube-system coredns-6bd7d5dbb5-jh4fj

.:53
CoreDNS-1.0.1
linux/amd64, go1.9.2, 99e163c3
2017/12/20 09:34:24 [INFO] CoreDNS-1.0.1
2017/12/20 09:34:24 [INFO] linux/amd64, go1.9.2, 99e163c3
Verify the DNS service

Before verifying DNS: any pods, deployments, etc. created before DNS was deployed must be deleted and re-created, otherwise they cannot resolve names.

# Create a pod to test DNS (applied as shown after the manifest)
apiVersion: v1
kind: Pod
metadata:
  name: alpine
spec:
  containers:
  - name: alpine
    image: alpine
    command:
    - sh
    - -c
    - while true; do sleep 1; done
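
# Save the manifest above (the file name alpine.yaml is just an example) and create the pod:
kubectl apply -f alpine.yaml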

# Check the created resources

kubectl get pods,svc 
NAME                           READY     STATUS    RESTARTS   AGE
po/alpine                      1/1       Running   0          19s
po/nginx-dm-84f8f49555-tmqzm   1/1       Running   0          23s
po/nginx-dm-84f8f49555-wdk67   1/1       Running   0          23s

NAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
svc/kubernetes   ClusterIP   10.254.0.1      <none>        443/TCP   5h
svc/nginx-svc    ClusterIP   10.254.40.179   <none>        80/TCP    23s

# Test

kubectl exec -it alpine nslookup nginx-svc
nslookup: can't resolve '(null)': Name does not resolve

Name:      nginx-svc
Address 1: 10.254.40.179 nginx-svc.default.svc.cluster.local

kubectl exec -it alpine nslookup kubernetes
nslookup: can't resolve '(null)': Name does not resolve

Name:      kubernetes
Address 1: 10.254.0.1 kubernetes.default.svc.cluster.local

Note: if names fail to resolve, change --proxy-mode=ipvs to --proxy-mode=iptables in /etc/systemd/system/kube-proxy.service, then restart kube-proxy and retry (systemctl daemon-reload && systemctl restart kube-proxy).
If names still do not resolve, work through the following checks:
Check whether the DNS pod is running
Use the kubectl get pods command to confirm the DNS pod is running.
$ kubectl get pods --namespace=kube-system -l k8s-app=kube-dns
You should see something like:
NAME                                                       READY     STATUS    RESTARTS   AGE
...
kube-dns-v19-ezo1y                                         3/3       Running   0           1h
...

If no such pod is running, or the pod's status is failed/completed, the DNS add-on is not deployed by default in your environment and you need to deploy it manually.
Check the DNS pod for errors
Use kubectl logs to view the DNS daemon's logs, or inspect them directly with docker logs <container ID>
$ kubectl logs --namespace=kube-system $(kubectl get pods --namespace=kube-system -l k8s-app=kube-dns -o name) -c kubedns
$ kubectl logs --namespace=kube-system $(kubectl get pods --namespace=kube-system -l k8s-app=kube-dns -o name) -c dnsmasq

Is the DNS service up?
Use kubectl get service to check whether the DNS service has started.
$ kubectl get svc --namespace=kube-system
You should see:
NAME                    CLUSTER-IP     EXTERNAL-IP   PORT(S)             AGE
...
kube-dns                10.0.0.10      <none>        53/UDP,53/TCP        1h
...

Are the DNS endpoints exposed?
Confirm with the kubectl get endpoints command.
$ kubectl get ep kube-dns --namespace=kube-system
You should see something like:
NAME       ENDPOINTS                       AGE
kube-dns   10.180.3.17:53,10.180.3.17:53    1h

Set firewall rules
iptables -P FORWARD ACCEPT
iptables -t nat -I POSTROUTING -s 10.254.0.0/16 -j MASQUERADE

Operational command notes
Restart all services
systemctl restart etcd
systemctl restart docker
systemctl restart flanneld
systemctl restart kube-apiserver
systemctl restart kube-controller-manager
systemctl restart kube-scheduler
systemctl restart kubelet
systemctl restart kube-proxy

Stop all services
systemctl stop docker
systemctl stop flanneld
systemctl stop kube-apiserver
systemctl stop kube-controller-manager
systemctl stop kube-scheduler
systemctl stop kubelet
systemctl stop kube-proxy
systemctl stop etcd
rm -rf /data/etcd

Start all services
systemctl start etcd
systemctl start flanneld
systemctl start docker
systemctl start kube-apiserver
systemctl start kube-controller-manager
systemctl start kube-scheduler
systemctl start kubelet
systemctl start kube-proxy
iptables -P FORWARD ACCEPT
iptables -t nat -I POSTROUTING -s 10.254.0.0/16 -j MASQUERADE

View logs
journalctl -eu flanneld
journalctl -eu kube-apiserver
journalctl -eu kube-controller-manager
journalctl -eu kube-scheduler
journalctl -eu kubelet
journalctl -eu kube-proxy

Remove all binaries
rm -rf /usr/local/bin/kube-apiserver
rm -rf /usr/local/bin/kube-controller-manager
rm -rf /usr/local/bin/kube-scheduler
rm -rf /usr/local/bin/kubectl
rm -rf /usr/local/bin/kube-proxy
rm -rf /usr/local/bin/kubelet

Configure Nginx Ingress

Kubernetes currently has only three ways to expose services: LoadBalancer Service, NodePort Service, and Ingress. What is Ingress? Ingress uses a load balancer such as Nginx or HAProxy to expose Kubernetes services.

Official Nginx Ingress github: https://github.com/kubernetes/ingress-nginx/
Label the nodes used for scheduling

# The ingress controller can be run in several ways:
#  1. as a deployment, freely scheduled via replicas
#  2. as a daemonset, scheduled globally onto every node

# With a deployment we need to constrain the controller to specific nodes, so those nodes are given a label

# The default state:
kubectl get nodes
NAME            STATUS                     ROLES     AGE       VERSION
kubernetes-64   Ready,SchedulingDisabled   <none>    10d       v1.9.1
kubernetes-65   Ready                      <none>    10d       v1.9.1
kubernetes-66   Ready                      <none>    10d       v1.9.1

# Label nodes 65 and 66

# kubectl label nodes kubernetes-65 ingress=proxy
node "kubernetes-65" labeled
# kubectl label nodes kubernetes-66 ingress=proxy
node "kubernetes-66" labeled

# After labelling

# kubectl get nodes --show-labels
NAME            STATUS                     ROLES     AGE       VERSION   LABELS
kubernetes-64   Ready,SchedulingDisabled   <none>    10d       v1.9.1    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=kubernetes-64
kubernetes-65   Ready                      <none>    10d       v1.9.1    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,ingress=proxy,kubernetes.io/hostname=kubernetes-65
kubernetes-66   Ready                      <none>    10d       v1.9.1    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,ingress=proxy,kubernetes.io/hostname=kubernetes-66

# Download the images

# Official images
gcr.io/google_containers/defaultbackend:1.4
quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.10.0

# Domestic mirrors
jicki/defaultbackend:1.4
jicki/nginx-ingress-controller:0.10.0

# Download the yaml files

# Deploy the Nginx backend; the Nginx backend forwards requests for unknown domains to a default page.

curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/namespace.yaml

curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/default-backend.yaml

curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/configmap.yaml

curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/tcp-services-configmap.yaml

curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/udp-services-configmap.yaml

# Deploy the Ingress RBAC objects

curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/rbac.yaml

# Deploy the Ingress Controller component

curl -O https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/with-rbac.yaml

# tcp-services and udp-services: ingress does not forward raw tcp or udp itself, so two ConfigMap-based services are configured here and passed to the controller via --tcp-services-configmap and --udp-services-configmap

# A tcp example

apiVersion: v1
kind: ConfigMap
metadata:
  name: tcp-services
  namespace: ingress-nginx
data:
  9000: "default/tomcat:8080"

#  The configuration above forwards the tomcat:8080 service to port 9000 on the ingress nodes

# A udp example

apiVersion: v1
kind: ConfigMap
metadata:
  name: udp-services
  namespace: ingress-nginx
data:
  53: "kube-system/kube-dns:53"
# Replace all the images

sed -i 's/gcr\.io\/google_containers/jicki/g' *
sed -i 's/quay\.io\/kubernetes-ingress-controller/jicki/g' *

# Two nodes were labelled above, so set replicas: 2
# Edit the yaml file: add the rbac serviceAccountName, hostNetwork and nodeSelector under the second spec.

vi with-rbac.yaml

spec:
  replicas: 2
  ....
    spec:
      serviceAccountName: nginx-ingress-serviceaccount
      hostNetwork: true
      nodeSelector:
        ingress: proxy
    ....
          # add an extra "other" port here for the tcp forwarding configured later
          ports:
          - name: http
            containerPort: 80
          - name: https
            containerPort: 443
          - name: other
            containerPort: 8888

# Import the yaml files

# kubectl apply -f namespace.yaml 
namespace "ingress-nginx" created

# kubectl apply -f .

configmap "nginx-configuration" created
deployment "default-http-backend" created
service "default-http-backend" created
namespace "ingress-nginx" configured
serviceaccount "nginx-ingress-serviceaccount" created
clusterrole "nginx-ingress-clusterrole" created
role "nginx-ingress-role" created
rolebinding "nginx-ingress-role-nisa-binding" created
clusterrolebinding "nginx-ingress-clusterrole-nisa-binding" created
configmap "tcp-services" created
configmap "udp-services" created
deployment "nginx-ingress-controller" created

# Check the pods; the two controller pods are scheduled onto 65 and 66 respectively
# kubectl get pods -n ingress-nginx -o wide

NAME                                        READY     STATUS    RESTARTS   AGE       IP             NODE
default-http-backend-76f7d74455-kxbr2       1/1       Running   0          5m        10.254.126.4   kubernetes-65
nginx-ingress-controller-8476958f94-8fh5h   1/1       Running   0          5m        172.16.1.66    kubernetes-66
nginx-ingress-controller-8476958f94-qfhhp   1/1       Running   0          5m        172.16.1.65    kubernetes-65
# Check the pods created earlier

# kubectl get pods

NAME                        READY     STATUS    RESTARTS   AGE
alpine                      1/1       Running   0          24m
nginx-dm-84f8f49555-tmqzm   1/1       Running   0          24m
nginx-dm-84f8f49555-wdk67   1/1       Running   0          24m

# Create an ingress for the nginx-dm deployment (applied as shown after the manifest)

vi nginx-ingress.yaml

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: nginx-ingress
spec:
  rules:
  - host: nginx.jicki.me
    http:
      paths:
      - backend:
          serviceName: nginx-svc
          servicePort: 80
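
# Save the manifest and import it (the file name matches the one edited above):
kubectl apply -f nginx-ingress.yaml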

# Check the ingress

# kubectl get ingress

NAME            HOSTS            ADDRESS   PORTS     AGE
nginx-ingress   nginx.jicki.me             80        6s
# Test access

# curl nginx.jicki.me

<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>

# Create an https ingress for the dashboard
# Newer dashboard versions serve ssl by default, so a tcp proxy to port 443 is used here

# Check the dashboard svc

# kubectl get svc -n kube-system

NAME                   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
kube-dns               ClusterIP   10.254.0.2      <none>        53/UDP,53/TCP   4h
kubernetes-dashboard   ClusterIP   10.254.18.143   <none>        443/TCP         57m

# Edit the tcp-services-configmap.yaml file

vi tcp-services-configmap.yaml

kind: ConfigMap
apiVersion: v1
metadata:
  name: tcp-services
  namespace: ingress-nginx
data:
  8888: "kube-system/kubernetes-dashboard:443"

# Import the file

# kubectl apply -f tcp-services-configmap.yaml 

configmap "tcp-services" created

# Check the configmap

# kubectl get configmap/tcp-services -n ingress-nginx

NAME           DATA      AGE
tcp-services   1         11m

# kubectl describe configmap/tcp-services -n ingress-nginx

Name:         tcp-services
Namespace:    ingress-nginx
Labels:       <none>
Annotations:  kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"v1","data":{"8888":"kube-system/kubernetes-dashboard:443"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"tcp-services","namesp...

Data
====
8888:
----
kube-system/kubernetes-dashboard:443
Events:  <none>

# Test access

# curl -I -k https://dashboard.jicki.me:8888

HTTP/1.1 200 OK
Accept-Ranges: bytes
Cache-Control: no-store
Content-Length: 990
Content-Type: text/html; charset=utf-8
Last-Modified: Mon, 15 Jan 2018 13:10:36 GMT
Date: Tue, 23 Jan 2018 09:12:08 GMT

# Log-in authentication

# First create a dashboard rbac super user

vi dashboard-admin-rbac.yaml

---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-admin
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-admin
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard-admin
  namespace: kube-system

# Import the file

# kubectl apply -f dashboard-admin-rbac.yaml 

serviceaccount "kubernetes-dashboard-admin" created
clusterrolebinding "kubernetes-dashboard-admin" created

# Find the super user's token secret name

# kubectl -n kube-system get secret | grep kubernetes-dashboard-admin

kubernetes-dashboard-admin-token-mnhdz   kubernetes.io/service-account-token   3         1m

# Show the token

# kubectl describe -n kube-system secret/kubernetes-dashboard-admin-token-mnhdz

Name:         kubernetes-dashboard-admin-token-mnhdz
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name=kubernetes-dashboard-admin
              kubernetes.io/service-account.uid=dc14511d-0020-11e8-b47b-44a8420b9988

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1363 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbi10b2tlbi1tbmhkeiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImRjMTQ1MTFkLTAwMjAtMTFlOC1iNDdiLTQ0YTg0MjBiOTk4OCIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTprdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbiJ9.Vg7vYBIaBICYFCX_XORvoUjkYAKdQoAuT2sy8o4y8Z6DmMaCQXijOBGCWsS40-n_qiBhlrSwLeN0RvjCOfLmcH4gUSjPBkSmc-S6SHh09ErzrHjCQSblCCZgXjyyse2w1LwWw87CiAiwHCb0Jm7r0lhm4DjhXeLpUhdXoqOltHlBoJqxzDwb9qKgtY-nsQ2Y9dhV405GeqB9RLOxSKHWx6K1lXP_0tLUGgIatJx6f-EMurFbmODJfex9mT2LTq9pblblegw9EG9j2IhfHQSnwR8hPMT3Tku-XEf3vtV-1eFqetZHRJHS23machhvSvuppFjmPAd_ID3eETBt7ncNmQ
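
# The token can also be extracted directly (a sketch; the secret name is the one found with the grep above):
kubectl -n kube-system get secret kubernetes-dashboard-admin-token-mnhdz \
  -o jsonpath='{.data.token}' | base64 -d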

# Log in to the web UI and choose token login

(dashboard login screenshot)

k8s operations
Basic maintenance

# When a host needs maintenance or an upgrade, first mark the node unschedulable:

kubectl cordon [nodeid]

# Then evict the containers running on that host to other available nodes:

kubectl drain [nodeid]

# Drain gracefully with a grace period (here 120 seconds):
kubectl drain node1.k8s.novalocal --grace-period=120

# Once the containers have been migrated, the host can be worked on (configuration upgrades, performance tuning, and so on). When maintenance is finished, mark the host schedulable again:

kubectl uncordon [nodeid]