Kubernetes v1.10.4 Installation Notes

#Reference: https://github.com/opsnull/follow-me-install-kubernetes-cluster

#Conventions:

#1. Each command is explicitly marked with the host(s) it must be run on;

#2. A highly available apiserver is not configured in this setup;

#3. The etcd cluster runs in non-TLS (unencrypted) mode;

#4. Default image names are used; images that cannot be pulled directly must be imported into the system in advance (see the image-related steps at the end of this document);

#5. Master host: 172.16.3.150; node hosts: 172.16.3.151, 172.16.3.152; host for testing node addition/removal: 172.16.3.153


#######################-----Initialize the System-----#######################

#Run the commands in this part on all three servers

#Hosts to initialize: 172.16.3.150, 172.16.3.151, 172.16.3.152

#Set the hostnames

hostnamectl set-hostname dev-test-3-150 #run on 172.16.3.150

hostnamectl set-hostname dev-test-3-151 #run on 172.16.3.151

hostnamectl set-hostname dev-test-3-152 #run on 172.16.3.152

#yum update and install required packages

yum install epel-release -y

yum update -y

yum install lrzsz vim wget net-tools ntp python-pip conntrack ipvsadm ipset jq sysstat curl iptables libseccomp conntrack-tools -y

yum install -y bash-completion

pip install --upgrade pip

#Tune kernel parameters

echo "net.ipv4.tcp_fin_timeout=30">>/etc/sysctl.conf

echo "net.ipv4.tcp_tw_recycle=1">>/etc/sysctl.conf

echo "net.ipv4.tcp_tw_reuse=1">>/etc/sysctl.conf

echo "net.ipv4.icmp_echo_ignore_broadcasts=1">>/etc/sysctl.conf

echo "net.ipv4.conf.all.rp_filter=1">>/etc/sysctl.conf

echo "net.ipv4.tcp_keepalive_time=300">>/etc/sysctl.conf

echo "net.ipv4.tcp_synack_retries=2">>/etc/sysctl.conf

echo "net.ipv4.tcp_syn_retries=2">>/etc/sysctl.conf

echo "net.ipv4.ip_forward=1">>/etc/sysctl.conf

sysctl -p
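
#Optional spot check that the parameters took effect:

sysctl net.ipv4.ip_forward net.ipv4.tcp_fin_timeout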

echo "*softnofile=65536">>/etc/security/limits.conf

echo "*hardnofile=65536">>/etc/security/limits.conf

echo "ulimit -n 65536">>/etc/profile

source /etc/profile

#NTP configuration

service ntpd start

chkconfig ntpd on

#iptables configuration

systemctl stop firewalld

systemctl disable firewalld

yum install -y iptables-services iptables-devel.x86_64 iptables.x86_64 

service iptables start 

chkconfig iptables on

iptables -F &&  iptables -X &&  iptables -F -t nat &&  iptables -X -t nat

iptables -P FORWARD ACCEPT

#Add host entries

vim /etc/hosts

#Append the following entries

172.16.3.150 dev-test-3-150

172.16.3.151 dev-test-3-151

172.16.3.152 dev-test-3-152

#Disable the swap partition

swapoff -a

#To prevent swap from being mounted automatically at boot, comment out the corresponding entry in /etc/fstab

sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab


#Passwordless SSH login to the other nodes

#Generate the key pair

ssh-keygen -t rsa

#Copy the public key to each node

ssh-copy-id root@dev-test-3-150

ssh-copy-id root@dev-test-3-151

ssh-copy-id root@dev-test-3-152

#Create the installation directories

mkdir -p /opt/k8s/bin

mkdir -p /etc/kubernetes/cert

#mkdir -p /etc/etcd/cert

mkdir -p /var/lib/etcd

#Create the environment variable file; adjust NODE_IP and NODE_NAME for each host

vim /opt/k8s/env.sh

# Encryption key required by EncryptionConfig

export ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)

# Token used for TLS bootstrapping; it can be generated with: head -c 16 /dev/urandom | od -An -t x | tr -d ' '

#export BOOTSTRAP_TOKEN="261e41eb026ccacc5d71c7698392c00e"

# Use unused network ranges for the Service and Pod CIDRs

# Service CIDR: unroutable before deployment; reachable inside the cluster via IP:Port afterwards

export SERVICE_CIDR="10.253.0.0/16"

# Pod CIDR (Cluster CIDR): unroutable before deployment; routable afterwards (guaranteed by flanneld)

export CLUSTER_CIDR="172.50.0.0/16"

# NodePort range

export NODE_PORT_RANGE="10000-30000"

# etcd cluster client endpoints

export ETCD_ENDPOINTS="http://172.16.3.150:2379,http://172.16.3.151:2379,http://172.16.3.152:2379"

# etcd prefix for the flanneld network configuration

export FLANNEL_ETCD_PREFIX="/kubernetes/network"

# kubernetes service IP (pre-allocated; usually the first IP in SERVICE_CIDR)

export CLUSTER_KUBERNETES_SVC_IP="10.253.0.1"

# Cluster DNS service IP (pre-allocated from SERVICE_CIDR)

export CLUSTER_DNS_SVC_IP="10.253.0.2"

# Cluster DNS domain

export CLUSTER_DNS_DOMAIN="cluster.local."

# Hostname of this host (change for each host)

export NODE_NAME=dev-test-3-150

# IP of this host (change for each host)

export NODE_IP=172.16.3.150

# IPs of all etcd cluster hosts

export NODE_IPS="172.16.3.150 172.16.3.151 172.16.3.152"

# Hostnames of all etcd cluster hosts

export NODE_NAMES="dev-test-3-150 dev-test-3-151 dev-test-3-152"

# etcd peer URLs (IPs and ports) used for cluster communication

export ETCD_NODES=dev-test-3-150=http://172.16.3.150:2380,dev-test-3-151=http://172.16.3.151:2380,dev-test-3-152=http://172.16.3.152:2380

# Temporary Kubernetes API server address

export KUBE_APISERVER="https://172.16.3.150:6443"

#Master API server address

export MASTER_URL="k8s-api.virtual.local"

#Master IP address

export MASTER_IP="172.16.3.150"

#Binary directory

export PATH=/opt/k8s/bin:$PATH

#Save and exit
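
#Optional sanity check: source the file and confirm the per-host variables were edited correctly

source /opt/k8s/env.sh

echo "NODE_NAME=${NODE_NAME}  NODE_IP=${NODE_IP}  ETCD_ENDPOINTS=${ETCD_ENDPOINTS}"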

#Disable SELinux

vim /etc/selinux/config

#Change SELINUX=enforcing to SELINUX=disabled

#Save and exit

#Reboot the server

init 6


#######################-----Create the CA Certificate and Key-----#######################

#The CA certificate is shared by all nodes in the cluster; only one CA is created, and it signs all certificates created afterwards.

#Install CFSSL (on all three nodes)

mkdir /root/ssl/ && cd /root/ssl

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64

wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64

wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

chmod u+x *linux-amd64

mv cfssl_linux-amd64 /usr/bin/cfssl

mv cfssljson_linux-amd64 /usr/bin/cfssljson

mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo

#Configure the CA certificate (only on 3.150)

#Create the config file

#The CA config file defines the profiles and parameters of the root certificate (usages, expiry, server auth, client auth, encryption, etc.); a specific profile is referenced later when signing other certificates.

cd /home

source /opt/k8s/env.sh

cfssl print-defaults config > config.json

cfssl print-defaults csr > csr.json

cat > ca-config.json <<EOF

{

  "signing": {

    "default": {

      "expiry": "87600h"

    },

    "profiles": {

      "kubernetes": {

        "usages": [

            "signing",

            "key encipherment",

            "server auth",

            "client auth"

        ],

        "expiry": "87600h"

      }

    }

  }

}

EOF

#signing: the certificate can be used to sign other certificates (CA=TRUE in the generated ca.pem);

#server auth: a client can use this certificate to verify certificates presented by servers;

#client auth: a server can use this certificate to verify certificates presented by clients;


#Create the certificate signing request (CSR) file

cat > ca-csr.json <<EOF

{

  "CN": "kubernetes",

  "key": {

    "algo": "rsa",

    "size": 2048

  },

  "names": [

    {

      "C": "CN",

      "ST": "BeiJing",

      "L": "BeiJing",

      "O": "k8s",

      "OU": "QRXD"

    }

  ]

}

EOF

#CN: Common Name; kube-apiserver extracts this field from the certificate as the requesting User Name, and browsers use it to verify whether a site is legitimate;

#O: Organization; kube-apiserver extracts this field as the Group the requesting user belongs to;

#kube-apiserver uses the extracted User and Group as the identity for RBAC authorization;


#Generate the CA certificate and private key (only on 3.150)

cfssl gencert -initca ca-csr.json | cfssljson -bare ca

ls ca*
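
#Optional check: inspect the generated CA certificate with cfssl-certinfo (installed above); CN should be kubernetes and the certificate should be marked as a CA

cfssl-certinfo -cert ca.pem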

scp ca*.pem ca-config.json  [email protected]:/etc/kubernetes/cert/

scp ca*.pem ca-config.json  [email protected]:/etc/kubernetes/cert/

scp ca*.pem ca-config.json  [email protected]:/etc/kubernetes/cert/


#######################-----Deploy the kubectl Command-Line Tool-----#######################

#Only needs to be done once (deploy and configure on 3.150 only)

#Download

cd /home

wget https://dl.k8s.io/v1.10.4/kubernetes-server-linux-amd64.tar.gz

tar -zxvf kubernetes-server-linux-amd64.tar.gz

scp /home/kubernetes/server/bin/kubectl [email protected]:/usr/bin/

scp /home/kubernetes/server/bin/kubectl [email protected]:/usr/bin/

scp /home/kubernetes/server/bin/kubectl [email protected]:/usr/bin/

#Create the admin certificate and private key (only on 3.150)

#kubectl talks to the apiserver's HTTPS secure port; the apiserver authenticates and authorizes the presented certificate.

#As the cluster management tool, kubectl needs the highest privileges, so an admin certificate with full permissions is created here.

cd /home

cat > admin-csr.json <<EOF

{

  "CN": "admin",

  "hosts": [],

  "key": {

    "algo": "rsa",

    "size": 2048

  },

  "names": [

    {

      "C": "CN",

      "ST": "BeiJing",

      "L": "BeiJing",

      "O": "system:masters",

      "OU": "QRXD"

    }

  ]

}

EOF

#With O set to system:masters, kube-apiserver sets the request's Group to system:masters after receiving this certificate;

#The predefined ClusterRoleBinding cluster-admin binds the Group system:masters to the Role cluster-admin, which grants access to all APIs;

#This certificate is only used by kubectl as a client certificate, so the hosts field is empty;

#Generate the certificate and private key

cfssl gencert -ca=/etc/kubernetes/cert/ca.pem \

  -ca-key=/etc/kubernetes/cert/ca-key.pem \

  -config=/etc/kubernetes/cert/ca-config.json \

  -profile=kubernetes admin-csr.json | cfssljson -bare admin

ls admin*

#Create the kubeconfig file

#The kubeconfig is kubectl's configuration file; it contains everything needed to access the apiserver: the apiserver address, the CA certificate, and the client certificate kubectl uses;

source /opt/k8s/env.sh

# Set cluster parameters

kubectl config set-cluster kubernetes \

  --certificate-authority=/etc/kubernetes/cert/ca.pem \

  --embed-certs=true \

  --server=${KUBE_APISERVER} \

  --kubeconfig=kubectl.kubeconfig

# Set client authentication parameters

kubectl config set-credentials admin \

  --client-certificate=admin.pem \

  --client-key=admin-key.pem \

  --embed-certs=true \

  --kubeconfig=kubectl.kubeconfig

# Set context parameters

kubectl config set-context kubernetes \

  --cluster=kubernetes \

  --user=admin \

  --kubeconfig=kubectl.kubeconfig

# Set the default context

kubectl config use-context kubernetes --kubeconfig=kubectl.kubeconfig

#--certificate-authority: the root certificate used to verify the kube-apiserver certificate;

#--client-certificate, --client-key: the admin certificate and key just generated, used when connecting to kube-apiserver;

#--embed-certs=true: embed the contents of ca.pem and admin.pem into the generated kubectl.kubeconfig (without it, only the file paths are written);

mkdir /root/.kube

scp kubectl.kubeconfig [email protected]:/root/.kube/config

scp kubectl.kubeconfig [email protected]:/root/.kube/config

scp kubectl.kubeconfig [email protected]:/root/.kube/config

#scp kubectl.kubeconfig [email protected]:/root/.kube/config

#Configure kubectl tab completion

source /usr/share/bash-completion/bash_completion

source <(kubectl completion bash)

#echo "source <(kubectl completion bash)" >> ~/.bashrc

#

#######################-----Deploy the etcd Cluster (non-TLS)-----#######################

#Run on all three nodes

cd /home

wget https://github.com/coreos/etcd/releases/download/v3.1.19/etcd-v3.1.19-linux-amd64.tar.gz

tar -zxvf etcd-v3.1.19-linux-amd64.tar.gz

cp etcd-v3.1.19-linux-amd64/etcd* /usr/local/bin

cd /etc/systemd/system/

source /opt/k8s/env.sh

cat > etcd.service <<EOF

[Unit]

Description=Etcd Server

After=network.target

After=network-online.target

Wants=network-online.target

Documentation=https://github.com/coreos


[Service]

Type=notify

WorkingDirectory=/var/lib/etcd/

ExecStart=/usr/local/bin/etcd \\

  --name=${NODE_NAME} \\

  --initial-advertise-peer-urls=http://${NODE_IP}:2380 \\

  --listen-peer-urls=http://${NODE_IP}:2380 \\

  --listen-client-urls=http://${NODE_IP}:2379,http://127.0.0.1:2379 \\

  --advertise-client-urls=http://${NODE_IP}:2379 \\

  --initial-cluster-token=etcd-cluster-0 \\

  --initial-cluster=${ETCD_NODES} \\

  --initial-cluster-state=new \\

  --data-dir=/var/lib/etcd

Restart=on-failure

RestartSec=5

LimitNOFILE=65536


[Install]

WantedBy=multi-user.target

EOF

#If port access errors are reported, open ports 2379 and 2380 in the firewall

#iptables -A INPUT -p tcp -m state --state NEW -m tcp --dport 2379 -j ACCEPT

#iptables -A INPUT -p tcp -m state --state NEW -m tcp --dport 2380 -j ACCEPT

#service iptables save

systemctl daemon-reload

systemctl enable etcd

systemctl restart etcd

etcdctl --endpoints=${ETCD_ENDPOINTS} cluster-health

#member 224d21bc6c7cdd08 is healthy: got healthy result from http://172.16.3.151:2379

#member 254a600a75e4f39f is healthy: got healthy result from http://172.16.3.152:2379

#member b6f4f1b2c016095e is healthy: got healthy result from http://172.16.3.150:2379

#cluster is healthy
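
#Optional extra check: list the etcd members

etcdctl --endpoints=${ETCD_ENDPOINTS} member list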


#######################-----Deploy the flannel Network-----#######################

#Kubernetes requires that all nodes in the cluster (including the master) can reach one another over the Pod network. flannel uses VXLAN to create an interconnected Pod network across the nodes.

#When flanneld starts for the first time, it reads the Pod network configuration from etcd, allocates an unused /24 subnet for the node, and creates the flannel.1 interface (the name may differ, e.g. flannel1).

#flannel writes the allocated Pod subnet into /run/flannel/docker; docker later uses the environment variables in that file to configure the docker0 bridge.

#Run on all three servers

cd /home

wget https://github.com/coreos/flannel/releases/download/v0.9.0/flannel-v0.9.0-linux-amd64.tar.gz

tar -zxvf flannel-v0.9.0-linux-amd64.tar.gz

cp flanneld /usr/local/bin/

cp mk-docker-opts.sh /usr/local/bin/

mkdir -p /etc/flanneld/cert


#Create the flannel certificate and private key (run on 3.150)

cd /home

cat > flanneld-csr.json <<EOF

{

  "CN": "flanneld",

  "hosts": [],

  "key": {

    "algo": "rsa",

    "size": 2048

  },

  "names": [

    {

      "C": "CN",

      "ST": "BeiJing",

      "L": "BeiJing",

      "O": "k8s",

      "OU": "QRXD"

    }

  ]

}

EOF

#This certificate is only used by flanneld as a client certificate, so the hosts field is empty;

cfssl gencert -ca=/etc/kubernetes/cert/ca.pem \

  -ca-key=/etc/kubernetes/cert/ca-key.pem \

  -config=/etc/kubernetes/cert/ca-config.json \

  -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld

ls flanneld*pem

scp flanneld*pem [email protected]:/etc/flanneld/cert/

scp flanneld*pem [email protected]:/etc/flanneld/cert/

scp flanneld*pem [email protected]:/etc/flanneld/cert/

#Write the cluster Pod network configuration into etcd (run on 3.150)

source /opt/k8s/env.sh

etcdctl --endpoints=${ETCD_ENDPOINTS} set ${FLANNEL_ETCD_PREFIX}/config '{"Network":"'${CLUSTER_CIDR}'", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'

#Verify

etcdctl --endpoints=${ETCD_ENDPOINTS} get ${FLANNEL_ETCD_PREFIX}/config

#Create the flanneld systemd unit file (run on all 3 nodes)

source /opt/k8s/env.sh

cd /etc/systemd/system/

cat > flanneld.service << EOF

[Unit]

Description=Flanneld overlay address etcd agent

After=network.target

After=network-online.target

Wants=network-online.target

After=etcd.service

Before=docker.service


[Service]

Type=notify

ExecStart=/usr/local/bin/flanneld \\

  -etcd-cafile=/etc/kubernetes/cert/ca.pem \\

  -etcd-certfile=/etc/flanneld/cert/flanneld.pem \\

  -etcd-keyfile=/etc/flanneld/cert/flanneld-key.pem \\

  -etcd-endpoints=${ETCD_ENDPOINTS} \\

  -etcd-prefix=${FLANNEL_ETCD_PREFIX}

ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker

Restart=on-failure


[Install]

WantedBy=multi-user.target

RequiredBy=docker.service

EOF

#Start flanneld

systemctl daemon-reload

systemctl enable flanneld

systemctl start flanneld

#Check

etcdctl --endpoints=${ETCD_ENDPOINTS} ls ${FLANNEL_ETCD_PREFIX}/subnets
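
#Optional checks: the docker options file written by mk-docker-opts.sh should contain the bip of this node's subnet, and the flannel.1 interface should be up

cat /run/flannel/docker

ip addr show flannel.1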


#######################-----Deploy the Master Node-----#######################

#Deploy on 3.150

cd /home

#wget https://dl.k8s.io/v1.10.4/kubernetes-server-linux-amd64.tar.gz

#tar -zxvf kubernetes-server-linux-amd64.tar.gz

cd kubernetes

tar -xzvf  kubernetes-src.tar.gz

cp server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler} /usr/local/bin/ 

#Deploy the kube-apiserver component

#Create the kubernetes certificate and private key

cd /home

source /opt/k8s/env.sh

cat > kubernetes-csr.json <<EOF

{

  "CN": "kubernetes",

  "hosts": [

    "127.0.0.1",

    "172.16.3.150",

    "172.16.3.151",

    "172.16.3.152",

    "172.16.0.0/16",

    "${CLUSTER_KUBERNETES_SVC_IP}",

    "kubernetes",

    "kubernetes.default",

    "kubernetes.default.svc",

    "kubernetes.default.svc.cluster",

    "kubernetes.default.svc.cluster.local"

  ],

  "key": {

    "algo": "rsa",

    "size": 2048

  },

  "names": [

    {

      "C": "CN",

      "ST": "BeiJing",

      "L": "BeiJing",

      "O": "k8s",

      "OU": "QRXD"

    }

  ]

}

EOF

cfssl gencert -ca=/etc/kubernetes/cert/ca.pem \

  -ca-key=/etc/kubernetes/cert/ca-key.pem \

  -config=/etc/kubernetes/cert/ca-config.json \

  -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes

ls kubernetes*pem

cp kubernetes*pem /etc/kubernetes/cert

#Create the encryption config file


# cat > token.csv <<EOF

# ${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"

# EOF

# mv token.csv /etc/kubernetes/

source /opt/k8s/env.sh

cat > encryption-config.yaml <<EOF

kind: EncryptionConfig

apiVersion: v1

resources:

  - resources:

      - secrets

    providers:

      - aescbc:

          keys:

            - name: key1

              secret: ${ENCRYPTION_KEY}

      - identity: {}

EOF

cp encryption-config.yaml /etc/kubernetes/encryption-config.yaml 

#Create the kube-apiserver systemd unit file

cd /etc/systemd/system

mkdir /var/log/kube

source /opt/k8s/env.sh

cat  > kube-apiserver.service <<EOF

[Unit]

Description=Kubernetes API Server

Documentation=https://github.com/GoogleCloudPlatform/kubernetes

After=network.target


[Service]

ExecStart=/usr/local/bin/kube-apiserver \\

  --enable-admission-plugins=Initializers,NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \\

  --anonymous-auth=false \\

  --experimental-encryption-provider-config=/etc/kubernetes/encryption-config.yaml \\

  --insecure-bind-address=${MASTER_IP} \\

  --advertise-address=${NODE_IP} \\

  --bind-address=${MASTER_IP} \\

  --authorization-mode=Node,RBAC \\

  --runtime-config=api/all \\

  --enable-bootstrap-token-auth \\

  --service-cluster-ip-range=${SERVICE_CIDR} \\

  --service-node-port-range=${NODE_PORT_RANGE} \\

  --tls-cert-file=/etc/kubernetes/cert/kubernetes.pem \\

  --tls-private-key-file=/etc/kubernetes/cert/kubernetes-key.pem \\

  --client-ca-file=/etc/kubernetes/cert/ca.pem \\

  --kubelet-client-certificate=/etc/kubernetes/cert/kubernetes.pem \\

  --kubelet-client-key=/etc/kubernetes/cert/kubernetes-key.pem \\

  --service-account-key-file=/etc/kubernetes/cert/ca-key.pem \\

  --etcd-servers=${ETCD_ENDPOINTS} \\

  --enable-swagger-ui=true \\

  --allow-privileged=true \\

  --apiserver-count=3 \\

  --audit-log-maxage=30 \\

  --audit-log-maxbackup=3 \\

  --audit-log-maxsize=100 \\

  --audit-log-path=/var/log/kube-apiserver-audit.log \\

  --event-ttl=1h \\

  --alsologtostderr=true \\

  --logtostderr=false \\

  --log-dir=/var/log/kube \\

  --v=2

Restart=on-failure

RestartSec=5

Type=notify

LimitNOFILE=65536


[Install]

WantedBy=multi-user.target

EOF


systemctl daemon-reload

systemctl start kube-apiserver

systemctl enable kube-apiserver

systemctl status kube-apiserver


#Check and test

ETCDCTL_API=3 etcdctl --endpoints=${ETCD_ENDPOINTS} get /registry/ --prefix --keys-only

kubectl cluster-info

#Kubernetes master is running at https://172.16.3.150:6443

#To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

kubectl get all --all-namespaces

#NAMESPACE   NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE

#default     service/kubernetes   ClusterIP   10.253.0.1   <none>        443/TCP   35m


kubectl get componentstatuses

#NAME                 STATUS      MESSAGE                                                                                        ERROR

#controller-manager   Unhealthy   Get http://127.0.0.1:10252/healthz: dial tcp 127.0.0.1:10252: getsockopt: connection refused

#scheduler            Unhealthy   Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: getsockopt: connection refused

#etcd-1               Healthy     {"health":"true"}

#etcd-0               Healthy     {"health":"true"}

#etcd-2               Healthy     {"health":"true"}

netstat -lnpt|grep kube
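
#Optional health check (a sketch, assuming the insecure port is left at its default of 8080 as in the unit file above):

curl http://172.16.3.150:8080/healthz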

#Grant the kubernetes certificate permission to access the kubelet API

kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes

#Deploy kube-controller-manager

#Create the kube-controller-manager systemd unit file

source /opt/k8s/env.sh

cd /etc/systemd/system


cat > kube-controller-manager.service <<EOF

[Unit]

Description=Kubernetes Controller Manager

Documentation=https://github.com/GoogleCloudPlatform/kubernetes


[Service]

ExecStart=/usr/local/bin/kube-controller-manager \\

  --address=127.0.0.1 \\

  --master=http://${MASTER_IP}:8080 \\

  --allocate-node-cidrs=true \\

  --service-cluster-ip-range=${SERVICE_CIDR} \\

  --cluster-cidr=${CLUSTER_CIDR} \\

  --cluster-name=kubernetes \\

  --cluster-signing-cert-file=/etc/kubernetes/cert/ca.pem \\

  --cluster-signing-key-file=/etc/kubernetes/cert/ca-key.pem \\

  --service-account-private-key-file=/etc/kubernetes/cert/ca-key.pem \\

  --root-ca-file=/etc/kubernetes/cert/ca.pem \\

  --feature-gates=RotateKubeletServerCertificate=true \\

  --controllers=*,bootstrapsigner,tokencleaner \\

  --use-service-account-credentials=true \\

  --leader-elect=true \\

  --alsologtostderr=true \\

  --logtostderr=false \\

  --log-dir=/var/log/kube \\

  --v=2


Restart=on-failure

RestartSec=5


[Install]

WantedBy=multi-user.target

EOF



systemctl daemon-reload

systemctl enable kube-controller-manager

systemctl restart kube-controller-manager

#Check

systemctl status kube-controller-manager

netstat -lnpt|grep kube-controll

#curl -s http://127.0.0.1:10252/metrics |head
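
#Optional check (a sketch, assuming the default leader-election resource name): see which instance currently holds the controller-manager lock

kubectl get endpoints kube-controller-manager -n kube-system -o yaml | grep holderIdentity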


#Deploy kube-scheduler

#kube-scheduler exposes Prometheus-format metrics on port 10251 (plain HTTP);

#No dedicated certificate is created for it here: in this setup the scheduler talks to the apiserver over the local insecure port.

cd /home


#Create the kube-scheduler systemd unit file

source /opt/k8s/env.sh

cd /etc/systemd/system

cat > kube-scheduler.service <<EOF

[Unit]

Description=Kubernetes Scheduler

Documentation=https://github.com/GoogleCloudPlatform/kubernetes


[Service]

ExecStart=/usr/local/bin/kube-scheduler \\

  --address=127.0.0.1 \\

  --master=http://${MASTER_IP}:8080 \\

  --leader-elect=true \\

  --v=2

Restart=on-failure

RestartSec=5


[Install]

WantedBy=multi-user.target

EOF


systemctl daemon-reload

systemctl enable kube-scheduler

systemctl restart kube-scheduler

#Check

systemctl status kube-scheduler

netstat -lnpt|grep kube-sche

#curl -s http://127.0.0.1:10251/metrics |head


#######################-----Deploy the Worker Nodes-----#######################

#Run on all three servers

#Install Docker (via yum)

#Remove old versions

yum remove docker \

                  docker-client \

                  docker-client-latest \

                  docker-common \

                  docker-latest \

                  docker-latest-logrotate \

                  docker-logrotate \

                  docker-selinux \

                  docker-engine-selinux \

                  docker-engine

yum install -y yum-utils device-mapper-persistent-data lvm2

yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

yum-config-manager --enable docker-ce-stable

yum-config-manager --disable docker-ce-test

yum-config-manager --disable docker-ce-edge

yum install docker-ce -y

rm -rf /usr/lib/systemd/system/docker.service

vim /usr/lib/systemd/system/docker.service

#Enter the following content

#cat > docker.service <<"EOF"

[Unit]

Description=Docker Application Container Engine

Documentation=http://docs.docker.io


[Service]

EnvironmentFile=-/run/flannel/docker

ExecStart=/usr/bin/dockerd --log-level=info $DOCKER_NETWORK_OPTIONS

ExecReload=/bin/kill -s HUP $MAINPID

Restart=on-failure

RestartSec=5

LimitNOFILE=infinity

LimitNPROC=infinity

LimitCORE=infinity

Delegate=yes

KillMode=process


[Install]

WantedBy=multi-user.target

#EOF

systemctl daemon-reload

systemctl start docker.service

systemctl enable docker.service

#Check

systemctl status docker.service

ip addr show flannel.1 && ip addr show docker0
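
#Optional check: the kubelet configured later uses cgroupDriver "cgroupfs"; docker should report the same cgroup driver, otherwise kubelet will fail to start

docker info 2>/dev/null | grep -i "cgroup driver"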


#Deploy the kubelet component

#Distribute the kubelet binaries (run on 3.150 only)

cd /home/kubernetes

scp /home/kubernetes/server/bin/{kube-proxy,kubelet,kubeadm} [email protected]:/usr/local/bin/

scp /home/kubernetes/server/bin/{kube-proxy,kubelet,kubeadm} [email protected]:/usr/local/bin/

scp /home/kubernetes/server/bin/{kube-proxy,kubelet,kubeadm} [email protected]:/usr/local/bin/

#Create the kubelet bootstrap kubeconfig files (run on 3.150 only)

source /opt/k8s/env.sh

for node_name in ${NODE_NAMES[@]}

  do

    echo ">>> ${node_name}"


    # Create a token

    export BOOTSTRAP_TOKEN=$(kubeadm token create \

      --description kubelet-bootstrap-token \

      --groups system:bootstrappers:${node_name} \

      --kubeconfig ~/.kube/config)


    # Set cluster parameters

    kubectl config set-cluster kubernetes \

      --certificate-authority=/etc/kubernetes/cert/ca.pem \

      --embed-certs=true \

      --server=${KUBE_APISERVER} \

      --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig


    # Set client authentication parameters

    kubectl config set-credentials kubelet-bootstrap \

      --token=${BOOTSTRAP_TOKEN} \

      --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig


    # Set context parameters

    kubectl config set-context default \

      --cluster=kubernetes \

      --user=kubelet-bootstrap \

      --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig


    # Set the default context

    kubectl config use-context default --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig

done

kubeadm token list --kubeconfig ~/.kube/config

#Create a clusterrolebinding that binds the group system:bootstrappers to the clusterrole system:node-bootstrapper (run on 3.150):

kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers

#Distribute the bootstrap kubeconfig files to all worker nodes (run on 3.150 only)

scp kubelet-bootstrap-dev-test-3-150.kubeconfig [email protected]:/etc/kubernetes/kubelet-bootstrap.kubeconfig

scp kubelet-bootstrap-dev-test-3-151.kubeconfig [email protected]:/etc/kubernetes/kubelet-bootstrap.kubeconfig

scp kubelet-bootstrap-dev-test-3-152.kubeconfig [email protected]:/etc/kubernetes/kubelet-bootstrap.kubeconfig

#Create the kubelet configuration file (run on all three servers)

source /opt/k8s/env.sh

cd /etc/kubernetes

cat > kubelet.config.json <<EOF

{

  "kind": "KubeletConfiguration",

  "apiVersion": "kubelet.config.k8s.io/v1beta1",

  "authentication": {

    "x509": {

      "clientCAFile": "/etc/kubernetes/cert/ca.pem"

    },

    "webhook": {

      "enabled": true,

      "cacheTTL": "2m0s"

    },

    "anonymous": {

      "enabled": false

    }

  },

  "authorization": {

    "mode": "Webhook",

    "webhook": {

      "cacheAuthorizedTTL": "5m0s",

      "cacheUnauthorizedTTL": "30s"

    }

  },

  "address": "${NODE_IP}",

  "port": 10250,

  "readOnlyPort": 0,

  "cgroupDriver": "cgroupfs",

  "hairpinMode": "promiscuous-bridge",

  "serializeImagePulls": false,

  "featureGates": {

    "RotateKubeletClientCertificate": true,

    "RotateKubeletServerCertificate": true

  },

  "clusterDomain": "${CLUSTER_DNS_DOMAIN}",

  "clusterDNS": ["${CLUSTER_DNS_SVC_IP}"]

}

EOF

#Create the kubelet systemd unit file (run on all three servers)

source /opt/k8s/env.sh

mkdir /var/lib/kubelet

cd /etc/systemd/system

cat > kubelet.service <<EOF

[Unit]

Description=Kubernetes Kubelet

Documentation=https://github.com/GoogleCloudPlatform/kubernetes

After=docker.service

Requires=docker.service


[Service]

WorkingDirectory=/var/lib/kubelet

ExecStart=/usr/local/bin/kubelet \\

  --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \\

  --cert-dir=/etc/kubernetes/cert \\

  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\

  --config=/etc/kubernetes/kubelet.config.json \\

  --hostname-override=${NODE_NAME} \\

  --pod-infra-container-image=gcr.io/google_containers/pause-amd64:3.0 \\

  --allow-privileged=true \\

  --alsologtostderr=true \\

  --logtostderr=false \\

  --log-dir=/var/log/kube \\

  --v=2

Restart=on-failure

RestartSec=5


[Install]

WantedBy=multi-user.target

EOF

#If kubelet fails to start on the other nodes, check the firewall

iptables -F &&  iptables -X &&  iptables -F -t nat &&  iptables -X -t nat

iptables -P FORWARD ACCEPT

systemctl daemon-reload

systemctl start kubelet.service

systemctl enable kubelet.service

#Check

systemctl status kubelet.service

#Approve the kubelet CSR requests (run on 3.150)

kubectl get csr

#NAME                                                   AGE       REQUESTOR                 CONDITION

#node-csr-LY1jhbfkcVBthGoveaEhUycp8yg_7D4atQkmQSz3N3Y   13m       system:bootstrap:faum34   Pending

#node-csr-WY5RcykyS66oKc_wHydYiK6QkAFDluCIP856QB_0QRM   6m        system:bootstrap:aedkze   Pending

#node-csr-bwIGn4TJECVhWUIU5j0ckIAtzR5Qkgt8ZQzuHpg-NcA   6m        system:bootstrap:esigku   Pending

#Manually approve the CSR requests (run on 3.150)

for i in `kubectl get csr | grep -v NAME | awk '{print $1}'`; do kubectl certificate approve ${i}; done

#View the nodes

kubectl get nodes

#NAME             STATUS    ROLES     AGE       VERSION

#dev-test-3-150   Ready     <none>    27s       v1.10.4

#dev-test-3-151   Ready     <none>    27s       v1.10.4

#dev-test-3-152   Ready     <none>    25s       v1.10.4

#Configure automatic CSR approval (run on 3.150)

cd /home

source /opt/k8s/env.sh

cat > csr-crb.yaml <<EOF

 # Approve all CSRs for the group "system:bootstrappers"

 kind: ClusterRoleBinding

 apiVersion: rbac.authorization.k8s.io/v1

 metadata:

   name: auto-approve-csrs-for-group

 subjects:

 - kind: Group

   name: system:bootstrappers

   apiGroup: rbac.authorization.k8s.io

 roleRef:

   kind: ClusterRole

   name: system:certificates.k8s.io:certificatesigningrequests:nodeclient

   apiGroup: rbac.authorization.k8s.io

---

 # To let a node of the group "system:nodes" renew its own credentials

 kind: ClusterRoleBinding

 apiVersion: rbac.authorization.k8s.io/v1

 metadata:

   name: node-client-cert-renewal

 subjects:

 - kind: Group

   name: system:nodes

   apiGroup: rbac.authorization.k8s.io

 roleRef:

   kind: ClusterRole

   name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient

   apiGroup: rbac.authorization.k8s.io

---

# A ClusterRole which instructs the CSR approver to approve a node requesting a

# serving cert matching its client cert.

kind: ClusterRole

apiVersion: rbac.authorization.k8s.io/v1

metadata:

  name: approve-node-server-renewal-csr

rules:

- apiGroups: ["certificates.k8s.io"]

  resources: ["certificatesigningrequests/selfnodeserver"]

  verbs: ["create"]

---

 # To let a node of the group "system:nodes" renew its own server credentials

 kind: ClusterRoleBinding

 apiVersion: rbac.authorization.k8s.io/v1

 metadata:

   name: node-server-cert-renewal

 subjects:

 - kind: Group

   name: system:nodes

   apiGroup: rbac.authorization.k8s.io

 roleRef:

   kind: ClusterRole

   name: approve-node-server-renewal-csr

   apiGroup: rbac.authorization.k8s.io

EOF

#auto-approve-csrs-for-group: automatically approves a node's first CSR; note that for the first CSR the requesting Group is system:bootstrappers;

#node-client-cert-renewal: automatically approves renewal of a node's expiring client certificates; the auto-generated certificates have Group system:nodes;

#node-server-cert-renewal: automatically approves renewal of a node's expiring server certificates; the auto-generated certificates have Group system:nodes;

kubectl apply -f csr-crb.yaml
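
#Optional check: confirm the bindings and role were created

kubectl get clusterrolebinding auto-approve-csrs-for-group node-client-cert-renewal node-server-cert-renewal

kubectl get clusterrole approve-node-server-renewal-csr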

#Deploy kube-proxy (run on 3.150)

#Create the kube-proxy certificate

#Create the certificate signing request:

cd /home

cat > kube-proxy-csr.json <<EOF

{

  "CN": "system:kube-proxy",

  "key": {

    "algo": "rsa",

    "size": 2048

  },

  "names": [

    {

      "C": "CN",

      "ST": "BeiJing",

      "L": "BeiJing",

      "O": "k8s",

      "OU": "QRXD"

    }

  ]

}

EOF

cfssl gencert -ca=/etc/kubernetes/cert/ca.pem \

  -ca-key=/etc/kubernetes/cert/ca-key.pem \

  -config=/etc/kubernetes/cert/ca-config.json \

  -profile=kubernetes  kube-proxy-csr.json | cfssljson -bare kube-proxy

source /opt/k8s/env.sh

kubectl config set-cluster kubernetes \

  --certificate-authority=/etc/kubernetes/cert/ca.pem \

  --embed-certs=true \

  --server=${KUBE_APISERVER} \

  --kubeconfig=kube-proxy.kubeconfig


kubectl config set-credentials kube-proxy \

  --client-certificate=kube-proxy.pem \

  --client-key=kube-proxy-key.pem \

  --embed-certs=true \

  --kubeconfig=kube-proxy.kubeconfig


kubectl config set-context default \

  --cluster=kubernetes \

  --user=kube-proxy \

  --kubeconfig=kube-proxy.kubeconfig


kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

#Distribute the kubeconfig file

scp kube-proxy.kubeconfig [email protected]:/etc/kubernetes/

scp kube-proxy.kubeconfig [email protected]:/etc/kubernetes/

scp kube-proxy.kubeconfig [email protected]:/etc/kubernetes/

#Create the kube-proxy config file (run on all three)

cd /etc/kubernetes

source /opt/k8s/env.sh

cat >kube-proxy.config.yaml <<EOF

apiVersion: kubeproxy.config.k8s.io/v1alpha1

bindAddress: ${NODE_IP}

clientConnection:

  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig

clusterCIDR: ${CLUSTER_CIDR}

healthzBindAddress: ${NODE_IP}:10256

hostnameOverride: ${NODE_NAME}

kind: KubeProxyConfiguration

metricsBindAddress: ${NODE_IP}:10249

mode: "ipvs"

EOF

#Create the kube-proxy systemd unit file (run on all three)

mkdir /var/lib/kube-proxy

source /opt/k8s/env.sh

cd /etc/systemd/system 

cat > kube-proxy.service <<EOF

[Unit]

Description=Kubernetes Kube-Proxy Server

Documentation=https://github.com/GoogleCloudPlatform/kubernetes

After=network.target


[Service]

WorkingDirectory=/var/lib/kube-proxy

ExecStart=/usr/local/bin/kube-proxy \\

  --config=/etc/kubernetes/kube-proxy.config.yaml \\

  --alsologtostderr=true \\

  --hostname-override=${NODE_NAME} \\

  --logtostderr=false \\

  --log-dir=/var/log/kube \\

  --v=2

Restart=on-failure

RestartSec=5

LimitNOFILE=65536


[Install]

WantedBy=multi-user.target

EOF

systemctl daemon-reload

systemctl enable kube-proxy

systemctl restart kube-proxy

#Check

systemctl status kube-proxy

netstat -lnpt|grep kube-proxy

#View the IPVS forwarding rules

source /opt/k8s/env.sh

for node_ip in ${NODE_IPS[@]}

  do

    echo ">>> ${node_ip}"

    ssh root@${node_ip} "/usr/sbin/ipvsadm -ln"

done

#######################-----Verify Cluster Functionality-----#######################

kubectl get nodes

#Test manifest

cd /home

mkdir yaml

cd yaml

cat > nginx-ds.yml <<EOF

apiVersion: v1

kind: Service

metadata:

  name: nginx-ds

  labels:

    app: nginx-ds

spec:

  type: NodePort

  selector:

    app: nginx-ds

  ports:

  - name: http

    port: 80

    targetPort: 80

---

apiVersion: extensions/v1beta1

kind: DaemonSet

metadata:

  name: nginx-ds

  labels:

    addonmanager.kubernetes.io/mode: Reconcile

spec:

  template:

    metadata:

      labels:

        app: nginx-ds

    spec:

      containers:

      - name: my-nginx

        image: nginx:1.7.9

        ports:

        - containerPort: 80

EOF


kubectl apply -f nginx-ds.yml -n default


kubectl get pod
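
#Optional check: read the NodePort assigned to the nginx-ds service and curl it from any node (the port below is a placeholder)

kubectl get svc nginx-ds

#curl http://172.16.3.150:<nodePort>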


#######################-----Deploy Cluster Add-ons-----#######################

#-------------------------------

#Deploy the DNS add-on (CoreDNS)

mkdir /home/yaml/coredns

cp /home/kubernetes/cluster/addons/dns/coredns.yaml.base /home/yaml/coredns/coredns.yaml

#Make the following changes:

#line 61

__PILLAR__DNS__DOMAIN__  ==> cluster.local.

#line 153

__PILLAR__DNS__SERVER__  ==> 10.253.0.2

#Create CoreDNS

kubectl apply -f /home/yaml/coredns/coredns.yaml -n kube-system

#Check

kubectl get all -n kube-system
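
#Optional DNS check (a sketch; assumes the busybox:1.28 image can be pulled): service names should now resolve through CoreDNS

kubectl run dns-test --rm -it --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default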

#-------------------------------

#Deploy the DNS add-on (kube-dns / SkyDNS)

mkdir /home/kube-system/kube-dns

cp  /home/kubernetes/cluster/addons/dns/



#-------------------------------

#Deploy the dashboard add-on

mkdir /home/yaml/dashboard

#YAML file sources:

#Official v1.10.0 version

#wget https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml

#Modified v1.8.3 version

#wget https://github.com/gh-Devin/kubernetes-dashboard/blob/master/kubernetes-dashboard.yaml

#The image name must also be changed to match your own image, which needs to be pulled in advance

#kubectl apply -f kubernetes-dashboard.yaml

cd /home/kubernetes/cluster/addons/dashboard

cp dashboard-* /home/yaml/dashboard/

cd /home/yaml/dashboard

vim dashboard-service.yaml

#Append at the end, aligned with the ports section

type: NodePort

#Create the dashboard

kubectl apply -f .

#Check the pods and services

kubectl get pod -n kube-system

kubectl get svc -n kube-system

kubectl cluster-info

#Kubernetes master is running at https://172.16.3.150:6443

#CoreDNS is running at https://172.16.3.150:6443/api/v1/namespaces/kube-system/services/coredns:dns/proxy

#kubernetes-dashboard is running at https://172.16.3.150:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy

#Note: the dashboard can only be accessed through kube-apiserver once "kubernetes-dashboard is running ..." appears in cluster-info


#Create the token and kubeconfig file for logging in to the Dashboard

cd /home

kubectl create sa dashboard-admin -n kube-system

kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin

ADMIN_SECRET=$(kubectl get secrets -n kube-system | grep dashboard-admin | awk '{print $1}')

DASHBOARD_LOGIN_TOKEN=$(kubectl describe secret -n kube-system ${ADMIN_SECRET} | grep -E '^token' | awk '{print $2}')

echo ${DASHBOARD_LOGIN_TOKEN}

#The printed token can be used to log in to the Dashboard

#Create a kubeconfig file that uses the token

source /opt/k8s/env.sh

cd /home

kubectl config set-cluster kubernetes \

  --certificate-authority=/etc/kubernetes/cert/ca.pem \

  --embed-certs=true \

  --server=${KUBE_APISERVER} \

  --kubeconfig=dashboard.kubeconfig


# Set client authentication parameters using the token created above

kubectl config set-credentials dashboard_user \

  --token=${DASHBOARD_LOGIN_TOKEN} \

  --kubeconfig=dashboard.kubeconfig


# Set context parameters

kubectl config set-context default \

  --cluster=kubernetes \

  --user=dashboard_user \

  --kubeconfig=dashboard.kubeconfig


# Set the default context

kubectl config use-context default --kubeconfig=dashboard.kubeconfig


#Export dashboard.kubeconfig and upload it on the login page


#Allow browser access (3.150)

cd /home

openssl pkcs12 -export -out admin.pfx -inkey admin-key.pem -in admin.pem -certfile ca.pem

#You will be prompted for an export password for the certificate

#Export admin.pfx

#Import admin.pfx into the local certificate store (on Windows, via the mmc snap-in)

#Open the following URL in the browser

https://172.16.3.150:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy

#Access via NodePort

kubectl get svc -n kube-system

#Find the corresponding NodePort

#Access it in a browser (use Firefox; Chrome has issues with the self-signed certificate)

https://172.16.3.150:13054

#At login, use the exported dashboard.kubeconfig to authenticate


#-------------------------------

#Deploy the heapster add-on

#Download the required images in advance

cd /home/

wget https://github.com/kubernetes/heapster/archive/v1.5.3.tar.gz

tar -xzvf v1.5.3.tar.gz

mkdir /home/yaml/heapster

cp /home/heapster-1.5.3/deploy/kube-config/influxdb/* /home/yaml/heapster/

#cp /home/heapster-1.5.3/deploy/kube-config/rbac/* /home/yaml/heapster/

cd /home/yaml/heapster

vim grafana.yaml

#Uncomment line 67; keep it aligned with the ports section

type: NodePort


vim heapster.yaml

#Change line 27 to the following

- --source=kubernetes:https://kubernetes.default?kubeletHttps=true&kubeletPort=10250

#influxdb.yaml does not need changes

cat > heapster-rbac.yaml <<EOF

kind: ClusterRoleBinding

apiVersion: rbac.authorization.k8s.io/v1beta1

metadata:

  name: heapster-kubelet-api

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: ClusterRole

  #name: cluster-admin

  name: system:kubelet-api-admin

subjects:

- kind: ServiceAccount

  name: heapster

  namespace: kube-system

EOF

#Bind the serviceAccount kube-system:heapster to the ClusterRole system:kubelet-api-admin, granting it permission to call the kubelet API;

kubectl apply -f .

#Can be accessed via the NodePort

kubectl get svc -n kube-system|grep -E 'monitoring|heapster'

#Error message: 1 namespace based enricher.go:75] Namespace doesn't exist: default

#Insufficient permissions; as an RBAC workaround, the binding can use the cluster-admin role instead


#-------------------------------

#Install Prometheus

#Prometheus discovers the monitoring endpoints already exposed inside the cluster via service discovery and then actively scrapes all metrics.

#With this architecture, only a single Prometheus instance needs to be deployed into the Kubernetes cluster;

#it queries the apiserver for cluster state and then collects Pod metrics from every kubelet that already exposes Prometheus metrics. To also collect host-level metrics,

#run the companion node-exporter on every server via a DaemonSet, and Prometheus will automatically pick up this new data as well.

#Detailed configuration notes are given as comments inside the files below

##node-exporter.yaml

cat >node-exporter.yaml <<EOF

---

apiVersion: extensions/v1beta1

kind: DaemonSet

metadata:

  name: node-exporter

  #Runs in kube-system; another namespace can be used to match your own setup

  namespace: kube-system

  labels:

    k8s-app: node-exporter

spec:

  template:

    metadata:

      labels:

        k8s-app: node-exporter

    spec:

      containers:

      #This image can be pulled directly without a proxy

      - image: prom/node-exporter

        name: node-exporter

        ports:

        - containerPort: 9100

          protocol: TCP

          name: http

---

apiVersion: v1

kind: Service

metadata:

  labels:

    k8s-app: node-exporter

  name: node-exporter

  namespace: kube-system

spec:

  ports:

  - name: http

    port: 9100

    nodePort: 21672

    protocol: TCP

  type: NodePort

  selector:

    k8s-app: node-exporter

EOF

##prometheus-configmap.yaml

cat >prometheus-configmap.yaml <<EOF

apiVersion: v1

kind: ConfigMap

metadata:

  name: prometheus-config

  #The namespace here must match the one used for node-exporter

  namespace: kube-system

data:

  prometheus.yml: |

    global:

      scrape_interval: 30s

      scrape_timeout: 30s

    scrape_configs:

    - job_name: 'prometheus'

      static_configs:

        - targets: ['localhost:9090']


    - job_name: 'kubernetes-apiservers'

      kubernetes_sd_configs:

      - role: endpoints

      scheme: https

      tls_config:

        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt

      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

      relabel_configs:

      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]

        action: keep

        regex: default;kubernetes;https


    - job_name: 'kubernetes-nodes'

      scheme: https

      tls_config:

        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt

      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

      kubernetes_sd_configs:

      - role: node

      relabel_configs:

      - action: labelmap

        regex: __meta_kubernetes_node_label_(.+)

      - target_label: __address__

        replacement: kubernetes.default.svc:443

      - source_labels: [__meta_kubernetes_node_name]

        regex: (.+)

        target_label: __metrics_path__

        replacement: /api/v1/nodes/${1}/proxy/metrics


    - job_name: 'kubernetes-cadvisor'

      scheme: https

      tls_config:

        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt

      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

      kubernetes_sd_configs:

      - role: node

      relabel_configs:

      - action: labelmap

        regex: __meta_kubernetes_node_label_(.+)

      - target_label: __address__

        replacement: kubernetes.default.svc:443

      - source_labels: [__meta_kubernetes_node_name]

        regex: (.+)

        target_label: __metrics_path__

        replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor


    - job_name: 'kubernetes-node-exporter'

      scheme: http

      tls_config:

        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt

      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

      kubernetes_sd_configs:

      - role: node

      relabel_configs:

      - action: labelmap

        regex: __meta_kubernetes_node_label_(.+)

      - source_labels: [__meta_kubernetes_role]

        action: replace

        target_label: kubernetes_role

      - source_labels: [__address__]

        regex: '(.*):10250'

        #Note this port: it must be the actual NodePort of the node-exporter service

        replacement: '${1}:21672'

        target_label: __address__

EOF

##prometheus.yaml

cat >prometheus.yaml <<EOF

apiVersion: extensions/v1beta1

kind: Deployment

metadata:

  labels:

    k8s-app: prometheus

  name: prometheus

  namespace: kube-system

spec:

  replicas: 1

  template:

    metadata:

      labels:

        k8s-app: prometheus

    spec:

      serviceAccountName: prometheus

      containers:

      - image: prom/prometheus:v2.0.0

        name: prometheus

        command:

        - "/bin/prometheus"

        args:

        - "--config.file=/etc/prometheus/prometheus.yml"

        - "--storage.tsdb.path=/prometheus"

        - "--storage.tsdb.retention=15d"

        ports:

        - containerPort: 9090

          protocol: TCP

          name: http

        volumeMounts:

        - mountPath: "/prometheus"

          name: data

          subPath: prometheus/data

        - mountPath: "/etc/prometheus"

          name: config-volume

        resources:

          requests:

            cpu: 100m

            memory: 100Mi

          limits:

            cpu: 200m

            memory: 1Gi

      volumes:

      - name: data

        emptyDir: {}

      - configMap:

          name: prometheus-config

        name: config-volume

EOF

##prometheus-rbac.yaml

cat >prometheus-rbac.yaml<<EOF

apiVersion: v1

kind: ServiceAccount

metadata:

  name: prometheus

  namespace: kube-system

---

apiVersion: rbac.authorization.k8s.io/v1beta1

kind: ClusterRole

metadata:

  name: prometheus

  namespace: kube-system

rules:

- apiGroups: [""]

  resources:

  - nodes

  - nodes/proxy

  - services

  - endpoints

  - pods

  verbs: ["get", "list", "watch"]

- nonResourceURLs: ["/metrics"]

  verbs: ["get"]

---

apiVersion: rbac.authorization.k8s.io/v1beta1

kind: ClusterRoleBinding

metadata:

  name: prometheus

  namespace: kube-system

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: ClusterRole

  name: prometheus

subjects:

- kind: ServiceAccount

  name: prometheus

  namespace: kube-system

EOF

##prometheus-service.yaml

cat >prometheus-service.yaml <<EOF

apiVersion: v1

kind: Service

metadata:

  namespace: kube-system

  name: prometheus-svc

  labels:

    k8s-app: prometheus

spec:

  selector:

    k8s-app: prometheus

  ports:

  - port: 9090

    targetPort: 9090

  type: NodePort

EOF


kubectl apply -f .
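
#Optional check: the Prometheus UI is exposed through the NodePort of prometheus-svc

kubectl get pod,svc -n kube-system | grep prometheus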



#-------------------------------

#######################-----Kubernetes Images-----#######################

#First pull the images

docker pull majun9129/kubernetes-images:pause-3.0

docker pull majun9129/kubernetes-images:kube-dashboard-1.10.0 

docker pull majun9129/kubernetes-images:dashboard-1.8.3

docker pull majun9129/kubernetes-images:kube-heapster-influxdb-1.3.3

docker pull majun9129/kubernetes-images:kube-heapster-grafana-4.4.3

docker pull majun9129/kubernetes-images:kube-heapster-1.5.3

#Retag the images with their official names

docker tag majun9129/kubernetes-images:pause-3.0 gcr.io/google_containers/pause-amd64:3.0

docker tag  majun9129/kubernetes-images:kube-dashboard-1.10.0  k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.0

docker tag majun9129/kubernetes-images:dashboard-1.8.3 k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3

docker tag majun9129/kubernetes-images:kube-heapster-influxdb-1.3.3 gcr.io/google_containers/heapster-influxdb-amd64:v1.3.3

docker tag majun9129/kubernetes-images:kube-heapster-grafana-4.4.3 gcr.io/google_containers/heapster-grafana-amd64:v4.4.3

docker tag majun9129/kubernetes-images:kube-heapster-1.5.3 gcr.io/google_containers/heapster-amd64:v1.5.3
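
#If a node has no direct Internet access (see convention #4 at the top), images can be moved offline with docker save/load; a sketch using the pause image as an example:

docker save gcr.io/google_containers/pause-amd64:3.0 -o pause-amd64-3.0.tar

#copy pause-amd64-3.0.tar to the target node, then:

docker load -i pause-amd64-3.0.tar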


#######################-----Kubernetes Add-on YAML Files-----#######################

#####core-dns#####

cat >coredns.yaml <<EOF

# __MACHINE_GENERATED_WARNING__


apiVersion: v1

kind: ServiceAccount

metadata:

  name: coredns

  namespace: kube-system

  labels:

      kubernetes.io/cluster-service: "true"

      addonmanager.kubernetes.io/mode: Reconcile

---

apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRole

metadata:

  labels:

    kubernetes.io/bootstrapping: rbac-defaults

    addonmanager.kubernetes.io/mode: Reconcile

  name: system:coredns

rules:

- apiGroups:

  - ""

  resources:

  - endpoints

  - services

  - pods

  - namespaces

  verbs:

  - list

  - watch

---

apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRoleBinding

metadata:

  annotations:

    rbac.authorization.kubernetes.io/autoupdate: "true"

  labels:

    kubernetes.io/bootstrapping: rbac-defaults

    addonmanager.kubernetes.io/mode: EnsureExists

  name: system:coredns

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: ClusterRole

  name: system:coredns

subjects:

- kind: ServiceAccount

  name: coredns

  namespace: kube-system

---

apiVersion: v1

kind: ConfigMap

metadata:

  name: coredns

  namespace: kube-system

  labels:

      addonmanager.kubernetes.io/mode: EnsureExists

data:

  Corefile: |

    .:53 {

        errors

        health

        kubernetes cluster.local. in-addr.arpa ip6.arpa {

            pods insecure

            upstream

            fallthrough in-addr.arpa ip6.arpa

        }

        prometheus :9153

        proxy . /etc/resolv.conf

        cache 30

    }

---

apiVersion: extensions/v1beta1

kind: Deployment

metadata:

  name: coredns

  namespace: kube-system

  labels:

    k8s-app: coredns

    kubernetes.io/cluster-service: "true"

    addonmanager.kubernetes.io/mode: Reconcile

    kubernetes.io/name: "CoreDNS"

spec:

  replicas: 3

  strategy:

    type: RollingUpdate

    rollingUpdate:

      maxUnavailable: 1

  selector:

    matchLabels:

      k8s-app: coredns

  template:

    metadata:

      labels:

        k8s-app: coredns

    spec:

      serviceAccountName: coredns

      tolerations:

        - key: node-role.kubernetes.io/master

          effect: NoSchedule

        - key: "CriticalAddonsOnly"

          operator: "Exists"

      containers:

      - name: coredns

        image: coredns/coredns:1.0.6

        imagePullPolicy: IfNotPresent

        resources:

          limits:

            memory: 170Mi

          requests:

            cpu: 100m

            memory: 70Mi

        args: [ "-conf", "/etc/coredns/Corefile" ]

        volumeMounts:

        - name: config-volume

          mountPath: /etc/coredns

        ports:

        - containerPort: 53

          name: dns

          protocol: UDP

        - containerPort: 53

          name: dns-tcp

          protocol: TCP

        livenessProbe:

          httpGet:

            path: /health

            port: 8080

            scheme: HTTP

          initialDelaySeconds: 60

          timeoutSeconds: 5

          successThreshold: 1

          failureThreshold: 5

      dnsPolicy: Default

      volumes:

        - name: config-volume

          configMap:

            name: coredns

            items:

            - key: Corefile

              path: Corefile

---

apiVersion: v1

kind: Service

metadata:

  name: coredns

  namespace: kube-system

  labels:

    k8s-app: coredns

    kubernetes.io/cluster-service: "true"

    addonmanager.kubernetes.io/mode: Reconcile

    kubernetes.io/name: "CoreDNS"

spec:

  selector:

    k8s-app: coredns

  clusterIP: 10.253.0.2

  ports:

  - name: dns

    port: 53

    protocol: UDP

  - name: dns-tcp

    port: 53

    protocol: TCP

EOF

###dashboard###

cat >dashboard-configmap.yaml <<EOF

apiVersion: v1

kind: ConfigMap

metadata:

  labels:

    k8s-app: kubernetes-dashboard

    # Allows editing resource and makes sure it is created first.

    addonmanager.kubernetes.io/mode: EnsureExists

  name: kubernetes-dashboard-settings

  namespace: kube-system

EOF

cat >dashboard-controller.yaml <<EOF

apiVersion: v1

kind: ServiceAccount

metadata:

  labels:

    k8s-app: kubernetes-dashboard

    addonmanager.kubernetes.io/mode: Reconcile

  name: kubernetes-dashboard

  namespace: kube-system

---

apiVersion: apps/v1

kind: Deployment

metadata:

  name: kubernetes-dashboard

  namespace: kube-system

  labels:

    k8s-app: kubernetes-dashboard

    kubernetes.io/cluster-service: "true"

    addonmanager.kubernetes.io/mode: Reconcile

spec:

  selector:

    matchLabels:

      k8s-app: kubernetes-dashboard

  template:

    metadata:

      labels:

        k8s-app: kubernetes-dashboard

      annotations:

        scheduler.alpha.kubernetes.io/critical-pod: ''

    spec:

      priorityClassName: system-cluster-critical

      containers:

      - name: kubernetes-dashboard

        image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3

        resources:

          limits:

            cpu: 100m

            memory: 300Mi

          requests:

            cpu: 50m

            memory: 100Mi

        ports:

        - containerPort: 8443

          protocol: TCP

        args:

          # PLATFORM-SPECIFIC ARGS HERE

          - --auto-generate-certificates

        volumeMounts:

        - name: kubernetes-dashboard-certs

          mountPath: /certs

        - name: tmp-volume

          mountPath: /tmp

        livenessProbe:

          httpGet:

            scheme: HTTPS

            path: /

            port: 8443

          initialDelaySeconds: 30

          timeoutSeconds: 30

      volumes:

      - name: kubernetes-dashboard-certs

        secret:

          secretName: kubernetes-dashboard-certs

      - name: tmp-volume

        emptyDir: {}

      serviceAccountName: kubernetes-dashboard

      tolerations:

      - key: "CriticalAddonsOnly"

        operator: "Exists"

EOF

cat >dashboard-rbac.yaml <<EOF

kind: Role

apiVersion: rbac.authorization.k8s.io/v1

metadata:

  labels:

    k8s-app: kubernetes-dashboard

    addonmanager.kubernetes.io/mode: Reconcile

  name: kubernetes-dashboard-minimal

  namespace: kube-system

rules:

  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.

- apiGroups: [""]

  resources: ["secrets"]

  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]

  verbs: ["get", "update", "delete"]

  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.

- apiGroups: [""]

  resources: ["configmaps"]

  resourceNames: ["kubernetes-dashboard-settings"]

  verbs: ["get", "update"]

  # Allow Dashboard to get metrics from heapster.

- apiGroups: [""]

  resources: ["services"]

  resourceNames: ["heapster"]

  verbs: ["proxy"]

- apiGroups: [""]

  resources: ["services/proxy"]

  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]

  verbs: ["get"]

---

apiVersion: rbac.authorization.k8s.io/v1

kind: RoleBinding

metadata:

  name: kubernetes-dashboard-minimal

  namespace: kube-system

  labels:

    k8s-app: kubernetes-dashboard

    addonmanager.kubernetes.io/mode: Reconcile

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: Role

  name: kubernetes-dashboard-minimal

subjects:

- kind: ServiceAccount

  name: kubernetes-dashboard

  namespace: kube-system

EOF

cat >dashboard-secret.yaml <<EOF

apiVersion: v1

kind: Secret

metadata:

  labels:

    k8s-app: kubernetes-dashboard

    # Allows editing resource and makes sure it is created first.

    addonmanager.kubernetes.io/mode: EnsureExists

  name: kubernetes-dashboard-certs

  namespace: kube-system

type: Opaque

---

apiVersion: v1

kind: Secret

metadata:

  labels:

    k8s-app: kubernetes-dashboard

    # Allows editing resource and makes sure it is created first.

    addonmanager.kubernetes.io/mode: EnsureExists

  name: kubernetes-dashboard-key-holder

  namespace: kube-system

type: Opaque


EOF

cat >dashboard-service.yaml <<EOF

apiVersion: v1

kind: Service

metadata:

  name: kubernetes-dashboard

  namespace: kube-system

  labels:

    k8s-app: kubernetes-dashboard

    kubernetes.io/cluster-service: "true"

    addonmanager.kubernetes.io/mode: Reconcile

spec:

  selector:

    k8s-app: kubernetes-dashboard

  ports:

  - port: 443

    targetPort: 8443

  type: NodePort

EOF


#####heapster#####

cat >grafana.yaml <<EOF

apiVersion: extensions/v1beta1

kind: Deployment

metadata:

  name: monitoring-grafana

  namespace: kube-system

spec:

  replicas: 1

  template:

    metadata:

      labels:

        task: monitoring

        k8s-app: grafana

    spec:

      containers:

      - name: grafana

        image: gcr.io/google_containers/heapster-grafana-amd64:v4.4.3

        ports:

        - containerPort: 3000

          protocol: TCP

        volumeMounts:

        - mountPath: /etc/ssl/certs

          name: ca-certificates

          readOnly: true

        - mountPath: /var

          name: grafana-storage

        env:

        - name: INFLUXDB_HOST

          value: monitoring-influxdb

        - name: GF_SERVER_HTTP_PORT

          value: "3000"

          # The following env variables are required to make Grafana accessible via

          # the kubernetes api-server proxy. On production clusters, we recommend

          # removing these env variables, setup auth for grafana, and expose the grafana

          # service using a LoadBalancer or a public IP.

        - name: GF_AUTH_BASIC_ENABLED

          value: "false"

        - name: GF_AUTH_ANONYMOUS_ENABLED

          value: "true"

        - name: GF_AUTH_ANONYMOUS_ORG_ROLE

          value: Admin

        - name: GF_SERVER_ROOT_URL

          # If you're only using the API Server proxy, set this value instead:

          # value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy

          value: /

      volumes:

      - name: ca-certificates

        hostPath:

          path: /etc/ssl/certs

      - name: grafana-storage

        emptyDir: {}

---

apiVersion: v1

kind: Service

metadata:

  labels:

    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)

    # If you are NOT using this as an addon, you should comment out this line.

    kubernetes.io/cluster-service: 'true'

    kubernetes.io/name: monitoring-grafana

  name: monitoring-grafana

  namespace: kube-system

spec:

  # In a production setup, we recommend accessing Grafana through an external Loadbalancer

  # or through a public IP.

  # type: LoadBalancer

  # You could also use NodePort to expose the service at a randomly-generated port

  type: NodePort

  ports:

  - port: 80

    targetPort: 3000

  selector:

    k8s-app: grafana

EOF


cat >heapster.yaml <<EOF

apiVersion: v1

kind: ServiceAccount

metadata:

  name: heapster

  namespace: kube-system

---

apiVersion: extensions/v1beta1

kind: Deployment

metadata:

  name: heapster

  namespace: kube-system

spec:

  replicas: 1

  template:

    metadata:

      labels:

        task: monitoring

        k8s-app: heapster

    spec:

      serviceAccountName: heapster

      containers:

      - name: heapster

        image: gcr.io/google_containers/heapster-amd64:v1.5.3

        imagePullPolicy: IfNotPresent

        command:

        - /heapster

        - --source=kubernetes:https://kubernetes.default?kubeletHttps=true&kubeletPort=10250

        #- --source=kubernetes:https://kubernetes.default

        - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086

---

apiVersion: v1

kind: Service

metadata:

  labels:

    task: monitoring

    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)

    # If you are NOT using this as an addon, you should comment out this line.

    kubernetes.io/cluster-service: 'true'

    kubernetes.io/name: Heapster

  name: heapster

  namespace: kube-system

spec:

  ports:

  - port: 80

    targetPort: 8082

  selector:

    k8s-app: heapster

---

kind: ClusterRoleBinding

apiVersion: rbac.authorization.k8s.io/v1beta1

metadata:

  name: heapster-kubelet-api

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: ClusterRole

  #name: system:kubelet-api-admin

  name: cluster-admin

subjects:

- kind: ServiceAccount

  name: heapster

  namespace: kube-system

EOF


cat >influxdb.yaml <<EOF

apiVersion: extensions/v1beta1

kind: Deployment

metadata:

  name: monitoring-influxdb

  namespace: kube-system

spec:

  replicas: 1

  template:

    metadata:

      labels:

        task: monitoring

        k8s-app: influxdb

    spec:

      containers:

      - name: influxdb

        image: gcr.io/google_containers/heapster-influxdb-amd64:v1.3.3

        volumeMounts:

        - mountPath: /data

          name: influxdb-storage

      volumes:

      - name: influxdb-storage

        emptyDir: {}

---

apiVersion: v1

kind: Service

metadata:

  labels:

    task: monitoring

    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)

    # If you are NOT using this as an addon, you should comment out this line.

    kubernetes.io/cluster-service: 'true'

    kubernetes.io/name: monitoring-influxdb

  name: monitoring-influxdb

  namespace: kube-system

spec:

  ports:

  - port: 8086

    targetPort: 8086

  selector:

    k8s-app: influxdb

EOF

