Quick notes on a k8s binary installation

Environment

192.168.201.41
etcd01, master (apiserver, scheduler, controller-manager)
192.168.201.42
etcd02, node1 (kubelet, proxy)
192.168.201.43
etcd03, node2 (kubelet, proxy)

System preparation

#systemctl stop firewalld #temporarily stop the firewall
#systemctl disable firewalld #disable firewall start on boot
#systemctl list-unit-files #check which services start on boot
#setenforce 0 #temporarily set SELinux to permissive
#vi /etc/selinux/config 
SELINUX=disabled
#swapoff -a  # temporarily disable swap
#vim /etc/fstab  # permanently disable swap (comment out the swap entry; see the example after this list)
#timedatectl list-timezones # list all time zones
#timedatectl set-local-rtc 1 # keep the hardware clock in local time; 0 sets it to UTC
#timedatectl set-timezone Asia/Shanghai # set the system time zone to Shanghai; equivalent to # cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
#timedatectl set-ntp true #allow NTP synchronization
#hostnamectl set-hostname master1 #change the hostname to master1
#vi /etc/hosts #add the hostnames to hosts
#yum -y install chrony
#vi /etc/chrony.conf #review the synchronization configuration
#chronyc sources -v #show the sync servers
#chronyc tracking #show the last sync status
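
A minimal sketch of the permanent swap change mentioned above, assuming the typical CentOS default entry (the device path may differ on your system); prefix the swap line in /etc/fstab with a # so it reads:
#/dev/mapper/centos-swap swap                    swap    defaults        0 0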

Deploy the etcd cluster

#./cfssl.sh #run cfssl.sh on the etcd servers first to download cfssl. Compared with openssl, cfssl accepts JSON input, which is more convenient.
#cat cfssl.sh #view cfssl.sh
curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/local/bin/cfssl
curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson
curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo
#bash etcd-cert.sh #the ca-config.json part sets the CA options and generates the CA root certificate; the ca-csr.json part is the JSON for the CA certificate signing request (CSR) and produces ca.pem and ca-key.pem; the server-csr.json part issues the etcd server certificate and produces server.pem and server-key.pem. (List all etcd servers there; IPs or domain names both work.)
#tar -xvf etcd-v3.3.10-linux-amd64.tar.gz #binary package download: https://github.com/etcd-io/etcd/releases
#mkdir /opt/etcd/{bin,cfg,ssl} #create the bin, cfg and ssl directories under /opt/etcd/
#cp etcd etcdctl /opt/etcd/bin #copy the etcd and etcdctl binaries into the bin directory
#cp *.pem /opt/etcd/ssl #copy the generated *.pem files into /opt/etcd/ssl
#./etcd.sh etcd01 192.168.201.41 etcd02=https://192.168.201.42:2380,etcd03=https://192.168.201.43:2380 #run etcd.sh (see the usage example inside the script); it configures the etcd service and waits for the other nodes to join
#scp -r [email protected]:/opt/etcd /opt 
#scp  [email protected]:/usr/lib/systemd/system/etcd.service /usr/lib/systemd/system/etcd.service  #copy the /opt/etcd directory and /usr/lib/systemd/system/etcd.service to the other two etcd servers, then change the name and IPs in their etcd configuration.
#systemctl start etcd #then start it with systemctl.
#/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.201.41:2379,https://192.168.201.42:2379,https://192.168.201.43:2379"  cluster-health #check the cluster health (see the notes)
#wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
#yum -y install docker-ce-18.06.1.ce-3.el7
#systemctl enable docker && systemctl start docker
#docker --version
Docker version 18.06.1-ce, build e68fc7a #install docker on the node machines
#rm /usr/lib/systemd/system/docker.service #remove the docker.service shipped with the package, otherwise it conflicts with the service file generated by flannel.sh
#/opt/etcd/bin/etcdctl \
--ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem \
--endpoints="https://192.168.0.x:2379,https://192.168.0.x:2379,https://192.168.0.x:2379" \
put /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}' #write the subnet configuration into etcd for flanneld (each docker node is allocated a small subnet; flanneld guarantees every node gets a unique segment; see Knowledge point 7 for the etcdctl 3.4 flags and put syntax)
#tar -xvf flannel-v0.12.0-linux-amd64.tar.gz #unpack flannel, downloaded from https://github.com/coreos/flannel/releases
#mkdir /opt/kubernetes/{cfg,bin,ssl} -p
#mv flanneld mk-docker-opts.sh /opt/kubernetes/bin/ #mk-docker-opts.sh generates the subnet options and writes them to a file that docker reads when it starts
#bash flannel.sh https://192.168.201.41:2379,https://192.168.201.42:2379,https://192.168.201.43:2379 #start flannel with flannel.sh. The first part of the script sets the etcd endpoints and certificates; the next part generates the systemd unit; the last part is the docker unit file
#cat /usr/lib/systemd/system/dockerd.service #these two lines in the file integrate docker with flannel, making docker read the subnet configuration file at startup
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
#systemctl daemon-reload
#systemctl restart docker
#ip addr #check docker and flannel; their subnets should match
#Repeat the steps above on the remaining etcd machines: install flannel, then verify the network with ping. Then start docker containers on each node and verify connectivity with ping again (a quick check is sketched below).
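
A quick cross-node check, assuming docker and flannel are running on both nodes (busybox is just a convenient test image):
#docker run -it --rm busybox sh          # on node1
/ # ip addr show eth0                    # note the container IP inside this node's flannel subnet
#docker run -it --rm busybox ping <node1 container IP>    # on node2; replies confirm the overlay works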

View the etcd-cert.sh file

#cat etcd-cert.sh 
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

cat > ca-csr.json <<EOF
{
    "CN": "etcd CA",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

#-----------------------

cat > server-csr.json <<EOF
{
    "CN": "etcd",
    "hosts": [
    "10.206.240.188",
    "10.206.240.189",
    "10.206.240.111"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing"
        }
    ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server

View the etcd.sh file

# cat etcd.sh 
#!/bin/bash
# example: ./etcd.sh etcd01 192.168.1.10 etcd02=https://192.168.1.11:2380,etcd03=https://192.168.1.12:2380

ETCD_NAME=$1
ETCD_IP=$2
ETCD_CLUSTER=$3

WORK_DIR=/opt/etcd

cat <<EOF >$WORK_DIR/cfg/etcd
#[Member]
ETCD_NAME="${ETCD_NAME}"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://${ETCD_IP}:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://${ETCD_IP}:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://${ETCD_IP}:2380,${ETCD_CLUSTER}"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

cat <<EOF >/usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=${WORK_DIR}/cfg/etcd
ExecStart=${WORK_DIR}/bin/etcd \
--name=\${ETCD_NAME} \
--data-dir=\${ETCD_DATA_DIR} \
--listen-peer-urls=\${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=\${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=\${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=\${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=\${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=\${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=${WORK_DIR}/ssl/server.pem \
--key-file=${WORK_DIR}/ssl/server-key.pem \
--peer-cert-file=${WORK_DIR}/ssl/server.pem \
--peer-key-file=${WORK_DIR}/ssl/server-key.pem \
--trusted-ca-file=${WORK_DIR}/ssl/ca.pem \
--peer-trusted-ca-file=${WORK_DIR}/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable etcd
systemctl restart etcd

View the flannel.sh file

#cat flannel.sh 
#!/bin/bash

ETCD_ENDPOINTS=${1:-"http://127.0.0.1:2379"}

cat <<EOF >/opt/kubernetes/cfg/flanneld

FLANNEL_OPTIONS="--etcd-endpoints=${ETCD_ENDPOINTS} \
-etcd-cafile=/opt/etcd/ssl/ca.pem \
-etcd-certfile=/opt/etcd/ssl/server.pem \
-etcd-keyfile=/opt/etcd/ssl/server-key.pem"

EOF

cat <<EOF >/usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target

EOF

cat <<EOF >/usr/lib/systemd/system/dockerd.service

[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd \$DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP \$MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target

EOF

systemctl daemon-reload
systemctl enable flanneld
systemctl restart flanneld
systemctl restart dockerd

Knowledge point 1
Chrony is a replacement for the NTP client.
Advantages of Chrony:
1. Faster synchronization: it needs only minutes rather than hours, minimizing time and frequency error, which is useful for virtual machines that do not run 24 hours a day
2. Better response to rapid changes in clock frequency, useful for virtual machines with unstable clocks or for power-saving technologies that change the clock frequency
3. After the initial synchronization it never steps the clock, so applications that require monotonic system time are not affected
4. Better stability under temporarily asymmetric delays (for example when a large download saturates the link)
5. No periodic server polling is required, so systems with intermittent network connectivity can still synchronize quickly
chrony is another implementation of the Network Time Protocol (NTP) and consists of two programs:
chronyd: a daemon that adjusts the system clock in the kernel to stay in sync with time servers. It determines the rate at which the machine gains or loses time and compensates for it
chronyc: a command-line tool for monitoring performance and changing configuration. It can run on the machine controlled by the chronyd instance or on a different remote machine

Knowledge point 2
Basic concepts
A CA (Certification Authority) certificate is a certificate issued to us by a trusted authority.
A key is a file or string used for encryption and decryption. In asymmetric cryptography, keys refer to the private key and the public key, which always come as a pair; their main purpose is encryption and decryption. A common key strength is 2048 bits.
RSA is an asymmetric encryption algorithm. Asymmetric encryption uses two different keys, a private key and a public key; data encrypted with one can only be decrypted with the other, never with the same key, so data encrypted with the public key can only be decrypted with the private key.

Certificate encoding formats
PEM (Privacy Enhanced Mail) is the format commonly used by certificate authorities (CAs); extensions are .pem, .crt, .cer and .key. The content is a Base64-encoded ASCII file with header and footer markers such as "-----BEGIN CERTIFICATE-----" and "-----END CERTIFICATE-----". Server certificates, intermediate certificates and private keys can all be stored as PEM (a certificate is essentially a signed public key). Servers such as Apache and nginx use PEM certificates.
DER (Distinguished Encoding Rules) differs from PEM in that it is binary rather than Base64-encoded ASCII. The extension is .der, although .cer is also often used; all kinds of certificates and private keys can be stored as DER. Java is its typical platform.

Certificate signing request (CSR)
A CSR (Certificate Signing Request) is the request file used when applying to a CA for a digital certificate. Before generating it we need an asymmetric key pair: the private key stays with us, while the request carries the public key plus country, city, domain name, email and other information, together with a signature. Once the CSR is ready it is submitted to the CA for signing; after signing we receive a crt file, which is the certificate.
Note: a CSR is not a certificate; it is the application sent to a trusted CA to obtain a signed certificate.
Hand the CSR to the CA, the CA signs it, and you are done. Keep the CSR: when the certificate issued by the CA expires, you can use the same CSR to request a new certificate while keeping the same key.

Digital certificates and public keys
A digital certificate is a file produced by a certification authority (CA) after verifying the applicant's real identity: the CA uses its root certificate to sign the applicant's basic information and public key (much like stamping it with the issuer's official seal). In effect, a digital certificate is a CA-certified public key, plus other information such as email, country, city and domain name.
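
For reference, a few generic openssl commands for inspecting and converting these formats (standard openssl tooling; the file names are just examples):
#openssl x509 -in server.pem -noout -text                  # dump a PEM certificate (subject, SANs, validity)
#openssl x509 -in server.pem -outform der -out server.der  # convert PEM to DER
#openssl req -in server.csr -noout -text                   # inspect a CSR before submitting it to the CA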

Knowledge point 3
Apr 12 01:06:48 k8s-master etcd[3092]: publish error: etcdserver: request timed out
Analysis: ETCD_INITIAL_CLUSTER_STATE in etcd1's configuration file /etc/etcd/etcd.conf is new, and ETCD_INITIAL_CLUSTER lists the IP:PORT of etcd2/etcd3, so etcd1 tries to connect to etcd2 and etcd3, but their etcd services are not started yet. Start etcd on etcd2 and etcd3 first, then start etcd1.

Knowledge point 4
Check the API version
[root@etcd-master opt]# /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem version
etcdctl version: 3.4.7
API version: 3.4

Knowledge point 5
etcd 3.4 automatically reads parameters from environment variables, so a parameter that already appears in the EnvironmentFile must not be added again as an ExecStart flag; pick one or the other. If both are set, you get an error like "etcd: conflicting environment variable "ETCD_NAME" is shadowed by corresponding command-line flag (either unset environment variable or disable flag)"
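
A minimal sketch of the two ways to resolve the conflict, assuming the layout produced by etcd.sh above:
# Option 1: keep the variables in /opt/etcd/cfg/etcd and delete the duplicated flags (e.g. --name) from ExecStart in etcd.service
# Option 2: keep the ExecStart flags and delete the corresponding ETCD_* lines from /opt/etcd/cfg/etcd
#systemctl daemon-reload;systemctl restart etcd #reload after either change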

Knowledge point 6
m=${1:-start}, where $1 is the first positional parameter
If $1 exists and is not empty, m takes the value of $1
If $1 does not exist or is empty, m defaults to start
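
A quick illustration with a throwaway script (test.sh is hypothetical; flannel.sh uses the same pattern for ETCD_ENDPOINTS):
#cat test.sh
#!/bin/bash
m=${1:-start}
echo $m
#bash test.sh        # prints start
#bash test.sh stop   # prints stop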

Knowledge point 7
/opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.201.41:2379,https://192.168.201.42:2379,https://192.168.201.43:2379" put /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'
#In the newer 3.4.7 release set has been replaced by put, otherwise you get Error: unknown command "set" for "etcdctl"

Knowledge point 8
With a stock docker installation, the docker bridges on different nodes may end up on the same subnet, so containers on different nodes cannot reach each other reliably. flannel gives the pods on each node a distinct subnet and carries their traffic out through veth0, encapsulated in UDP, to the docker network on the other node.
On node1, pod1's traffic goes from veth0 at 10.1.15.2/24 to docker0 at 10.1.15.1/24 and then to flannel0 on 10.1.15.0; flanneld looks up the routing information stored in etcd and encapsulates the packet from flannel0 into UDP (adding a header whose source is pod1's veth0 address and whose destination is the remote pod's veth0 address). The return path is simply the reverse of this process.
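
A few commands to inspect the assignment on a node (default flannel paths; flannel.1 is the VXLAN device flannel creates for the vxlan backend):
#cat /run/flannel/subnet.env     #the subnet flanneld assigned to this node
#ip route                        #per-subnet routes pointing at the flannel device
#ip -d link show flannel.1       #VXLAN details of the overlay device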

Knowledge point 9
cfssl-certinfo -cert /opt/etcd/ssl/server.pem
#view the server IPs specified in the certificate

Knowledge point 10
Version 3.4.7
/opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.201.41:2379,https://192.168.201.42:2379,https://192.168.201.43:2379" member list --write-out=table
#list the members

/opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.201.41:2379,https://192.168.201.42:2379,https://192.168.201.43:2379" --write-out=table endpoint health
#check endpoint health

/opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.201.41:2379,https://192.168.201.42:2379,https://192.168.201.43:2379" endpoint status --write-out=table
#check endpoint status and the leader

Knowledge point 11
Always compare the flannel version against the etcd version; flannel does not necessarily support newer etcd releases. During this installation errors kept appearing for exactly that reason.

Knowledge point 12
Unsynchronized clocks between the etcd members cause errors such as: the clock difference against peer xxx is too high [xxxs > 1s]
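
If this appears, forcing a step correction with chrony on each member (chrony was installed in the system preparation above) usually clears it:
#chronyc makestep #step the clock immediately
#chronyc tracking #confirm the offset is back near zero
#systemctl restart etcd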

Deploy the master

#mkdir /opt/kubernetes/{cfg,bin,ssl} -p #create the directories used to hold the files
#mkdir k8s-cert;cd k8s-cert
#vi k8s-cert.sh  #almost identical to the etcd version. In the hosts list, only the IPs in the middle should be changed; normally list the master, node, lb and vip addresses
#bash k8s-cert.sh #creates ca-config.json for the CA options and ca-csr.json for the CA certificate signing request (CSR), producing ca.pem and ca-key.pem; creates server-csr.json for the api-server certificate, producing server.pem and server-key.pem; creates admin-csr.json for the admin certificate, producing admin.pem and admin-key.pem; creates kube-proxy-csr.json, producing kube-proxy.pem and kube-proxy-key.pem
#cp ca*pem server*pem /opt/kubernetes/ssl  ##the master only needs the pem certificates
#cd /opt;tar -zxvf kubernetes-server-linux-amd64.tar.gz ##unpack the server package
#cd kubernetes;cd server;cd bin;
#cp kube-apiserver kube-scheduler kube-controller-manager kubectl /opt/kubernetes/bin/ #copy the binaries
#head -c 16 /dev/urandom | od -An -t x | tr -d ' '
c47ffb939f5ca36231d9e3121a252940
#vi /opt/kubernetes/cfg/token.csv #write in the token, user, user ID and bound role. The token can be generated with the command above (the same command appears in kubeconfig.sh)
c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:node-bootstrapper"
#bash apiserver.sh master_ip https://192.168.2.10:2379,https://192.168.2.11:2379,https://192.168.2.12:2379 #initialize the api-server
#ps -ef |grep kube #check that it is running; kube-apiserver listens on 6443 (https) and also on 8080, which kube-scheduler and kube-controller-manager use to reach the api-server
#./scheduler.sh 127.0.0.1 #start the scheduler
#./controller-manager.sh 127.0.0.1 #start the controller-manager
#/opt/kubernetes/bin/kubectl get cs #check the current master status
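
A quick check that both listeners are up (ss comes with iproute2; 6443 and 8080 match the defaults used by apiserver.sh and scheduler.sh above):
#ss -lntp | grep -E '6443|8080'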

View the k8s-cert.sh file

#cat k8s-cert.sh 
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

cat > ca-csr.json <<EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
      	    "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

#-----------------------

cat > server-csr.json <<EOF
{
    "CN": "kubernetes",
    "hosts": [
      "10.0.0.1",
      "127.0.0.1",
      "10.206.176.19",
      "10.206.240.188",
      "10.206.240.189",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

#-----------------------

cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

#-----------------------

cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

View the apiserver.sh file

#cat apiserver.sh 
#!/bin/bash

MASTER_ADDRESS=$1
ETCD_SERVERS=$2

cat <<EOF >/opt/kubernetes/cfg/kube-apiserver

KUBE_APISERVER_OPTS="--logtostderr=true \\
--v=4 \\
--etcd-servers=${ETCD_SERVERS} \\
--bind-address=${MASTER_ADDRESS} \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--kubelet-https=true \\
--enable-bootstrap-token-auth \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-50000 \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem  \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"

EOF

cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver

View the scheduler.sh file

#cat scheduler.sh
#!/bin/bash

MASTER_ADDRESS=$1

cat <<EOF >/opt/kubernetes/cfg/kube-scheduler

KUBE_SCHEDULER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect"

EOF

cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler

View the controller-manager.sh file

#cat controller-manager.sh 
#!/bin/bash

MASTER_ADDRESS=$1

cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager


KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect=true \\
--address=127.0.0.1 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem  \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s"

EOF

cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl restart kube-controller-manager

Deploy the nodes

#cd /opt/kubernetes/server/bin/;scp kubelet kube-proxy root@ip:/opt/kubernetes/bin/ #on the master, copy the node components to node1 and node2
#cd /root/k8s #upload kubeconfig.sh
#mkdir kubeconfig;mv kubeconfig.sh kubeconfig;cd kubeconfig #on the master, generate the kubelet bootstrapping kubeconfig and the kube-proxy kubeconfig
#vi kubeconfig.sh #replace the corresponding variable with the token from /opt/kubernetes/cfg/token.csv. The first part is used to request the API to issue certificates; the second part connects to the api-server and is used to pull the network rules and refresh them locally
#echo "export PATH=$PATH:/opt/kubernetes/bin/" >> /etc/profile;source /etc/profile #set the environment variable
#bash  kubeconfig.sh master_ip /opt/k8s-cert
#scp bootstrap.kubeconfig kube-proxy.kubeconfig root@ip:/opt/kubernetes/cfg/ #copy the files to node1 and node2
#kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap #upload node.zip; on the master, bind the kubelet-bootstrap user to the system cluster role
#bash kubelet.sh node_ip #start the node. The image URL in the config must be reachable, --pod-infra-container-image=mirrorgooglecontainers/pause-amd64:3.0, and the dockerd.service name referenced in the unit file must match the docker service configured earlier.
#ps -ef |grep kube #check that it started
#kubectl get csr #on the master, view the name of the pending certificate request
#kubectl certificate approve node-csr-<name from the previous command> #approve the certificate on the master
#kubectl get node #view the nodes on the master
#bash proxy.sh node_ip #start kube-proxy
#scp -r /opt/kubernetes/ root@node2_ip:/opt #copy everything from node1 to node2
#scp /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@node2_ip:/usr/lib/systemd/system
#cd /opt/kubernetes/ssl;rm * #on node2, delete the certificate files copied over from node1
#cd ../cfg;vi kubelet;vi kubelet.config;vi kube-proxy #change node1_ip in the config files to node2_ip
#systemctl start kubelet;systemctl start kube-proxy
#kubectl get csr #on the master, view the node waiting for approval
#kubectl certificate approve node-csr-<name from the previous command> #approve the certificate on the master
#kubectl get node #view the nodes on the master
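
A minimal smoke test once both nodes show Ready (nginx and NodePort are just an example workload):
#kubectl create deployment nginx --image=nginx
#kubectl expose deployment nginx --port=80 --type=NodePort
#kubectl get pods,svc -o wide #note the NodePort, then curl node_ip:NodePort from any machine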

View the kubeconfig.sh file

#cat kubeconfig.sh
# Create the TLS bootstrapping token
#BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
BOOTSTRAP_TOKEN=0fb61c46f8991b718eb38d27b605b008

cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

#----------------------

APISERVER=$1
SSL_DIR=$2

# Create the kubelet bootstrapping kubeconfig
export KUBE_APISERVER="https://$APISERVER:6443"

# Set the cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=$SSL_DIR/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig

# Set the client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig

# Set the context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig

# Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

#----------------------

# Create the kube-proxy kubeconfig file

kubectl config set-cluster kubernetes \
  --certificate-authority=$SSL_DIR/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
  --client-certificate=$SSL_DIR/kube-proxy.pem \
  --client-key=$SSL_DIR/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

View the kubelet.sh file

#cat kubelet.sh
#!/bin/bash

NODE_ADDRESS=$1
DNS_SERVER_IP=${2:-"10.0.0.2"}

cat <<EOF >/opt/kubernetes/cfg/kubelet

KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet.config \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=mirrorgooglecontainers/pause-amd64:3.0"

EOF

cat <<EOF >/opt/kubernetes/cfg/kubelet.config

kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${NODE_ADDRESS}
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- ${DNS_SERVER_IP} 
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true
EOF

cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=dockerd.service
Requires=dockerd.service

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet

View the proxy.sh file

#cat proxy.sh 
#!/bin/bash

NODE_ADDRESS=$1

cat <<EOF >/opt/kubernetes/cfg/kube-proxy

KUBE_PROXY_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--cluster-cidr=10.0.0.0/24 \\
--proxy-mode=ipvs \\
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"

EOF

cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy

Knowledge point 1
Failed to start kubelet.service: Unit not found
Check whether the docker service is installed and running. If docker is installed and started, check kubelet.service itself
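
A few standard checks that usually pinpoint this (plain systemd/docker tooling):
#systemctl status docker #docker must be installed and running
#ls /usr/lib/systemd/system/ | grep -E 'docker|kubelet' #confirm the unit files exist and the names match what kubelet.sh references
#systemctl daemon-reload;systemctl start kubelet
#journalctl -u kubelet -f #follow the kubelet logs if it still fails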
