文章目錄
什麼是ETCD?
Etcd是Kubernetes集羣中的一個十分重要的組件,用於保存集羣所有的網絡配置和對象的狀態信息,K8S中所有持久化的狀態信息都是以Key-Value的形式存儲在ETCD中,提供分佈式協調服務。之所以說Kubernetes各個組件是無狀態的,就是因爲其中把數據都存放在ETCD中。
由於ETCD支持集羣,本實驗中在三臺主機上都部署上ETCD.
1. 準備ETCD軟件包
[root@linux-node1 src]# pwd
/usr/local/src
[root@linux-node1 src]# wget https://github.com/coreos/etcd/releases/download/v3.2.18/etcd-v3.2.18-linux-amd64.tar.gz
[root@linux-node1 src]# tar zxf etcd-v3.2.18-linux-amd64.tar.gz #解壓etcd
[root@linux-node1 src]# cd etcd-v3.2.18-linux-amd64 #有2個文件,etcdctl是操作etcd的命令
[root@linux-node1 etcd-v3.2.18-linux-amd64]# cp etcd etcdctl /opt/kubernetes/bin/
[root@linux-node1 etcd-v3.2.18-linux-amd64]# scp etcd etcdctl 192.168.219.136:/opt/kubernetes/bin/
[root@linux-node1 etcd-v3.2.18-linux-amd64]# scp etcd etcdctl 192.168.219.137:/opt/kubernetes/bin/
2. 創建ETCD證書籤名請求
[root@linux-node1 ~]# cd /usr/local/src/ssl
[root@linux-node1 ssl]# vim etcd-csr.json
注意:hosts中的IP是ETCD集羣中各個節點的IP地址。JSON文件不支持#註釋,不要把註釋寫進文件內,否則cfssl解析會失敗。
{
"CN": "etcd",
"hosts": [
"127.0.0.1",
"192.168.219.135",
"192.168.219.136",
"192.168.219.137"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "ShangHai",
"L": "ShangHai",
"O": "k8s",
"OU": "System"
}
]
}
3. 生成ETCD證書和私鑰
[root@linux-node1 ~]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes etcd-csr.json | cfssljson -bare etcd
[root@linux-node1 ssl]# ls -l etcd*
-rw-r--r--. 1 root root 1066 5月 8 09:45 etcd.csr
-rw-r--r--. 1 root root 307 5月 8 10:35 etcd-csr.json
-rw-------. 1 root root 1675 5月 8 09:45 etcd-key.pem
-rw-r--r--. 1 root root 1440 5月 8 09:45 etcd.pem
4. 將證書拷貝到三臺主機的/opt/kubernetes/ssl目錄下
[root@linux-node1 ~]# cp etcd*.pem /opt/kubernetes/ssl
[root@linux-node1 ~]# scp etcd*.pem 192.168.219.136:/opt/kubernetes/ssl
[root@linux-node1 ~]# scp etcd*.pem 192.168.219.137:/opt/kubernetes/ssl
5. 配置ETCD的配置文件
2379端口用於外部通信,2380用於內部通信
Linux-node1(Master)
[root@linux-node1 ~]# vim /opt/kubernetes/cfg/etcd.conf
#[member]
##ETCD節點名稱修改,這個ETCD_NAME每個節點必須不同
ETCD_NAME="etcd-node1"
#ETCD數據目錄
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#
##ETCD監聽的URL,每個節點不同需要修改
ETCD_LISTEN_PEER_URLS="https://192.168.219.135:2380"
#外部通信監聽URL修改,每個節點不同需要修改
ETCD_LISTEN_CLIENT_URLS="https://192.168.219.135:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.219.135:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd-node1=https://192.168.219.135:2380,etcd-node2=https://192.168.219.136:2380,etcd-node3=https://192.168.219.137:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.219.135:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
Linux-node2
[root@linux-node2 ~]# vim /opt/kubernetes/cfg/etcd.conf
#[member]
##ETCD節點名稱修改,這個ETCD_NAME每個節點必須不同
ETCD_NAME="etcd-node2"
#ETCD數據目錄
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
##ETCD監聽的URL,每個節點不同需要修改
ETCD_LISTEN_PEER_URLS="https://192.168.219.136:2380"
#外部通信監聽URL修改,每個節點不同需要修改
ETCD_LISTEN_CLIENT_URLS="https://192.168.219.136:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.219.136:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
# #添加集羣訪問
#
ETCD_INITIAL_CLUSTER="etcd-node1=https://192.168.219.135:2380,etcd-node2=https://192.168.219.136:2380,etcd-node3=https://192.168.219.137:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.219.136:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
Linux-node3
[root@linux-node3 ~]# vim /opt/kubernetes/cfg/etcd.conf
#[member]
##ETCD節點名稱修改,這個ETCD_NAME每個節點必須不同
ETCD_NAME="etcd-node3"
#ETCD數據目錄
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
##ETCD監聽的URL,每個節點不同需要修改
ETCD_LISTEN_PEER_URLS="https://192.168.219.137:2380"
#外部通信監聽URL修改,每個節點不同需要修改
ETCD_LISTEN_CLIENT_URLS="https://192.168.219.137:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.219.137:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
# 添加集羣訪問
ETCD_INITIAL_CLUSTER="etcd-node1=https://192.168.219.135:2380,etcd-node2=https://192.168.219.136:2380,etcd-node3=https://192.168.219.137:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.219.137:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
6. 創建ETCD系統服務
在linux-node1、linux-node2、linux-node3都創建ETCD系統服務。
[root@linux-node1 ~]# vim /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
[Service]
# etcd支持sd_notify啓動通知,這裏只保留一個Type=notify(同一unit中不要同時寫Type=simple和Type=notify,後者會覆蓋前者,容易引起誤解)
Type=notify
WorkingDirectory=/var/lib/etcd
EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /opt/kubernetes/bin/etcd"
[Install]
WantedBy=multi-user.target
7. 重新加載系統服務
在linux-node1、linux-node2、linux-node3都重新加載並設置開機啓動ETCD系統服務。
[root@linux-node1 ~]# systemctl daemon-reload
[root@linux-node1 ~]# systemctl enable etcd
默認不會創建etcd的數據存儲目錄,這裏在三個節點上創建etcd數據存儲目錄
[root@linux-node1 ~]# mkdir /var/lib/etcd
[root@linux-node1 ~]# systemctl start etcd
[root@linux-node1 ~]# systemctl status etcd
[root@linux-node2 ~]# mkdir /var/lib/etcd
[root@linux-node2 ~]# systemctl start etcd
[root@linux-node2 ~]# systemctl status etcd
[root@linux-node3 ~]# mkdir /var/lib/etcd
[root@linux-node3 ~]# systemctl start etcd
[root@linux-node3 ~]# systemctl status etcd
#在各節點上查看是否監聽了2379和2380端口
#linux-node1
[root@linux-node1 ~]# ss -tulnp | grep etcd
tcp LISTEN 0 128 192.168.219.135:2379 *:* users:(("etcd",pid=24791,fd=7))
tcp LISTEN 0 128 127.0.0.1:2379 *:* users:(("etcd",pid=24791,fd=6))
tcp LISTEN 0 128 192.168.219.135:2380 *:* users:(("etcd",pid=24791,fd=5))
#linux-node2
[root@localhost ~]# ss -tulnp | grep etcd
tcp LISTEN 0 128 192.168.219.136:2379 *:* users:(("etcd",pid=21073,fd=7))
tcp LISTEN 0 128 127.0.0.1:2379 *:* users:(("etcd",pid=21073,fd=6))
tcp LISTEN 0 128 192.168.219.136:2380 *:* users:(("etcd",pid=21073,fd=5))
#linux-node3
[root@localhost ~]# ss -tulnp |grep etcd
tcp LISTEN 0 128 192.168.219.137:2379 *:* users:(("etcd",pid=9336,fd=7))
tcp LISTEN 0 128 127.0.0.1:2379 *:* users:(("etcd",pid=9336,fd=6))
tcp LISTEN 0 128 192.168.219.137:2380 *:* users:(("etcd",pid=9336,fd=5))
8. 驗證ETCD集羣
[root@linux-node1 ~]# etcdctl --endpoints=https://192.168.219.135:2379 \
> --ca-file=/opt/kubernetes/ssl/ca.pem \
> --cert-file=/opt/kubernetes/ssl/etcd.pem \
> --key-file=/opt/kubernetes/ssl/etcd-key.pem cluster-health
member 252a9cd8ec6cef2d is healthy: got healthy result from https://192.168.219.137:2379
member 2de3a11ecb96f087 is healthy: got healthy result from https://192.168.219.136:2379
member a60cce210fbb41a1 is healthy: got healthy result from https://192.168.219.135:2379
cluster is healthy
參考:《每天5分鐘玩轉Kubernetes》、https://www.cnblogs.com/linuxk/