1.軟件下載
(1)下載安裝Docker(node節點)
①yum -y install yum-utils
②yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
③yum makecache
④ wget https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-18.06.0.ce-3.el7.x86_64.rpm
⑤ rpm -ivh docker-ce-18.06.0.ce-3.el7.x86_64.rpm
⑥yum -y install docker-ce-18.06.0.ce
(2)下載etcd
①wget https://github.com/coreos/etcd/releases/download/v3.2.24/etcd-v3.2.24-linux-amd64.tar.gz
(3)下載k8s
①wget https://dl.k8s.io/v1.12.1/kubernetes-server-linux-amd64.tar.gz
2.安裝規劃
192.168.42.140 master docker、etcd、kube-api、kube-controller-manager、kube-scheduler、
192.168.42.145 node1 docker、kubelet、kube-proxy
192.168.42.146 node2 docker、kubelet、kube-proxy
3. master節點安裝部署
安裝etcd
(1)安裝etcd
tar -xf etcd-v3.2.24-linux-amd64.tar.gz
cp etcd* /usr/bin/
chmod 755 /usr/bin/etcd*
(2)配置啓動文件
vi /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
[Service]
Type=notify
TimeoutStartSec=0
Restart=always
WorkingDirectory=/app/etcd/work
EnvironmentFile=-/etc/etcd/etcd.conf
ExecStart=/usr/bin/etcd
[Install]
WantedBy=multi-user.target
(3)mkdir -p /app/etcd/{work,data}
(4)vi /etc/etcd/etcd.conf
ETCD_NAME=k8setcd
ETCD_DATA_DIR="/app/etcd/data/"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.42.140:2379"
(5)systemctl daemon-reload
systemctl enable etcd.service
systemctl start etcd.service
systemctl status etcd.service
(6)檢測etcd是否安裝成功
etcdctl cluster-health
安裝kube-apiserver
tar -xf kubernetes-server-linux-amd64.tar.gz
cd kubernetes
cd server/bin/
cp kube-apiserver kube-controller-manager kube-scheduler /usr/bin/
chmod 755 /usr/bin/kube-*
#配置apiserver的啓動文件
vi /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kube-apiserver
After=etcd.service
Wants=etcd.service
[Service]
EnvironmentFile=-/etc/kubernetes/apiserver
ExecStart=/usr/bin/kube-apiserver $KUBE_API_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
#創建apiserver的配置文件
vi /etc/kubernetes/apiserver
KUBE_API_ARGS="--logtostderr=true \
--v=4 \
--etcd-servers=http://127.0.0.1:2379 \
--insecure-bind-address=0.0.0.0 \
--insecure-port=8080 \
--advertise-address=192.168.42.140 \
--allow-privileged=true \
--service-cluster-ip-range=10.10.10.0/24 \
--service-node-port-range=30000-50000"
安裝kube-controller-manager
#創建controller-manager的啓動文件
vi /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kube-controller-manager
After=kube-apiserver.service
Requires=kube-apiserver.service
[Service]
EnvironmentFile=-/etc/kubernetes/controller-manager
ExecStart=/usr/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
#創建controller-manager的配置文件
vi /etc/kubernetes/controller-manager
KUBE_CONTROLLER_MANAGER_ARGS="--logtostderr=true \
--v=4 \
--master=192.168.42.140:8080 \
--leader-elect=true \
--address=127.0.0.1"
安裝scheduler
創建scheduler的啓動文件
vi /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kube-scheduler
After=kube-apiserver.service
Requires=kube-apiserver.service
[Service]
User=root
EnvironmentFile=-/etc/kubernetes/scheduler
ExecStart=/usr/bin/kube-scheduler $KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
創建scheduler的配置文件
vi /etc/kubernetes/scheduler
KUBE_SCHEDULER_ARGS="--logtostderr=true \
--v=4 \
--master=192.168.42.140:8080 \
--leader-elect"
啓動所有master的服務
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler
4.安裝NODE節點
1.創建配置文件kubelet.kubeconfig,用於kubelet連接master apiserver的配置信息
vi /etc/kubernetes/kubelet.kubeconfig
apiVersion: v1
kind: Config
clusters:
- cluster:
server: http://192.168.42.140:8080
name: kubernetes
contexts:
- context:
cluster: kubernetes
name: default-context
current-context: default-context
2.創建kubelet的配置文件
vi /etc/kubernetes/kubelet
KUBELET_ARGS="--logtostderr=true \
--v=4 \
--address=192.168.42.145 \
--hostname-override=192.168.42.145 \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
--allow-privileged=true \
--cluster-dns=10.10.10.2 \
--cluster-domain=cluster.local \
--fail-swap-on=false \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
3.創建kubelet的啓動文件
vi /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/bin/kubelet $KUBELET_ARGS
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
4.創建配置文件kube-proxy.kubeconfig
vi /etc/kubernetes/kube-proxy.kubeconfig
apiVersion: v1
kind: Config
clusters:
- cluster:
server: http://192.168.42.140:8080
name: kubernetes
contexts:
- context:
cluster: kubernetes
name: default-context
current-context: default-context
5.創建proxy的配置文件
vi /etc/kubernetes/kube-proxy
KUBE_PROXY_ARGS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.42.145 \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig"
6.創建kube-proxy的啓動文件
vi /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=-/etc/kubernetes/kube-proxy
ExecStart=/usr/bin/kube-proxy $KUBE_PROXY_ARGS
Restart=on-failure
[Install]
WantedBy=multi-user.target
7.啓動服務
systemctl enable kubelet
systemctl start kubelet
systemctl status kubelet
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl status kube-proxy
安裝flannel
#在etcd中配置flanneld的網絡地址信息
etcdctl --endpoints="http://192.168.42.140:2379" set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'
master節點
mkdir /root/flannel
wget https://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz
tar -xzvf flannel-v0.10.0-linux-amd64.tar.gz -C flannel
scp -r /root/flannel 192.168.42.145:/root/
scp -r /root/flannel 192.168.42.146:/root/
cp flanneld mk-docker-opts.sh /usr/bin/   # 所有節點都要執行
三個節點都執行的操作
vi /etc/sysconfig/flanneld
# Flanneld configuration options
# etcd url location. Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="http://192.168.42.140:2379"
# etcd config key. This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/coreos.com/network"
# Any additional options that you want to pass
#FLANNEL_OPTIONS=""
FLANNEL_OPTIONS="--etcd-endpoints=http://192.168.42.140:2379 --ip-masq=true"
vi /usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
Before=docker.service
[Service]
Type=notify
EnvironmentFile=/etc/sysconfig/flanneld
ExecStart=/usr/bin/flanneld $FLANNEL_OPTIONS
ExecStartPost=/usr/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
node節點需要執行的操作
vi /usr/lib/systemd/system/docker.service
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP $MAINPID