Installing a highly available k8s cluster on CentOS 7.8

Installation preparation

  • Virtual machines: 3 × CentOS 7.8

In this walkthrough, three VMs with CentOS 7.8 already installed are used to build a highly available k8s cluster. On each VM a dockerized haproxy provides load balancing and a dockerized keepalived provides the virtual IP. The Docker version is 19.03.5, the k8s version is 1.18.15, and the cluster is installed with kubeadm.

The detailed installation plan is as follows:

hostname       ip              os          kernel                    cpu   memory   disk   role
k8s-master01   192.168.5.220   centos7.8   3.10.0-1127.el7.x86_64    3c    8g       40g    master
k8s-master02   192.168.5.221   centos7.8   3.10.0-1127.el7.x86_64    3c    8g       40g    master
k8s-master03   192.168.5.222   centos7.8   3.10.0-1127.el7.x86_64    3c    8g       40g    master

CentOS 7.8 system tuning

  • ulimit tuning

echo -ne "
* soft nofile 65536
* hard nofile 65536
" >>/etc/security/limits.conf
  • Time zone

cp -f /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
  • Configure time synchronization

Use chrony to synchronize time. It is installed by default on CentOS 7; here we change the time sources so that all nodes synchronize with public NTP servers.

# Comment out the default ntp servers
sed -i 's/^server/#&/' /etc/chrony.conf
# Point to upstream public NTP servers
cat >> /etc/chrony.conf << EOF
server 0.asia.pool.ntp.org iburst
server 1.asia.pool.ntp.org iburst
server 2.asia.pool.ntp.org iburst
server 3.asia.pool.ntp.org iburst
EOF

# Restart chronyd and enable it at boot
systemctl enable chronyd && systemctl restart chronyd
  • Chinese locale

localectl set-locale LANG=zh_CN.utf8
  • Disable the firewall

systemctl stop firewalld && systemctl disable firewalld
  • Disable SELinux

# Check SELinux status
getenforce
# Disable SELinux temporarily
setenforce 0
# Disable permanently (requires a reboot)
sed -i 's/^ *SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
  • Disable swap

# Disable temporarily
swapoff -a

# Disable permanently (to survive reboots, also comment out the swap entry in /etc/fstab)
sed -i.bak '/swap/s/^/#/' /etc/fstab
  • Kernel parameter tuning

cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
sysctl --system
  • Enable IPVS support

yum -y install ipvsadm  ipset

# Load the modules now
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4

# Load the modules on boot
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4  
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

Install Docker and Kubernetes

In this walkthrough we install Docker 19.03.5 and Kubernetes 1.18.15.

  • Install required packages

yum install -y yum-utils \
device-mapper-persistent-data \
lvm2
  • Use the Aliyun mirror repository

yum-config-manager \
  --add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
  • Install Docker

yum install docker-ce-19.03.5 docker-ce-cli-19.03.5 containerd.io
  • Install Kubernetes

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Refresh the yum cache
yum clean all
yum -y makecache

# List available versions
yum list kubelet --showduplicates | sort -r


# The latest version at the time of writing is 1.20.2-0; we install 1.18.15
# Install kubelet, kubeadm and kubectl
yum install -y kubelet-1.18.15 kubeadm-1.18.15 kubectl-1.18.15

# kubectl command completion
echo "source <(kubectl completion bash)" >> ~/.bash_profile
source ~/.bash_profile
  • Change the cgroup driver

Edit daemon.json and add "exec-opts": ["native.cgroupdriver=systemd"].
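A minimal sketch of /etc/docker/daemon.json with this setting (assuming the file does not exist yet; if it already does, merge the key into the existing JSON instead):

cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF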

  • Enable services at boot

systemctl daemon-reload && systemctl enable docker && systemctl enable kubelet
  • Download the Kubernetes images

Pulling the k8s images directly usually fails because of network restrictions. A workaround is to first pull the corresponding images from Aliyun and, once downloaded, retag them to the k8s.gcr.io names.

kubeadm config images list --kubernetes-version=v1.18.15
W0126 21:22:00.485045   24826 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
k8s.gcr.io/kube-apiserver:v1.18.15
k8s.gcr.io/kube-controller-manager:v1.18.15
k8s.gcr.io/kube-scheduler:v1.18.15
k8s.gcr.io/kube-proxy:v1.18.15
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.3-0
k8s.gcr.io/coredns:1.6.7

more image.sh
#!/bin/bash
url=registry.cn-hangzhou.aliyuncs.com/google_containers
version=$1
images=(`kubeadm config images list --kubernetes-version=$version|awk -F '/' '{print $2}'`)
for imagename in ${images[@]} ; do
docker pull $url/$imagename
docker tag $url/$imagename k8s.gcr.io/$imagename
docker rmi -f $url/$imagename
done

sh image.sh v1.18.15

Install and start haproxy and keepalived

  • Create start-haproxy.sh

Change the Kubernetes master node IP addresses to the values actually used by your cluster (the master port defaults to 6443 and does not need to change).

cat > /opt/lb/start-haproxy.sh << "EOF"
#!/bin/bash
MasterIP1=192.168.5.220
MasterIP2=192.168.5.221
MasterIP3=192.168.5.222
MasterPort=6443

docker run -d --restart=always --name HAProxy-K8S -p 6444:6444 \
        -e MasterIP1=$MasterIP1 \
        -e MasterIP2=$MasterIP2 \
        -e MasterIP3=$MasterIP3 \
        -e MasterPort=$MasterPort \
        wise2c/haproxy-k8s
EOF
  • Create start-keepalived.sh

Change the virtual IP (VIRTUAL_IP), virtual NIC device name (INTERFACE), subnet mask bits (NETMASK_BIT), router ID (RID) and virtual router ID (VRID) to the values used by your cluster. (CHECK_PORT usually stays at 6444; it is the port exposed by HAProxy, which forwards internally to port 6443 of the Kubernetes API servers.)

cat > /opt/lb/start-keepalived.sh << "EOF"
#!/bin/bash
VIRTUAL_IP=192.168.5.230  # The VIP must be an unused IP; ping it first to make sure nobody else is using it
INTERFACE=ens32 # ens32 is the actual NIC name; check it with "ip a"
NETMASK_BIT=24
CHECK_PORT=6444
RID=10
VRID=160
MCAST_GROUP=224.0.0.18

docker run -itd --restart=always --name=Keepalived-K8S \
        --net=host --cap-add=NET_ADMIN \
        -e VIRTUAL_IP=$VIRTUAL_IP \
        -e INTERFACE=$INTERFACE \
        -e CHECK_PORT=$CHECK_PORT \
        -e RID=$RID \
        -e VRID=$VRID \
        -e NETMASK_BIT=$NETMASK_BIT \
        -e MCAST_GROUP=$MCAST_GROUP \
        wise2c/keepalived-k8s
EOF
  • Start haproxy and keepalived

sh /opt/lb/start-keepalived.sh && sh /opt/lb/start-haproxy.sh

The steps above must be executed on every VM. Make sure that Docker and Kubernetes are installed correctly on each one, that the k8s images have been downloaded, and that haproxy and keepalived are running. A quick per-node sanity check is sketched below.
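A possible per-node check (a sketch; the container names come from the scripts above):

# Docker and kubelet versions
docker version --format '{{.Server.Version}}'
kubelet --version
# k8s images retagged to k8s.gcr.io
docker images | grep k8s.gcr.io
# haproxy and keepalived containers are up
docker ps | grep -E 'HAProxy-K8S|Keepalived-K8S'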

Set the hostnames

  • Set the hostname

# k8s-master01
hostnamectl set-hostname k8s-master01
# k8s-master02
hostnamectl set-hostname k8s-master02
# k8s-master03
hostnamectl set-hostname k8s-master03
  • Update /etc/hosts

cat >> /etc/hosts << EOF
192.168.5.220 k8s-master01
192.168.5.221 k8s-master02
192.168.5.222 k8s-master03
EOF

Reboot the system

# reboot

Build the k8s cluster

Initialize the master node (run only on k8s-master01)

  • Create the init configuration file; it can be generated with the following command

kubeadm config print init-defaults > kubeadm-config.yaml
vi kubeadm-config.yaml
  • Edit kubeadm-config.yaml

apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # IP of k8s-master01: 192.168.5.220
  advertiseAddress: 192.168.5.220
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
# Keepalived VIP and HAProxy port
controlPlaneEndpoint: "192.168.5.230:6444"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
# Kubernetes version
kubernetesVersion: v1.18.15
networking:
  dnsDomain: cluster.local
  # Pod subnet; must match the CIDR used in the Calico manifest (see below)
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
# Enable IPVS mode
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
  • kubeadm init

# kubeadm init
kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log

# Configure kubectl
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

# Verify
kubectl get node
NAME           STATUS     ROLES    AGE     VERSION
k8s-master01   NotReady   master   7m11s   v1.18.15
# Note: the node is NotReady because no network plugin has been installed yet
kubectl get cs
NAME                 STATUS      MESSAGE                                                                                     ERROR
controller-manager   Unhealthy   Get http://127.0.0.1:10252/healthz: dial tcp 127.0.0.1:10252: connect: connection refused   
scheduler            Unhealthy   Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused   
etcd-0               Healthy     {"health":"true"}
# Note: controller-manager and scheduler report Unhealthy; the fix is below
  • Fix the Unhealthy status of controller-manager and scheduler

Open ports 10251 and 10252 of the scheduler and controller-manager.

Edit the following files:

/etc/kubernetes/manifests/kube-scheduler.yaml: comment out the line containing port=0

/etc/kubernetes/manifests/kube-controller-manager.yaml: comment out the line containing port=0
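For example, the lines can be commented out with sed (a sketch; check the manifests afterwards):

sed -i '/--port=0/s/^/#/' /etc/kubernetes/manifests/kube-scheduler.yaml
sed -i '/--port=0/s/^/#/' /etc/kubernetes/manifests/kube-controller-manager.yaml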

# Restart kubelet
systemctl restart kubelet
kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
# controller-manager and scheduler now report Healthy
  • Install the Calico network plugin

# Download
wget https://docs.projectcalico.org/manifests/calico.yaml
# Replace the CIDR in the Calico manifest with the networking.podSubnet value from kubeadm-config (10.244.0.0)
sed -i 's/192.168.0.0/10.244.0.0/g' calico.yaml
# Install
kubectl apply -f calico.yaml
kubectl get node
NAME           STATUS   ROLES    AGE   VERSION
k8s-master01   Ready    master   59m   v1.18.15
# The node is now Ready
  • Join the other master nodes (run on k8s-master02 and k8s-master03)

# Example command, taken from the kubeadm init output
kubeadm join 192.168.5.230:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:32102a6df7b4dd044ad461d4d84d165ed3f43c4ad06cc6bbe1b38e47d1d7ee7c \
    --control-plane --certificate-key 67a5cdc70d254a96e2e41c2b0e54b5660b969c600c60a516ff357fa6a96e6a1d
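The token and certificate key come from the kubeadm init output (kubeadm-init.log). If they have expired, new ones can be generated on k8s-master01, for example:

kubeadm token create --print-join-command
kubeadm init phase upload-certs --upload-certs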
  • Check the cluster status:

[root@k8s-master01 k8s]# kubectl get nodes
NAME           STATUS   ROLES    AGE    VERSION
k8s-master01   Ready    master   131m   v1.18.15
k8s-master02   Ready    master   69m    v1.18.15
k8s-master03   Ready    master   69m    v1.18.15
  • Allow pod scheduling on the masters

By default, pods are not scheduled onto master nodes. The following command removes the master taint so that the masters can also run pods:

[root@k8s-master01 k8s]# kubectl taint node k8s-master01 k8s-master02 k8s-master03 node-role.kubernetes.io/master-
node/k8s-master01 untainted
node/k8s-master02 untainted
node/k8s-master03 untainted
  • Lift the default NodePort range restriction

This step must be performed on k8s-master01, k8s-master02 and k8s-master03.

By default k8s only exposes NodePorts in the range 30000-32767. The range can be adjusted by editing /etc/kubernetes/manifests/kube-apiserver.yaml: find the line containing --service-cluster-ip-range and add the following line below it,

- --service-node-port-range=10-65535
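The relevant part of kube-apiserver.yaml then looks roughly like this (a sketch; the surrounding flags vary by cluster):

    - --service-cluster-ip-range=10.96.0.0/12
    - --service-node-port-range=10-65535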

Then restart kubelet.

systemctl daemon-reload && systemctl restart kubelet

Install helm 3

cd /opt/k8s
wget https://get.helm.sh/helm-v3.5.0-linux-amd64.tar.gz
# Extract and copy the binary to /usr/bin
tar -zxvf helm-v3.5.0-linux-amd64.tar.gz
cp linux-amd64/helm /usr/bin
# Print the version
helm version
version.BuildInfo{Version:"v3.5.0", GitCommit:"32c22239423b3b4ba6706d450bd044baffdcf9e6", GitTreeState:"clean", GoVersion:"go1.15.6"}

Install nginx-ingress

  • Download the ingress chart

helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
"ingress-nginx" has been added to your repositories
helm pull ingress-nginx/ingress-nginx
tar -zxf ingress-nginx-3.22.0.tgz && cd ingress-nginx
  • Edit values.yaml

# Change the controller image repository
repository: pollyduan/ingress-nginx-controller
# Comment out the digest
# digest: sha256:9bba603b99bf25f6d117cf1235b6598c16033ad027b143c90fa5b3cc583c5713

# dnsPolicy
dnsPolicy: ClusterFirstWithHostNet

# Use hostNetwork, i.e. bind ports 80 and 443 on the host
hostNetwork: true

# Use a DaemonSet so that ingress is deployed on selected nodes
kind: DaemonSet

# Node selection: label the target nodes with ingress=true
  nodeSelector:
    kubernetes.io/os: linux
    ingress: "true"

# Change the service type to ClusterIP. In a cloud environment with a load balancer, LoadBalancer can be used instead
type: ClusterIP

# Change the kube-webhook-certgen image
jettech/kube-webhook-certgen
  • Install ingress

# Label the target nodes
kubectl label node k8s-master01 ingress=true
kubectl label node k8s-master02 ingress=true
kubectl label node k8s-master03 ingress=true

# Install ingress
# helm install ingress-nginx ./
NAME: ingress-nginx
LAST DEPLOYED: Thu Jan 28 23:00:09 2021
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
The ingress-nginx controller has been installed.
Get the application URL by running these commands:
  export POD_NAME=$(kubectl --namespace default get pods -o jsonpath="{.items[0].metadata.name}" -l "app=ingress-nginx,component=controller,release=ingress-nginx")
  kubectl --namespace default port-forward $POD_NAME 8080:80
  echo "Visit http://127.0.0.1:8080 to access your application."

An example Ingress that makes use of the controller:

  apiVersion: networking.k8s.io/v1beta1
  kind: Ingress
  metadata:
    annotations:
      kubernetes.io/ingress.class: nginx
    name: example
    namespace: foo
  spec:
    rules:
      - host: www.example.com
        http:
          paths:
            - backend:
                serviceName: exampleService
                servicePort: 80
              path: /
    # This section is only required if TLS is to be enabled for the Ingress
    tls:
        - hosts:
            - www.example.com
          secretName: example-tls

If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:

  apiVersion: v1
  kind: Secret
  metadata:
    name: example-tls
    namespace: foo
  data:
    tls.crt: <base64 encoded cert>
    tls.key: <base64 encoded key>
  type: kubernetes.io/tls
  • Verify that nginx-ingress works.

kubectl get all
NAME                                 READY   STATUS    RESTARTS   AGE
pod/ingress-nginx-controller-4ccfz   1/1     Running   0          10h
pod/ingress-nginx-controller-fjqgw   1/1     Running   0          10h
pod/ingress-nginx-controller-xqcx7   1/1     Running   0          10h

NAME                                         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
service/ingress-nginx-controller             ClusterIP   10.105.250.15   <none>        80/TCP,443/TCP   10h
service/ingress-nginx-controller-admission   ClusterIP   10.101.89.154   <none>        443/TCP          10h
service/kubernetes                           ClusterIP   10.96.0.1       <none>        443/TCP          14h

NAME                                      DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR                         AGE
daemonset.apps/ingress-nginx-controller   3         3         3       3            3           ingress=true,kubernetes.io/os=linux   10h

Open http://192.168.5.230/ in a browser. Since no Ingress rule matches yet, the nginx-ingress default backend returns a 404 page, which shows the controller is serving traffic.
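The same check can be done from the command line, for example:

curl -I http://192.168.5.230/
# expect an HTTP 404 response from the ingress-nginx default backend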

 

Install kubernetes-dashboard

  • Download the kubernetes-dashboard chart

helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
"kubernetes-dashboard" has been added to your repositories
helm pull kubernetes-dashboard/kubernetes-dashboard
tar -zxf kubernetes-dashboard-3.22.0.tgz && cd kubernetes-dashboard
  • Edit values.yaml

ingress:
  # Enable ingress
  enabled: true

  # Access via this hostname
  hosts:
  - kdb.k8s.com
  • Install kubernetes-dashboard

# Install kubernetes-dashboard
helm install kubernetes-dashboard ./ -n kube-system
NAME: kubernetes-dashboard
LAST DEPLOYED: Fri Jan 29 20:21:39 2021
NAMESPACE: kube-system
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
*********************************************************************************
*** PLEASE BE PATIENT: kubernetes-dashboard may take a few minutes to install ***
*********************************************************************************
From outside the cluster, the server URL(s) are:
     https://kdb.k8s.com
  • Verify that kubernetes-dashboard works

kubectl get po -n kube-system |grep kubernetes-dashboard
kubernetes-dashboard-69574fd85c-ngfff      1/1     Running   0          9m33s

kubectl get svc -n kube-system |grep kubernetes-dashboard
kubernetes-dashboard   ClusterIP   10.110.87.163   <none>        443/TCP                  9m40s

kubectl get ingress -n kube-system |grep kubernetes-dashboard
kubernetes-dashboard   <none>   kdb.k8s.com   10.107.1.113   80      9m22s
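For kdb.k8s.com to resolve from your workstation, point it at the keepalived VIP, e.g. with a hosts entry (an assumption for this lab setup; use proper DNS in a real environment):

echo "192.168.5.230 kdb.k8s.com" >> /etc/hosts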

Open https://kdb.k8s.com in a browser. A security warning page is shown.

Click "Advanced"; the page explains the reason for the warning and offers a button to accept the risk and continue.

Click "Accept the risk and continue" to reach the login page.

Get the token and use it to log in

# Find the secret that contains the token
kubectl get secret -n kube-system | grep kubernetes-dashboard-token
kubernetes-dashboard-token-9llx4                 kubernetes.io/service-account-token   3      25m
# Show the corresponding token
kubectl describe secret -n kube-system kubernetes-dashboard-token-9llx4
Name:         kubernetes-dashboard-token-9llx4
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: kubernetes-dashboard
              kubernetes.io/service-account.uid: eb2a13d1-e3cb-47c3-9376-4e7ffba6d5e0

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1025 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6Ilh1cE93UXFVOGR1ZUs3bjJEVGtZZDJxaUxmVXlyMkYtaEVONUZsN3FqQkUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi05bGx4NCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImViMmExM2QxLWUzY2ItNDdjMy05Mzc2LTRlN2ZmYmE2ZDVlMCIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.mx7-LFVA0dn0_hRLxXSstuKTNyTc3klUzdOd3uE5hrEgDjvB7YNC7Y7gP9hBeRU3a9Kh0eG6Z1OfvNDgMUXNxv4Hqi0bqghE9Th4PPGtitDiXlVhtVQSA3eUgbQjNcuYWere30SizEckPRVm-Vpu-31-hEtfClUlnhlbPtEc-lDaOoRncqUpi07uZfT0MCoGzADxUaAJU6v5AUs3iP_0xl1cVqDJtXk-ysabG-KG1y6c8iJiPRUgwi8Uj499dOHjcDcZlJxE3dLgoPvKJ-YWrPUZH_alBIUgj-o2XdgPiY4od3xFnpLDpbmrmyYbDmqT_zyzvoNVfp2WtjzoZi1fwg

On the login page choose the Token option, paste the token and click Sign in to enter the dashboard.

The dashboard shows "There is nothing to display here", because the dashboard's default serviceaccount has no permissions; we need to grant it access.

Create dashboard-admin.yaml with the following content:

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kube-system

Grant the dashboard serviceaccount admin rights:

kubectl apply -f dashboard-admin.yaml
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard configured

Refresh the dashboard; the cluster resources can now be browsed.

Install local-path

  • Download local-path-provisioner

wget https://github.com/rancher/local-path-provisioner/archive/v0.0.19.tar.gz
tar -zxf v0.0.19.tar.gz && cd local-path-provisioner-0.0.19
  • Install local-path-provisioner

kubectl apply -f deploy/local-path-storage.yaml
kubectl get po -n local-path-storage
NAME                                      READY   STATUS    RESTARTS   AGE
local-path-provisioner-5b577f66ff-q2chs   1/1     Running   0          24m
[root@k8s-master01 deploy]# kubectl get storageclass
NAME         PROVISIONER             RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
local-path   rancher.io/local-path   Delete          WaitForFirstConsumer   false                  24m
  • Make local-path the default storage class

kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
storageclass.storage.k8s.io/local-path patched
kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.beta.kubernetes.io/is-default-class":"true"}}}'
storageclass.storage.k8s.io/local-path patched
[root@k8s-master01 deploy]# kubectl get storageclass
NAME                   PROVISIONER             RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
local-path (default)   rancher.io/local-path   Delete          WaitForFirstConsumer   false                  27m
  • Verify that local-path works

kubectl apply -f examples/pod.yaml,examples/pvc.yaml
pod/volume-test created
persistentvolumeclaim/local-path-pvc created

kubectl get pvc,pv
NAME                                   STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
persistentvolumeclaim/local-path-pvc   Bound    pvc-74935037-f8c7-4f1a-ab2d-f7670f8ff540   2Gi        RWO            local-path     58s

NAME                                                        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                    STORAGECLASS   REASON   AGE
persistentvolume/pvc-74935037-f8c7-4f1a-ab2d-f7670f8ff540   2Gi        RWO            Delete           Bound    default/local-path-pvc   local-path              37s
