Creating a StorageClass Backed by an NFS Service for Automatic PV Provisioning

Preface

StorageClass eliminates the need to create PVs by hand

Without dynamic provisioning, every time a PVC is created to claim storage, a matching PV has to be created by hand to satisfy it.

What we want instead is a mechanism that dynamically creates a persistent volume (PV) matching whatever storage the user claims in a PVC. Kubernetes implements this dynamic provisioning with StorageClass.

How it works:

The Volume Controller is the controller dedicated to handling persistent storage. One of its control loops, PersistentVolumeController, implements the binding of PVs to PVCs: it watches PVC objects through kube-apiserver. Whenever a PVC object is created, it scans all available PVs; if a suitable one exists it binds it, and if none exists it creates a new PV from the StorageClass configuration and the PVC's spec, then binds that. "Binding" a PV to a PVC simply means writing the PV object's name into the spec.volumeName field of the PVC object.
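
You can observe this binding directly. As a quick check (using the test-claim PVC created later in this article), printing the claim's spec.volumeName shows the PV it was bound to:

#kubectl get pvc test-claim -o jsonpath='{.spec.volumeName}'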

1. Set up the NFS service

Here I use the k8s master01 node as the NFS server.

[root@k8s-master01 ~]# yum install -y nfs-utils #install on every master
[root@k8s-master01 ~]# mkdir -p /data/volumes/v1
[root@k8s-master01 ~]# systemctl start rpcbind
[root@k8s-master01 ~]# systemctl status rpcbind
[root@k8s-master01 ~]# systemctl enable --now nfs-server

Created symlink from /etc/systemd/system/multi-user.target.wants/nfs-server.service to /usr/lib/systemd/system/nfs-server.service.

Create the shared directories on the master node

#mkdir -p /data/volumes/{v1,v2,v3}

Edit /etc/exports on the master node to share the directory with the 192.168.126.0/24 subnet (use the subnet of your own environment; the exports file must be configured on every master node). rw grants read-write access, no_root_squash keeps remote root users mapped to root, and no_all_squash likewise keeps ordinary users' identities unmapped.

#vim /etc/exports

/data/volumes/v1 192.168.126.0/24(rw,no_root_squash,no_all_squash)

Publish the export

#exportfs -arv

exporting 192.168.126.0/24:/data/volumes/v1

Verify

#showmount -e

Export list for k8s-master01:
/data/volumes/v1 192.168.126.0/24
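
Optionally, confirm from another node that the share can actually be mounted (this assumes nfs-utils is also installed on that node):

#mount -t nfs 192.168.126.131:/data/volumes/v1 /mnt
#umount /mnt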

2. Create the StorageClass

Create the StorageClass that PVCs will reference; it invokes the NFS provisioner to do the actual provisioning and associates the resulting PV with the PVC

#vim class.yaml

kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: nfs-storage
  annotations:
    storageclass.beta.kubernetes.io/is-default-class: 'true'
    storageclass.kubernetes.io/is-default-class: 'true'
provisioner: fuseim.pri/ifs #name of the storage provisioner; must match the provisioner deployed below
reclaimPolicy: Delete  #reclaim policy; Delete means the backing storage deletes the volume together with the PV
volumeBindingMode: Immediate #binding mode; Immediate dynamically creates the volume and binds it to the claim as soon as the PVC is created. The alternative, "WaitForFirstConsumer", delays creating and binding the volume until the claim is first used by a Pod

#kubectl apply -f  class.yaml

Note: if this StorageClass is not the default, you can mark a StorageClass as the default one (substitute your actual StorageClass name):
#kubectl patch storageclass managed-nfs-storage -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
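
Either way, verify the result; the default class is shown with a "(default)" marker in the NAME column:

#kubectl get storageclass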

3. Create the RBAC permissions

Create a ServiceAccount to control the permissions the NFS provisioner runs with in the k8s cluster.

Requests to the API server pass through an authentication → authorization → admission pipeline; RBAC (role-based access control, which associates users with permissions through roles) implements the authorization step.

 #vim rbac.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io 

#Apply the manifest written above

[root@k8s-master01 nfs-damon]# kubectl apply -f rbac.yaml
serviceaccount/nfs-client-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
role.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
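
As an optional sanity check, ask the API server whether the ServiceAccount received the expected permissions; the answer should be yes:

#kubectl auth can-i create persistentvolumes --as=system:serviceaccount:default:nfs-client-provisioner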

4. Create the NFS provisioner Deployment

Create the NFS provisioner. It has two jobs: it creates mount points (volumes) under the NFS shared directory, and it creates PVs and associates each PV with its NFS mount

#vim deployment.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          #image: quay.io/external_storage/nfs-client-provisioner:latest
          #image: gcr.io/k8s-staging-sig-storage/nfs-subdir-external-provisioner:latest
          image: gmoney23/nfs-client-provisioner:latest #the nfs-client image must be compatible with your k8s version, otherwise volumes will fail to bind
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs  #must match the provisioner name declared in the StorageClass
            - name: NFS_SERVER
              value: 192.168.126.131  #IP address of the NFS server
            - name: NFS_PATH
              value: /data/volumes/v1  #shared directory exported by the NFS server
      volumes:
        - name: nfs-client-root  #volume name; must match the name used in volumeMounts above
          nfs:
            server: 192.168.126.131 #IP address of the NFS server
            path: /data/volumes/v1 #shared directory exported by the NFS server

[root@k8s-master01 nfs-damon]# kubectl apply -f deployment.yaml
deployment.apps/nfs-client-provisioner created
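
Confirm the provisioner Pod is Running (the hash suffix of the Pod name will differ in your cluster):

#kubectl get pods -l app=nfs-client-provisioner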

 

Check the container log to confirm the NFS provisioner plugin started correctly

#kubectl logs nfs-client-provisioner-f755d8ffd-d6swv

5. Create the PVC

#vim test-claim.yaml

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  namespace: default
spec:
  storageClassName: nfs-storage
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 256Mi

#kubectl apply -f test-claim.yaml

Check whether the PVC has been bound through the StorageClass

#kubectl get pvc 
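
If dynamic provisioning worked, the claim shows STATUS Bound together with a generated pvc-... volume name; the output below is illustrative:

NAME         STATUS   VOLUME    CAPACITY   ACCESS MODES   STORAGECLASS   AGE
test-claim   Bound    pvc-...   256Mi      RWX            nfs-storage    5s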

6. Create a test workload

#vim statefulset-nfs.yaml

apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nfs-web # must match the StatefulSet Pod labels below, or the headless Service selects no Pods
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nfs-web
spec:
  serviceName: "nginx"
  replicas: 3
  selector:
    matchLabels:
      app: nfs-web # has to match .spec.template.metadata.labels
  template:
    metadata:
      labels:
        app: nfs-web
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
      annotations:
        volume.beta.kubernetes.io/storage-class: nfs-storage
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 10Mi

 

#kubectl apply -f statefulset-nfs.yaml
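
The StatefulSet creates its Pods in order, so you can watch the three replicas (nfs-web-0 through nfs-web-2) come up one by one:

#kubectl get pods -l app=nfs-web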

 

 

Verify that the PVs were created automatically

Check the PVCs

Check the PVs

Check the data on the NFS server
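
A minimal set of checks from the master node (the generated pvc-... names will differ in your cluster):

#kubectl get pvc
#kubectl get pv
#ls /data/volumes/v1

Each dynamically provisioned volume typically appears under the NFS share as a directory named after its namespace, PVC, and PV (e.g. default-www-nfs-web-0-pvc-...); a file written to /usr/share/nginx/html inside a Pod should appear in the matching directory on the NFS server.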
