k8s add-on storage: glusterfs

2018/2/1

Deploy the glusterfs cluster

  • Initialize the glusterfs cluster
  • Create a glusterfs volume to hold data
Unless otherwise noted, all commands are run on node 67.

Cluster nodes

  • 10.10.9.67
  • 10.10.9.68
  • 10.10.9.69
Initialize the glusterfs cluster
~]# yum install centos-release-gluster310 -y
~]# yum install glusterfs-server -y
~]# yum install xfsprogs -y

##### Prepare the disk:
~]# umount /data1
~]# mkfs.xfs -i size=512 -f /dev/vdc1
~]# mount /dev/vdc1 /data1
~]# vim /etc/fstab
### glusterfs
/dev/vdc1 /data1                       xfs    defaults        0 0

##### Start the service:
~]# systemctl start glusterd && systemctl enable glusterd
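
##### Note: the package installation, disk preparation and glusterd service steps above must also be carried out on 10.10.9.68 and 10.10.9.69; only the peer probe below is issued from a single node. A minimal sketch that repeats those steps remotely (assumes passwordless root SSH to the peers; adjust the host list and disk steps as needed):
for h in 10.10.9.68 10.10.9.69; do
    # install the same packages and start glusterd on each peer
    ssh root@${h} 'yum install -y centos-release-gluster310 glusterfs-server xfsprogs && \
        systemctl enable glusterd && systemctl start glusterd'
done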

##### Build the cluster:
~]# gluster peer probe 10.10.9.68
~]# gluster peer probe 10.10.9.69
~]# gluster peer status
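
##### Each peer should report "State: Peer in Cluster (Connected)"; an optional, more compact view of the same information:
~]# gluster pool list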

##### Run on all nodes:
~]# mkdir /data1/glusterfs_data/gv0 -p && \
echo '### glusterfs data for k8s-dev only!' >/data1/glusterfs_data/README.md
Create a glusterfs volume to hold data
~]# gluster volume create gv0 replica 3 transport tcp \
10.10.9.67:/data1/glusterfs_data/gv0 \
10.10.9.68:/data1/glusterfs_data/gv0 \
10.10.9.69:/data1/glusterfs_data/gv0
~]# gluster volume start gv0
~]# gluster volume info gv0

##### Tune volume options, especially the quorum-related ones, to help prevent split-brain:
gluster volume set gv0 diagnostics.count-fop-hits on
gluster volume set gv0 diagnostics.latency-measurement on
gluster volume set gv0 cluster.server-quorum-type server
gluster volume set gv0 cluster.quorum-type auto
gluster volume set gv0 network.remote-dio enable
gluster volume set gv0 cluster.eager-lock enable
gluster volume set gv0 performance.stat-prefetch off
gluster volume set gv0 performance.io-cache off
gluster volume set gv0 performance.read-ahead off
gluster volume set gv0 performance.quick-read off
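
##### Optional sanity checks: options that were set appear under "Options Reconfigured" in the volume info, and the volume status shows whether every brick process is online:
gluster volume info gv0
gluster volume status gv0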

##### On a client, set up the glusterfs environment and do a test mount:
~]# yum install centos-release-gluster310 -y
~]# yum install glusterfs glusterfs-fuse -y
~]# mkdir /mnt/test
~]# mount -t glusterfs -o backup-volfile-servers=10.10.9.68:10.10.9.69 10.10.9.67:/gv0 /mnt/test
~]# df -h |grep test
10.10.9.67:/gv0  1.0T   33M  1.0T   1% /mnt/test
~]# umount /mnt/test
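
##### If the client mount should survive reboots, a line like the following can be added to /etc/fstab (a sketch using the same mount point and servers as above; _netdev delays the mount until the network is up):
~]# cat >>/etc/fstab <<'EOF'
10.10.9.67:/gv0  /mnt/test  glusterfs  defaults,_netdev,backup-volfile-servers=10.10.9.68:10.10.9.69  0 0
EOF
~]# mount /mnt/test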

Prepare k8s YAML manifests to use the glusterfs cluster

  • Create a data volume
  • Define Endpoints and a Service to provide access to the external service (the glusterfs cluster)
  • Define a PV and a PVC to consume the volume
  • Test: mount the volume in a Deployment by referencing the PVC
  • Test: multiple replicas reading and writing the same log file
Create a data volume
[root@tvm-00 glusterfs]# ls
10.endpoints  20.pv  30.pvc  bin  deploy_test

[root@tvm-00 glusterfs]# cat bin/create-gv1-default.sh
#!/bin/bash
#

gluster volume create gv1-default replica 3 transport tcp \
10.10.9.67:/data1/glusterfs_data/gv1-default \
10.10.9.68:/data1/glusterfs_data/gv1-default \
10.10.9.69:/data1/glusterfs_data/gv1-default
gluster volume start gv1-default
gluster volume info gv1-default
Define Endpoints and a Service to provide access to the external service (the glusterfs cluster)
[root@tvm-00 glusterfs]# cat 10.endpoints/glusterfs-r3/default.yaml
apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-r3
  namespace: default
subsets:
- addresses:
  - ip: 10.10.9.67
  - ip: 10.10.9.68
  - ip: 10.10.9.69
  ports:
  - port: 49152
    protocol: TCP

---
apiVersion: v1
kind: Service
metadata:
  name: glusterfs-r3
  namespace: default
spec:
  ports:
  - port: 49152
    protocol: TCP
    targetPort: 49152
  sessionAffinity: None
  type: ClusterIP

[root@tvm-00 glusterfs]# kubectl apply -f 10.endpoints/glusterfs-r3/default.yaml

##### Check the status:
[root@tvm-00 glusterfs]# kubectl get ep glusterfs-r3
NAME           ENDPOINTS                                            AGE
glusterfs-r3   10.10.9.67:49152,10.10.9.68:49152,10.10.9.69:49152   4h
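
##### The Service above is defined without a selector, so Kubernetes does not manage these Endpoints automatically; the manually listed addresses are what the glusterfs volume plugin resolves. The companion Service can be checked as well (optional):
[root@tvm-00 glusterfs]# kubectl get svc glusterfs-r3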
Define a PV and a PVC to consume the volume
##### Note: a PV is a cluster-scoped resource (it has no namespace), yet it binds to PVCs that live inside namespaces; this feels a little odd, and it is not obvious why it was designed this way.
[root@tvm-00 glusterfs]# cat 20.pv/glusterfs-r3/gv1-default.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: glusterfs-r3-gv1-default
  labels:
    type: glusterfs
spec:
  storageClassName: gv1-default
  capacity:
    storage: 100Gi
  accessModes:
    - ReadWriteMany
  glusterfs:
    endpoints: "glusterfs-r3"
    path: "gv1-default"
    readOnly: false

[root@tvm-00 glusterfs]# cat 30.pvc/glusterfs-r3/gv1-default.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: glusterfs-r3-gv1-default
  namespace: default
spec:
  storageClassName: gv1-default
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 100Gi

[root@tvm-00 glusterfs]# kubectl apply -f 20.pv/glusterfs-r3/gv1-default.yaml
[root@tvm-00 glusterfs]# kubectl apply -f 30.pvc/glusterfs-r3/gv1-default.yaml

##### Check the status:
[root@tvm-00 glusterfs]# kubectl get pv,pvc
NAME                               CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS    CLAIM                                        STORAGECLASS       REASON    AGE
pv/glusterfs-r3-gv1-default        100Gi      RWX            Retain           Bound     default/glusterfs-r3-gv1-default             gv1-default                  46m

NAME                           STATUS    VOLUME                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
pvc/glusterfs-r3-gv1-default   Bound     glusterfs-r3-gv1-default   100Gi      RWX            gv1-default    44m
Test: mount the volume in a Deployment by referencing the PVC
[root@tvm-00 glusterfs]# cat deploy_test/gv1-default-t1.yaml
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: gv1-default-t1
  namespace: default
  labels:
    app.name: gv1-default-t1
spec:
  replicas: 1
  selector:
    matchLabels:
      app.name: gv1-default-t1
  template:
    metadata:
      labels:
        app.name: gv1-default-t1
    spec:
      containers:
      - name: nginx-test
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - name: glusterfsvol
          mountPath: "/mnt/glusterfsvol"
      volumes:
      - name: glusterfsvol
        persistentVolumeClaim:
          claimName: glusterfs-r3-gv1-default

[root@tvm-00 glusterfs]# kubectl apply -f deploy_test/gv1-default-t1.yaml --record

##### Check the status:
[root@tvm-00 glusterfs]# kubectl get deploy,po
NAME                    DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/gv1-default-t1   1         1         1            1           33s

NAME                                 READY     STATUS    RESTARTS   AGE
po/gv1-default-t1-848455b8b6-fldkb   1/1       Running   0          33s

##### Verify the mounted volume:
[root@tvm-00 glusterfs]# kubectl exec gv1-default-t1-848455b8b6-fldkb -- mount | grep gluster
10.10.9.67:gv1-default on /mnt/glusterfsvol type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072)

[root@tvm-00 glusterfs]# kubectl exec gv1-default-t1-848455b8b6-fldkb -- df -h | grep glusterfsvol
10.10.9.67:gv1-default  1.0T   33M  1.0T   1% /mnt/glusterfsvol

[root@tvm-00 glusterfs]# kubectl exec gv1-default-t1-848455b8b6-fldkb -- ls -l /mnt/glusterfsvol
total 0

##### Read/write test:
[root@tvm-00 glusterfs]# kubectl exec gv1-default-t1-848455b8b6-fldkb -- sh -c 'echo " [$(date)] writeFrom: $(hostname)" >>/mnt/glusterfsvol/README.md'

[root@tvm-00 glusterfs]# kubectl exec gv1-default-t1-848455b8b6-fldkb -- cat /mnt/glusterfsvol/README.md
 [Tue Jan 16 09:50:01 UTC 2018] writeFrom: gv1-default-t1-848455b8b6-fldkb
 [Tue Jan 16 09:50:40 UTC 2018] writeFrom: gv1-default-t1-848455b8b6-fldkb

##### Works as expected.
Test: multiple replicas reading and writing the same log file
##### The tests so far used a single pod replica; now scale up to 3 replicas (edit the manifest or use kubectl scale) and check the read/write behaviour again:
  replicas: 1
  ->
  replicas: 3
[root@tvm-00 glusterfs]# kubectl scale --replicas=3 -f deploy_test/gv1-default-t1.yaml

##### Check the status:
[root@tvm-00 glusterfs]# kubectl get deploy,po
NAME                    DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/gv1-default-t1   3         3         3            3           20m

NAME                                 READY     STATUS    RESTARTS   AGE
po/gv1-default-t1-848455b8b6-fldkb   1/1       Running   0          20m
po/gv1-default-t1-848455b8b6-gzd4s   1/1       Running   0          1m
po/gv1-default-t1-848455b8b6-t6bsk   1/1       Running   0          1m

##### Write one record to the same file from each of the 3 pods:
[root@tvm-00 glusterfs]# kubectl exec gv1-default-t1-848455b8b6-fldkb -- sh -c 'echo " [$(date)] writeFrom: $(hostname)" >>/mnt/glusterfsvol/README.md'
[root@tvm-00 glusterfs]# kubectl exec gv1-default-t1-848455b8b6-gzd4s -- sh -c 'echo " [$(date)] writeFrom: $(hostname)" >>/mnt/glusterfsvol/README.md'
[root@tvm-00 glusterfs]# kubectl exec gv1-default-t1-848455b8b6-t6bsk -- sh -c 'echo " [$(date)] writeFrom: $(hostname)" >>/mnt/glusterfsvol/README.md'

##### Verify:
[root@tvm-00 glusterfs]# kubectl exec gv1-default-t1-848455b8b6-fldkb -- cat /mnt/glusterfsvol/README.md
 [Tue Jan 16 09:50:01 UTC 2018] writeFrom: gv1-default-t1-848455b8b6-fldkb
 [Tue Jan 16 09:50:40 UTC 2018] writeFrom: gv1-default-t1-848455b8b6-fldkb
 [Tue Jan 16 09:55:53 UTC 2018] writeFrom: gv1-default-t1-848455b8b6-fldkb
 [Tue Jan 16 09:55:59 UTC 2018] writeFrom: gv1-default-t1-848455b8b6-gzd4s
 [Tue Jan 16 09:56:04 UTC 2018] writeFrom: gv1-default-t1-848455b8b6-t6bsk

##### Works as expected, but note one detail: how should logging be done so that a given record can be traced back to the pod instance that wrote it?
##### The answer: when writing a log entry, the application should include an identifier such as the hostname.
[root@tvm-00 glusterfs]# kubectl exec gv1-default-t1-848455b8b6-t6bsk -- hostname
gv1-default-t1-848455b8b6-t6bsk
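
##### A minimal sketch of such a logging helper in shell (the log path /mnt/glusterfsvol/app.log is only an example): every record carries a UTC timestamp plus the pod's hostname, so entries in the shared file stay attributable:
# hypothetical helper: prefix each record with the time and the pod hostname
log() {
    echo "[$(date -u)] [$(hostname)] $*" >>/mnt/glusterfsvol/app.log
}
log "service started"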
How to quickly provision a volume for a given namespace

##### Goal:
##### namespace=ns-test1

##### Prepare:
cp -a bin/create-gv1-default.sh bin/create-gv1-ns-test1.sh
sed -i 's/default/ns-test1/g' bin/create-gv1-ns-test1.sh
sh bin/create-gv1-ns-test1.sh

cp -a 10.endpoints/glusterfs-r3/default.yaml 10.endpoints/glusterfs-r3/ns-test1.yaml
sed -i 's/default/ns-test1/g' 10.endpoints/glusterfs-r3/ns-test1.yaml

cp -a 20.pv/glusterfs-r3/gv1-default.yaml 20.pv/glusterfs-r3/gv1-ns-test1.yaml
sed -i 's/default/ns-test1/g' 20.pv/glusterfs-r3/gv1-ns-test1.yaml

cp -a 30.pvc/glusterfs-r3/gv1-default.yaml 30.pvc/glusterfs-r3/gv1-ns-test1.yaml
sed -i 's/default/ns-test1/g' 30.pvc/glusterfs-r3/gv1-ns-test1.yaml

cp -a deploy_test/gv1-default-t1.yaml deploy_test/gv1-ns-test1-t1.yaml
sed -i 's/default/ns-test1/g' deploy_test/gv1-ns-test1-t1.yaml

##### Deploy:
kubectl apply -f 10.endpoints/glusterfs-r3/ns-test1.yaml
kubectl apply -f 20.pv/glusterfs-r3/gv1-ns-test1.yaml
kubectl apply -f 30.pvc/glusterfs-r3/gv1-ns-test1.yaml
kubectl apply -f deploy_test/gv1-ns-test1-t1.yaml
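
##### The copy/sed/apply steps above can be wrapped into a single parameterized script for future namespaces; a sketch (assumes the same file layout, script name is hypothetical, target namespace is the only argument):
#!/bin/bash
# bin/provision-ns.sh <namespace>: provision a gluster volume plus the k8s objects
set -e
ns=${1:?usage: $0 <namespace>}

# 1. create and start the gluster volume for this namespace
cp -a bin/create-gv1-default.sh bin/create-gv1-${ns}.sh
sed -i "s/default/${ns}/g" bin/create-gv1-${ns}.sh
sh bin/create-gv1-${ns}.sh

# 2. generate the k8s manifests from the default templates and apply them
for f in 10.endpoints/glusterfs-r3/default.yaml \
         20.pv/glusterfs-r3/gv1-default.yaml \
         30.pvc/glusterfs-r3/gv1-default.yaml \
         deploy_test/gv1-default-t1.yaml; do
    new=$(echo "$f" | sed "s/default/${ns}/")
    cp -a "$f" "$new"
    sed -i "s/default/${ns}/g" "$new"
    kubectl apply -f "$new"
done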

##### Verify:
[root@tvm-00 glusterfs]# kubectl -n ns-test1 get ep,svc |grep 'glusterfs-r3'
ep/glusterfs-r3                   10.10.9.67:49152,10.10.9.68:49152,10.10.9.69:49152   2m
svc/glusterfs-r3                   ClusterIP   10.107.81.35     <none>        49152/TCP        2m

[root@tvm-00 glusterfs]# kubectl -n ns-test1 get pv,pvc |grep 'ns-test1'
pv/glusterfs-r3-gv1-ns-test1       100Gi      RWX            Retain           Bound     ns-test1/glusterfs-r3-gv1-ns-test1           gv1-ns-test1                 2m
pvc/glusterfs-r3-gv1-ns-test1   Bound     glusterfs-r3-gv1-ns-test1   100Gi      RWX            gv1-ns-test1   2m

[root@tvm-00 glusterfs]# kubectl -n ns-test1 get deploy,po |grep 'ns-test1'
deploy/gv1-ns-test1-t1                1         1         1            1           3m
po/gv1-ns-test1-t1-7f986bfbf8-v5zmz               1/1       Running   0          3m

[root@tvm-00 glusterfs]# kubectl -n ns-test1 exec gv1-ns-test1-t1-7f986bfbf8-v5zmz -- df -h |grep gluster
10.10.9.67:gv1-ns-test1  1.0T   36M  1.0T   1% /mnt/glusterfsvol
