K8s Resources, Part 5 (Volume Storage)

Kubernetes Volume

 Volumes in Kubernetes are a Pod-level resource shared by all containers in the Pod. They let an application store data outside the container's own filesystem, and, depending on the volume type, can persist that data beyond the Pod's lifecycle.

K8s volume types
  • emptyDir: literally an "empty directory", i.e. a temporary volume. It lives only as long as the Pod does; once the Pod is gone the directory and its data are gone too, so it offers no persistence.
  • hostPath: binds a directory on the node where the Pod runs into the Pod. The data survives the Pod being deleted, but is lost if that node goes down.
  • gitRepo: mounts the contents of a git repository into the container. The mount is a snapshot taken when the Pod starts; later changes to the repository are not reflected inside the container (a minimal sketch follows this list).
  • nfs: mounts an NFS export into the container, so the data survives even if both the node and the container fail.
  • configmap: injects environment variables and configuration data into a Pod.
  • secret: similar to a configmap, but the data in a Secret is base64-encoded (encoded, not encrypted) and intended for sensitive material such as certificates, keys, and database passwords.
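
Of the types above, gitRepo is the only one not demonstrated later in this article. A minimal, hedged sketch is shown here; the repository URL is a placeholder, and note that the gitRepo volume type is deprecated upstream (the recommended replacement is cloning into an emptyDir with an init container):

apiVersion: v1
kind: Pod
metadata:
  name: pod-gitrepo-demo                  # hypothetical example, not part of the original walkthrough
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: repo
      mountPath: /usr/share/nginx/html    # serve the cloned repository content
  volumes:
  - name: repo
    gitRepo:                              # deprecated volume type; clones once when the Pod starts
      repository: "https://example.com/user/site.git"   # placeholder URL
      revision: "master"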
emptyDir (temporary volume):

 An emptyDir volume is a temporary directory whose lifetime is tied to the Pod's: it is created when the Pod is assigned to a node and removed, along with everything in it, when the Pod is deleted.

# Define a Pod that uses an emptyDir volume.
[root@k8smaster data]# vim pod-vol-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    magedu.com/created-by: "cluster admin"
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    imagePullPolicy: IfNotPresent
    ports:
    - name: http
      containerPort: 80
    - name: https
      containerPort: 443
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html/
  - name: busybox
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: html
      mountPath: /data/
    command: ['/bin/sh']
    args: ["-c","while true; do echo $(date) >> /data/index.html; sleep 2; done"]
  volumes:
  - name: html
    emptyDir: {}
[root@k8smaster data]# kubectl apply -f pod-vol-demo.yaml 
pod/pod-demo created
[root@k8smaster data]# kubectl get pods -o wide
NAME                                READY   STATUS    RESTARTS   AGE   IP             NODE       NOMINATED NODE   READINESS GATES
myapp-deployment-558f94fb55-plk4v   1/1     Running   2          47d   10.244.2.99    k8snode2   <none>           <none>
myapp-deployment-558f94fb55-rd8f5   1/1     Running   2          47d   10.244.2.98    k8snode2   <none>           <none>
myapp-deployment-558f94fb55-zzmpg   1/1     Running   2          47d   10.244.1.107   k8snode1   <none>           <none>
nginx-deployment-6f77f65499-8g24d   1/1     Running   2          46d   10.244.1.108   k8snode1   <none>           <none>
pod-demo                            2/2     Running   0          13s   10.244.2.102   k8snode2   <none>           <none>
[root@k8smaster data]# curl 10.244.2.102
Tue Dec 17 09:24:58 UTC 2019
Tue Dec 17 09:25:00 UTC 2019
Tue Dec 17 09:25:02 UTC 2019
Tue Dec 17 09:25:04 UTC 2019
Tue Dec 17 09:25:06 UTC 2019
Tue Dec 17 09:25:08 UTC 2019
Tue Dec 17 09:25:10 UTC 2019
Tue Dec 17 09:25:12 UTC 2019
Tue Dec 17 09:25:14 UTC 2019
Tue Dec 17 09:25:16 UTC 2019
Tue Dec 17 09:25:18 UTC 2019
Tue Dec 17 09:25:20 UTC 2019
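
An emptyDir volume can also be backed by memory (tmpfs) instead of node disk. A hedged sketch of just the volumes section of pod-vol-demo.yaml above, assuming the cluster version supports sizeLimit:

  volumes:
  - name: html
    emptyDir:
      medium: Memory     # back the volume with tmpfs rather than node disk
      sizeLimit: 64Mi    # optional cap on how much the volume may consume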
hostPath (node-local volume):

 A hostPath volume mounts a directory or file from the worker node's filesystem into the Pod. It exists independently of the Pod's lifecycle, so the data is retained on that node even if the Pod dies.

k8smaster:
# Define a Pod that uses a hostPath volume
[root@k8smaster data]# vim pod-hostpath-vol.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-vol-hostpath
  namespace: default
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html
  volumes:
  - name: html
    hostPath:
      path: /data/pod/volume1
      type: DirectoryOrCreate
Supported values for type:
  • DirectoryOrCreate: the target is a directory on the host; if it does not exist, it is created automatically.
  • Directory: the target is a directory on the host; if it does not exist, mounting fails with an error.
  • FileOrCreate: the target is a file on the host; if it does not exist, it is created automatically.
  • File: the target is a file on the host; if it does not exist, mounting fails with an error (see the sketch after this list).
  • Socket: the target is a UNIX socket file on the host.
  • CharDevice: the target is a character device file on the host.
  • BlockDevice: the target is a block device file on the host.
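
For example, a hedged sketch of a volume that mounts a single existing file from the node with type File (the path /etc/localtime is only an illustration):

  volumes:
  - name: tz
    hostPath:
      path: /etc/localtime   # must already exist on the node, otherwise the Pod fails to start
      type: File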
k8snode1:
[root@k8snode1 ~]# mkdir /data/pod/volume1 -p
[root@k8snode1 ~]# vim /data/pod/volume1/index.html
Add:
test page -- 1
k8snode2:
[root@k8snode2 ~]# mkdir /data/pod/volume1 -p
[root@k8snode2 ~]# vim /data/pod/volume1/index.html
Add:
test page -- 2
k8smaster:
[root@k8smaster data]# kubectl apply -f pod-hostpath-vol.yaml 
pod/pod-vol-hostpath created
[root@k8smaster data]# kubectl get pods -o wide
NAME                                READY   STATUS    RESTARTS   AGE   IP             NODE       NOMINATED NODE   READINESS GATES
myapp-deployment-558f94fb55-plk4v   1/1     Running   2          47d   10.244.2.99    k8snode2   <none>           <none>
myapp-deployment-558f94fb55-rd8f5   1/1     Running   2          47d   10.244.2.98    k8snode2   <none>           <none>
myapp-deployment-558f94fb55-zzmpg   1/1     Running   2          47d   10.244.1.107   k8snode1   <none>           <none>
nginx-deployment-6f77f65499-8g24d   1/1     Running   2          46d   10.244.1.108   k8snode1   <none>           <none>
pod-vol-hostpath                    1/1     Running   0          18s   10.244.1.113   k8snode1   <none>           <none>
[root@k8smaster data]# curl 10.244.1.113
test page -- 1
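
Because hostPath data lives on one node, what the Pod serves depends on where it is scheduled; here it landed on k8snode1 and therefore returned "test page -- 1". If a Pod must always see the same node's data, it can be pinned to that node. A minimal hedged sketch using nodeName (the Pod name is hypothetical):

apiVersion: v1
kind: Pod
metadata:
  name: pod-vol-hostpath-pinned   # hypothetical name
  namespace: default
spec:
  nodeName: k8snode1              # schedule this Pod onto k8snode1 only
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html
  volumes:
  - name: html
    hostPath:
      path: /data/pod/volume1
      type: DirectoryOrCreate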
Network storage volumes:

 Kubernetes supports many kinds of network storage volumes, including traditional NAS/SAN devices (NFS, iSCSI, ...), distributed storage (Ceph RBD, GlusterFS), cloud storage, and more.

NFS volume:
Host network information:
NFS server, ens33: 192.168.43.104
NFS:
[root@localhost ~]# yum -y install nfs-utils
[root@localhost ~]# mkdir /data/nfs/volumes -pv
mkdir: created directory '/data'
mkdir: created directory '/data/nfs'
mkdir: created directory '/data/nfs/volumes'
[root@localhost ~]# vim /etc/exports
/data/nfs/volumes 192.168.43.0/24(rw,no_root_squash)
[root@localhost ~]# systemctl restart nfs
[root@localhost ~]# netstat -anput | grep 2049
tcp        0      0 0.0.0.0:2049            0.0.0.0:*               LISTEN      -                   
tcp6       0      0 :::2049                 :::*                    LISTEN      -                   
udp        0      0 0.0.0.0:2049            0.0.0.0:*                           -                   
udp6       0      0 :::2049                 :::*                                -

[root@localhost ~]# vim /etc/hosts
192.168.43.45 k8smaster
192.168.43.136 k8snode1
192.168.43.176 k8snode2
192.168.43.104 k8snfs
k8snode1:
[root@k8snode1 ~]# yum -y install nfs-utils
[root@k8snode1 ~]# vim /etc/hosts
Add:
192.168.43.104 k8snfs
k8snode2:
[root@k8snode2 ~]# yum -y install nfs-utils
[root@k8snode2 ~]# vim /etc/hosts
Add:
192.168.43.104 k8snfs
k8smaster:
[root@k8smaster data]# cp pod-hostpath-vol.yaml  pod-nfs-vol.yaml
[root@k8smaster data]# vim pod-nfs-vol.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-vol-nfs
  namespace: default
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html
  volumes:
  - name: html
    nfs:
      path: /data/nfs/volumes
      server: k8snfs

[root@k8smaster data]# vim /etc/hosts
Add:
192.168.43.104 k8snfs
[root@k8smaster data]# kubectl apply -f pod-nfs-vol.yaml 
pod/pod-vol-nfs created
NFS:
[root@localhost ~]# echo "test -- page -- nfs" >> /data/nfs/volumes/index.html
k8smaster:
[root@k8smaster data]# kubectl get pods -o wide
NAME                                READY   STATUS    RESTARTS   AGE   IP             NODE       NOMINATED NODE   READINESS GATES
myapp-deployment-558f94fb55-plk4v   1/1     Running   3          47d   10.244.2.104   k8snode2   <none>           <none>
myapp-deployment-558f94fb55-rd8f5   1/1     Running   3          47d   10.244.2.103   k8snode2   <none>           <none>
myapp-deployment-558f94fb55-zzmpg   1/1     Running   3          47d   10.244.1.116   k8snode1   <none>           <none>
nginx-deployment-6f77f65499-8g24d   1/1     Running   3          47d   10.244.1.114   k8snode1   <none>           <none>
pod-vol-hostpath                    1/1     Running   1          22h   10.244.1.115   k8snode1   <none>           <none>
pod-vol-nfs                         1/1     Running   0          16s   10.244.2.106   k8snode2   <none>           <none>
[root@k8smaster data]# curl 10.244.2.106
test -- page -- nfs
[root@k8smaster data]# kubectl delete -f pod-nfs-vol.yaml 
pod "pod-vol-nfs" deleted
Persistent storage volumes:
PV:

 A PersistentVolume (PV) is a piece of storage in some storage system, provisioned by the cluster administrator. It abstracts the underlying shared storage and exposes it as a cluster resource that users can request.

PVC:

 A PersistentVolumeClaim (PVC) is a user's request for storage that completes the binding: it asks a PV for a specific size and access mode, and the bound claim can then be referenced by Pods as a volume.

A simple logical view: the Pod references a PVC, the PVC binds to a matching PV, and the PV is backed by the actual storage system (NFS in this article).
Access Modes:
  • ReadWriteOnce (RWO): read-write, mountable by a single node.
  • ReadOnlyMany (ROX): read-only, mountable by many nodes.
  • ReadWriteMany (RWX): read-write, mountable by many nodes.
Reclaim Policy:
  • Retain: when the claim is released, the PV and its data are kept and must be reclaimed manually (the default; see the fragment after this list).
  • Recycle: when the claim is released, the volume's contents are scrubbed and the PV becomes available again (deprecated).
  • Delete: when the claim is released, the PV and the data on the backing storage are deleted.
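
The PV manifests below do not set this field, so they get the default Retain (visible later in the RECLAIM POLICY column of kubectl get pv). To set it explicitly, a PV spec would carry one extra line, as in this hedged fragment:

spec:
  persistentVolumeReclaimPolicy: Retain   # or Delete; Recycle is deprecated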
NFS:
[root@localhost ~]# cd /data/nfs/volumes/
[root@localhost volumes]# mkdir v{1..5}
[root@localhost volumes]# ls
index.html  v1  v2  v3  v4  v5
[root@localhost volumes]# vim /etc/exports
Add:
/data/nfs/volumes/v1 192.168.43.0/24(rw,no_root_squash)
/data/nfs/volumes/v2 192.168.43.0/24(rw,no_root_squash)
/data/nfs/volumes/v3 192.168.43.0/24(rw,no_root_squash)
/data/nfs/volumes/v4 192.168.43.0/24(rw,no_root_squash)
/data/nfs/volumes/v5 192.168.43.0/24(rw,no_root_squash)
[root@localhost volumes]# exportfs -arv
exporting 192.168.43.0/24:/data/nfs/volumes/v5
exporting 192.168.43.0/24:/data/nfs/volumes/v4
exporting 192.168.43.0/24:/data/nfs/volumes/v3
exporting 192.168.43.0/24:/data/nfs/volumes/v2
exporting 192.168.43.0/24:/data/nfs/volumes/v1
[root@localhost volumes]# showmount -e
Export list for k8snfs:
/data/nfs/volumes/v5 192.168.43.0/24
/data/nfs/volumes/v4 192.168.43.0/24
/data/nfs/volumes/v3 192.168.43.0/24
/data/nfs/volumes/v2 192.168.43.0/24
/data/nfs/volumes/v1 192.168.43.0/24
k8smaster:
[root@k8smaster data]# vim pv-demo.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv001
  labels:
    name: pv001
spec:
  nfs:
    path: /data/nfs/volumes/v1
    server: k8snfs
  accessModes: ["ReadWriteMany","ReadWriteOnce"]
  capacity:
    storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv002
  labels:
    name: pv002
spec:
  nfs:
    path: /data/nfs/volumes/v2
    server: k8snfs
  accessModes: ["ReadWriteOnce"]
  capacity:
    storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv003
  labels:
    name: pv003
spec:
  nfs:
    path: /data/nfs/volumes/v3
    server: k8snfs
  accessModes: ["ReadWriteMany","ReadWriteOnce"]
  capacity:
    storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv004
  labels:
    name: pv004
spec:
  nfs:
    path: /data/nfs/volumes/v4
    server: k8snfs
  accessModes: ["ReadWriteMany","ReadWriteOnce"]
  capacity:
    storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv005
  labels:
    name: pv005
spec:
  nfs:
    path: /data/nfs/volumes/v5
    server: k8snfs
  accessModes: ["ReadWriteMany","ReadWriteOnce"]
  capacity:
    storage: 1Gi
[root@k8smaster data]# kubectl apply -f pv-demo.yaml 
persistentvolume/pv001 created
persistentvolume/pv002 created
persistentvolume/pv003 created
persistentvolume/pv004 created
persistentvolume/pv005 created
[root@k8smaster data]# kubectl get pv
NAME    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
pv001   2Gi        RWO,RWX        Retain           Available                                   7s
pv002   2Gi        RWO            Retain           Available                                   7s
pv003   1Gi        RWO,RWX        Retain           Available                                   7s
pv004   1Gi        RWO,RWX        Retain           Available                                   7s
pv005   1Gi        RWO,RWX        Retain           Available                                   7s

[root@k8smaster data]# vim pod-vol-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc1
  namespace: default
spec:
  accessModes: ["ReadWriteMany"]
  resources:
    requests:
      storage: 2Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-vol-pvc
  namespace: default
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html
  volumes:
  - name: html
    persistentVolumeClaim:
      claimName: pvc1

[root@k8smaster data]# kubectl apply -f pod-vol-pvc.yaml 
persistentvolumeclaim/pvc1 created
pod/pod-vol-pvc created
[root@k8smaster data]# kubectl get pv
NAME    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM          STORAGECLASS   REASON   AGE
pv001   2Gi        RWO,RWX        Retain           Bound       default/pvc1                           15m
pv002   2Gi        RWO            Retain           Available                                          15m
pv003   1Gi        RWO,RWX        Retain           Available                                          15m
pv004   1Gi        RWO,RWX        Retain           Available                                          15m
pv005   1Gi        RWO,RWX        Retain           Available                                          15m
[root@k8smaster data]# kubectl get pvc
NAME   STATUS   VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
pvc1   Bound    pv001    2Gi        RWO,RWX                       9s
[root@k8smaster data]# kubectl get pods
NAME                                READY   STATUS    RESTARTS   AGE
myapp-deployment-558f94fb55-plk4v   1/1     Running   3          48d
myapp-deployment-558f94fb55-rd8f5   1/1     Running   3          48d
myapp-deployment-558f94fb55-zzmpg   1/1     Running   3          48d
nginx-deployment-6f77f65499-8g24d   1/1     Running   3          47d
pod-vol-hostpath                    1/1     Running   1          24h
pod-vol-nfs                         1/1     Running   0          6m1s
pod-vol-pvc                         1/1     Running   0          32s
ConfigMap:

 A ConfigMap decouples configuration from the container image: the image acts as a skeleton, and the configuration is injected into the Pod's containers at run time. This makes containerized applications easy to configure, and changes take effect quickly.

Ways to configure a containerized application:
  • Custom command-line arguments (command, args, shell scripts, ...).
  • Baking the configuration file directly into the image.
  • Environment variables (a sketch follows this list).
  • Volumes (the approach demonstrated below).
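
The rest of this article uses the volume approach. For completeness, a hedged sketch of the environment-variable approach, where the ConfigMap name my-config and the key log_level are hypothetical:

apiVersion: v1
kind: Pod
metadata:
  name: pod-cm-env-demo            # hypothetical example
  namespace: default
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    env:
    - name: LOG_LEVEL              # exposed to the container as an environment variable
      valueFrom:
        configMapKeyRef:
          name: my-config          # hypothetical ConfigMap
          key: log_level           # hypothetical key inside that ConfigMap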

[root@k8smaster data]# mkdir configmap
[root@k8smaster data]# cd configmap/
[root@k8smaster configmap]# vim www.conf
Add:
server {
        server_name myapp.k8s.com;
        listen 80;
        root /data/web/html/;
}
[root@k8smaster configmap]# kubectl create configmap nginx-www --from-file=./www.conf
configmap/nginx-www created
[root@k8smaster configmap]# kubectl get cm
NAME        DATA   AGE
nginx-www   1      7s
[root@k8smaster configmap]# kubectl get cm nginx-www -o yaml
apiVersion: v1
data:
  www.conf: |
    server {
            server_name myapp.k8s.com;
            listen 80;
            root /data/web/html/;
    }
kind: ConfigMap
metadata:
  creationTimestamp: "2019-12-20T06:54:44Z"
  name: nginx-www
  namespace: default
  resourceVersion: "154434"
  selfLink: /api/v1/namespaces/default/configmaps/nginx-www
  uid: b91268e8-8b29-4f54-987a-bc9c33ca88dc
[root@k8smaster configmap]# kubectl describe cm nginx-www
Name:         nginx-www
Namespace:    default
Labels:       <none>
Annotations:  <none>

Data
====
www.conf:
----
server {
        server_name myapp.k8s.com;
        listen 80;
        root /data/web/html/;
}

Events:  <none>
[root@k8smaster configmap]# vim pod-cm-demo.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod-cm
  namespace: default
  labels:
    apps: myapp
    tier: frontend
  annotations:
    k8s.com/create-by: "cluster admin"
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    ports:
    - name: http
      containerPort: 80
    volumeMounts:
    - name: nginx-config
      mountPath: /etc/nginx/conf.d/
      readOnly: true
  volumes:
  - name: nginx-config
    configMap:
      name: nginx-www
[root@k8smaster configmap]# kubectl apply -f pod-cm-demo.yaml 
pod/pod-cm created
[root@k8smaster ~]# kubectl get pods
NAME     READY   STATUS    RESTARTS   AGE
pod-cm   1/1     Running   0          91m
[root@k8smaster configmap]# kubectl exec -it  pod-cm -- /bin/sh
/ # cd /etc/nginx/conf.d/
/etc/nginx/conf.d # ls
www.conf
/etc/nginx/conf.d # cat www.conf 
server {
        server_name myapp.k8s.com;
        listen 80;
        root /data/web/html/;
}
/etc/nginx/conf.d # nginx -T
... ...
... ...
# configuration file /etc/nginx/conf.d/www.conf:
server {
        server_name myapp.k8s.com;
        listen 80;
        root /data/web/html/;
}
/etc/nginx/conf.d # echo test -- page >>  /data/web/html/index.html

[root@k8smaster ~]# kubectl get pods -o wide
NAME     READY   STATUS    RESTARTS   AGE    IP             NODE       NOMINATED NODE   READINESS GATES
pod-cm   1/1     Running   0          118m   10.244.1.125   k8snode1   <none>           <none>
[root@k8smaster ~]# vim /etc/hosts
Add:
10.244.1.125 myapp.k8s.com
[root@k8smaster ~]# curl myapp.k8s.com
test -- page
[root@k8smaster ~]# kubectl edit cm nginx-www
Change:
listen 8080;

/etc/nginx/conf.d # cat www.conf 
server {
        server_name myapp.k8s.com;
        listen 8080;
        root /data/web/html/;
}
/etc/nginx/conf.d # netstat -tnl
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       
tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN      
/etc/nginx/conf.d # nginx -s reload
2019/12/20 13:01:07 [notice] 24#24: signal process started
/etc/nginx/conf.d # netstat -tnl
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       
tcp        0      0 0.0.0.0:8080            0.0.0.0:*               LISTEN      

[root@k8smaster ~]# curl myapp.k8s.com:8080
test -- page
Secret:

 A Secret is similar to a ConfigMap, except that it is meant for sensitive information and the data in a Secret is base64-encoded (encoded, not encrypted). Like a ConfigMap it can be consumed either as environment variables or as a volume; the demo below injects one as an environment variable, and a volume-based sketch is shown at the end of this section.

Secrets come in three types:
  • Service Account: used to access the Kubernetes API; created automatically by Kubernetes and mounted automatically into Pods at /run/secrets/kubernetes.io/serviceaccount.
  • Opaque: a base64-encoded Secret used to store passwords, keys, and the like.
  • kubernetes.io/dockerconfigjson: stores the credentials for a private Docker registry (see the example after this list).
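
The dockerconfigjson type is not demonstrated below; a hedged example of how such a Secret is typically created (all values are placeholders):

[root@k8smaster configmap]# kubectl create secret docker-registry registry-cred \
    --docker-server=registry.example.com \
    --docker-username=admin \
    --docker-password=REGISTRY_PASSWORD \
    --docker-email=admin@example.com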

[root@k8smaster configmap]# kubectl create secret generic mysql-root-password --from-literal=password=MyP@ss123
secret/mysql-root-password created
[root@k8smaster configmap]# kubectl get secret 
NAME                  TYPE                                  DATA   AGE
default-token-kk2fq   kubernetes.io/service-account-token   3      53d
mysql-root-password   Opaque                                1      10s
[root@k8smaster configmap]# kubectl describe secret mysql-root-password
Name:         mysql-root-password
Namespace:    default
Labels:       <none>
Annotations:  <none>

Type:  Opaque

Data
====
password:  9 bytes

# Decode the stored value
[root@k8smaster configmap]# kubectl get secret mysql-root-password -o yaml
apiVersion: v1
data:
  password: TXlQQHNzMTIz
kind: Secret
metadata:
  creationTimestamp: "2019-12-20T13:12:18Z"
  name: mysql-root-password
  namespace: default
  resourceVersion: "180658"
  selfLink: /api/v1/namespaces/default/secrets/mysql-root-password
  uid: d5130e29-9989-47e7-8236-32c9cbdc98d9
type: Opaque
[root@k8smaster configmap]# echo TXlQQHNzMTIz | base64 -d
MyP@ss123[root@k8smaster configmap]# 
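
Equivalently, the same Secret could be created declaratively from a manifest. A hedged sketch that reuses the base64 string shown above (stringData could be used instead of data to supply the plain-text value):

apiVersion: v1
kind: Secret
metadata:
  name: mysql-root-password
  namespace: default
type: Opaque
data:
  password: TXlQQHNzMTIz   # base64 of MyP@ss123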

[root@k8smaster configmap]# vim pod-se-demo.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: pod-se
  namespace: default
  labels:
    apps: myapp
    tier: frontend
  annotations:
    k8s.com/create-by: "cluster admin"
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    ports:
    - name: http
      containerPort: 80
    env:
    - name: MYSQL_ROOT_PASSWORD
      valueFrom:
        secretKeyRef:
          name: mysql-root-password
          key: password
[root@k8smaster configmap]# kubectl apply -f pod-se-demo.yaml 
pod/pod-se created
[root@k8smaster configmap]# kubectl get pods
NAME     READY   STATUS    RESTARTS   AGE
pod-cm   1/1     Running   0          144m
pod-se   1/1     Running   0          21s
[root@k8smaster configmap]# kubectl exec  pod-se  -- printenv | grep MYSQL_ROOT_PASSWORD
MYSQL_ROOT_PASSWORD=MyP@ss123
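
Besides environment variables, the same Secret can be consumed as a volume, in which case each key becomes a file under the mount path. A hedged sketch (the Pod name and mount path are hypothetical):

apiVersion: v1
kind: Pod
metadata:
  name: pod-se-vol                     # hypothetical example
  namespace: default
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: db-secret
      mountPath: /etc/mysql-secret     # the password appears as the file /etc/mysql-secret/password
      readOnly: true
  volumes:
  - name: db-secret
    secret:
      secretName: mysql-root-password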