準備一臺機器安裝nfs
# Install and start the NFS server (CentOS/RHEL; on CentOS 7+
# "nfs" is an alias of nfs-server.service).
yum -y install nfs-utils
systemctl start nfs
systemctl enable nfs
# Create the directories that will be exported
# (options placed before the operand, per convention).
mkdir -pv /data/volumes
cd /data/volumes || exit 1
mkdir v{1,2,3,4,5}
配置共享文件
# Edit the exports file; the five lines below are the CONTENT of
# /etc/exports: share each directory with the 192.168.1.0/24 subnet,
# read-write (rw), synchronous writes (sync), and without mapping
# root to nobody (no_root_squash — convenient for testing, but a
# security risk in production).
vim /etc/exports
/data/volumes/v1 192.168.1.0/24(rw,sync,no_root_squash)
/data/volumes/v2 192.168.1.0/24(rw,sync,no_root_squash)
/data/volumes/v3 192.168.1.0/24(rw,sync,no_root_squash)
/data/volumes/v4 192.168.1.0/24(rw,sync,no_root_squash)
/data/volumes/v5 192.168.1.0/24(rw,sync,no_root_squash)
# Re-export everything defined in /etc/exports
# (-a all, -r re-export, -v verbose)
exportfs -arv
# List the directories this host is exporting
showmount -e
在k8s所有的node節點上安裝nfs客戶端(nfs-utils)
# Install NFS support on every worker node so kubelet can mount
# NFS volumes.
# NOTE(review): only the nfs-utils package (mount.nfs helper) is
# strictly required on clients; starting/enabling the server
# service here is likely unnecessary — confirm before trimming.
yum -y install nfs-utils
systemctl start nfs
systemctl enable nfs
測試下nfs功能是否正常
# Create a mount point and mount the NFS export on it.
# (The original wrote test.txt BEFORE mounting, so that first file
# landed on the local filesystem and was then shadowed by the
# mount — write only after the mount succeeds.)
mkdir /v1
# 192.168.1.190 is the NFS server's IP
mount -t nfs 192.168.1.190:/data/volumes/v1 /v1
# Write through the mount; the data should appear on the server.
echo "test v1" >> /v1/test.txt
# On the NFS server, verify the write arrived:
cat /data/volumes/v1/test.txt
node節點上的test.txt已經同步到nfs節點,說明nfs掛載與同步正常。
# Unmount the /v1 test directory on the node
umount /v1
在master節點上創建PersistentVolume、PersistentVolumeClaim、Pod
vim pv.yaml
# Four PersistentVolumes backed by the NFS exports.
# (Indentation restored — as pasted, the manifests were flat and
# therefore invalid YAML.)
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv1
  labels:
    name: pv1
spec:
  nfs:
    # path: directory exported by the NFS server
    path: /data/volumes/v1
    # server: IP address of the NFS server
    server: 192.168.1.190
  # ReadWriteOnce: mounted read-write by a single node
  accessModes: ["ReadWriteOnce"]
  capacity:
    storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv2
  labels:
    name: pv2
spec:
  nfs:
    path: /data/volumes/v2
    server: 192.168.1.190
  # ReadOnlyMany: mounted read-only by many nodes
  accessModes: ["ReadOnlyMany"]
  capacity:
    storage: 5Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv3
  labels:
    name: pv3
spec:
  nfs:
    path: /data/volumes/v3
    server: 192.168.1.190
  # ReadWriteMany: mounted read-write by many nodes
  accessModes: ["ReadWriteMany", "ReadWriteOnce"]
  capacity:
    storage: 10Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv4
  labels:
    name: pv4
spec:
  nfs:
    path: /data/volumes/v4
    server: 192.168.1.190
  accessModes: ["ReadWriteMany", "ReadWriteOnce"]
  capacity:
    storage: 10Gi
vim pod-pvc.yaml
# A PersistentVolumeClaim plus a Pod that consumes it.
# (Indentation restored — as pasted, the manifests were flat and
# therefore invalid YAML.)
apiVersion: v1
# PersistentVolumeClaim: requests 1Gi of ReadWriteMany storage;
# the control plane binds it to a matching available PV.
kind: PersistentVolumeClaim
metadata:
  name: pvc-myapp
  namespace: default
spec:
  accessModes: ["ReadWriteMany"]
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-myapp
  namespace: default
spec:
  volumes:
    - name: html
      # back the "html" volume with the pvc-myapp claim
      persistentVolumeClaim:
        claimName: pvc-myapp
  containers:
    - name: myapp
      image: ikubernetes/myapp:v1
      volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
部署
# Create the PersistentVolumes first, then the PVC and the Pod
kubectl apply -f pv.yaml
kubectl apply -f pod-pvc.yaml
查看pv
kubectl get pv
查看pvc
kubectl get pvc
使用了pv3,到nfs節點新建/data/volumes/v3/index.html
echo 這是主頁pv3 >> /data/volumes/v3/index.html
查看pod
# Find the pod's IP from the wide output, then request the page
# nginx serves from the NFS-backed mount
kubectl get pods -o wide
# 10.244.2.39 is the pod IP reported above — substitute your own
curl 10.244.2.39
nfs的/data/volumes/v3與pod容器中的/usr/share/nginx/html/目錄同步了
到pod容器中修改/usr/share/nginx/html/看是否會同步到/data/volumes/v3
# Open a shell inside the pod; the commands that follow run in the
# container and should propagate back to the NFS export
kubectl exec -it pod-myapp -- /bin/sh
cd /usr/share/nginx/html
echo 添加修改 >> index.html
echo 添加文件 >> new.txt
可到nfs的/data/volumes/v3查看修改是否同步了。
刪除pod、pvc
# Delete the Pod and the PVC (the PVs from pv.yaml are kept)
kubectl delete -f pod-pvc.yaml
kubectl get pv
pv3是Released狀態
Released:釋放狀態,表明PVC解綁PV,但還未執行回收策略。
也就是刪除pvc之後,與之綁定的pv無法被新的pvc利用。
想要再次利用pv3,可通過修改pv3的定義實現
kubectl edit pv pv3
刪除spec.claimRef字段及其全部內容,保存退出即可
kubectl get pv
pv變成Available了。