kubernetes-----In-depth Pod Management

Table of Contents

1. Resource Limits

2. Restart Policies

3. Health Checks (Probes)

LivenessProbe (liveness probe)

ReadinessProbe (readiness probe)

The three probe check methods


1. Resource Limits

  • In a production environment we need to apply resource limits to pods: the containers inside a pod share the node's kernel resources, so without limits a single pod could end up consuming a large amount of resources.
  • The official Kubernetes definition and explanation of pod resource limits is here: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
  • The fields used to limit pod resources in a YAML file are shown below
#resources is the resource-limit block
##requests specifies the baseline (guaranteed) resources
##limits specifies the resource ceiling, i.e. the maximum amount of resources the pod can use

spec.containers[].resources.limits.cpu    //CPU ceiling
spec.containers[].resources.limits.memory    //memory ceiling
spec.containers[].resources.requests.cpu    //baseline CPU allocated at creation
spec.containers[].resources.requests.memory    //baseline memory allocated at creation

An example follows

  • Create the YAML file
#The YAML file below defines two containers in a single pod
[root@master demo]# cat pod2.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: frontend
spec:
  containers:
  - name: db
    image: mysql
    env:
    - name: MYSQL_ROOT_PASSWORD
      value: "password"
    resources:
      requests:
        memory: "64Mi"    ##基礎內存爲64M
        cpu: "250m"    ##基礎cpu使用爲25%
      limits:
        memory: "128Mi"        ##這個容器內存上限爲128M
        cpu: "500m"    ##這個容器cpu上限爲50%
  - name: wp
    image: wordpress
    resources:
      requests:
        memory: "64Mi"
        cpu: "250m"
      limits:
        memory: "128Mi"
        cpu: "500m"
[root@master demo]# 
  • Create the pod

##Both apply and create can create resources, but apply can also update (re-apply) existing ones
[root@master demo]# kubectl apply -f pod2.yaml 
pod/frontend created
  • Check the events, i.e. the creation process. The steps of resource creation are recorded as events, so you can use them to find errors that occurred during creation

[root@master demo]# kubectl describe pod frontend
Name:               frontend
Namespace:          default
Priority:           0
PriorityClassName:  <none>
Node:               192.168.43.103/192.168.43.103
Start Time:         Tue, 12 May 2020 10:14:15 +0800
Labels:             <none>
Annotations:        kubectl.kubernetes.io/last-applied-configuration:
                      {"apiVersion":"v1","kind":"Pod","metadata":{"annotations":{},"name":"frontend","namespace":"default"},"spec":{"containers":[{"env":[{"name...
Status:             Running
IP:                 172.17.60.2
Containers:
  db:
    Container ID:   docker://b51bcdd6f7962d3f0fa73c2cd93fca5d58fd7ebe74eb428cc5fb91b4f0935929
    Image:          mysql
    Image ID:       docker-pullable://mysql@sha256:61a2a33f4b8b4bc93b7b6b9e65e64044aaec594809f818aeffbff69a893d1944
    Port:           <none>
    Host Port:      <none>
    State:          Running
      Started:      Tue, 12 May 2020 10:14:26 +0800
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     500m
      memory:  128Mi
    Requests:
      cpu:     250m
      memory:  64Mi
    Environment:
      MYSQL_ROOT_PASSWORD:  password
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-h4tl7 (ro)
  wp:
    Container ID:   docker://668b88b91a49acaf0278d628e2a158b21ba0ecc26fced51288eb8cb243b4589a
    Image:          wordpress
    Image ID:       docker-pullable://wordpress@sha256:c3312ab9d4b35148c3ae6f6e06ca3a999850c4aa34dbe310856c52311ec06a93
    Port:           <none>
    Host Port:      <none>
    State:          Running
      Started:      Tue, 12 May 2020 10:14:33 +0800
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     500m
      memory:  128Mi
    Requests:
      cpu:        250m
      memory:     64Mi
    Environment:  <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-h4tl7 (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  default-token-h4tl7:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-h4tl7
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type    Reason     Age   From                     Message
  ----    ------     ----  ----                     -------
  Normal  Scheduled  21s   default-scheduler        Successfully assigned default/frontend to 192.168.43.103
  Normal  Pulling    19s   kubelet, 192.168.43.103  pulling image "mysql"
  Normal  Pulled     10s   kubelet, 192.168.43.103  Successfully pulled image "mysql"
  Normal  Created    10s   kubelet, 192.168.43.103  Created container
  Normal  Started    10s   kubelet, 192.168.43.103  Started container
  Normal  Pulling    10s   kubelet, 192.168.43.103  pulling image "wordpress"
  Normal  Pulled     4s    kubelet, 192.168.43.103  Successfully pulled image "wordpress"
  Normal  Created    4s    kubelet, 192.168.43.103  Created container
  Normal  Started    3s    kubelet, 192.168.43.103  Started container
[root@master demo]# 
  • Check the resource status of the node

#Check the pod's network status
[root@master demo]# kubectl get pod -o wide
NAME       READY   STATUS    RESTARTS   AGE   IP            NODE             NOMINATED NODE
frontend   2/2     Running   0          14s   172.17.60.2   192.168.43.103   <none>

##Check the resource status of the node the pod is running on
[root@master demo]# kubectl describe nodes 192.168.43.103
Name:               192.168.43.103
Roles:              <none>
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/hostname=192.168.43.103
Annotations:        node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Mon, 27 Apr 2020 20:46:21 +0800
Taints:             <none>
Unschedulable:      false
Conditions:
  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----             ------  -----------------                 ------------------                ------                       -------
  OutOfDisk        False   Tue, 12 May 2020 10:23:48 +0800   Tue, 12 May 2020 09:09:37 +0800   KubeletHasSufficientDisk     kubelet has sufficient disk space available
  MemoryPressure   False   Tue, 12 May 2020 10:23:48 +0800   Tue, 12 May 2020 09:09:37 +0800   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure     False   Tue, 12 May 2020 10:23:48 +0800   Tue, 12 May 2020 09:09:37 +0800   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure      False   Tue, 12 May 2020 10:23:48 +0800   Mon, 27 Apr 2020 20:46:21 +0800   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready            True    Tue, 12 May 2020 10:23:48 +0800   Tue, 12 May 2020 09:09:37 +0800   KubeletReady                 kubelet is posting ready status
Addresses:
  InternalIP:  192.168.43.103
  Hostname:    192.168.43.103
Capacity:
 cpu:                1
 ephemeral-storage:  20470Mi
 hugepages-1Gi:      0
 hugepages-2Mi:      0
 memory:             2911652Ki
 pods:               110
Allocatable:
 cpu:                1
 ephemeral-storage:  19317915617
 hugepages-1Gi:      0
 hugepages-2Mi:      0
 memory:             2809252Ki
 pods:               110
System Info:
 Machine ID:                 bf6c47173ce244fc94186bd579f13d7f
 System UUID:                EB0A4D56-93E1-9352-9F9E-D0F9B49FCECE
 Boot ID:                    c5da0e09-5876-419a-ac62-689058e2e389
 Kernel Version:             3.10.0-1062.el7.x86_64
 OS Image:                   CentOS Linux 7 (Core)
 Operating System:           linux
 Architecture:               amd64
 Container Runtime Version:  docker://19.3.8
 Kubelet Version:            v1.12.3
 Kube-Proxy Version:         v1.12.3

##The fields below show the resource limits
##Total CPU request is 500m (50% of the node's single core), total limit is 1 core (100%)
##Total memory request is 128Mi, total limit is 256Mi
##Because this pod contains two containers, these figures are the sums of both containers' requests and limits
Non-terminated Pods:         (1 in total)
  Namespace                  Name        CPU Requests  CPU Limits  Memory Requests  Memory Limits
  ---------                  ----        ------------  ----------  ---------------  -------------
  default                    frontend    500m (50%)    1 (100%)    128Mi (4%)       256Mi (9%)
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource  Requests    Limits
  --------  --------    ------
  cpu       500m (50%)  1 (100%)
  memory    128Mi (4%)  256Mi (9%)
Events:     <none>
  • Check the pod status and the namespaces
##READY 2/2 means both containers were created
[root@master demo]# kubectl get pod
NAME       READY   STATUS    RESTARTS   AGE
frontend   2/2     Running   0          27s

##default is the namespace that pod resources use by default
[root@master demo]# kubectl get ns
NAME          STATUS   AGE
default       Active   14d
kube-public   Active   14d
kube-system   Active   14d
[root@master demo]# 

2. Restart Policies

  • The restart action taken after a pod encounters a failure is called its restart policy

1. Always: whenever the container terminates, always restart it; this is the default policy

2. OnFailure: restart the container only when it exits abnormally (with a non-zero exit code)

3. Never: never restart the container after it terminates

Note: Kubernetes does not support restarting a pod resource in place; a pod can only be deleted and recreated, as sketched below
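A minimal sketch of the delete-and-recreate workflow (the pod name and manifest file are placeholders, not names from this article):

kubectl delete pod <pod-name>    ##remove the existing pod
kubectl create -f <pod-manifest>.yaml    ##recreate it from its manifest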

  • Inspecting an existing controller shows that its restart policy is Always

An example follows

  • Edit the YAML file, create the resource, and define the restart policy
##The manifest below makes the container exit abnormally after 10 seconds; check whether the container gets restarted
##and watch the RESTARTS count
[root@master demo]# cat pod3.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: foo
spec:
  containers:
  - name: busybox
    image: busybox
    args:        ##command arguments for the container
    - /bin/sh
    - -c
    - sleep 10; exit 3    ##sleep for 10s, then exit with status code 3
[root@master demo]# 
  • Create the pod resource and watch the RESTARTS column
##The pod status goes ContainerCreating --> Running --> Error --> Running
##and the RESTARTS count ends up at 1
[root@master demo]# kubectl create -f pod3.yaml
pod/foo created
[root@master demo]# kubectl get pod
NAME       READY   STATUS              RESTARTS   AGE
foo        0/1     ContainerCreating   0          10s

[root@master demo]# kubectl get pod
NAME       READY   STATUS             RESTARTS   AGE
foo        1/1     Running            0          13s

[root@master demo]# kubectl get pod
NAME       READY   STATUS             RESTARTS   AGE
foo        0/1     Error              0          31s

[root@master demo]# kubectl get pod
NAME       READY   STATUS             RESTARTS   AGE
foo        1/1     Running            1          39s
[root@master demo]# 
  • Redefine pod3.yaml and add a restart policy so that the container is not restarted even after an abnormal exit (a sketch of the updated manifest follows)
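A minimal sketch of the updated pod3.yaml, assuming the only change is adding restartPolicy: Never at the pod spec level (this matches the behaviour shown next, where the pod stays in Error without restarting):

apiVersion: v1
kind: Pod
metadata:
  name: foo
spec:
  restartPolicy: Never    ##do not restart the container, even on abnormal exit
  containers:
  - name: busybox
    image: busybox
    args:
    - /bin/sh
    - -c
    - sleep 10; exit 3    ##sleep for 10s, then exit with status code 3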

  • Create the resource and check its status
[root@master demo]# kubectl create -f pod3.yaml 
pod/foo created

##Once the status reaches Error, the container is not restarted again
[root@master demo]# kubectl get pod
NAME   READY   STATUS              RESTARTS   AGE
foo    0/1     ContainerCreating   0          5s
[root@master demo]# kubectl get pod
NAME   READY   STATUS    RESTARTS   AGE
foo    1/1     Running   0          12s
[root@master demo]# kubectl get pod
NAME   READY   STATUS    RESTARTS   AGE
foo    1/1     Running   0          20s
[root@master demo]# kubectl get pod
NAME   READY   STATUS   RESTARTS   AGE
foo    0/1     Error    0          22s
[root@master demo]# 

3. Health Checks (Probes)

LivenessProbe (liveness probe)

  • Liveness probe: used to determine whether the container is alive (in the Running state). If the LivenessProbe detects that the container is unhealthy, the kubelet kills the container and handles it according to the container's restart policy. If a container does not define a LivenessProbe, the kubelet assumes the probe always returns Success.

ReadinessProbe (readiness probe)

  • Readiness probe: used to determine whether the container's service is available (the Ready state); only a pod that has reached Ready can receive requests. For pods managed by a Service, the association between the Service and the pod's Endpoint is also maintained based on whether the pod is Ready. If the Ready state becomes false while the pod is running, the system automatically removes it from the Service's backend Endpoint list, and adds it back once it returns to Ready. This guarantees that client requests to the Service are never forwarded to a pod whose service is unavailable.

An Endpoint entry is a member of the Service's load-balancing backend list, holding the pod's address (a minimal readinessProbe sketch follows).
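The examples in the next section all use livenessProbe; a readinessProbe is declared the same way under the container spec. A minimal sketch, assuming an nginx container serving on port 80 (the pod name, image, and port are illustrative assumptions):

apiVersion: v1
kind: Pod
metadata:
  name: readiness-demo
spec:
  containers:
  - name: nginx
    image: nginx
    ports:
    - containerPort: 80
    readinessProbe:        ##the pod only becomes Ready (and joins the Service Endpoint list) once this check passes
      tcpSocket:
        port: 80
      initialDelaySeconds: 5
      periodSeconds: 5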

The three probe check methods

  • Both the liveness probe and the readiness probe can be configured with any of these three check methods

ExecAction

  • Run a command inside the container; if the command's exit code is 0, the container is considered healthy.
  • In the example below, the command "cat /tmp/healthy" is used to judge whether the container is running normally. After the pod starts, it creates /tmp/healthy and deletes the file 20 seconds later; the liveness probe begins after an initial delay (initialDelaySeconds) of 5 seconds and keeps checking every 5 seconds, so once the file is gone the probe fails and the kubelet kills and restarts the container.
apiVersion: v1
kind: Pod
metadata:
  labels:
    test: liveness
  name: liveness-exec
spec:
  containers:
  - name: liveness
    image: busybox
    args:
    - /bin/sh
    - -c
    - touch /tmp/healthy; sleep 20; rm -rf /tmp/healthy; sleep 100s  #sleep another 100s to give k8s time to observe the pod state
    livenessProbe:
      exec:
        command:
        - cat
        - /tmp/healthy
      initialDelaySeconds: 5
      timeoutSeconds: 5
      periodSeconds: 5

##initialDelaySeconds: how long to wait after the container starts before running the first health check, in seconds
##timeoutSeconds: how long to wait for a response after a health-check request is sent, in seconds. If the check times out, the kubelet considers the container unable to serve and restarts it.
##periodSeconds: how often the probe runs, in seconds
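To watch this probe in action, one would typically save the manifest (assumed here to be named liveness-exec.yaml, an illustrative file name) and observe the pod with the usual commands:

kubectl create -f liveness-exec.yaml
kubectl describe pod liveness-exec    ##the Events section records the "Liveness probe failed" warnings and the container being killed
kubectl get pod liveness-exec    ##the RESTARTS column increases each time the probe fails and the container is restarted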

TCPSocketAction

  • Perform a TCP check against the container's IP address and port; if a TCP connection can be established, the container is considered healthy.
  • In the example below, the health check is performed by establishing a TCP connection to localhost:80 inside the container:
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-healthcheck
spec:
  containers:
  - name: nginx
    image: nginx
    ports:
    - containerPort: 80
    livenessProbe:
      tcpSocket:
        port: 80
      initialDelaySeconds: 30    #wait 30s before the first check
      timeoutSeconds: 1
      periodSeconds: 5    #check every 5 seconds

 

HTTPGetAction

  • Call HTTP GET against the container's IP address, port, and path; if the response status code is greater than or equal to 200 and less than 400, the container is considered healthy.
  • In the example below, the kubelet periodically sends an HTTP request to localhost:80/_status/healthz to check the health of the application in the container
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-healthcheck
spec:
  containers:
  - name: nginx
    image: nginx
    ports:
    - containerPort: 80
    livenessProbe:
      httpGet:
        path: /_status/healthz
        port: 80
      initialDelaySeconds: 15
      timeoutSeconds: 1
      periodSeconds: 5

When TCP/HTTP probes are used against a web service, the probe requests show up in the access logs and skew the site's traffic numbers (so for web services the TCP/HTTP probe frequency can be lowered). In production the exec check method is generally the most common; of course, different workloads can use different check methods.
