Kubernetes Applications

Prerequisites:
Kubernetes documentation (Chinese)
Kubernetes cluster deployment


Deploying the Web UI (Dashboard)

Source repository

  1. Pull the manifests to the local machine and create the resources
[root@Fone7 dashboard]# kubectl create -f dashboard-configmap.yaml 
configmap/kubernetes-dashboard-settings created
[root@Fone7 dashboard]# kubectl create -f dashboard-rbac.yaml 
role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
[root@Fone7 dashboard]# kubectl create -f dashboard-secret.yaml 
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-key-holder created

Alibaba Cloud image registry

  2. Change the image address
# vim dashboard-controller.yaml
image: registry.cn-beijing.aliyuncs.com/kubernetes2s/kubernetes-dashboard-amd64
# kubectl create -f dashboard-controller.yaml 
serviceaccount/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
[root@Fone7 dashboard]# kubectl get pods -n kube-system
NAME                                   READY   STATUS    RESTARTS   AGE
kubernetes-dashboard-77fd5947f-gqgft   1/1     Running   0          5m3s
  3. Edit the YAML so other nodes can reach the dashboard
# vim dashboard-service.yaml
spec:
  # add the following line
  type: NodePort
  ...
# kubectl create -f dashboard-service.yaml 
service/kubernetes-dashboard created
  4. Generate a token
# vim k8s-admin.yaml 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-admin
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: dashboard-admin
subjects:
  - kind: ServiceAccount
    name: dashboard-admin
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
# kubectl create -f k8s-admin.yaml 
serviceaccount/dashboard-admin created
clusterrolebinding.rbac.authorization.k8s.io/dashboard-admin created
[root@Fone7 k8s]# kubectl get secret -n kube-system
NAME                               TYPE                                  DATA   AGE
dashboard-admin-token-vzmfz        kubernetes.io/service-account-token   3      49s
default-token-9slj4                kubernetes.io/service-account-token   3      23h
kubernetes-dashboard-certs         Opaque                                0      43m
kubernetes-dashboard-key-holder    Opaque                                2      43m
kubernetes-dashboard-token-jmk6l   kubernetes.io/service-account-token   3      20m
[root@Fone7 k8s]# kubectl describe secret dashboard-admin-token-vzmfz -n kube-system
Name:         dashboard-admin-token-vzmfz
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: dashboard-admin
              kubernetes.io/service-account.uid: 103c7142-9973-11ea-b60d-080027b6e76f

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1359 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tdnptZnoiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiMTAzYzcxNDItOTk3My0xMWVhLWI2MGQtMDgwMDI3YjZlNzZmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmRhc2hib2FyZC1hZG1pbiJ9.twUnFS7avAu4B8IuozgYDbic8GxkrIyc7P205-pG0h5giiQeU-sIJNWc-fKR0DLDzb98QZqAILH6CNCUNwSJwynUxIBoKIkJqaA-ljfGeHh4xSCCoNb7vG66UPjP1mC5woxyIRMg5TTeAWpkMKUm21sp6HVsZHLxyMUk99EtpXa13vWsv2HSN_LWG5zN2zndKFQQ-57K_p5DoJxqHGDLoSJOQ1_DSuFs1wydH15ot0PORaU0nLGNHlPrtWYlCyARhC4tiUmwMsx0c6LqTh3ZbFmXiswFwGAhSVNMgfAS0YIBGwTAndEi_lPsmA_1cV0k2Gn7GoHIxNvKZtYtWe735g
  5. Log in from a browser
[root@Fone7 dashboard]# kubectl get svc -n kube-system
NAME                   TYPE       CLUSTER-IP   EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   10.0.0.19    <none>        443:49208/TCP   80s

Open https://192.168.33.8:49208 in Firefox (the page is not accessible in Chrome).
Paste the token from step 4 into the token field and log in.


Deploying a Multi-Master Cluster

  1. Copy the master node's files to master2
# scp -r /opt/kubernetes/ master2:/opt/
# scp /usr/lib/systemd/system/{kube-apiserver,kube-scheduler,kube-controller-manager}.service master2:/usr/lib/systemd/system
  2. On master2, update the IPs in the config files and start the services (see the sketch below)
# cd /opt/kubernetes/cfg/
# vim kube-apiserver
# systemctl start kube-apiserver
# systemctl start kube-scheduler
# systemctl start kube-controller-manager
# ps -fe | grep kube
# /opt/kubernetes/bin/kubectl get cs
# /opt/kubernetes/bin/kubectl get nodes
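A sketch, assuming the binary-install option files used in this series (the flag layout may differ in your setup); the address flags must point at master2:
# vim /opt/kubernetes/cfg/kube-apiserver
KUBE_APISERVER_OPTS="... \
--bind-address=<master2-IP> \
--advertise-address=<master2-IP> \
..."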

nginx + keepalived (LB)

To be verified.
Install nginx on both the primary and backup LB nodes (see the nginx installation reference).

  1. Edit the nginx config
# vim /etc/nginx/nginx.conf
# increase the number of worker processes
worker_processes  4;
# add the following above the http block
stream {
    log_format main "$remote_addr $upstream_addr - $time_local $status";
    access_log /var/log/nginx/k8s-access.log main;
    upstream k8s-apiserver {
        server 192.168.33.7:6443;   # list master2's apiserver here as well once it is running
    }
    server {
        listen 0.0.0.0:88;
        proxy_pass k8s-apiserver;
    }
}
# systemctl restart nginx
# systemctl status nginx
# ps -ef | grep nginx
# yum install -y keepalived
# vim /etc/keepalived/keepalived.conf
global_defs { 
   # notification recipients 
   notification_email { 
     [email protected] 
     [email protected] 
     [email protected] 
   } 
   # sender address 
   notification_email_from [email protected]  
   smtp_server 127.0.0.1 
   smtp_connect_timeout 30 
   router_id NGINX_MASTER 
} 

vrrp_script check_nginx {
    script "/usr/local/nginx/sbin/check_nginx.sh"
}

vrrp_instance VI_1 { 
    state MASTER         # set to BACKUP on the standby node
    interface enp0s3     # the NIC that carries the VIP
    virtual_router_id 51 # VRRP router ID; must be unique per instance 
    priority 100    # priority; set to 90 on the backup server 
    advert_int 1    # VRRP heartbeat advertisement interval, default 1s 
    authentication { 
        auth_type PASS      
        auth_pass 1111 
    }  
    virtual_ipaddress { 
        192.168.33.10/24 
    } 
    track_script {
        check_nginx
    } 
}

# vim /usr/local/nginx/sbin/check_nginx.sh
#!/bin/bash
# count running nginx processes, excluding the grep and this script itself
count=$(ps -ef |grep nginx |egrep -cv "grep|$$")

# if nginx is gone, stop keepalived so the VIP fails over to the backup
if [ "$count" -eq 0 ];then
    systemctl stop keepalived
fi

# chmod +x /usr/local/nginx/sbin/check_nginx.sh
# systemctl start keepalived
# ip a   # confirm the VIP is bound
  2. On both node machines, point the configs at the VIP (change server to 192.168.33.10:88; see the sketch after the commands)
# cd /opt/kubernetes/cfg/
# grep 7 *        # find files still referencing the old master IP
# vim bootstrap.kubeconfig 
# vim kubelet.kubeconfig 
# vim kube-proxy.kubeconfig 
# systemctl restart kubelet
# systemctl restart kube-proxy
# ps -ef | grep kube
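All three kubeconfigs carry a server field; after the edit each should read as follows (a sketch; the scheme stays https because the nginx stream block forwards raw TCP to the apiserver):
server: https://192.168.33.10:88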

Back on the master, check cluster status: kubectl get node


kubectl Command-Line Management Tool

  • Command overview
  • Managing the application lifecycle with kubectl
  1. Create
    kubectl run nginx --replicas=3 --image=nginx:1.14 --port=80
    kubectl get deploy,pods
    Notes:
    --replicas: the number of replicas, usually at least two; i.e. how many instances of the service run
  2. Expose
    kubectl expose deployment nginx --port=80 --type=NodePort --target-port=80 --name=nginx-service
    Verify:
    kubectl get service
    kubectl get pods
    kubectl logs [pod_name]
    kubectl describe pod [pod_name]
  3. Update
    kubectl set image deployment/nginx nginx=nginx:1.15
    This triggers a rolling update, so the release goes out without interrupting service
    Watch the rollout with kubectl get pods
  4. Roll back
    View history:
    kubectl rollout history deployment/nginx
    Roll back to the previous version (also a rolling update):
    kubectl rollout undo deployment/nginx
  5. Delete
    kubectl delete deploy/nginx
    kubectl delete svc/nginx-service
  • Connecting to a K8s cluster remotely with kubectl
  1. Set the API server address
    kubectl config set-cluster kubernetes \
      --server=https://192.168.33.7:6443 \
      --embed-certs=true \
      --certificate-authority=ca.pem \
      --kubeconfig=config
  2. Set the client credentials
    kubectl config set-credentials cluster-admin \
      --certificate-authority=ca.pem \
      --embed-certs=true \
      --client-key=admin-key.pem \
      --client-certificate=admin.pem \
      --kubeconfig=config
  3. Set the context
    kubectl config set-context default --cluster=kubernetes --user=cluster-admin --kubeconfig=config
    kubectl config use-context default --kubeconfig=config
  4. These commands produce a file named config. Copy it to ~/.kube/ on the remote machine, and kubectl there can manage the k8s cluster.
    If config is not under ~/.kube/, point kubectl at it with --kubeconfig config; see the usage sketch below.
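    For example (remote-host is a placeholder):
    # scp config remote-host:~/.kube/config
    # kubectl get nodes                           # picks up ~/.kube/config automatically
    # kubectl --kubeconfig ./config get nodes     # if the file lives elsewhere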

Managing Resources with YAML Configuration Files

  • Syntax rules:
    • indentation expresses hierarchy
    • tab indentation is not supported; indent with spaces only
    • blocks are conventionally indented 2 spaces
    • one space after separators such as colons and commas
    • “---” marks the start of a document or separates documents within one file
    • “#” starts a comment
  • Configuration file guidelines:
    • when defining configs, target the latest stable API version (currently v1);
    • store configuration files in a version-control repository outside the cluster, so configs can be rolled back, re-created, and restored quickly when needed;
    • write configuration in YAML rather than JSON. Both work, but YAML is friendlier to users;
    • grouping related objects into a single file is usually easier to manage;
    • don't specify defaults unnecessarily; simple, minimal configs mean fewer mistakes;
    • a descriptive comment on each object makes it easier to maintain.
  • Example: create and start an nginx instance
# vim nginx-deployment.yaml
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 2 # tells deployment to run 2 pods matching the template
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80

---

apiVersion: v1
kind: Service
metadata:
  name: nginx-service
  labels:
    app: nginx
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 80
  selector:
    app: nginx
# kubectl create -f nginx-deployment.yaml
Watch pod creation in real time
# kubectl get pod -w
# kubectl get svc
List all API versions
# kubectl api-versions

In a browser, access nodeIP:port.
Configuration file field reference

  • Generating YAML configuration files automatically
    • Example 1: export with the run command
      kubectl run nginx --image=nginx --replicas=3 --dry-run -o yaml > my-deployment.yaml
      Notes:

      • with --dry-run the command is not actually executed; it only validates.
      • -o sets the output format; yaml and json are both supported.
      • redirect the output into a file.
      • every field of the resource object to be created is emitted; open the file and delete what you don't need.
        List the available resource types with: kubectl api-resources
    • Example 2: export with the get command
      kubectl get deploy/nginx --export -o yaml > me-deploy.yaml
      See the notes in example 1.

    • If you forget a keyword, look it up with kubectl explain --help
      For example, to list the fields available on containers: kubectl explain pods.spec.containers


Understanding Pods in Depth

  • Pod
    • the smallest deployable unit
    • a group of containers
    • containers within a Pod share a network namespace
    • Pods are ephemeral
  • Container classification
    • Infrastructure Container: the infrastructure (pause) container
      • holds the Pod's network namespace
    • InitContainers: init containers (official docs)
      • run to completion before the business containers start; see the sketch below
    • Containers: business containers
      • started in parallel
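    A minimal init-container sketch (the pod name and the wait-for-mysql check are illustrative, not from the original):
    apiVersion: v1
    kind: Pod
    metadata:
      name: init-demo
    spec:
      initContainers:           # run in order, each to completion, before the app containers
      - name: wait-for-mysql
        image: busybox:1.28.4
        command: ['sh', '-c', 'until nslookup mysql; do echo waiting; sleep 2; done']
      containers:               # started in parallel once all init containers have succeeded
      - name: app
        image: nginx:1.15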
  • Image pull policy (imagePullPolicy); see the snippet below
    • IfNotPresent: the default; pull only when the image is absent on the host
    • Always: pull the image every time the Pod is created
    • Never: the Pod never pulls the image on its own
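    The field sits next to image on each container; a minimal sketch:
    spec:
      containers:
      - name: nginx
        image: nginx:1.14
        imagePullPolicy: Always   # per-container setting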
  • Pulling images from a registry that requires authentication (private images)
  1. Log in
    docker login -p [password] -u [username]
  2. Get the credentials
    cat .docker/config.json
    cat .docker/config.json | base64 -w 0
# vim registry-pull-secret.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: registry-pull-secret
  namespace: blog
data:
  .dockerconfigjson: [the base64 string generated above] 
type: kubernetes.io/dockerconfigjson

# kubectl create -f registry-pull-secret.yaml 
# kubectl get secret
The secret is configured correctly only if its DATA count is greater than 0.

Then reference it in the workload YAML:

...
imagePullSecrets:
- name: registry-pull-secret
...


  • Pod resource limits (official docs)

    • spec.containers[].resources.limits.cpu
    • spec.containers[].resources.limits.memory
    • spec.containers[].resources.requests.cpu
    • spec.containers[].resources.requests.memory
    • Example
    # vim wordpress.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: frontend
    spec:
      containers:
      - name: db
        image: mysql
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: "333333"
        resources:
          requests:
            memory: "64Mi"
            cpu: "250m"
          limits:
            memory: "128Mi"
            cpu: "500m"     # 0.5核cpu
      - name: wp
        image: wordpress
        resources:
          requests:
            memory: "64Mi"
            cpu: "250m"
          limits:
            memory: "128Mi"
            cpu: "500m"
    # kubectl apply -f wordpress.yaml   # start the instance
    # kubectl describe pod frontend    # check how the pod was scheduled
    # kubectl describe nodes 192.168.33.8      # check node resource usage
    # kubectl get ns      # list all namespaces
    
  • Restart policy (restartPolicy, at the same level as containers; see the sketch below)

    • Always: always restart the container after it exits (the default)
    • OnFailure: restart only when the container exits abnormally (non-zero exit code)
    • Never: never restart the container
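    A minimal sketch of where the field goes (the pod name is illustrative):
    apiVersion: v1
    kind: Pod
    metadata:
      name: restart-demo
    spec:
      restartPolicy: OnFailure  # sibling of containers
      containers:
      - name: app
        image: busybox
        command: ['sh', '-c', 'sleep 10; exit 1']   # non-zero exit, so the container is restarted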
  • Health checks (Probes) (official docs)

    • There are two kinds of Probe:
    1. livenessProbe: if the check fails, the container is killed and handled according to the Pod's restartPolicy.
    2. readinessProbe: if the check fails, Kubernetes removes the Pod from the service endpoints.
    • A Probe supports three check methods:
    1. httpGet: send an HTTP request; a status code in the 200-400 range means success (an httpGet sketch follows the example below).
    2. exec: run a shell command; exit status 0 means success.
    3. tcpSocket: success means the TCP connection could be established.
    • Example
    apiVersion: v1
    kind: Pod
    metadata:
      labels:
        test: liveness
      name: liveness-exec
    spec:
      containers:
      - name: liveness
        image: busybox
        args:
        - /bin/sh
        - -c
        - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
        livenessProbe:
          exec:
            command:
            - cat
            - /tmp/healthy
          initialDelaySeconds: 5
          periodSeconds: 5
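    An httpGet probe follows the same shape; a readinessProbe sketch (path and port are illustrative):
        readinessProbe:
          httpGet:
            path: /
            port: 80
          initialDelaySeconds: 5
          periodSeconds: 5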
    
  • Scheduling constraints (at the same level as containers)

    • nodeName: schedules the Pod onto the named Node (bypassing the scheduler)
    • nodeSelector: schedules the Pod onto a Node whose Labels match (goes through the scheduler)
    • Example 1: nodeName
    apiVersion: v1 
    kind: Pod 
    metadata: 
      name: pod-example 
      labels: 
        app: nginx 
    spec: 
      nodeName: 192.168.33.8
      containers: 
      - name: nginx 
        image: nginx:1.15 
    

    Check the scheduling info with kubectl describe pod [pod_name].

    • Example 2: nodeSelector
    # kubectl label nodes 192.168.33.8 team=a
    # kubectl label nodes 192.168.33.9 team=b
    # kubectl get nodes --show-labels
    
    apiVersion: v1 
    kind: Pod 
    metadata: 
      name: pod-example 
    spec: 
      nodeSelector: 
        team: b
      containers: 
      - name: nginx 
        image: nginx:1.15
    

    Check the scheduling info with kubectl describe pod [pod_name].

  • Pod troubleshooting
    Pod phases: official docs

    Phase      Description
    Pending    The Pod has been submitted to Kubernetes but cannot be created yet for some reason, e.g. the image download is slow or scheduling failed. See the first Events printed by kubectl describe pod [pod_name]
    Running    The Pod is bound to a node and all containers have been created. At least one container is running, starting, or restarting.
    Succeeded  All containers in the Pod terminated successfully and will not be restarted.
    Failed     All containers have terminated and at least one ended in failure, i.e. exited non-zero or was killed by the system. Check kubectl logs [POD_NAME]
    Unknown    The apiserver cannot obtain the Pod's state for some reason, usually an error in communication between the master and the kubelet on the Pod's host.
    • Summary of handling:
      kubectl describe [TYPE] [NAME_PREFIX]: creation errors; inspect the Events
      kubectl logs [POD_NAME]: inspect container logs
      kubectl exec -it [POD_NAME] bash: runtime issues; enter the container and check the application

Service

  • What a Service does
    • keeps a set of Pods reachable as they come and go
    • defines an access policy for a group of Pods
    • supports three types: ClusterIP, NodePort, and LoadBalancer
    • is implemented underneath mainly by two network modes, iptables and ipvs
  • Example 1: create a Service
    # vim my-service.yaml
    apiVersion: v1
    kind: Service
    metadata:
      name: my-service
      namespace: default
    spec:
      clusterIP: 10.0.0.123
      selector:
        app: nginx
      ports:
        - protocol: TCP
          name: http
          port: 80        # service port
          targetPort: 8080       # container port
    # kubectl apply -f my-service.yaml
    # kubectl get svc         # list all services
    # kubectl get ep          # list the backend ENDPOINTS
    # kubectl describe svc my-service      # inspect service details
    
  • How Pods and Services relate
    • associated through the label selector
    • the Service load-balances across Pods with round-robin distribution (TCP/UDP, layer 4)
  • Service types
    • ClusterIP: the default; allocates a virtual IP (VIP) reachable only inside the cluster. See example 1 above.
    • NodePort: allocates a port on every Node as the external entry point.
      Access flow: user -> domain -> load balancer -> NodeIP:Port -> PodIP:Port
    • LoadBalancer: works on a specific cloud provider, e.g. Google Cloud, AWS, OpenStack
  • Example 2: create a NodePort service
    apiVersion: v1 
    kind: Service 
    metadata: 
      name: my-service2
    spec: 
      selector: 
        app: nginx
      ports: 
        - protocol: TCP 
          port: 80 
          targetPort: 8080 
          nodePort: 48300
      type: NodePort
    
    Check port listening on each node: ss -antpu | grep 48300
    Check the backends bound behind the load balancer (requires ipvsadm): ipvsadm -ln
  • Service proxy modes
    • Underlying traffic forwarding and load-balancing implementations:
      • iptables (default)
        Inspect the rules: iptables-save | grep 10.0.0.123
        Pros: flexible and powerful (packets can be manipulated at different stages)
        Cons:
      1. kube-proxy maintains a large number of iptables rules, and updates are non-incremental
      2. iptables rules are matched top to bottom, one by one (latency grows with the rule count)
      • IPVS
        LVS is load balancing built on the IPVS kernel scheduling module. (Alibaba Cloud SLB uses LVS for layer-4 load balancing.)
        Pros:
      1. runs in the kernel, so performance is better
      2. rich scheduling algorithms: rr, wrr, lc, wlc, ip hash…
        Add --ipvs-scheduler=wrr to /opt/kubernetes/cfg/kube-proxy to change the scheduling algorithm; see the sketch below.
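        A sketch assuming the binary-install kube-proxy options file used in this series (the flag layout may differ):
        # vim /opt/kubernetes/cfg/kube-proxy
        KUBE_PROXY_OPTS="... \
        --proxy-mode=ipvs \
        --ipvs-scheduler=wrr"
        # systemctl restart kube-proxy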

Deploying In-Cluster DNS

  1. Prepare the deployment YAML
    Changes needed:

    1. image: strip the registry domain prefix
    2. $DNS_DOMAIN: the cluster domain, e.g. cluster.local
    3. $DNS_MEMORY_LIMIT: the memory limit, e.g. 170Mi
    4. $DNS_SERVER_IP: the clusterDNS value set in /opt/kubernetes/cfg/kubelet.config on the nodes
    5. delete this line from the Corefile (it forwards names outside the cluster): proxy . /etc/resolv.conf
  2. Deploy with kubectl apply -f coredns.yaml
    Check that the pod is running with kubectl get pods -n kube-system

  3. Test that resolution works

# kubectl run -it --image=busybox:1.28.4 --rm --restart=Never sh
/ # nslookup kubernetes
  • The DNS service watches the Kubernetes API and creates a DNS record for every Service, used for name resolution.
  • ClusterIP A record format: [service-name].[namespace-name].svc.cluster.local
    Example: my-svc.my-namespace.svc.cluster.local
    The .svc.cluster.local suffix can be omitted; see the lookups below.
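    From the same busybox pod, for example (my-service is the Service created earlier in the default namespace):
    / # nslookup my-service                                # short name, same namespace
    / # nslookup my-service.default                        # [service-name].[namespace-name]
    / # nslookup my-service.default.svc.cluster.local      # fully qualified record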

Ingress

  1. The recommended way to expose services to the outside:
    user -> domain -> load balancer -> Ingress Controller (Node) -> Pod
  2. Supports custom service access policies;
  3. Only supports domain-name-based access policies;
  4. Supports TLS;
  • An Ingress is associated with Pods through a Service

  • The Ingress Controller load-balances across the Pods

  • Notes:
    • change the image address to a domestic mirror: lizhenliang/nginx-ingress-controller:0.20.0
    • use the host network: hostNetwork: true
    • make sure ports 80/443 on the nodes are not already in use
    • the kube-proxy on every node must be configured for ipvs with the same scheduling algorithm; check with ipvsadm -ln.

  • Example 1: HTTP forwarding with Ingress
    ingress_test.yaml (only the bottom two fields need changing)

    apiVersion: extensions/v1beta1
    kind: Ingress
    metadata:
      name: simple-fanout-example
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    spec:
      rules:
      - host: foo.bar.com
        http:
          paths:
          - path: /
            backend:
              serviceName: nginx-service
              servicePort: 80
    
    # kubectl apply -f ingress_test.yaml 
    ingress.extensions/simple-fanout-example created
    # kubectl get ingress
    NAME                    HOSTS         ADDRESS   PORTS   AGE
    simple-fanout-example   foo.bar.com             80      35s
    

    Add a hosts entry on the workstation: 192.168.33.8 foo.bar.com
    Browse to http://foo.bar.com/ to reach the corresponding service

  • How it works
    Exec into the Ingress controller instance

    # kubectl get pods -n ingress-nginx
    NAME                                        READY   STATUS    RESTARTS   AGE
    nginx-ingress-controller-7dcb4bbb8d-jtfvr   1/1     Running   0          15h
    # kubectl exec -it nginx-ingress-controller-7dcb4bbb8d-jtfvr bash -n ingress-nginx
    www-data@Fone8:/etc/nginx$ ps -ef | grep nginx
    ...
    www-data     7     6  1 May21 ?        00:10:19 /nginx-ingress-controller --configmap=ingress-nginx/nginx-configuration --publish-service=ingress-nginx/ingress-nginx --annotations-prefix=nginx.ingress.kubernetes.io
    ...
    

    The process above watches the api-server for changes to all services; whenever something changes it immediately regenerates the nginx config file /etc/nginx/nginx.conf

  • Example 2: HTTPS forwarding with Ingress

  1. Self-signed certificates
    # vim certs.sh
    cat > ca-config.json <<EOF
    {
      "signing": {
        "default": {
          "expiry": "87600h"
        },
        "profiles": {
          "kubernetes": {
             "expiry": "87600h",
             "usages": [
                "signing",
                "key encipherment",
                "server auth",
                "client auth"
            ]
          }
        }
      }
    }
    EOF
    
    cat > ca-csr.json <<EOF
    {
        "CN": "kubernetes",
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "L": "Beijing",
                "ST": "Beijing",
            }
        ]
    }
    EOF
    
    cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
    
    cat > sslexample.foo.com-csr.json <<EOF
    {
      "CN": "sslexample.foo.com",
      "hosts": [],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "L": "BeiJing",
          "ST": "BeiJing"
        }
      ]
    }
    EOF
    
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes sslexample.foo.com-csr.json | cfssljson -bare sslexample.foo.com 
    
    
    # sh certs.sh
    # ls sslexample.*.pem
    sslexample.foo.com-key.pem  sslexample.foo.com.pem
    # create the TLS secret
    # kubectl create secret tls sslexample-foo-com --cert=sslexample.foo.com.pem --key=sslexample.foo.com-key.pem
    # kubectl get secret
    NAME                  TYPE                                  DATA   AGE
    ...
    sslexample-foo-com    kubernetes.io/tls                     2      19s
    ...
    
  2. ingress_https.yaml (only secretName and the bottom two lines need changing)
    apiVersion: extensions/v1beta1
    kind: Ingress
    metadata:
      name: tls-example-ingress
    spec:
      tls:
      - hosts:
        - sslexample.foo.com
        secretName: sslexample-foo-com
      rules:
        - host: sslexample.foo.com
          http:
            paths:
            - path: /
              backend:
                serviceName: nginx
                servicePort: 88
    
    # kubectl apply -f ingress_https.yaml 
    ingress.extensions/tls-example-ingress created
    # kubectl get ingress
    NAME                    HOSTS                ADDRESS   PORTS     AGE
    simple-fanout-example   foo.bar.com                    80        48m
    tls-example-ingress     sslexample.foo.com             80, 443   21s
    
  3. Add a hosts entry on the workstation: 192.168.33.8 sslexample.foo.com
    Browse to https://sslexample.foo.com
    Inspect the certificate in the browser.

Volumes

  • Volume (official docs)
    • Volumes in Kubernetes provide the ability to mount external storage into containers.

    • A Pod must define both a volume source (spec.volumes) and a mount point (spec.containers.volumeMounts) before the corresponding Volume can be used.

    • emptyDir

      • creates an empty volume mounted into the Pod's containers. When the Pod is deleted, the volume is deleted too.
      • use case: data sharing between containers within a Pod.
      • Example: emptyDir.yaml
        apiVersion: v1
        kind: Pod
        metadata:
          name: my-pod
        spec:
          containers:
          - name: write
            image: centos
            command: ["bash","-c","for i in {1..100};do echo $i >> /data/hello;sleep 1;done"]
            volumeMounts:
              - name: data
                mountPath: /data
          - name: read
            image: centos
            command: ["bash","-c","tail -f /data/hello"]
            volumeMounts:
              - name: data
                mountPath: /data
          volumes:
          - name: data
            emptyDir: {}
        
        Create with kubectl apply -f emptyDir.yaml and check status with kubectl get pods
        View the instance logs:
        kubectl logs my-pod -c write
        kubectl logs my-pod -c read -f
    • hostPath

      • mounts a file or directory from the Node's filesystem into the Pod's containers.
      • use case: containers that need to access host files
      • Example
        apiVersion: v1
        kind: Pod
        metadata:
          name: my-pod2
        spec:
          containers:
          - name: busybox
            image: busybox
            args:
            - /bin/sh
            - -c
            - sleep 36000
            volumeMounts:
            - name: data
              mountPath: /data
          volumes:
          - name: data
            hostPath:
              path: /tmp
              type: Directory
        
        Create with kubectl apply -f hostPath.yaml, then check the status and which Node it landed on with kubectl get pods -o wide
        Enter the instance and verify that /data matches the contents of /tmp on the corresponding Node:
        kubectl exec -it my-pod2 sh
    • Persistence with NFS

      • Configure NFS
      1. Install the nfs client on all Node machines: yum install -y nfs-utils
      2. Export the share (this and the next step only on the one Node acting as the NFS server)
      # vim /etc/exports
      /data/nfs *(rw,no_root_squash)
      
      3. Start the daemon: systemctl start nfs
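      The export can be checked from any node (showmount ships with nfs-utils):
      # showmount -e 192.168.33.9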
    • Example
      NFS_test.yaml

      apiVersion: apps/v1beta1
      kind: Deployment
      metadata:
        name: nfs-deployment
      spec:
        replicas: 3
        selector:
          matchLabels:
            app: nginx
        template:
          metadata:
            labels:
              app: nginx
          spec:
            containers:
            - name: nginx
              image: nginx
              volumeMounts:
              - name: wwwroot
                mountPath: /usr/share/nginx/html
              ports:
              - containerPort: 80
            volumes:
            - name: wwwroot
              nfs:
                server: 192.168.33.9
                path: /data/nfs
      

      Start the instances

      # kubectl apply -f NFS_test.yaml
      # kubectl get pods
      # kubectl get svc -o wide     # check the port behind each SELECTOR
      NAME            TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE     SELECTOR
      kubernetes      ClusterIP   10.0.0.1     <none>        443/TCP        4d1h    <none>
      my-service      ClusterIP   10.0.0.123   <none>        80/TCP         47h     app=nginx
      my-service2     NodePort    10.0.0.210   <none>        80:30008/TCP   45h     app=nginx
      nginx           NodePort    10.0.0.186   <none>        88:37361/TCP   3d22h   run=nginx
      nginx-service   NodePort    10.0.0.235   <none>        80:49236/TCP   2d16h   app=nginx
      

      On the NFS server, write <h1>Hello World!!!</h1> into /data/nfs/index.html
      Enter an instance to check that it synced:

      # kubectl exec -it nfs-deployment-6b86fcf776-7kzmv bash
      root@nfs-deployment-6b86fcf776-7kzmv:/# ls /usr/share/nginx/html/
      root@nfs-deployment-6b86fcf776-7kzmv:/# ls /usr/share/nginx/html/
      index.html
      

      Browse to http://192.168.33.8:49236/
      Delete and re-create the instances; the browser can still reach the index.html content.


Deploying a Company Project onto Kubernetes

  • Deployment steps

    • Understand the project being deployed
    1. business architecture and services (dubbo, spring cloud)
    2. third-party services, e.g. mysql, redis, zookeeper, eureka, mq
    3. how the services communicate with each other
    4. resource consumption: hardware and bandwidth
    • K8s resources used during deployment
    1. namespaces to isolate projects, or to isolate environments (test, prod, dev)
    2. stateless applications (deployment)
    3. stateful applications (statefulset, pv, pvc)
    4. external exposure (Service, Ingress)
    5. secret, configmap
    • Project base images
    • Orchestration (YAML)
      The image is the deliverable.
      1. Project build (Java): in a CI/CD environment this stage is automated (pull code -> compile and build -> build image -> push to the registry)
      2. Write the YAML that uses this image
    • Workflow
      kubectl -> YAML -> pull image from the registry -> Service (in-cluster access) / Ingress (exposed to external users)

Some Unofficial Component Manifests

  • Installing harbor
  1. Download the installer (official site; mirror for faster download of the 1.9 release)
  2. Unpack it, enter the directory, and set hostname in harbor.yml to the machine's IP
  3. Run ./prepare to pre-generate the configuration, then run the install script ./install.sh
  4. Start the containers: docker-compose up -d
    Check container status: docker-compose ps
  5. Access the harbor registry from the workstation (port 80): 192.168.33.9
    Default credentials: admin:Harbor12345
  6. Add the registry to the trusted list in /etc/docker/daemon.json (on every Node) and restart docker
    # vim /etc/docker/daemon.json
    {"registry-mirrors": ["http://bc437cce.m.daocloud.io"],
    "insecure-registries": ["192.168.33.9"]
    }
    # systemctl restart docker
    
  • Deploying a Java project
  1. Install Java and maven
  2. Download the Java project
  3. Enter the project directory and build with maven
    /usr/local/src/apache-maven-3.6.3/bin/mvn clean package
  4. Build the image: docker build -t 192.168.33.9/project/java-demo:latest .
    Output on success:
    Successfully built 2de0871198e3
    Successfully tagged 192.168.33.9/project/java-demo:latest
    
  5. Push the image to the harbor registry:
    # docker login 192.168.33.9
    # enter the harbor credentials admin:Harbor12345
    # docker push 192.168.33.9/project/java-demo:latest
    
  6. Orchestration YAML (apply the files below one at a time with kubectl create -f xxx.yaml, confirming each came up with kubectl get pod -n test before moving on)
    namespace.yaml
    apiVersion: v1
    kind: Namespace
    metadata:
      name: test
    
    Create the secret:
    # kubectl create secret docker-registry registry-pull-secret --docker-username=admin --docker-password=Harbor12345 [email protected] --docker-server=192.168.33.9 -n test
    # kubectl get secret -n test          # confirm it was created
    
    deployment.yaml
    apiVersion: apps/v1beta1
    kind: Deployment
    metadata:
      name: tomcat-java-demo
      namespace: test
    spec:
      replicas: 3
      selector:
        matchLabels:
          project: www
          app: java-demo
      template:
        metadata:
          labels:
            project: www
            app: java-demo
        spec:
          imagePullSecrets:
          - name: registry-pull-secret
          containers:
          - name: tomcat
            image: 192.168.33.9/project/java-demo:latest
            imagePullPolicy: Always
            ports:
            - containerPort: 8080
              name: web
              protocol: TCP
            resources:
              requests:
                cpu: 0.5
                memory: 1Gi
              limits:
                cpu: 1
                memory: 2Gi
            livenessProbe:
              httpGet:
                path: /
                port: 8080
              initialDelaySeconds: 60
              timeoutSeconds: 20
            readinessProbe:
              httpGet:
                path: /
                port: 8080
              initialDelaySeconds: 60
              timeoutSeconds: 20
    
    service.yaml
    apiVersion: v1
    kind: Service
    metadata:
      name: tomcat-java-demo
      namespace: test
    spec:
      selector:
        project: www
        app: java-demo
      ports:
      - name: web
        port: 80
        targetPort: 8080
    
    ingress.yaml
    apiVersion: extensions/v1beta1
    kind: Ingress
    metadata:
      name: tomcat-java-demo
      namespace: test
    spec:
      rules:
        - host: java.ctnrs.com
          http:
            paths:
            - path: /
              backend:
                serviceName: tomcat-java-demo
                servicePort: 80
    
    mysql.yaml
    apiVersion: v1
    kind: Service
    metadata:
      name: mysql
    spec:
      ports:
      - port: 3306 
        name: mysql 
      clusterIP: None
      selector:
        app: mysql-public
    
    ---
    
    apiVersion: apps/v1beta1
    kind: StatefulSet
    metadata:
      name: db
    spec:
      serviceName: "mysql"
      template:
        metadata:
          labels:
            app: mysql-public 
        spec:
          containers:
          - name: mysql
            image: mysql:5.7 
            env: 
            - name: MYSQL_ROOT_PASSWORD
              value: "123456"
            - name: MYSQL_DATABASE
              value: test
            ports: 
            - containerPort: 3306
            volumeMounts:
            - mountPath: "/var/lib/mysql"
              name: mysql-data
      volumeClaimTemplates:
      - metadata:
          name: mysql-data 
        spec:
          accessModes: ["ReadWriteMany"]
          storageClassName: "managed-nfs-storage"
          resources:
            requests:
              storage: 2Gi 
    
    Import the data into the database:
    [root@Fone8 tomcat-java-demo]# scp db/tables_ly_tomcat.sql master:/root
    
    [root@Fone7 java-demo]# kubectl cp /root/tables_ly_tomcat.sql db-0:/
    root@db-0:/# mysql -uroot -p123456
    mysql> source /tables_ly_tomcat.sql;
    [root@Fone7 java-demo]# kubectl describe pod db-0          # find the pod IP
    
    [root@Fone8 tomcat-java-demo]# vim src/main/resources/application.yml          # point the app at the database IP
    ...
    url: jdbc:mysql://172.17.87.10:3306/test?characterEncoding=utf-8
    ...
    [root@Fone8 tomcat-java-demo]# /usr/local/src/apache-maven-3.6.3/bin/mvn clean package          # rebuild
    [root@Fone8 tomcat-java-demo]# docker build -t  192.168.33.9/project/java-demo:latest .      # rebuild the image
    
    Browse to the domain java.ctnrs.com

Monitoring Kubernetes Cluster Resources

  • What to monitor
    • Cluster monitoring
      • node resource utilization
      • node count
      • running Pods
    • Pod monitoring
      • Kubernetes metrics
      • container metrics
      • the application itself
  • Monitoring solutions
  • Deploying Heapster + InfluxDB + Grafana
    • Architecture diagram
  1. Open the metrics port on every node
    # vim /opt/kubernetes/cfg/kubelet.config
    ...
    readOnlyPort: 10255
    ...
    # systemctl restart kubelet
    # curl 192.168.33.8:10255/metrics
    
  2. Deploy influxdb, heapster, and grafana
    influxdb.yaml
    apiVersion: extensions/v1beta1
    kind: Deployment
    metadata:
      name: monitoring-influxdb
      namespace: kube-system
    spec:
      replicas: 1
      template:
        metadata:
          labels:
            task: monitoring
            k8s-app: influxdb
        spec:
          containers:
          - name: influxdb
            image: registry.cn-hangzhou.aliyuncs.com/google-containers/heapster-influxdb-amd64:v1.1.1
            volumeMounts:
            - mountPath: /data
              name: influxdb-storage
          volumes:
          - name: influxdb-storage
            emptyDir: {}
    
    ---
    
    apiVersion: v1
    kind: Service
    metadata:
      labels:
        task: monitoring
        kubernetes.io/cluster-service: 'true'
        kubernetes.io/name: monitoring-influxdb
      name: monitoring-influxdb
      namespace: kube-system
    spec:
      ports:
      - port: 8086
        targetPort: 8086
      selector:
        k8s-app: influxdb
    
    heapster.yaml
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: heapster
      namespace: kube-system
    
    ---
    
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1beta1
    metadata:
      name: heapster
    roleRef:
      kind: ClusterRole
      name: cluster-admin
      apiGroup: rbac.authorization.k8s.io
    subjects:
      - kind: ServiceAccount
        name: heapster
        namespace: kube-system
    
    ---
    
    apiVersion: extensions/v1beta1
    kind: Deployment
    metadata:
      name: heapster
      namespace: kube-system
    spec:
      replicas: 1
      template:
        metadata:
          labels:
            task: monitoring
            k8s-app: heapster
        spec:
          serviceAccountName: heapster
          containers:
          - name: heapster
            image: registry.cn-hangzhou.aliyuncs.com/google-containers/heapster-amd64:v1.4.2
            imagePullPolicy: IfNotPresent
            command:
            - /heapster
            - --source=kubernetes:https://10.0.0.1          # the kubernetes service ClusterIP (apiserver)
            - --sink=influxdb:http://10.0.0.188:8086        # ClusterIP of the monitoring-influxdb service
    
    ---
    
    apiVersion: v1
    kind: Service
    metadata:
      labels:
        task: monitoring
        kubernetes.io/cluster-service: 'true'
        kubernetes.io/name: Heapster
      name: heapster
      namespace: kube-system
    spec:
      ports:
      - port: 80
        targetPort: 8082
      selector:
        k8s-app: heapster
    
    grafana.yaml
    apiVersion: extensions/v1beta1
    kind: Deployment
    metadata:
      name: monitoring-grafana
      namespace: kube-system
    spec:
      replicas: 1
      template:
        metadata:
          labels:
            task: monitoring
            k8s-app: grafana
        spec:
          containers:
          - name: grafana
            image: registry.cn-hangzhou.aliyuncs.com/google-containers/heapster-grafana-amd64:v4.4.1
            ports:
              - containerPort: 3000
                protocol: TCP
            volumeMounts:
            - mountPath: /var
              name: grafana-storage
            env:
            - name: INFLUXDB_HOST
              value: monitoring-influxdb
            - name: GF_AUTH_BASIC_ENABLED
              value: "false"
            - name: GF_AUTH_ANONYMOUS_ENABLED
              value: "true"
            - name: GF_AUTH_ANONYMOUS_ORG_ROLE
              value: Admin
            - name: GF_SERVER_ROOT_URL
              value: /
          volumes:
          - name: grafana-storage
            emptyDir: {}
    
    ---
    
    apiVersion: v1
    kind: Service
    metadata:
      labels:
        kubernetes.io/cluster-service: 'true'
        kubernetes.io/name: monitoring-grafana
      name: monitoring-grafana
      namespace: kube-system
    spec:
      type: NodePort
      ports:
      - port : 80
        targetPort: 3000
      selector:
        k8s-app: grafana
    
    3. Once the pods are running (kubectl get pods -n kube-system), find the grafana service's NodePort with kubectl get svc -n kube-system and browse to nodeIP:port

Log Collection on the Kubernetes Platform

  • What to collect
    • logs of the K8S system components
    • logs of applications deployed in the K8S Cluster
  • How container logs are collected
  1. Run a log collector on each Node
    • deploy the collection program as a DaemonSet
    • harvest the logs under the node's /var/log and /var/lib/docker/containers/ directories
  2. Attach a dedicated log-collection container to each Pod
    • every application Pod gains a log-collection container; an emptyDir volume shares the log directory so the collector can read it
  3. Have the application push its logs directly
    • outside the scope of Kubernetes
  • Comparison of the three approaches
  • Log solution: Filebeat + ELK
  1. Install logstash, elasticsearch, and kibana
  2. Configuration files
    /etc/kibana/kibana.yml
    Uncomment the following:
    server.port: 5601
    server.host: "0.0.0.0"
    elasticsearch.hosts: ["http://localhost:9200"]
    
    Create a new /etc/logstash/conf.d/logstash-to-es.conf
    input {
      beats {
        port => 5044
      }
    }
    
    filter {
    }
    
    output {
      elasticsearch {
        hosts => ["http://127.0.0.1:9200"]
        index => "k8s-log-%{+YYYY.MM.dd}"
      }
      stdout { codec => rubydebug }
    }
    
  3. Deploy filebeat (collects logs on every node)
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: k8s-logs-filebeat-config
      namespace: kube-system 
    
    data:
      filebeat.yml: |
        filebeat.inputs:
          - type: log
            paths:
              - /var/log/messages  
            fields:
              app: k8s 
              type: module 
            fields_under_root: true
    
        setup.ilm.enabled: false
        setup.template.name: "k8s-module"
        setup.template.pattern: "k8s-module-*"
    
        output.logstash:           # 5044 is the Logstash beats input configured above, not Elasticsearch
          hosts: ['192.168.33.8:5044']
          index: "k8s-module-%{+yyyy.MM.dd}"
    
    ---
    
    apiVersion: apps/v1
    kind: DaemonSet 
    metadata:
      name: k8s-logs
      namespace: kube-system
    spec:
      selector:
        matchLabels:
          project: k8s 
          app: filebeat
      template:
        metadata:
          labels:
            project: k8s
            app: filebeat
        spec:
          containers:
          - name: filebeat
            image: elastic/filebeat:7.7.0
            args: [
              "-c", "/etc/filebeat.yml",
              "-e",
            ]
            resources:
              requests:
                cpu: 100m
                memory: 100Mi
              limits:
                cpu: 500m
                memory: 500Mi
            securityContext:
              runAsUser: 0
            volumeMounts:
            - name: filebeat-config
              mountPath: /etc/filebeat.yml
              subPath: filebeat.yml
            - name: k8s-logs 
              mountPath: /var/log/messages
          volumes:
          - name: k8s-logs
            hostPath: 
              path: /var/log/messages
          - name: filebeat-config
            configMap:
              name: k8s-logs-filebeat-config
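    Deploy and verify (saving the manifest above as filebeat.yaml; the DaemonSet runs one collector pod per node):
    # kubectl apply -f filebeat.yaml
    # kubectl get pods -n kube-system -o wide | grep k8s-logs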
    
