一.部署Web UI(Dashboard)
1.解壓包,進入目錄
包就在之前的master部署組件裏
這裏裏面kubernetes-server-linux-amd64.tar.gz
2.執行yaml文件
查看啓動的pod,沒在默認命名空間,在kube-system下
注:
其中dashboard-controller.yaml這個裏面的dashboard鏡像是國外的,如果慢,可以換成國內的鏡像地址 image: registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.10.0
3.創建Dashboard服務
4.查看啓動的Dashboard服務
外網訪問的端口是44721
5.設置登錄令牌,訪問web界面
創建用戶訪問,綁定集羣管理員,使用它產生的密鑰
創建賬戶產生的token
查看token
複製token到頁面上即可
二.coredns的安裝
安裝coredns的yaml文檔可以在kubernetes的github上找到https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/coredns/coredns.yaml.sed
[root@master ~]# vim coredns.yaml
# Warning: This is a file generated from the base underscore template file: coredns.yaml.base
#
# CoreDNS cluster addon: ServiceAccount + RBAC + ConfigMap (Corefile) +
# Deployment + Service. Each resource is a separate YAML document ("---").
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
# Read-only access CoreDNS needs to watch Services/Endpoints/Pods/Namespaces
# (for DNS records) and to get Nodes.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
# Corefile: cluster domain is cluster.local; upstream resolution is delegated
# to the node's /etc/resolv.conf via the proxy plugin.
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      serviceAccountName: coredns
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      containers:
      - name: coredns
        image: coredns/coredns:1.2.6
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
# The Service keeps the legacy "kube-dns" name so existing kubelet
# --cluster-dns settings keep working; clusterIP must match that setting.
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.0.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
1.部署coredns
2.查看部署dns結果
[root@master ~]# kubectl get pods -n kube-system
3.測試可以解析到集羣裏的服務
不同命名空間解析
後面加上命名空間.svc.cluster.local
三.kubectl管理工具
1.刪除
先刪除控制器,再刪服務
[root@master ~]# kubectl delete deployment.apps/nginx
[root@master ~]# kubectl delete service/nginx
2.在默認命名空間下創建nginx容器
[root@master ~]# kubectl run nginx --image=nginx:1.10 --replicas=3 --labels="app=nginx-example" --port=80
查看pod
[root@master ~]# kubectl get all
3.查看pod的詳細信息describe
[root@master ~]# kubectl describe pod/nginx-7cfb59d88d-nmtp9
下面還有事件,可以用於排錯
4.查看pod和service
[root@master ~]# kubectl get pod
[root@master ~]# kubectl get svc
5.顯示標籤
[root@master ~]# kubectl get pods --show-labels
注:設置標籤的好處,到時候pod多了,可以指定pod的標籤查找
6.查詢pod的詳細信息
[root@master ~]# kubectl get pods -o wide
7.查看控制用到的那些鏡像
[root@master ~]# kubectl get deployment -o wide
8.啓動發佈服務
[root@master ~]# kubectl expose deployment nginx --port=88 --type=NodePort --target-port=80 --name=nginx-service
expose暴露服務
--port=88 服務在集羣內部暴露的端口
--type=NodePort 使用節點端口訪問應用
--target-port=80 內部端口
--name=nginx-service 服務名稱
通過集羣ip加外部暴露端口可以在任意節點訪問應用
或者通過node端口和節點地址訪問
四.故障排查
1.查看pod詳細信息
kubectl describe 容器pod id
2.查看pod日誌
kubectl logs 容器pod名稱
3.進入到pod查看情況
kubectl exec -it pod名稱 -- bash
五.更新鏡像
kubectl set --help 這個命令更新設置容器的設置
1.更新鏡像版本
將之前的nginx1.10更新爲nginx1.11版本
這個過程是將之前的1.10版本的3個鏡像刪除,再創建3個1.11版本的鏡像
查看剛創建的pod詳細信息,已經變成nginx:1.11
[root@master ~]# kubectl describe pod nginx-799f6b8cfc-khbvf
2.還有另一種的辦法更新鏡像,直接編輯控制器資源文件
[root@master ~]# kubectl edit deploy/nginx
查看新創建的pod信息顯示image是nginx:1.12
3.可以查看版本歷史
[root@master ~]# kubectl rollout status deploy/nginx
[root@master ~]# kubectl rollout history deploy/nginx
再次升級爲nginx:1.13
更新完成
history可以查看之前的記錄
4.回滾狀態
現在版本是nginx:1.13,假設這個版本有問題,要根據history回滾到之前的版本
[root@master ~]# kubectl rollout undo deployment.apps/nginx
[root@master ~]# kubectl rollout status deploy/nginx
查看版本歷史,少了第三個版本,已經回滾到nginx:1.12了
查看新pod詳細信息已經是1.12了
六.擴容副本scale
由於高併發業務需要將後端pod擴展爲5個(現在爲3個)
[root@master ~]# kubectl scale deployment.apps/nginx --replicas=5
業務下來了還可以縮容