1,service
工作模式:userspace、iptables、ipvs
help命令:
[root@master ~]# kubectl explain svc
編寫文件:
[root@master manifests]# cat redis-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: redis
namespace: default
spec:
selector:
app: redis
role: logstor
clusterIP: 10.97.97.97
type: ClusterIP
ports:
- port: 6379
targetPort: 6379
查看:
[root@master manifests]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 6d21h
redis ClusterIP 10.97.97.97 <none> 6379/TCP 9s
[root@master manifests]# kubectl describe svc redis
Name: redis
Namespace: default
Labels: <none>
Annotations: kubectl.kubernetes.io/last-applied-configuration:
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"name":"redis","namespace":"default"},"spec":{"clusterIP":"10.97.97.97","...
Selector: app=redis,role=logstor
Type: ClusterIP
IP: 10.97.97.97
Port: <unset> 6379/TCP
TargetPort: 6379/TCP
Endpoints: 10.244.1.32:6379
Session Affinity: None
Events: <none>
[root@master manifests]# cat redis-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: redis
namespace: default
spec:
selector:
app: redis
role: logstor
clusterIP: 10.97.97.97
type: ClusterIP
ports:
- port: 6379
targetPort: 6379
交互:
編寫pod:
[root@master manifests]# cat deploy-demo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: myapp-deploy
namespace: default
spec:
replicas: 3
selector:
matchLabels:
app: myapp
release: canary
template:
metadata:
labels:
app: myapp
release: canary
spec:
containers:
- name: myapp
image: ikubernetes/myapp:v1
ports:
- name: http
containerPort: 80
編寫svc:
[root@master manifests]# cat myapp-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: myapp
namespace: default
spec:
selector:
app: myapp
release: canary
clusterIP: 10.99.99.99
type: NodePort
ports:
- port: 80
targetPort: 80
nodePort: 30250
[root@master manifests]#
總結:
Service:
模型:userspace、iptables、ipvs
類型:ClusterIP、NodePort、Headless(無頭服務,clusterIP: None)
2,Ingress Controller
help命令:
[root@master ~]# kubectl explain ingress
創建命名空間:
[root@master ~]# kubectl create namespace ingress-nginx
下載yaml:
[root@master ingress-nginx]# for file in namespace.yaml configmap.yaml rbac.yaml with-rbac.yaml; do wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/$file; done
apply文件:
[root@master ingress-nginx]# kubectl apply -f namespace.yaml
Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
namespace/ingress-nginx configured
[root@master ingress-nginx]# kubectl apply -f /*
error: Unexpected args: [/boot /dev /etc /home /lib /lib64 /media /mnt /opt /proc /root /run /sbin /srv /sys /tmp /usr /var]
See 'kubectl apply -h' for help and examples
[root@master ingress-nginx]# kubectl apply -f ./*
error: Unexpected args: [./namespace.yaml ./rbac.yaml ./with-rbac.yaml]
See 'kubectl apply -h' for help and examples
[root@master ingress-nginx]# ls
configmap.yaml namespace.yaml rbac.yaml with-rbac.yaml
[root@master ingress-nginx]# kubectl apply -f configmap.yaml
configmap/nginx-configuration created
configmap/tcp-services created
configmap/udp-services created
[root@master ingress-nginx]# kubectl apply -f rbac.yaml
serviceaccount/nginx-ingress-serviceaccount created
clusterrole.rbac.authorization.k8s.io/nginx-ingress-clusterrole created
role.rbac.authorization.k8s.io/nginx-ingress-role created
rolebinding.rbac.authorization.k8s.io/nginx-ingress-role-nisa-binding created
clusterrolebinding.rbac.authorization.k8s.io/nginx-ingress-clusterrole-nisa-binding created
[root@master ingress-nginx]# kubectl apply -f with-rbac.yaml
deployment.apps/nginx-ingress-controller created
[root@master ingress-nginx]#
查看狀態:
[root@master ingress-nginx]# kubectl get pods -n ingress-nginx
編寫pod:
[root@master ingress-nginx]# cat tomcat-deploy.yaml
apiVersion: v1
kind: Service
metadata:
name: tomcat
namespace: default
spec:
selector:
app: tomcat
release: canary
ports:
- name: http
targetPort: 8080
port: 8080
- name: ajp
targetPort: 8009
port: 8009
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: tomcat-deploy
namespace: default
spec:
replicas: 3
selector:
matchLabels:
app: tomcat
release: canary
template:
metadata:
labels:
app: tomcat
release: canary
spec:
containers:
- name: tomcat
image: tomcat:8.5.32-jre8-alpine
ports:
- name: http
containerPort: 8080
- name: ajp
containerPort: 8009
[root@master ingress-nginx]#
編寫ingress:
[root@master ingress-nginx]# cat ingess-tomcat.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: ingress-myapp
namespace: default
annotations:
kubernetes.io/ingress.class: "nginx"
spec:
rules:
- host: tomcat.wz.com
http:
paths:
- path:
backend:
serviceName: tomcat
servicePort: 8080
[root@master ingress-nginx]#
apply文件:
[root@master ingress-nginx]# kubectl apply -f tomcat-deploy.yaml
service/tomcat created
deployment.apps/tomcat-deploy created
[root@master ingress-nginx]# kubectl apply -f ingess-tomcat.yaml
ingress.extensions/ingress-myapp created
查看端口:
[root@master ingress-nginx]# kubectl exec tomcat-deploy-579d97b849-2rp95 -- netstat -tnl
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State
tcp 0 0 0.0.0.0:8080 0.0.0.0:* LISTEN
tcp 0 0 127.0.0.1:8005 0.0.0.0:* LISTEN
tcp 0 0 0.0.0.0:8009 0.0.0.0:* LISTEN
查看svc:
[root@master ingress-nginx]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
app NodePort 10.99.99.99 <none> 80:30250/TCP 7h11m
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 7d5h
myapp ClusterIP 10.103.137.94 <none> 80/TCP 44m
redis ClusterIP 10.97.97.97 <none> 6379/TCP 8h
tomcat ClusterIP 10.109.152.237 <none> 8080/TCP,8009/TCP 3m33s
查看ingress:
[root@master ingress-nginx]# kubectl get ingress
…
3,存儲卷
類型:
emptyDir:節點存儲卷(臨時目錄)
hostPath:宿主機存儲卷
分佈式存儲、雲存儲
emptyDir存儲卷(僅此節點共享):
help命令:
[root@master ~]# kubectl explain pods.spec.volumes
編寫文件:
[root@master volumes]# cat pod-vol-demo.yaml
apiVersion: v1
kind: Pod
metadata:
name: pod-demo
namespace: default
labels:
app: myapp
tier: frontend
annotations:
magedu.com/craete-by: "cluster admin"
spec:
containers:
- name: myapp
image: ikubernetes/myapp:v1
ports:
- name: http
containerPort: 80
volumeMounts:
- name: html
mountPath: /data/web/html/
- name: busybox
image: busybox:latest
imagePullPolicy: IfNotPresent
volumeMounts:
- name: html
mountPath: /data/
command:
- "/bin/sh"
- "-c"
- "sleep 7200"
volumes:
- name: html
emptyDir: {}
進入容器1寫入數據:
[root@master volumes]# kubectl exec -it pod-demo -c busybox -- /bin/sh
/ # ls
/ # echo $(date) >> /data/index.html
/ # echo $(date) >> /data/index.html
/ # cat/data/index.html
/bin/sh: cat/data/index.html: not found
/ # cat /data/index.html
Wed Sep 25 11:00:27 UTC 2019
Wed Sep 25 11:00:27 UTC 2019
/ #
進入容器2查看是否共享:
[root@master volumes]# kubectl exec -it pod-demo -c myapp -- /bin/bash
OCI runtime exec failed: exec failed: container_linux.go:345: starting container process caused "exec: \"/bin/bash\": stat /bin/bash: no such file or directory": unknown
command terminated with exit code 126
[root@master volumes]# kubectl exec -it pod-demo -c myapp -- /bin/sh
/ # ls
bin data dev etc home lib media mnt proc root run sbin srv sys tmp usr var
/ # cd /data/web/html/
/data/web/html # ls
index.html
/data/web/html # cat index.html
Wed Sep 25 11:00:27 UTC 2019
Wed Sep 25 11:00:27 UTC 2019
/data/web/html #
重新再定義一個存儲卷:
[root@master volumes]# cat pod-vol-demo.yaml
apiVersion: v1
kind: Pod
metadata:
name: pod-demo
namespace: default
labels:
app: myapp
tier: frontend
annotations:
magedu.com/craete-by: "cluster admin"
spec:
containers:
- name: myapp
image: ikubernetes/myapp:v1
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 80
volumeMounts:
- name: html
mountPath: /usr/share/nginx/html
- name: busybox
image: busybox:latest
imagePullPolicy: IfNotPresent
volumeMounts:
- name: html
mountPath: /data/
command:
- "/bin/sh"
- "-c"
- "while true; do echo $(date) >> /data/index.html; sleep 2; done"
volumes:
- name: html
emptyDir: {}
查看:
[root@master volumes]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
client 0/1 Completed 0 2d4h 10.244.1.13 node01 <none> <none>
pod-demo 2/2 Running 0 27s 10.244.1.46 node01 <none> <none>
[root@master volumes]# curl 10.244.1.46
Wed Sep 25 11:17:24 UTC 2019
Wed Sep 25 11:17:26 UTC 2019
Wed Sep 25 11:17:28 UTC 2019
Wed Sep 25 11:17:30 UTC 2019
Wed Sep 25 11:17:32 UTC 2019
Wed Sep 25 11:17:34 UTC 2019
Wed Sep 25 11:17:36 UTC 2019
Wed Sep 25 11:17:38 UTC 2019
Wed Sep 25 11:17:40 UTC 2019
Wed Sep 25 11:17:42 UTC 2019
Wed Sep 25 11:17:44 UTC 2019
Wed Sep 25 11:17:46 UTC 2019
Wed Sep 25 11:17:48 UTC 2019
hostPath宿主機存儲卷(重點、與集羣節點之間共享):
編寫pod:
[root@master volumes]# cat pod-hostpath-vol.yaml
apiVersion: v1
kind: Pod
metadata:
name: pod-vol-hostpath
namespace: default
spec:
containers:
- name: myapp
image: ikubernetes/myapp:v1
volumeMounts:
- name: html
mountPath: /usr/share/nginx/html
volumes:
- name: html
hostPath:
path: /data/pod/volume1
type: DirectoryOrCreate
[root@master volumes]#
在各節點創建目錄:
[root@node01 ~]# mkdir /data/pod/volume1 -p
[root@node01 ~]# vi index.html
node01
訪問IP:
[root@master volumes]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
client 0/1 Completed 0 2d4h 10.244.1.13 node01 <none> <none>
pod-vol-hostpath 1/1 Running 0 14s 10.244.1.47 node01 <none> <none>
[root@master volumes]# curl 10.244.1.47
node01
nfs存儲卷(與集羣之外節點共享)
pvc存儲卷:
help命令:
[root@master volumes]# kubectl explain pvc