Kubernetes集羣部署(yum部署)

 

環境準備

Kubernetes-Master:192.168.37.134    #yum install kubernetes-master etcd flannel -y

Kubernetes-node1:192.168.37.135     #yum install kubernetes-node etcd docker flannel *rhsm* -y

Kubernetes-node2:192.168.37.136     #yum install kubernetes-node etcd docker flannel *rhsm* -y

系統版本:Centos7.5

關閉Firewalld防火牆,保證ntp時間正常同步

【K8s-master-etcd配置】

[root@Kubernetes-master ~]# egrep -v "#|^$" /etc/etcd/etcd.conf
ETCD_DATA_DIR="/data/etcd1"
ETCD_LISTEN_PEER_URLS="http://192.168.37.134:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.37.134:2379,http://127.0.0.1:2379"
ETCD_MAX_SNAPSHOTS="5"
ETCD_NAME="etcd1"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.37.134:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.37.134:2379"
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.37.134:2380,etcd2=http://192.168.37.135:2380,etcd3=http://192.168.37.136:2380"

配置文件詳解:

ETCD_DATA_DIR:etcd數據存儲目錄(節點名稱由ETCD_NAME指定)

ETCD_LISTEN_PEER_URLS:該節點與其他etcd節點通信時所監聽的地址

ETCD_LISTEN_CLIENT_URLS:etcd節點與客戶端通信時所監聽的地址列表

ETCD_INITIAL_ADVERTISE_PEER_URLS:etcd集羣通信所監聽節點地址和端口

ETCD_ADVERTISE_CLIENT_URLS:廣播本地節點地址告知其他etcd節點,監聽本地的網絡和端口2379

ETCD_INITIAL_CLUSTER:配置etcd集羣內部所有成員地址,同時監聽2380端口,方便etcd集羣節點同步數據

 

 

[root@Kubernetes-master ~]# mkdir -p /data/etcd1/

[root@Kubernetes-master ~]# chmod 757 -R /data/etcd1/

【K8s-etcd1配置】

[root@kubernetes-node1 ~]# egrep -v "#|^$" /etc/etcd/etcd.conf 
ETCD_DATA_DIR="/data/etcd2"
ETCD_LISTEN_PEER_URLS="http://192.168.37.135:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.37.135:2379,http://127.0.0.1:2379"
ETCD_MAX_SNAPSHOTS="5"
ETCD_NAME="etcd2"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.37.135:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.37.135:2379"
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.37.134:2380,etcd2=http://192.168.37.135:2380,etcd3=http://192.168.37.136:2380"

 

[root@kubernetes-node1 ~]# mkdir -p /data/etcd2/

[root@kubernetes-node1 ~]# chmod 757 -R /data/etcd2/

【K8s-node2-etcd配置】

[root@kubernetes-node2 ~]# egrep -v "#|^$" /etc/etcd/etcd.conf  
ETCD_DATA_DIR="/data/etcd3"
ETCD_LISTEN_PEER_URLS="http://192.168.37.136:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.37.136:2379,http://127.0.0.1:2379"
ETCD_MAX_SNAPSHOTS="5"
ETCD_NAME="etcd3"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.37.136:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.37.136:2379"
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.37.134:2380,etcd2=http://192.168.37.135:2380,etcd3=http://192.168.37.136:2380"

 

[root@kubernetes-node2 ~]# mkdir /data/etcd3/

[root@kubernetes-node2 ~]# chmod 757 -R /data/etcd3/

至此,ETCD集羣已配置完畢,接下來啓動並驗證etcd集羣是否正常~

[root@Kubernetes-master ~]# systemctl start etcd.service     #注意,上述節點都需要啓動etcd服務,同時也設置自啓
[root@Kubernetes-master ~]# systemctl enable etcd.service

【K8s-master節點API-server/config配置】

[root@Kubernetes-master ~]# egrep -v "#|^$" /etc/kubernetes/apiserver

KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBELET_PORT="--kubelet-port=10250"
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.37.134:2379,http://192.168.37.135:2379,http://192.168.37.136:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"
KUBE_API_ARGS=""

 

[root@Kubernetes-master ~]#systemctl start kube-apiserver  
[root@Kubernetes-master ~]# systemctl enable kube-apiserver

 

[root@Kubernetes-master ~]# egrep -v "#|^$" /etc/kubernetes/config 
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.37.134:8080"

[root@Kubernetes-master kubernetes]# systemctl start kube-controller-manager
[root@Kubernetes-master kubernetes]# systemctl enable kube-controller-manager
[root@Kubernetes-master kubernetes]# systemctl start kube-scheduler
[root@Kubernetes-master kubernetes]# systemctl enable kube-scheduler

【k8s-node1】

kubelet配置文件

[root@kubernetes-node1 ~]# egrep -v "#|^$" /etc/kubernetes/kubelet 
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_HOSTNAME="--hostname-override=192.168.37.135"
KUBELET_API_SERVER="--api-servers=http://192.168.37.134:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""

 

config主配置文件

[root@kubernetes-node1 ~]# egrep -v "#|^$" /etc/kubernetes/config 
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.37.134:8080"

 

[root@kubernetes-node1 ~]# systemctl start kubelet
[root@kubernetes-node1 ~]# systemctl enable kubelet
[root@kubernetes-node1 ~]# systemctl start kube-proxy
[root@kubernetes-node1 ~]# systemctl enable kube-proxy

【k8s-node2】

kubelet配置文件

[root@kubernetes-node2 ~]# egrep -v "#|^$" /etc/kubernetes/kubelet 
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_HOSTNAME="--hostname-override=192.168.37.136"
KUBELET_API_SERVER="--api-servers=http://192.168.37.134:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""

 

config主配置文件

[root@kubernetes-node2 ~]# egrep -v "^$|#" /etc/kubernetes/config 
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.37.134:8080"

 

[root@kubernetes-node2 ~]# systemctl start kubelet
[root@kubernetes-node2 ~]# systemctl enable kubelet
[root@kubernetes-node2 ~]# systemctl start kube-proxy
[root@kubernetes-node2 ~]# systemctl enable kube-proxy
【Kubernetes-flanneld網絡配置】

 [root@Kubernetes-master kubernetes]# egrep -v "#|^$" /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://192.168.37.134:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"

[root@kubernetes-node1 ~]# egrep -v "#|^$" /etc/sysconfig/flanneld 
FLANNEL_ETCD_ENDPOINTS="http://192.168.37.134:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"

 [root@kubernetes-node2 ~]# egrep -v "#|^$" /etc/sysconfig/flanneld
 FLANNEL_ETCD_ENDPOINTS="http://192.168.37.134:2379"
 FLANNEL_ETCD_PREFIX="/atomic.io/network"

 

[root@Kubernetes-master kubernetes]# etcdctl mk /atomic.io/network/config '{"Network":"172.17.0.0/16"}'
{"Network":"172.17.0.0/16"}
[root@Kubernetes-master kubernetes]# etcdctl get /atomic.io/network/config
{"Network":"172.17.0.0/16"}

[root@Kubernetes-master kubernetes]# systemctl restart flanneld
[root@Kubernetes-master kubernetes]# systemctl enable flanneld

[root@kubernetes-node1 ~]# systemctl start flanneld
[root@kubernetes-node1 ~]# systemctl enable flanneld

[root@kubernetes-node2 ~]# systemctl start flanneld
[root@kubernetes-node2 ~]# systemctl enable flanneld

Ps:重啓flanneld網絡,會出現三個節點的IP,在node節點上要保證docker和自己的flanneld網段一致。如果不一致,重啓docker服務即可恢復,否則的話,三個網段ping測不通

[root@Kubernetes-master ~]# etcdctl ls /atomic.io/network/subnets
/atomic.io/network/subnets/172.17.2.0-24
/atomic.io/network/subnets/172.17.23.0-24
/atomic.io/network/subnets/172.17.58.0-24

檢查:在master上查看kubernetes的節點狀態

[root@Kubernetes-master ~]# kubectl get nodes
NAME STATUS AGE
192.168.37.135 Ready 5m
192.168.37.136 Ready 5m

 【K8s-Dashboard UI平臺】

Kubernetes實現對docker容器集羣的統一管理和調度,通過web界面能夠更好的管理和控制

Ps:這裏我們只需要在node1節點導入鏡像即可

[root@kubernetes-node1 ~]# docker load < pod-infrastructure.tgz 
[root@kubernetes-node1 ~]# docker tag $(docker images | grep none | awk '{print $3}') registry.access.redhat.com/rhel7/pod-infrastructure
[root@kubernetes-node1 ~]# docker images
REPOSITORY                                            TAG                 IMAGE ID            CREATED             SIZE
registry.access.redhat.com/rhel7/pod-infrastructure   latest              99965fb98423        18 months ago       209 MB
[root@kubernetes-node1 ~]# docker load < kubernetes-dashboard-amd64.tgz
[root@kubernetes-node1 ~]# docker tag $(docker images | grep none | awk '{print $3}') bestwu/kubernetes-dashboard-amd64:v1.6.3
[root@kubernetes-node1 ~]# docker images
REPOSITORY                                            TAG                 IMAGE ID            CREATED             SIZE
registry.access.redhat.com/rhel7/pod-infrastructure   latest              99965fb98423        18 months ago       209 MB
bestwu/kubernetes-dashboard-amd64                     v1.6.3              9595afede088        21 months ago       139 MB

【Kubernetes-master】

編輯 yaml文件並創建Dashboard pods模塊

 [root@Kubernetes-master ~]# vim dashboard-controller.yaml

[root@Kubernetes-master ~]# cat dashboard-controller.yaml 
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      containers:
      - name: kubernetes-dashboard
        image: bestwu/kubernetes-dashboard-amd64:v1.6.3
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 100m
            memory: 50Mi
          requests:
            cpu: 100m
            memory: 50Mi
        ports:
        - containerPort: 9090
        args:
          - --apiserver-host=http://192.168.37.134:8080
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
View Code

 [root@Kubernetes-master ~]# vim dashboard-service.yaml

apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 80
    targetPort: 9090
View Code

[root@Kubernetes-master ~]# kubectl apply -f dashboard-controller.yaml

[root@Kubernetes-master ~]# kubectl apply -f dashboard-service.yaml 

Ps:在創建 模塊的同時,檢查日誌是否出現異常信息

[root@Kubernetes-master ~]# tail -f /var/log/messages

可以在node1節點上查看容器已經啓動成功~

[root@kubernetes-node1 ~]# docker ps 
CONTAINER ID        IMAGE                                                        COMMAND                  CREATED             STATUS              PORTS        
f118f845f19f        bestwu/kubernetes-dashboard-amd64:v1.6.3                     "/dashboard --inse..."   8 minutes ago       Up 8 minutes                     30dc9e7f_kubernetes-dashboard-1315149111-pfb60_kube-system_19dcb04b-6d6e-11e9-9599-000c291881f6_02fd5b8e
67b7746a6d23        registry.access.redhat.com/rhel7/pod-infrastructure:latest   "/usr/bin/pod"           8 minutes ago       Up 8 minutes                     es-dashboard-1315149111-pfb60_kube-system_19dcb04b-6d6e-11e9-9599-000c291881f6_4e2cb565

 通過瀏覽器可驗證輸出k8s-master端訪問即可

 

 【拓展-本地私有倉庫部署】

# docker run -itd -p 5000:5000 -v /data/registry:/var/registry docker.io/registry

# docker tag docker.io/tomcat 192.168.37.135:5000/tomcat

# vim  /etc/sysconfig/docker

OPTIONS='--selinux-enabled --log-driver=journald --signature-verification=false --insecure-registry 192.168.37.135:5000'
ADD_REGISTRY='--add-registry 192.168.37.135:5000'

 

#systemctl  restart docker.service

# docker push 192.168.37.135:5000/tomcat

 

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章