CentOS 7: Binary Deployment of a Single-Node Kubernetes 1.15.0 Cluster

Table of Contents

I. Node Initialization

II. Create TLS Certificates and Keys

III. Create kubeconfig Files

IV. Install etcd

V. Deploy the Master Node

VI. Install the Flannel Network Plugin

VII. Deploy the Node

VIII. Install CoreDNS

IX. Install kube-dashboard


GitHub repository: https://github.com/Idiomroot/centos7-kubernet-1.15.0 (the Kubernetes binary packages are in the package directory)

I. Node Initialization

1. Install Docker
[root@idiom-k8s ~]# yum install -y yum-utils \
  device-mapper-persistent-data \
  lvm2

[root@idiom-k8s ~]#  yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo

[root@idiom-k8s ~]#  yum install docker-ce docker-ce-cli containerd.io -y
2. Configure a Docker registry mirror, data directory, and private Harbor registry
[root@idiom-k8s ~]#  mkdir -p /etc/docker  /data/docker
[root@idiom-k8s ~]# vi /etc/docker/daemon.json
{
        "insecure-registries": ["192.168.6.130:5000"],
        "registry-mirrors": ["https://l3mbfzpf.mirror.aliyuncs.com"],
        "graph": "/data/docker"
}

Note: JSON does not allow comments. The insecure-registries entry points at a private Harbor registry; drop that line if you do not have one.
[root@idiom-k8s ~]#  systemctl daemon-reload && systemctl start docker  && systemctl enable docker
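To confirm the daemon picked up daemon.json, docker info can be checked against the values above (an optional sanity check):

docker info | grep -i 'Docker Root Dir'      # should show /data/docker
docker info | grep -iA1 'Registry Mirrors'   # should list the aliyuncs mirror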
3. Disable the firewall and SELinux
[root@idiom-k8s ~]#  systemctl stop firewalld && systemctl disable firewalld && setenforce 0
[root@idiom-k8s ~]#  sed -i 's/enforcing/disabled/g' /etc/selinux/config 
4. Disable swap
[root@idiom-k8s ~]#  swapoff -a && sysctl -w vm.swappiness=0
[root@idiom-k8s ~]#  sed -i '/swap/d' /etc/fstab  
5. Configure kernel parameters for Kubernetes
[root@idiom-k8s ~]#  cat << EOF | tee /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

[root@idiom-k8s ~]#  sysctl -p /etc/sysctl.d/k8s.conf
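If sysctl -p complains that the net.bridge.bridge-nf-call-* keys do not exist, the br_netfilter kernel module is not loaded yet. A minimal fix (using the standard systemd modules-load.d mechanism):

modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf   # reload the module on boot
sysctl -p /etc/sysctl.d/k8s.conf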

II. Create TLS Certificates and Keys

1. Install CFSSL

[root@idiom-k8s ~]#  yum install -y ebtables socat git gcc
[root@idiom-k8s ~]#  mkdir /home/tools && cd /home/tools
[root@idiom-k8s tools]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 && chmod +x cfssl_linux-amd64 && mv cfssl_linux-amd64 /usr/local/bin/cfssl
[root@idiom-k8s tools]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 && chmod +x cfssljson_linux-amd64 && mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
[root@idiom-k8s tools]#  wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 && chmod +x cfssl-certinfo_linux-amd64 && mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
[root@idiom-k8s tools]#  export PATH=/usr/local/bin:$PATH

[root@idiom-k8s tools]#  mkdir /root/ssl && cd /root/ssl

[root@idiom-k8s ssl]#  cfssl print-defaults config > config.json

[root@idiom-k8s ssl]#  cfssl print-defaults csr > csr.json

2. Create the CA certificate

cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
EOF

 

cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

[root@idiom-k8s ssl]#  cfssl gencert -initca ca-csr.json | cfssljson -bare ca
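cfssl gencert -initca writes ca.pem, ca-key.pem, and ca.csr into the current directory. An optional sanity check on the new CA:

openssl x509 -noout -subject -dates -in ca.pem   # subject should show CN=kubernetes, O=k8s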

3. Create the Kubernetes certificate

[root@idiom-k8s ssl]#

cat > kubernetes-csr.json <<EOF
{
    "CN": "kubernetes",
    "hosts": [
      "127.0.0.1",
      "192.168.6.134",
      "10.254.0.1",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

[root@idiom-k8s ssl]#  cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
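The apiserver certificate must cover every name and IP that clients will use, which is what the hosts list above provides. To confirm the SANs made it into the signed certificate (optional):

openssl x509 -noout -text -in kubernetes.pem | grep -A1 'Subject Alternative Name'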

4. Create the admin certificate

cat > admin-csr.json << EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

[root@idiom-k8s ssl]#  cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

5. Create the kube-proxy certificate

cat > kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

[root@idiom-k8s ssl]#  cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes  kube-proxy-csr.json | cfssljson -bare kube-proxy

6. Distribute the certificates

[root@idiom-k8s ssl]# mkdir -p /etc/kubernetes/ssl && cp *.pem /etc/kubernetes/ssl

7. Install the kubectl command

[root@idiom-k8s ssl]#  cd /home/tools &&  wget https://dl.k8s.io/v1.15.0/kubernetes-server-linux-amd64.tar.gz

[root@idiom-k8s tools]# tar -xzvf kubernetes-server-linux-amd64.tar.gz && cp kubernetes/server/bin/kubectl /usr/bin/
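Confirm the binary is on PATH and matches the expected release:

kubectl version --client --short   # should print Client Version: v1.15.0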

III. Create kubeconfig Files

1. Create the TLS bootstrapping token

[root@idiom-k8s tools]# cd /etc/kubernetes/ && export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')

cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
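Each line of token.csv follows the format token,user,uid,"group"; the apiserver consumes it through --token-auth-file and maps the token to the kubelet-bootstrap user. Verify that the token rendered as a 32-character hex string:

cat /etc/kubernetes/token.csv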

2. Create the kubelet bootstrapping kubeconfig file

[root@idiom-k8s kubernetes]# export KUBE_APISERVER="https://192.168.6.134:6443"

# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig
# Set client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig
# Set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig
# Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
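An optional check that the file was assembled correctly (the same check works for kube-proxy.kubeconfig in the next step); since --embed-certs=true was used, the CA data is inlined and the file is self-contained:

kubectl config view --kubeconfig=bootstrap.kubeconfig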

 

3. Create the kube-proxy kubeconfig file

# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig
# Set client authentication parameters
kubectl config set-credentials kube-proxy \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
# Set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig
# Set the default context
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

4. Create the kubectl kubeconfig file

# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER}
# Set client authentication parameters
kubectl config set-credentials admin \
  --client-certificate=/etc/kubernetes/ssl/admin.pem \
  --embed-certs=true \
  --client-key=/etc/kubernetes/ssl/admin-key.pem
# Set context parameters
kubectl config set-context kubernetes \
  --cluster=kubernetes \
  --user=admin
# Set the default context
kubectl config use-context kubernetes
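Because these commands omit --kubeconfig, they write to ~/.kube/config, which kubectl reads by default; from here on, kubectl talks to the secure port 6443 as the admin user. A quick check:

kubectl config current-context   # should print: kubernetes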

IV. Install etcd

[root@idiom-k8s kubernetes]# cd /home/tools/ && wget https://github.com/etcd-io/etcd/releases/download/v3.3.13/etcd-v3.3.13-linux-amd64.tar.gz

[root@idiom-k8s tools]#  tar -xf etcd-v3.3.13-linux-amd64.tar.gz && cp etcd-v3.3.13-linux-amd64/etcd* /usr/bin/
 

Note: the heredoc delimiter is quoted ('EOF') so that the ${ETCD_NAME}-style variables are written into the unit file literally instead of being expanded by the shell.

cat > /usr/lib/systemd/system/etcd.service <<'EOF'
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
ExecStart=/usr/bin/etcd \
  --name ${ETCD_NAME} \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --advertise-client-urls ${ETCD_ADVERTISE_CLIENT_URLS} \
  --listen-client-urls ${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
  --data-dir=${ETCD_DATA_DIR}
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
mkdir -p /etc/etcd && cat > /etc/etcd/etcd.conf <<EOF
# [member]
ETCD_NAME=idiom
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="https://192.168.6.134:2379"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.6.134:2379"
EOF

[root@idiom-k8s tools]#  mkdir -p /var/lib/etcd/ && systemctl daemon-reload && systemctl start etcd  && systemctl enable etcd

Verify the service:

[root@idiom-k8s tools]#  etcdctl    --ca-file=/etc/kubernetes/ssl/ca.pem    --cert-file=/etc/kubernetes/ssl/kubernetes.pem    --key-file=/etc/kubernetes/ssl/kubernetes-key.pem    cluster-health

member 8e9e05c52164694d is healthy: got healthy result from https://192.168.6.134:2379
cluster is healthy
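Beyond cluster-health, a short write/delete round trip confirms that client TLS works end to end (a throwaway key; etcdctl 3.3 defaults to the v2 API used here):

etcdctl --endpoints=https://192.168.6.134:2379 \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  set /smoke-test ok
etcdctl --endpoints=https://192.168.6.134:2379 \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  rm /smoke-test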

V. Deploy the Master Node

1. Configure kube-apiserver

kubernetes-server-linux-amd64.tar.gz was already downloaded and unpacked in step 7 of section II, so the remaining master binaries can be copied straight from it:

[root@idiom-k8s tools]# cp kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler} /usr/bin/

cat > /usr/lib/systemd/system/kube-apiserver.service <<'EOF'
[Unit]
Description=Kubernetes API Service
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=etcd.service

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/apiserver
ExecStart=/usr/bin/kube-apiserver \
        $KUBE_LOGTOSTDERR \
        $KUBE_LOG_LEVEL \
        $KUBE_ETCD_SERVERS \
        $KUBE_API_ADDRESS \
        $KUBE_API_PORT \
        $KUBELET_PORT \
        $KUBE_ALLOW_PRIV \
        $KUBE_SERVICE_ADDRESSES \
        $KUBE_ADMISSION_CONTROL \
        $KUBE_API_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
cat > /etc/kubernetes/config <<EOF
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_MASTER="--master=http://192.168.6.134:8080"
EOF
cat > /etc/kubernetes/apiserver <<EOF
KUBE_API_ADDRESS="--advertise-address=192.168.6.134 --bind-address=192.168.6.134 --insecure-bind-address=192.168.6.134"
KUBE_ETCD_SERVERS="--etcd-servers=https://192.168.6.134:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=ServiceAccount,NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"
KUBE_API_ARGS="--authorization-mode=RBAC --runtime-config=rbac.authorization.k8s.io/v1beta1 --kubelet-https=true  --token-auth-file=/etc/kubernetes/token.csv --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem --client-ca-file=/etc/kubernetes/ssl/ca.pem --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem --etcd-cafile=/etc/kubernetes/ssl/ca.pem --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem --enable-swagger-ui=true --apiserver-count=3 --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/var/lib/audit.log --event-ttl=1h"
EOF

[root@idiom-k8s tools]# systemctl daemon-reload && systemctl enable kube-apiserver && systemctl start kube-apiserver

2. Configure kube-controller-manager

cat > /usr/lib/systemd/system/kube-controller-manager.service <<'EOF'
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/controller-manager
ExecStart=/usr/bin/kube-controller-manager \
        $KUBE_LOGTOSTDERR \
        $KUBE_LOG_LEVEL \
        $KUBE_MASTER \
        $KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
cat > /etc/kubernetes/controller-manager <<EOF
KUBE_CONTROLLER_MANAGER_ARGS="--address=127.0.0.1 --service-cluster-ip-range=10.254.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem --root-ca-file=/etc/kubernetes/ssl/ca.pem --leader-elect=true"
EOF

[root@idiom-k8s tools]# systemctl daemon-reload && systemctl enable kube-controller-manager && systemctl start kube-controller-manager

3. Configure kube-scheduler

cat > /usr/lib/systemd/system/kube-scheduler.service <<'EOF'
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/scheduler
ExecStart=/usr/bin/kube-scheduler \
            $KUBE_LOGTOSTDERR \
            $KUBE_LOG_LEVEL \
            $KUBE_MASTER \
            $KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
cat > /etc/kubernetes/scheduler <<EOF
KUBE_SCHEDULER_ARGS="--leader-elect=true --address=127.0.0.1"
EOF

[root@idiom-k8s tools]# systemctl daemon-reload && systemctl enable kube-scheduler && systemctl start kube-scheduler

4. Verify the master components

[root@idiom-k8s tools]# kubectl get componentstatuses
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-0               Healthy   {"health":"true"}  
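You can also confirm the apiserver is listening on both the secure (6443) and insecure (8080) ports; ss ships with CentOS 7's iproute package (optional check):

ss -tlnp | grep -E '6443|8080'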

VI. Install the Flannel Network Plugin

1. Install Flannel and modify the configuration files

[root@idiom-k8s tools]# yum install -y flannel

cat > /usr/lib/systemd/system/flanneld.service <<'EOF'
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/etc/sysconfig/flanneld
EnvironmentFile=-/etc/sysconfig/docker-network
ExecStart=/usr/bin/flanneld-start \
  -etcd-endpoints=${FLANNEL_ETCD_ENDPOINTS} \
  -etcd-prefix=${FLANNEL_ETCD_PREFIX} \
  $FLANNEL_OPTIONS
ExecStartPost=/usr/libexec/flannel/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=on-failure

[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF
cat > /etc/sysconfig/flanneld  <<EOF
# Flanneld configuration options  

# etcd url location.  Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="https://192.168.6.134:2379"

# etcd config key.  This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/idiomk8s/network"

# Any additional options that you want to pass
FLANNEL_OPTIONS="-etcd-cafile=/etc/kubernetes/ssl/ca.pem -etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem -etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem"
EOF

2. Create the network configuration in etcd

etcdctl --endpoints=https://192.168.6.134:2379 \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  mkdir /idiomk8s/network

etcdctl --endpoints=https://192.168.6.134:2379 \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  mk /idiomk8s/network/config '{"Network":"172.100.0.0/16","SubnetLen":24,"Backend":{"Type":"vxlan"}}'

 

[root@idiom-k8s tools]# systemctl daemon-reload && systemctl enable flanneld && systemctl start flanneld

Query etcd to confirm flannel registered its subnet lease:
etcdctl --endpoints=https://192.168.6.134:2379 \
    --ca-file=/etc/kubernetes/ssl/ca.pem \
    --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
    --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
    ls /idiomk8s/network/subnets
/idiomk8s/network/subnets/172.100.68.0-24

etcdctl --endpoints=https://192.168.6.134:2379 \
   --ca-file=/etc/kubernetes/ssl/ca.pem \
   --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
   --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
   get /idiomk8s/network/config
{"Network":"172.100.0.0/16","SubnetLen":24,"Backend":{"Type":"vxlan"}}

etcdctl --endpoints=https://192.168.6.134:2379 \
   --ca-file=/etc/kubernetes/ssl/ca.pem \
   --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
   --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
   get /idiomk8s/network/subnets/172.100.68.0-24
{"PublicIP":"192.168.6.134","BackendType":"vxlan","BackendData":{"VtepMAC":"b6:ab:55:76:56:da"}}

 

VII. Deploy the Node

1. Configure Docker

[root@idiom-k8s tools]# cp  /usr/lib/systemd/system/docker.service   /usr/lib/systemd/system/docker.service.bak 

[root@idiom-k8s tools]# vi   /usr/lib/systemd/system/docker.service

[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target

[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
EnvironmentFile=-/run/flannel/docker
EnvironmentFile=-/run/flannel/subnet.env

EnvironmentFile=-/etc/sysconfig/docker
EnvironmentFile=-/etc/sysconfig/docker-storage
EnvironmentFile=-/etc/sysconfig/docker-network
#ExecStart=/usr/bin/dockerd  --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU}
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
# restart the docker process if it exits prematurely
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target

[root@idiom-k8s tools]#  systemctl daemon-reload && systemctl restart docker && systemctl restart flanneld
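After the restart, docker0 should sit inside the subnet flannel leased (172.100.68.0/24 in the output above), and /run/flannel/docker should contain the matching --bip option written by mk-docker-opts.sh. Optional check (interface names as configured here):

cat /run/flannel/docker
ip -4 addr show flannel.1
ip -4 addr show docker0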

2. Configure kubelet

[root@idiom-k8s tools]# wget  https://dl.k8s.io/v1.15.0/kubernetes-node-linux-amd64.tar.gz

[root@idiom-k8s tools]# tar -xf kubernetes-node-linux-amd64.tar.gz  && cp kubernetes/node/bin/{kube-proxy,kubelet} /usr/bin/

[root@idiom-k8s tools]# cd /etc/kubernetes && kubectl -s http://192.168.6.134:8080 create clusterrolebinding kubelet-bootstrap   --clusterrole=system:node-bootstrapper   --user=kubelet-bootstrap

cat > /usr/lib/systemd/system/kubelet.service <<'EOF'
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/bin/kubelet \
            $KUBE_LOGTOSTDERR \
            $KUBE_LOG_LEVEL \
            $KUBELET_ADDRESS \
            $KUBELET_PORT \
            $KUBELET_HOSTNAME \
            $KUBE_ALLOW_PRIV \
            $KUBELET_POD_INFRA_CONTAINER \
            $KUBELET_ARGS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

cat > /etc/kubernetes/kubelet <<EOF
KUBELET_ADDRESS="--address=192.168.6.134"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME="--hostname-override=192.168.6.134"
# Note: the kubelet --api-servers flag no longer exists and the unit file does not
# reference KUBELET_API_SERVER; the apiserver address comes from the kubeconfig.
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
KUBELET_ARGS="--cluster-dns=10.254.0.2  --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig  --cert-dir=/etc/kubernetes/ssl --cluster-domain=cluster.local --hairpin-mode promiscuous-bridge --serialize-image-pulls=false"
EOF

(/etc/kubernetes/config was already written in section V; on this single-node host the following simply rewrites the same file, but dedicated worker nodes need it too.)

cat > /etc/kubernetes/config <<EOF
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_MASTER="--master=http://192.168.6.134:8080"
EOF

[root@idiom-k8s kubernetes]# mkdir /var/lib/kubelet && systemctl daemon-reload && systemctl enable kubelet && systemctl start kubelet

3. Approve the kubelet's TLS certificate request (in a multi-node setup run this on the master; note that kubectl commands can only be executed on the master node)

[root@idiom-k8s kubernetes]# kubectl get csr

NAME        AGE       REQUESTOR           CONDITION

node-csr-g7yiWhmFfelICllg8KqOrmG_J4OmFzqkzocjuadawd0   4m        kubelet-bootstrap   Pending 

[root@idiom-k8s kubernetes]# kubectl certificate approve node-csr-g7yiWhmFfelICllg8KqOrmG_J4OmFzqkzocjuadawd0

certificatesigningrequest "node-csr-g7yiWhmFfelICllg8KqOrmG_J4OmFzqkzocjuadawd0" approved

[root@idiom-k8s kubernetes]# kubectl get node
NAME            STATUS   ROLES    AGE     VERSION
192.168.6.134   Ready    <none>   3h54m   v1.15.0

Note: if you approve the CSR but the node never appears, check the kubelet status; it may show:

Jan 23 10:52:31 192-168-6-134 kubelet[13894]: E0123 10:52:31.692374   13894 reflector.go:205] k8s.io/kubernetes/pkg/kubelet/kubelet.go:413: Failed to list *v1.Service: services is forbidden: User "system:node:192.168.6.134" cannot list services at the cluster scope

Jan 23 10:52:31 192-168-6-134 kubelet[13894]: E0123 10:52:31.692712   13894 reflector.go:205] k8s.io/kubernetes/pkg/kubelet/kubelet.go:422: Failed to list *v1.Node: nodes is forbidden: User "system:node:192.168.6.134" cannot list nodes at the cluster scope

Jan 23 10:52:31 192-168-6-134 kubelet[13894]: E0123 10:52:31.693849   13894 reflector.go:205] k8s.io/kubernetes/pkg/kubelet/config/apiserver.go:47: Failed to list *v1.Pod: pods is forbidden: User "system:node:192.168.6.134" cannot list pods at the cluster scope

This happens because RBAC is enabled: the system:node ClusterRole exists, but the node user is not bound to it. Create a new clusterrolebinding:

kubectl create clusterrolebinding system:nodes \
  --clusterrole=system:node \
  --user=system:node:192.168.6.134 

Then re-approve the CSR; running systemctl status kubelet -l should show:

Attempting to register node 192.168.6.134 

Successfully registered node 192.168.6.134 

Output like the above means the node registered with the master successfully.

4. Configure kube-proxy

[root@idiom-k8s kubernetes]#  yum install -y conntrack-tools

cat > /usr/lib/systemd/system/kube-proxy.service <<'EOF'
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/proxy
ExecStart=/usr/bin/kube-proxy \
        $KUBE_LOGTOSTDERR \
        $KUBE_LOG_LEVEL \
        $KUBE_MASTER \
        $KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

cat >  /etc/kubernetes/proxy  <<EOF
KUBE_PROXY_ARGS="--bind-address=192.168.6.134 --hostname-override=192.168.6.134 --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig --cluster-cidr=10.254.0.0/16"
EOF

[root@idiom-k8s kubernetes]#  systemctl daemon-reload &&  systemctl enable kube-proxy  && systemctl start kube-proxy
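kube-proxy runs in iptables mode by default and programs KUBE-SERVICES and related chains; seeing them confirms it is working (an optional check):

iptables-save | grep -c KUBE-SERVICES   # non-zero means the chains exist
systemctl status kube-proxy -l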

Verify network connectivity (in a multi-node setup run this on the master; from this step onward, everything is done on the master node):

[root@idiom-k8s kubernetes]#  mkdir /root/test/ &&  cd /root/test

cat > centos.yaml <<EOF
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: test
  labels:
    app: test
spec:
  replicas: 2
  template:
    metadata:
      labels:
        app: test
    spec:
      containers:
      - image: centos:latest
        name: test
        command: ["/bin/bash","-c","while true; do sleep 1000; done"]
        imagePullPolicy: IfNotPresent
EOF

 

[root@idiom-k8s test]# kubectl create -f centos.yaml 
deployment.extensions/test created
[root@idiom-k8s test]# kubectl  get pod 
NAME                    READY   STATUS    RESTARTS   AGE
test-5dc9965f96-rsrfm   1/1     Running   0          5s
test-5dc9965f96-w7w94   1/1     Running   0          5s
[root@idiom-k8s test]# kubectl describe po  test-5dc9965f96-rsrfm | grep IP
IP:             172.100.68.7
[root@idiom-k8s test]# kubectl exec test-5dc9965f96-w7w94  -i -t -- ping 172.100.68.7
PING 172.100.68.7 (172.100.68.7) 56(84) bytes of data.
64 bytes from 172.100.68.7: icmp_seq=1 ttl=64 time=0.139 ms
64 bytes from 172.100.68.7: icmp_seq=2 ttl=64 time=0.162 ms
^C
--- 172.100.68.7 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 0.139/0.150/0.162/0.016 ms

Test that the cluster works through a Service:

[root@idiom-k8s test]#  kubectl run nginx --replicas=2 --labels="run=load-balancer-example" --image=nginx:latest  --port=80
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created
[root@idiom-k8s test]# kubectl expose deployment nginx --type=NodePort --name=example-service
service/example-service exposed
[root@idiom-k8s test]# kubectl describe svc example-service
Name:                     example-service
Namespace:                default
Labels:                   run=load-balancer-example
Annotations:              <none>
Selector:                 run=load-balancer-example
Type:                     NodePort
IP:                       10.254.110.235
Port:                     <unset>  80/TCP
TargetPort:               80/TCP
NodePort:                 <unset>  31747/TCP
Endpoints:                172.100.68.8:80,172.100.68.9:80
Session Affinity:         None
External Traffic Policy:  Cluster
Events:                   <none>
[root@idiom-k8s test]# curl 10.254.110.235
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>

Alternatively, open http://192.168.6.134:31747 in a browser.

VIII. Install CoreDNS

[root@idiom-k8s test]# cd /home/tools/ && wget https://dl.k8s.io/v1.15.0/kubernetes.tar.gz

[root@idiom-k8s tools]# tar -xf kubernetes.tar.gz

[root@idiom-k8s tools]# cd kubernetes/cluster/addons/dns/coredns/

[root@idiom-k8s coredns]# docker pull coredns/coredns:1.3.1

[root@idiom-k8s coredns]# docker tag coredns/coredns:1.3.1 k8s.gcr.io/coredns:1.3.1

Pinning the 1.3.1 tag (rather than tagging whatever :latest happens to be) guarantees the image matches what coredns.yaml.base references.

[root@idiom-k8s coredns]# vi transforms2sed.sed

Replace $DNS_SERVER_IP and $DNS_DOMAIN with the values configured for the kubelet: here $DNS_SERVER_IP becomes 10.254.0.2 and $DNS_DOMAIN becomes cluster.local.

[root@idiom-k8s coredns]# cat transforms2sed.sed    
s/__PILLAR__DNS__SERVER__/10.254.0.2/g
s/__PILLAR__DNS__DOMAIN__/cluster.local/g
s/__PILLAR__CLUSTER_CIDR__/$SERVICE_CLUSTER_IP_RANGE/g
s/__PILLAR__DNS__MEMORY__LIMIT__/$DNS_MEMORY_LIMIT/g
s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g

[root@idiom-k8s coredns]# sed -f transforms2sed.sed coredns.yaml.base > coredns.yaml && kubectl create -f coredns.yaml
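Before testing, make sure the CoreDNS pods are running (the addon manifest labels them k8s-app=kube-dns):

kubectl -n kube-system get pods -l k8s-app=kube-dns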

Use dnstools to test that DNS resolution works:

[root@idiom-k8s coredns]# kubectl get svc
NAME              TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
example-service   NodePort    10.254.110.235   <none>        80:31747/TCP   9m31s
kubernetes        ClusterIP   10.254.0.1       <none>        443/TCP        4h4m
[root@idiom-k8s coredns]# kubectl run -it --rm --restart=Never --image=infoblox/dnstools:latest dnstools
If you don't see a command prompt, try pressing enter.
dnstools# nslookup kubernetes
Server:         10.254.0.2
Address:        10.254.0.2#53

Name:   kubernetes.default.svc.cluster.local
Address: 10.254.0.1

dnstools# nslookup  example-service 
Server:         10.254.0.2
Address:        10.254.0.2#53

Name:   example-service.default.svc.cluster.local
Address: 10.254.110.235
Note: DNS is one of the core components Kubernetes relies on for service discovery. By default only a single DNS Pod is created; in production coredns should be scaled out. Scale it manually with: kubectl --namespace=kube-system scale deployment coredns --replicas=2

IX. Install kube-dashboard

[root@idiom-k8s coredns]# mkdir /root/kube-dashboard/ && cd /root/kube-dashboard/

[root@idiom-k8s kube-dashboard]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/heapster-amd64:v1.5.4

[root@idiom-k8s kube-dashboard]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/heapster-amd64:v1.5.4 k8s.gcr.io/heapster-amd64:v1.5.4

[root@idiom-k8s kube-dashboard]# docker pull mirrorgooglecontainers/kubernetes-dashboard-amd64:v1.10.1

[root@idiom-k8s kube-dashboard]#  docker tag mirrorgooglecontainers/kubernetes-dashboard-amd64:v1.10.1 k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1

Install the Heapster add-on (without it the dashboard cannot display graphs):
cat > heapster.yaml <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: heapster
    spec:
      serviceAccountName: heapster
      containers:
      - name: heapster
        image: k8s.gcr.io/heapster-amd64:v1.5.4 
        imagePullPolicy: IfNotPresent
        command:
        - /heapster
        - --source=kubernetes:https://kubernetes.default
        - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086
---
apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: Heapster
  name: heapster
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 8082
  selector:
    k8s-app: heapster
EOF

cat > heapster-rbac.yaml  <<EOF
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: heapster
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:heapster
subjects:
- kind: ServiceAccount
  name: heapster
  namespace: kube-system
EOF
[root@idiom-k8s kube-dashboard]# kubectl  create -f heapster-rbac.yaml 
[root@idiom-k8s kube-dashboard]# kubectl  create -f heapster.yaml 

Install kubernetes-dashboard:
cat > kubernetes-dashboard.yaml <<EOF
# ------------------- Dashboard Secrets ------------------- #

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kube-system
type: Opaque
data:
  csrf: ""

---
# ------------------- Dashboard Service Account ------------------- #

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Role & Role Binding ------------------- #

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Deployment ------------------- #

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1 
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          - --heapster-host=http://heapster
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule

---
# ------------------- Dashboard Service ------------------- #

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
EOF


cat > dashboard-adminuser.yaml <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
EOF

cat > dashboard-rbac.yaml <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF

Without a self-signed certificate Firefox can reach the dashboard but Chrome cannot, so we generate our own certificate here.

[root@idiom-k8s kube-dashboard]# openssl genrsa -out ca.key 2048

[root@idiom-k8s kube-dashboard]# openssl req -new -x509 -key ca.key -out ca.crt -days 3650 -subj "/C=CN/ST=HB/L=WH/O=DM/OU=YPT/CN=CA"
[root@idiom-k8s kube-dashboard]# openssl genrsa -out dashboard.key 2048
[root@idiom-k8s kube-dashboard]# openssl req -new -sha256 -key dashboard.key -out dashboard.csr -subj "/C=CN/ST=HB/L=WH/O=DM/OU=YPT/CN=192.168.6.134"

[root@idiom-k8s kube-dashboard]# vi dashboard.cnf

extensions = san
[san]
keyUsage = digitalSignature
extendedKeyUsage = clientAuth,serverAuth
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
subjectAltName = IP:192.168.6.134,IP:127.0.0.1,DNS:192.168.6.134,DNS:localhost

[root@idiom-k8s kube-dashboard]# openssl x509 -req -sha256 -days 3650 -in dashboard.csr -out dashboard.crt -CA ca.crt -CAkey ca.key -CAcreateserial -extfile dashboard.cnf
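Chrome rejects certificates that lack a SAN entry, which is why dashboard.cnf matters; verify the extension made it into the signed certificate (optional):

openssl x509 -noout -text -in dashboard.crt | grep -A1 'Subject Alternative Name'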
[root@idiom-k8s kube-dashboard]# kubectl create secret generic kubernetes-dashboard-certs --from-file="dashboard.crt,dashboard.key" -n kube-system 
[root@idiom-k8s kube-dashboard]# kubectl apply -f kubernetes-dashboard.yaml 

[root@idiom-k8s kube-dashboard]# kubectl create -f  dashboard-adminuser.yaml  
[root@idiom-k8s kube-dashboard]# kubectl get service --namespace=kube-system                                                                     
NAME                   TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                  AGE
heapster               ClusterIP   10.254.21.99     <none>        80/TCP                   3h19m
kube-dns               ClusterIP   10.254.0.2       <none>        53/UDP,53/TCP,9153/TCP   4h19m
kubernetes-dashboard   NodePort    10.254.108.254   <none>        443:30994/TCP            168m 

[root@idiom-k8s kube-dashboard]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Name:         admin-user-token-2q6xx
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: 910464ab-aa50-4d93-900d-60adc757e4af

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1359 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTJxNnh4Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI5MTA0NjRhYi1hYTUwLTRkOTMtOTAwZC02MGFkYzc1N2U0YWYiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.NDvHXJTSiDDOmvldp1obFBjdXfxtVrLWb5UPEezYHKZTZSqV5FDgMH1Sn86tfA1ygMzyADK5ncgKkuxrFWQQjJ288r79qeYyucakpJ05MzZCcVWYO2hLLGMltJHFkc4wcRSsZJ8UOj1lhlY_1CqA26Ll797k6GF6TJSOmcdcRY12G2rYfYLWFX_ksTeI5jog4Bbzai4z-iJMmeoIHVmb8BuyruhsVTt-e3PN0qif-CdgwepgA2Dvb_4OeHr9dvSnIfUQKQxv0S5UXJGW7OfwJ3ZR9QJC6dcCqLQyLq0FhPe8SrZeqm95LzP8EAkJsDOMZPGW5cbr5cBqHYsgoWg_Uw

 

Open https://192.168.6.134:30994 in a browser.

 

Note: if kube-dashboard does not come up correctly with the steps above, first check whether you followed this document from start to finish. If your cluster was installed with kubeadm, installing kube-dashboard this way may fail because the environment differs. Typical symptoms: graphs do not render, and kubectl top does not work (top node or top pod returns error: metrics not available yet). In that case, read the heapster service logs and debug step by step.

Troubleshooting cases:

Case 1:

[root@idiom-k8s kube-dashboard]# kubectl logs -n kube-system --tail=1 -f pods/heapster-7897654d76-5qzxc

I1014 09:07:25.010075 1 influxdb.go:274] Created database "k8s" on influxDB server at "monitoring-influxdb.kube-system.svc:8086"

E1014 09:08:05.005964 1 manager.go:101] Error in scraping containers from kubelet:172.18.156.24:10255: failed to get all container stats from Kubelet URL "http://172.18.156.24:10255/stats/container/": Post http://172.18.156.24:10255/stats/container/: dial tcp 172.18.156.24:10255: getsockopt: connection refused

W1014 09:08:25.000416 1 manager.go:152] Failed to get all responses in time (got 0/1)

Edit heapster.yaml and update the source and sink options:

- --source=kubernetes:https://kubernetes.default?kubeletHttps=true&kubeletPort=10250&insecure=true

- --sink=influxdb:http://monitoring-influxdb.kube-system.svc.cluster.local:8086

[root@idiom-k8s kube-dashboard]# kubectl apply -f heapster.yaml

[root@idiom-k8s kube-dashboard]# kubectl top node

error: metrics not available yet

Not resolved yet; keep reading the logs.

Case 2:

[root@idiom-k8s kube-dashboard]# kubectl logs -n kube-system --tail=1 -f pods/heapster-bfdcbf4d4-pmz4p

W1014 09:14:25.000370 1 manager.go:152] Failed to get all responses in time (got 0/1)

E1014 09:15:05.009168 1 manager.go:101] Error in scraping containers from kubelet:172.18.156.24:10250: failed to get all container stats from Kubelet URL "https://172.18.156.24:10250/stats/container/": request failed - "403 Forbidden", response: "Forbidden (user=system:serviceaccount:kube-system:heapster, verb=create, resource=nodes, subresource=stats)"

^C

[root@idiom-k8s kube-dashboard]# vi heapster-rbac.yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:heapster
rules:
- apiGroups:
  - ""
  resources:
  - events
  - namespaces
  - nodes
  - pods
  - nodes/stats
  verbs:
  - create
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - deployments
  verbs:
  - get
  - list
  - watch

[root@idiom-k8s kube-dashboard]# kubectl apply -f heapster-rbac.yaml

[root@idiom-k8s kube-dashboard]# kubectl top node

NAME   CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
grpc   628m         7%     8743Mi          55%

 

 

 

 
