kubeadm 安裝 k8s

yum源

# Back up the stock CentOS base repo and install the Aliyun mirror in its
# place.  The original `wget -nc` saved Centos-7.repo into the current
# directory, where yum never reads it; download it straight into
# /etc/yum.repos.d/ instead.
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bk

wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

系統配置

# Kernel parameters Kubernetes needs: let bridged traffic be seen by
# iptables/ip6tables, enable IPv4 forwarding, and discourage swapping
# (kubelet refuses to start with active swap by default).  The bare
# key = value lines are not shell -- write them to /etc/sysctl.d/k8s.conf
# so the `sysctl -p` below can actually load them.
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF

# br_netfilter must be loaded first, or the bridge-nf-call-* keys do not
# exist and sysctl -p fails.
modprobe br_netfilter

sysctl -p /etc/sysctl.d/k8s.conf

# Raise per-user resource limits for Kubernetes components in one append:
# open-file count, process count, and locked memory.
cat >> /etc/security/limits.conf <<EOF
* soft nofile 65536
* hard nofile 65536
* soft nproc 65536
* hard nproc 65536
* soft  memlock  unlimited
* hard memlock  unlimited
EOF

ipvs

# Persist the kernel modules kube-proxy's "ipvs" mode depends on; files in
# /etc/sysconfig/modules/ are sourced at boot on CentOS 7.  The quoted
# delimiter keeps the script body literal (nothing to expand here anyway).
# NOTE(review): on kernels >= 4.19 the conntrack module is named
# nf_conntrack, not nf_conntrack_ipv4 -- confirm for your kernel.
cat > /etc/sysconfig/modules/ipvs.modules <<'EOF'
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

# Make the script executable, load the modules now, and verify they landed.
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

安裝 ipset 和 ipvsadm(kube-proxy 的 IPVS 模式需要)

# ipset/ipvsadm are required for kube-proxy's IPVS mode; -y keeps the
# install non-interactive (the original would hang at the confirm prompt
# when scripted).
yum install -y ipset ipvsadm

添加k8s yum源

# Add the Aliyun Kubernetes yum repo.  The quoted delimiter keeps the repo
# body literal -- it contains no shell expansions, so behavior is unchanged.
cat <<'EOF' > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Docker CE repo, also fetched from the Aliyun mirror.
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo

安裝軟件

# Install the Kubernetes node components and Docker; -y keeps it
# non-interactive.
yum install -y kubelet kubeadm kubectl docker-ce

# Missing step in the original: docker must be running before
# `kubeadm init`, and both services should survive a reboot.  kubelet is
# only enabled here -- kubeadm starts it with the right config later.
systemctl enable --now docker
systemctl enable kubelet

修改docker配置

# NOTE(review): this line belongs in the [Service] section of
# /usr/lib/systemd/system/docker.service (or a drop-in override) -- it is
# not a shell command.  The added --exec-opt native.cgroupdriver=systemd
# makes docker's cgroup driver match the kubelet's.  After editing, run:
#   systemctl daemon-reload && systemctl restart docker
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd

初始化配置

# Dump kubeadm's built-in defaults to a file we can edit before init.
kubeadm config print init-defaults > kubeadm-init.yaml

# kubeadm-init.yaml -- produced by `kubeadm config print init-defaults`,
# then edited.  First document: InitConfiguration (node-local settings).
apiVersion: kubeadm.k8s.io/v1beta2

bootstrapTokens:

- groups:

  - system:bootstrappers:kubeadm:default-node-token

  # The well-known default token; workers join with it (see `kubeadm join`
  # below).  It expires after ttl.
  token: abcdef.0123456789abcdef

  ttl: 24h0m0s

  usages:

  - signing

  - authentication

kind: InitConfiguration

localAPIEndpoint:

  # NOTE(review): replace with this master's own IP address.
  advertiseAddress: 172.16.21.143

  bindPort: 6443

nodeRegistration:

  criSocket: /var/run/dockershim.sock

  name: k8s-master

  taints:

  # Keep regular workloads off the control-plane node (removed later for a
  # single-node setup).
  - effect: NoSchedule

    key: node-role.kubernetes.io/master

---

# Second document: ClusterConfiguration (cluster-wide settings).
apiServer:

  timeoutForControlPlane: 4m0s

apiVersion: kubeadm.k8s.io/v1beta2

certificatesDir: /etc/kubernetes/pki

clusterName: kubernetes

controllerManager: {}

dns:

  type: CoreDNS

etcd:

  local:

    dataDir: /var/lib/etcd

# Pull control-plane images from the Aliyun mirror instead of k8s.gcr.io.
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers

kind: ClusterConfiguration

kubernetesVersion: v1.15.0

networking:

  dnsDomain: cluster.local

  # NOTE(review): no podSubnet is set here, and the Calico manifest further
  # down uses pool 10.96.0.0/16, which lies INSIDE this service CIDR
  # (10.96.0.0/12).  Pod and service ranges must not overlap -- confirm and
  # pick a disjoint pod range (e.g. 10.244.0.0/16).
  serviceSubnet: 10.96.0.0/12

scheduler: {}

---

# Third document: run kube-proxy in IPVS mode (requires the ip_vs kernel
# modules loaded earlier).
apiVersion: kubeproxy.config.k8s.io/v1alpha1

kind: KubeProxyConfiguration

mode: "ipvs"

預下載鏡像

# Pre-pull all control-plane images (from the Aliyun mirror configured in
# kubeadm-init.yaml) so `kubeadm init` does not stall on downloads.
kubeadm config images pull --config kubeadm-init.yaml

集羣初始化

# Initialize the control plane with the edited configuration.
kubeadm init --config kubeadm-init.yaml

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube

  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.

Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:

  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.16.21.143:6443 --token abcdef.0123456789abcdef \

    --discovery-token-ca-cert-hash sha256:ad6da20e29c78eca1e2e54a74f1443a134828b57830adb679c1c9e625cef5a39

node節點加入集羣

# Run on each worker node as root; the token and CA-cert hash come from the
# master (see below for regenerating them after the 24h token expiry).
kubeadm join 172.16.21.143:6443 --token abcdef.0123456789abcdef \

    --discovery-token-ca-cert-hash sha256:ad6da20e29c78eca1e2e54a74f1443a134828b57830adb679c1c9e625cef5a39

獲得token和hash值

1)獲取token

# Show the cluster's bootstrap tokens.
kubeadm token list

默認情況下 Token 過期是時間是24小時,如果 Token 過期以後,可以輸入以下命令,生成新的 Token

# Create a fresh bootstrap token once the original has expired.
kubeadm token create

2)獲取hash值

# Compute the --discovery-token-ca-cert-hash value: SHA-256 digest of the
# cluster CA certificate's DER-encoded public key.
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'

master節點去除taint

# Allow workloads to schedule on the master (needed for a single-node
# cluster); the trailing "-" removes the NoSchedule taint.
kubectl taint node k8s-master node-role.kubernetes.io/master:NoSchedule-

calico

# 查看kubernetes版本對應的其他組件的版本

https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/

修改配置文件

---

# Source: calico/templates/calico-config.yaml

# This ConfigMap is used to configure a self-hosted Calico installation.

kind: ConfigMap

apiVersion: v1

metadata:

  name: calico-config

  namespace: kube-system

data:

  # Typha is disabled.

  typha_service_name: "calico-typha"

  # Configure the backend to use.

  calico_backend: "bird"

  # Configure the MTU to use

  veth_mtu: "1440"

  # The CNI network configuration to install on each node.  The special

  # values in this config will be automatically populated.

  cni_network_config: |-

    {

      "name": "k8s-pod-network",

      "cniVersion": "0.3.1",

      "plugins": [

        {

          "type": "calico",

          "log_level": "info",

          "datastore_type": "kubernetes",

          "nodename": "__KUBERNETES_NODE_NAME__",

          "mtu": __CNI_MTU__,

          "ipam": {

              "type": "calico-ipam"

          },

          "policy": {

              "type": "k8s"

....

....

...

            # Enable IPIP

            # "off" disables the IPIP overlay: pod traffic is then routed
            # natively via BGP, which needs L2 adjacency (or a BGP fabric)
            # between nodes -- confirm this matches your network.
            - name: CALICO_IPV4POOL_IPIP

              value: "off"

            # Felix-side switch, kept consistent with the pool setting above.
            - name: FELIX_IPINIPENABLED

              value: "false"

            # Set MTU for tunnel device used if ipip is enabled
            - name: FELIX_IPINIPMTU

              valueFrom:

                configMapKeyRef:

                  name: calico-config

                  key: veth_mtu

            # The default IPv4 pool to create on startup if none exists. Pod IPs will be

            # chosen from this range. Changing this value after installation will have

            # no effect. This should fall within `--cluster-cidr`.

            # NOTE(review): 10.96.0.0/16 lies inside the cluster's service
            # CIDR (10.96.0.0/12 in kubeadm-init.yaml).  Pod and service
            # ranges must not overlap -- use a disjoint range such as
            # 10.244.0.0/16 and keep kubeadm's podSubnet in sync.
            - name: CALICO_IPV4POOL_CIDR

              value: "10.96.0.0/16"

            # Disable file logging so `kubectl logs` works.

            - name: CALICO_DISABLE_FILE_LOGGING

              value: "true"


dashboard

# Deploy the Kubernetes dashboard (v2.0.0-beta1 manifests).
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta1/aio/deploy/recommended.yaml

修改配置文件

# Dashboard Service patched from ClusterIP to NodePort so the UI is
# reachable from outside the cluster.
kind: Service

apiVersion: v1

metadata:

  labels:

    k8s-app: kubernetes-dashboard

  name: kubernetes-dashboard

  namespace: kubernetes-dashboard

spec:

  # NodePort exposes the service on a static port of every node.
  type: NodePort

  ports:

    - port: 443

      # The dashboard container serves HTTPS on 8443.
      targetPort: 8443

      # Fixed external port; must be inside the node-port range (default
      # 30000-32767).
      nodePort: 30001

  selector:

    k8s-app: kubernetes-dashboard

創建權限

[root@k8s-master ~]# cat dashboard-admin.yaml

# dashboard-admin.yaml: a ServiceAccount plus a ClusterRoleBinding granting
# it the built-in cluster-admin role.
# NOTE(review): cluster-admin means full, unrestricted cluster access --
# acceptable in a lab, far too broad for production.
apiVersion: v1

kind: ServiceAccount

metadata:

  name: admin-user

  namespace: kube-system

---

apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRoleBinding

metadata:

  name: admin-user

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: ClusterRole

  # Bind to the pre-existing cluster-admin ClusterRole.
  name: cluster-admin

subjects:

- kind: ServiceAccount

  name: admin-user

  # Must match the ServiceAccount's namespace above.
  namespace: kube-system

獲取token

# Print the admin-user ServiceAccount's secret, which contains the bearer
# token used to log in to the dashboard.
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')

通過運行dashboard的節點IP+port進行訪問

https://172.16.20.24:30001/#/overview?namespace=default

