k8s資源對象總結(官方Java api調用)

pod 資源

k8s集羣最小的調度單位。

一個或多個container組成的pod,同一個命名空間下的不同pod共享資源

一個pod可以運行多個container。同一個pod裏的container共享同一個Pod IP,可以通過這個Pod IP+不同的端口訪問pod裏面的各個container。

下面是個標準創建pod的yaml

apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: nginx
    ports:
    - containerPort: 80
  - name: busybox
    image: busybox
    command: ["/bin/sh"]
    # fixed: field was misspelled "arge" — the container field is "args"
    args: ["-c", "while true; do echo hello; sleep 10; done"]

在用Java調用api操控pod的時候,首先要創建出pod對象,再根據pod對象進行操作。

class V1Pod {
    apiVersion: null
    kind: null
    metadata: null
    spec: null
    status: null
}
class V1APIVersions {
    apiVersion: null
    kind: null
    serverAddressByClientCIDRs: []
    versions: []
}

class V1PodSpec {
    activeDeadlineSeconds: null
    affinity: null
    automountServiceAccountToken: null
    containers: []
    dnsConfig: null
    dnsPolicy: null
    enableServiceLinks: null
    hostAliases: null
    hostIPC: null
    hostNetwork: null
    hostPID: null
    hostname: null
    imagePullSecrets: null
    initContainers: null
    nodeName: null
    nodeSelector: null
    priority: null
    priorityClassName: null
    readinessGates: null
    restartPolicy: null
    runtimeClassName: null
    schedulerName: null
    securityContext: null
    serviceAccount: null
    serviceAccountName: null
    shareProcessNamespace: null
    subdomain: null
    terminationGracePeriodSeconds: null
    tolerations: null
    volumes: null
}

用這幾個對象湊出完整的pod對象,並將這個對象通過api調用。

Namespace資源對象

隔離不同用戶的資源

不同命名空間下的用戶不共享所有資源

不同命名空間下可以存在同名的資源(資源名稱只需在同一命名空間內唯一)

下面是通過api獲取的Namespace對象的信息

metadata {
  name: "anotherpod"
  generateName: ""
  namespace: "default"
  selfLink: "/api/v1/namespaces/default/pods/anotherpod"
  uid: "14f42eb6-bd6c-11e9-a7a3-000c29519baf"
  resourceVersion: "365611"
  generation: 0
  creationTimestamp {
    seconds: 1565660751
    nanos: 0
  }
  clusterName: ""
}
spec {
  volumes {
    name: "default-token-4rwl6"
    volumeSource {
      secret {
        secretName: "default-token-4rwl6"
        defaultMode: 420
      }
    }
  }
  containers {
    name: "www"
    image: "nginx"
    workingDir: ""
    resources {
    }
    volumeMounts {
      name: "default-token-4rwl6"
      readOnly: true
      mountPath: "/var/run/secrets/kubernetes.io/serviceaccount"
      subPath: ""
    }
    terminationMessagePath: "/dev/termination-log"
    imagePullPolicy: "Always"
    stdin: false
    stdinOnce: false
    tty: false
    terminationMessagePolicy: "File"
  }
  restartPolicy: "Always"
  terminationGracePeriodSeconds: 30
  dnsPolicy: "ClusterFirst"
  serviceAccountName: "default"
  serviceAccount: "default"
  nodeName: "node1"
  hostNetwork: false
  hostPID: false
  hostIPC: false
  securityContext {
  }
  hostname: ""
  subdomain: ""
  schedulerName: "default-scheduler"
  tolerations {
    key: "node.kubernetes.io/not-ready"
    operator: "Exists"
    value: ""
    effect: "NoExecute"
    tolerationSeconds: 300
  }
  tolerations {
    key: "node.kubernetes.io/unreachable"
    operator: "Exists"
    value: ""
    effect: "NoExecute"
    tolerationSeconds: 300
  }
  priorityClassName: ""
  priority: 0
  enableServiceLinks: true
}
status {
  phase: "Running"
  conditions {
    type: "Initialized"
    status: "True"
    lastProbeTime {
    }
    lastTransitionTime {
      seconds: 1565660751
      nanos: 0
    }
    reason: ""
    message: ""
  }
  conditions {
    type: "Ready"
    status: "True"
    lastProbeTime {
    }
    lastTransitionTime {
      seconds: 1566349768
      nanos: 0
    }
    reason: ""
    message: ""
  }
  conditions {
    type: "ContainersReady"
    status: "True"
    lastProbeTime {
    }
    lastTransitionTime {
      seconds: 1566349768
      nanos: 0
    }
    reason: ""
    message: ""
  }
  conditions {
    type: "PodScheduled"
    status: "True"
    lastProbeTime {
    }
    lastTransitionTime {
      seconds: 1565660751
      nanos: 0
    }
    reason: ""
    message: ""
  }
  message: ""
  reason: ""
  hostIP: "192.168.80.145"
  podIP: "10.244.1.31"
  startTime {
    seconds: 1565660751
    nanos: 0
  }
  containerStatuses {
    name: "www"
    state {
      running {
        startedAt {
          seconds: 1566349768
          nanos: 0
        }
      }
    }
    lastState {
      terminated {
        exitCode: 0
        signal: 0
        reason: "Completed"
        message: ""
        startedAt {
          seconds: 1565745718
          nanos: 0
        }
        finishedAt {
          seconds: 1566295101
          nanos: 0
        }
        containerID: "docker://87fbe39b2acbe2ae487cd8f5d562c85af94ff70f129466abd02804cb487c061f"
      }
    }
    ready: true
    restartCount: 2
    image: "nginx:latest"
    imageID: "docker-pullable://nginx@sha256:53ddb41e46de3d63376579acf46f9a41a8d7de33645db47a486de9769201fec9"
    containerID: "docker://0fbc7bb33b1125985f263b48ed83e245efe5bd6d37223ab3dde847f669247c1b"
  }
  qosClass: "BestEffort"
  nominatedNodeName: ""
}

下面是空的Namespace對象的信息 需要將信息根據需求進行補全,然後進行調用

class V1NamespaceSpec {
    finalizers: null
}
class V1Namespace {
    apiVersion: null
    kind: null
    metadata: null
    spec: null
    status: null
}

之前重複出現的類就不再重複寫出

Deployment 資源對象

監視當前資源的狀態,如果當前資源沒有到達預期的狀態,將改變當前狀態,使其到達預期狀態。

下面是爲空數據的Deployment的對象

class V1Deployment {
    apiVersion: null
    kind: null
    metadata: null
    spec: null
    status: null
}
class V1DeploymentSpec {
    minReadySeconds: null
    paused: null
    progressDeadlineSeconds: null
    replicas: null
    revisionHistoryLimit: null
    selector: null
    strategy: null
    template: null
}

可以參考下面的yaml文件將其填充

# fixed: Deployment belongs to the apps/v1 API group, not core v1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  selector:
    matchLabels:
      # fixed: "app:nginx" was missing the space after the colon, so it
      # parsed as a single scalar instead of a key/value pair
      app: nginx
  replicas: 2
  # fixed: key was misspelled "tetadata" and wrapped a spurious extra
  # nesting level; the pod template key is "template"
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.6.6
        ports:
        - containerPort: 80

Replicaset資源對象

真正維護Deployment資源對象的更新操作的對象

class V1ReplicaSet {
    apiVersion: null
    kind: null
    metadata: null
    spec: null
    status: null
}
class V1ReplicaSetSpec {
    minReadySeconds: null
    replicas: null
    selector: null
    template: null
}

Service資源對象

爲pod創建外部連接的對象

class V1Service {
    apiVersion: null
    kind: null
    metadata: null
    spec: null
    status: null
}
class V1ServiceSpec {
    clusterIP: null
    externalIPs: null
    externalName: null
    externalTrafficPolicy: null
    healthCheckNodePort: null
    loadBalancerIP: null
    loadBalancerSourceRanges: null
    ports: null
    publishNotReadyAddresses: null
    selector: null
    sessionAffinity: null
    sessionAffinityConfig: null
    type: null
}
class V1ServicePort {
    name: null
    nodePort: null
    port: null
    protocol: null
    targetPort: null
}
class V1ServiceStatus {
    loadBalancer: null
}

可以參考下面的yaml創建對象

apiVersion:  apps/v1
kind: Deployment
metadata:
  name: service-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: service_test_pod
  template:
    metadata:
      labels:
        app: service_test_pod
    spec:
      containers:
      - name: simple-http
        image: python:2.7
        imagePullPolicy: IfNotPresent
        command: ["/bin/bash"]
        args: ["-c", "echo \"<p>Hello from $(hostname)</p>\" > index.html; sleep 30; python -m SimpleHTTPServer 8080"]
        ports:
        - name: http
          containerPort: 8080
  
  另一個例子

apiVersion: v1
kind: Service
metadata:
  name: service-nginx
spec:
  selector:
    app: nginx
  ports:
  - protocol: TCP
    port: 8080
    targetPort: 80





ingress資源對象

負責外部訪問和負載均衡

# NOTE(review): extensions/v1beta1 Ingress was removed in Kubernetes 1.22;
# current clusters use networking.k8s.io/v1 (with a different backend
# schema: service.name / service.port instead of serviceName / servicePort).
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: helloworld-rules
spec:
  # host-based routing: each rule forwards requests for one hostname
  # to a backend Service
  rules:
  - host: helloworld-v1.example.com
    http:
      paths:
      - path: /
        backend:
          serviceName: service-helloworld-1
          servicePort: 80
  - host: helloworld-v2.example.com
    http:
      paths:
      - path: /
        backend:
          serviceName: service-helloworld-2
          servicePort: 80

---
apiVersion:  apps/v1
kind: Deployment
metadata:
  name: deploy-helloworld-1
spec:
  replicas: 1
  selector:
    matchLabels:
      app: helloworld-1
  template:
    metadata:
      labels:
        app: helloworld-1
    spec:
      containers:
      - name: simple-http
        image: python:2.7
        imagePullPolicy: IfNotPresent
        command: ["/bin/bash"]
        args: ["-c", "echo \"<p>Hello 1 from $(hostname)</p>\" > index.html; python -m SimpleHTTPServer 8080"]
        ports:
        - name: http
          containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: service-helloworld-1
spec:
  # NodePort exposes the Service on a static port of every cluster node
  type: NodePort
  ports:
  - port: 80
    nodePort: 30303
    targetPort: 8080
    protocol: TCP
    name: http
  selector:
    app: helloworld-1
# fixed: document separator was "----" (four dashes), which is not a valid
# YAML document marker — multi-document streams are separated by "---"
---
apiVersion:  apps/v1
kind: Deployment
metadata:
  name: deploy-helloworld-2
spec:
  replicas: 1
  selector:
    matchLabels:
      app: helloworld-2
  template:
    metadata:
      labels:
        app: helloworld-2
    spec:
      containers:
      - name: simple-http
        image: python:2.7
        imagePullPolicy: IfNotPresent
        command: ["/bin/bash"]
        args: ["-c", "echo \"<p>Hello 2 from $(hostname)</p>\" > index.html; python -m SimpleHTTPServer 8080"]
        ports:
        - name: http
          containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: service-helloworld-2
spec:
  type: NodePort
  ports:
  - port: 80
    nodePort: 30304
    targetPort: 8080
    protocol: TCP
    name: http
  selector:
    app: helloworld-2


class V1beta1Ingress {
    apiVersion: null
    kind: null
    metadata: null
    spec: null
    status: null
}
class V1beta1IngressSpec {
    backend: null
    rules: null
    tls: null
}

Volume 資源對象

Volume是hostPath,掛載的是本地磁盤的目錄

class V1Volume {
    awsElasticBlockStore: null
    azureDisk: null
    azureFile: null
    cephfs: null
    cinder: null
    configMap: null
    downwardAPI: null
    emptyDir: null
    fc: null
    flexVolume: null
    flocker: null
    gcePersistentDisk: null
    gitRepo: null
    glusterfs: null
    hostPath: null
    iscsi: null
    name: null
    nfs: null
    persistentVolumeClaim: null
    photonPersistentDisk: null
    portworxVolume: null
    projected: null
    quobyte: null
    rbd: null
    scaleIO: null
    secret: null
    storageos: null
    vsphereVolume: null
}

apiVersion: v1
kind: Pod
metadata:
  name: test-pd
spec:
  containers:
  - name: busybox1
    image: busybox
    command: ["/bin/sh"]
    args: ["-c", "while true; do echo hello; sleep 10;done"]
    volumeMounts:
    - mountPath: /test-pd
      name: test-volume
  - name: busybox2
    image: busybox
    command: ["/bin/sh"]
    args: ["-c", "while true; do echo hello; sleep 10;done"]
    volumeMounts:
    - mountPath: /test-pd
      name: test-volume
  volumes:
  - name: test-volume
    hostPath:
      # directory location on host
      path: /data
      # this field is optional
      type: Directory

PVC資源對象

PVC對象可以被多個pod掛載(取決於accessModes),並且容量可以按需申請和擴容

class V1PersistentVolumeClaim {
    apiVersion: null
    kind: null
    metadata: null
    spec: null
    status: null
}
class V1PersistentVolumeClaimSpec {
    accessModes: null
    dataSource: null
    resources: null
    selector: null
    storageClassName: null
    volumeMode: null
    volumeName: null
}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: hello-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi

Secret資源對象

用來存放重要的數據

class V1Secret {
    apiVersion: null
    data: null
    kind: null
    metadata: null
    stringData: null
    type: null
}

apiVersion: v1
kind: Secret
metadata:
  name: my-secret
type: Opaque
# fixed: values under "data" must be base64-encoded; for plain-text values
# use "stringData" — the API server base64-encodes them into "data" on write
stringData:
  root-password: abc123
  no-root-password: abc123



apiVersion: v1
kind: Pod
metadata:
  # fixed: object and volume names must be DNS-1123 compliant (lowercase
  # alphanumerics and "-"); underscores in "secret_busybox"/"secret_key"
  # are rejected by the API server
  name: secret-busybox
spec:
  containers:
  - name: busybox
    image: busybox
    command: ["/bin/sh"]
    args: ["-c", "while true; do echo hello; sleep 10;done"]
    volumeMounts:
      - name: secret-key
        mountPath: "/tmp/apikey"
        readOnly: true
  volumes:
  - name: secret-key
    secret:
      secretName: my-secret
      



apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
  labels:
    app: mysql
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
        - image: mysql:5.6
          name: mysql
          env:
            - name: MYSQL_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: my-secret
                  key: root-password
          ports:
            - containerPort: 3306
              name: mysql


ConfigMap資源對象

保存基本的配置

user       www www;  ## Default: nobody
worker_processes  5;  ## Default: 1
error_log  logs/error.log;
pid        logs/nginx.pid;
worker_rlimit_nofile 8192;

events {
  worker_connections  4096;  ## Default: 1024
}

http {
  include    conf/mime.types;
  include    /etc/nginx/proxy.conf;
  include    /etc/nginx/fastcgi.conf;
  index    index.html index.htm index.php;

  default_type application/octet-stream;
  log_format   main '$remote_addr - $remote_user [$time_local]  $status '
    '"$request" $body_bytes_sent "$http_referer" '
    '"$http_user_agent" "$http_x_forwarded_for"';
  access_log   logs/access.log  main;
  sendfile     on;
  tcp_nopush   on;
  server_names_hash_bucket_size 128; # this seems to be required for some vhosts

  server { # php/fastcgi
    listen       80;
    server_name  domain1.com www.domain1.com;
    access_log   logs/domain1.access.log  main;
    root         html;

    location ~ \.php$ {
      fastcgi_pass   127.0.0.1:1025;
    }
  }

  server { # simple reverse-proxy
    listen       80;
    server_name  domain2.com www.domain2.com;
    access_log   logs/domain2.access.log  main;

    # serve static files
    location ~ ^/(images|javascript|js|css|flash|media|static)/  {
      root    /var/www/virtual/big.server.com/htdocs;
      expires 30d;
    }

    # pass requests for dynamic content to rails/turbogears/zope, et al
    location / {
      proxy_pass      http://127.0.0.1:8080;
    }
  }

  upstream big_server_com {
    server 127.0.0.3:8000 weight=5;
    server 127.0.0.3:8001 weight=5;
    server 192.168.0.1:8000;
    server 192.168.0.1:8001;
  }

  server { # simple load balancing
    listen          80;
    server_name     big.server.com;
    access_log      logs/big.server.access.log main;

    location / {
      proxy_pass      http://big_server_com;
    }
  }
}

apiVersion: v1
kind: Pod
metadata:
  name: busybox-2
spec:
  containers:
  - name: busybox
    image: busybox
    command: ["/bin/sh"]
    args: ["-c", "while true; do echo hello; sleep 10;done"]
    # mounts the ConfigMap's keys as files under /etc/config
    volumeMounts:
      - name: config-volume
        mountPath: /etc/config
  volumes:
      - name: config-volume
        configMap:
          # NOTE(review): ConfigMap "config-2" is never defined in this
          # article — the ConfigMap shown below is "config-1"; presumably
          # this should reference "config-1", verify before applying
          name: config-2


# kubectl create configmap config-1 --from-literal=host=1.1.1.1 --from-literal=port=3000
apiVersion: v1
kind: ConfigMap
metadata:
  name: config-1
  namespace: default
data:
  host: 1.1.1.1
  port: "3000"



apiVersion: v1
kind: Pod
metadata:
  name: busybox-1
spec:
  containers:
  - name: busybox
    image: busybox
    command: ["/bin/sh"]
    args: ["-c", "while true; do echo hello; sleep 10;done"]
    env:
      - name: HOST
        valueFrom:
          configMapKeyRef:
            name: config-1
            key: host
      - name: PORT
        valueFrom:
          configMapKeyRef:
            name: config-1
            key: port

class V1ConfigMap {
    apiVersion: null
    binaryData: null
    data: null
    kind: null
    metadata: null
}

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章