26. Installing Kubernetes v1.21 from Binary Packages -- Cluster Deployment (Part 3)

10. Installing Calico

[root@k8s-master01 ~]# cat calico-etcd.yaml
---
# Source: calico/templates/calico-etcd-secrets.yaml
# The following contains k8s Secrets for use with a TLS enabled etcd cluster.
# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: calico-etcd-secrets
  namespace: kube-system
data:
  # Populate the following with etcd TLS configuration if desired, but leave blank if
  # not using TLS for etcd.
  # The keys below should be uncommented and the values populated with the base64
  # encoded contents of each file that would be associated with the TLS data.
  # Example command for encoding a file contents: cat <file> | base64 -w 0
  # etcd-key: null
  # etcd-cert: null
  # etcd-ca: null
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # Configure this with the location of your etcd cluster.
  etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"
  # If you're using TLS enabled etcd uncomment the following.
  # You must also populate the Secret below with these files.
  etcd_ca: ""   # "/calico-secrets/etcd-ca"
  etcd_cert: "" # "/calico-secrets/etcd-cert"
  etcd_key: ""  # "/calico-secrets/etcd-key"
  # Typha is disabled.
  typha_service_name: "none"
  # Configure the backend to use.
  calico_backend: "bird"
  # Configure the MTU to use for workload interfaces and tunnels.
  # - If Wireguard is enabled, set to your network MTU - 60
  # - Otherwise, if VXLAN or BPF mode is enabled, set to your network MTU - 50
  # - Otherwise, if IPIP is enabled, set to your network MTU - 20
  # - Otherwise, if not using any encapsulation, set to your network MTU.
  veth_mtu: "1440"

  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "etcd_endpoints": "__ETCD_ENDPOINTS__",
          "etcd_key_file": "__ETCD_KEY_FILE__",
          "etcd_cert_file": "__ETCD_CERT_FILE__",
          "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
          "mtu": __CNI_MTU__,
          "ipam": {
              "type": "calico-ipam"
          },
          "policy": {
              "type": "k8s"
          },
          "kubernetes": {
              "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        },
        {
          "type": "bandwidth",
          "capabilities": {"bandwidth": true}
        }
      ]
    }

---
# Source: calico/templates/calico-kube-controllers-rbac.yaml

# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
rules:
  # Pods are monitored for changing labels.
  # The node controller monitors Kubernetes nodes.
  # Namespace and serviceaccount labels are used for policy.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
      - serviceaccounts
    verbs:
      - watch
      - list
      - get
  # Watch for changes to Kubernetes NetworkPolicies.
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
- kind: ServiceAccount
  name: calico-kube-controllers
  namespace: kube-system
---

---
# Source: calico/templates/calico-node-rbac.yaml
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-node
rules:
  # The CNI plugin needs to get pods, nodes, and namespaces.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - endpoints
      - services
    verbs:
      # Used to discover service IPs for advertisement.
      - watch
      - list
  # Pod CIDR auto-detection on kubeadm needs access to config maps.
  - apiGroups: [""]
    resources:
      - configmaps
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - nodes/status
    verbs:
      # Needed for clearing NodeNetworkUnavailable flag.
      - patch

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
- kind: ServiceAccount
  name: calico-node
  namespace: kube-system

---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Make sure calico-node gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: calico-node
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      priorityClassName: system-node-critical
      initContainers:
        # This container installs the CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: docker.io/calico/cni:v3.15.3
          command: ["/install-cni.sh"]
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # CNI MTU Config variable
            - name: CNI_MTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Prevents the container from sleeping forever.
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
            - mountPath: /calico-secrets
              name: etcd-certs
          securityContext:
            privileged: true
        # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
        # to communicate with Felix over the Policy Sync API.
        - name: flexvol-driver
          image: docker.io/calico/pod2daemon-flexvol:v3.15.3
          volumeMounts:
          - name: flexvol-driver-host
            mountPath: /host/driver
          securityContext:
            privileged: true
      containers:
        # Runs calico-node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: docker.io/calico/node:v3.15.3
          env:
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            # Set noderef for node controller.
            - name: CALICO_K8S_NODE_REF
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "k8s,bgp"
            # Auto-detect the BGP IP address.
            - name: IP
              value: "autodetect"
            # Enable IPIP
            - name: CALICO_IPV4POOL_IPIP
              value: "Always"
            # Enable or Disable VXLAN on the default IP pool.
            - name: CALICO_IPV4POOL_VXLAN
              value: "Never"
            # Set MTU for tunnel device used if ipip is enabled
            - name: FELIX_IPINIPMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Set MTU for the VXLAN tunnel device.
            - name: FELIX_VXLANMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Set MTU for the Wireguard tunnel device.
            - name: FELIX_WIREGUARDMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            # no effect. This should fall within `--cluster-cidr`.
            # - name: CALICO_IPV4POOL_CIDR
            #   value: "192.168.0.0/16"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to "info"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "info"
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          livenessProbe:
            exec:
              command:
              - /bin/calico-node
              - -felix-live
              - -bird-live
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
              - /bin/calico-node
              - -felix-ready
              - -bird-ready
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
            - mountPath: /calico-secrets
              name: etcd-certs
            - name: policysync
              mountPath: /var/run/nodeagent
      volumes:
        # Used by calico-node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # Mount in the etcd TLS secrets with mode 400.
        # See https://kubernetes.io/docs/concepts/configuration/secret/
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
            defaultMode: 0400
        # Used to create per-pod Unix Domain Sockets
        - name: policysync
          hostPath:
            type: DirectoryOrCreate
            path: /var/run/nodeagent
        # Used to install Flex Volume Driver
        - name: flexvol-driver-host
          hostPath:
            type: DirectoryOrCreate
            path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system

---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      serviceAccountName: calico-kube-controllers
      priorityClassName: system-cluster-critical
      # The controllers must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      containers:
        - name: calico-kube-controllers
          image: docker.io/calico/kube-controllers:v3.15.3
          env:
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            # Choose which controllers to run.
            - name: ENABLED_CONTROLLERS
              value: policy,namespace,serviceaccount,workloadendpoint,node
          volumeMounts:
            # Mount in the etcd TLS secrets.
            - mountPath: /calico-secrets
              name: etcd-certs
          readinessProbe:
            exec:
              command:
              - /usr/bin/check-status
              - -r
      volumes:
        # Mount in the etcd TLS secrets with mode 400.
        # See https://kubernetes.io/docs/concepts/configuration/secret/
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
            defaultMode: 0400

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system

---
# Source: calico/templates/calico-typha.yaml

---
# Source: calico/templates/configure-canal.yaml

---
# Source: calico/templates/kdd-crds.yaml

Modify the following places in calico-etcd.yaml

[root@k8s-master01 ~]# grep "etcd_endpoints:.*" calico-etcd.yaml 
  etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"

[root@k8s-master01 ~]# sed -i 's#etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"#etcd_endpoints: "https://172.31.3.108:2379,https://172.31.3.109:2379,https://172.31.3.110:2379"#g' calico-etcd.yaml

[root@k8s-master01 ~]# grep "etcd_endpoints:.*" calico-etcd.yaml 
  etcd_endpoints: "https://172.31.3.108:2379,https://172.31.3.109:2379,https://172.31.3.110:2379"

[root@k8s-master01 ~]# grep -E "(.*etcd-key:.*|.*etcd-cert:.*|.*etcd-ca:.*)" calico-etcd.yaml 
  # etcd-key: null
  # etcd-cert: null
  # etcd-ca: null

[root@k8s-master01 ~]# ETCD_KEY=`cat /etc/kubernetes/pki/etcd/etcd-key.pem | base64 | tr -d '\n'`
[root@k8s-master01 ~]# ETCD_CERT=`cat /etc/kubernetes/pki/etcd/etcd.pem | base64 | tr -d '\n'`
[root@k8s-master01 ~]# ETCD_CA=`cat /etc/kubernetes/pki/etcd/etcd-ca.pem | base64 | tr -d '\n'`

[root@k8s-master01 ~]# sed -i "s@# etcd-key: null@etcd-key: ${ETCD_KEY}@g; s@# etcd-cert: null@etcd-cert: ${ETCD_CERT}@g; s@# etcd-ca: null@etcd-ca: ${ETCD_CA}@g" calico-etcd.yaml

[root@k8s-master01 ~]# grep -E "(.*etcd-key:.*|.*etcd-cert:.*|.*etcd-ca:.*)" calico-etcd.yaml 
  etcd-key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBcGh0ejhrbzRURnV4T2VVTDBoSWpFdHBmcC9BRGYrcGR3SWNkeVA2QnV5dGxmSzJECjF4eEpRUGVhOFNwMGlFaVBxTEdNWkl5bjNjbHd4Mm9TYkpJd1ZzeEt6N2RybFErdUx2Qzl3Y3lPUktOZVpEd24KMTNDemk4eURENkZmL3NLcXhzNXVEMnNsNWNBMGdPK3orMkdOeUh5YkhOTytodG93bnh0MjhuNHFKWmRnK2l5VQp3R3psT0xQblY5UlJESWJLTW9YT3FLUUt1WWVhMm8rU2E4Rkp1anlvT2Uyc0t5UndTQk5xcjYyZnRTK0ZWSHFxCmVKalJYS245NFM0TDFwd2I5cUxnUDJmaU41bFRadk4va1dkZnMxd2RXVElWUVNaZE92TmhhZGp4b0Y5TWlsSGEKZ0l4NzZaNU1YL2lNZWpQb3Z4M2pDTXJzdWFUS0tnSGt6eTRLU3dJREFRQUJBb0lCQUFlVi8yQ1VWU2ZmbENOeAp1MjEzbUpSMjFxR0R5NVVlN2ZNcCtJbENYa2hlL2Y2SXFobTcxL2lZbGtIblQzVWQ0em13Q2hwWmRoMGg0djJvCmNYajE0REZHbVRBTlQyTjZXTmtaODRDVFIvZ0lnZm9QNlQza2pyNldzM0dXVEIwRlpPazVhanRZQ0Y0S3Zoc1oKVjEzbW9hUURWTTRuT1c5TkxhVkdpdE1lUWV4L2YzV1ZSc2M2TWdaUlVvRGU5THR4bk5nb1hWZmVYcVpZbElzVQplSFJQb1JGYnpXYi9UdEduTnFRMzJkemtyYTNNWnFzd1R4QjdMMGNWUW0xTGxMUXQ1KzkvWnRLd3Zwa0w0QTUvCldwUEYvWGhSSTBBQ0dhUEo3YWNlRUlwUlRSellzbnQ0dlZHNHNob3Y3MEQrYjdLT1lhN1FyU2FmNUlLRVlydFkKV3pjM0tQa0NnWUVBd1dwQk41enFxTWllVWpVODhLVVVDTkhNdUxMSHp5TTZQQ29OZXMrWGNIY1U1L1kxZUV0TwpMd3Z6djd3QVR5UW92RU8ycldtNEF2RXRSaG1QUFc2YU52ZUpPc2FZNnlXaVJ2R0RiN2dzb093eW9DYVlKd08vCnF5MEVLM29qTy9XRVZhNFpyTUlXOUxNWEkwajlKeldpUWI4NytNaENJcVpoZnYvUUhuWW5VU1VDZ1lFQTI5c2cKRzFJZ1hXamVyNHhiTWdMVkFnOXk1K3g1NlQ1RTZWNE5vdUJUZUlhUStob1cvU0w2UFMyS2ZjLzJweXVweFd3egp3aVRXdSt2L1NIUTVudlMrRHAzU0J5U0NqMEJJalg3N2VXS2g0SW1Hd2NoVzV5WnVBM3BVS3paSnV2VXpIdUFNCnFRc0NnR0ZnZGo4Zm1qYWV6ZENOVTI2TUhSZTRNaUJ2cHhSUHFxOENnWUFQamxNMmZObG12OVB6K3JJdkRLZmkKMmJUa2VnU1dCVmhPdEhjbkZJRXltM0ZFQXNwa0pYSmhXRTIvY3doM1ZRb3RzaWlFSkFlWHZQd09Na29SLzg1SgpjM2xIRCtnR3FaMDJwWUFUd1RWZHNBR1dYZVJJNXdWSWFETjRwN2Nqd0doblY3eGE1N1ZlOHZSK2N3VmhYTy95CjU4V1VDYzgvNkMvWlBndm9GMHFzUFFLQmdBaHNjZU42RnhGZEprTVZucHpnN09aaVR5WEJzcjRVQzdIaFQ2WncKNytITFRoeTNDVEJ6dWFERWNPejNIZDB6MkJKZlhmQlBWd2JtT09hK3hVSm80Q3RSTXEzaFlUczUzRTNIa3IwSQo0V2puL0FqS3MwR3lBRDhUM2N1MkRjY2pBKzFuNmpSRDNybXFnWGFtWG9DYkhTU0huQktaUnJjS3BKMFBEeGdZCnVDQ3pBb0dBSjh0SXk1UHRya3lUN3ExZURJNTF1Q2YwWDhrRWJoeFZ1RC9oVW82SkFURkRnRG0vN0Z5UFNvMnAKSFZVaEtpZmtQNUVoYTBYTDMrK3VxOWhITXJvNHVuaksrZSs2Y3VrZkhOWkk4MFVHazBOWUY3WGd1VTdETlJ1aApHQ1dJRkNhcjB0TE9lK1pBRzJQaHFQMno4cXlmNVNEckk0bmJtUHlabjZPMVFYZ0Q1REU9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
  etcd-cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVKakNDQXc2Z0F3SUJBZ0lVVHNTUDBUVlZqaE9UZEFUNnlncFpXcERRb0dJd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1p6RUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeERUQUxCZ05WQkFvVEJHVjBZMlF4RmpBVUJnTlZCQXNURFVWMFkyUWdVMlZqZFhKcGRIa3hEVEFMCkJnTlZCQU1UQkdWMFkyUXdJQmNOTWpJd01USXlNRGd4TkRBd1doZ1BNakV5TVRFeU1qa3dPREUwTURCYU1HY3gKQ3pBSkJnTlZCQVlUQWtOT01SQXdEZ1lEVlFRSUV3ZENaV2xxYVc1bk1SQXdEZ1lEVlFRSEV3ZENaV2xxYVc1bgpNUTB3Q3dZRFZRUUtFd1JsZEdOa01SWXdGQVlEVlFRTEV3MUZkR05rSUZObFkzVnlhWFI1TVEwd0N3WURWUVFECkV3UmxkR05rTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUFwaHR6OGtvNFRGdXgKT2VVTDBoSWpFdHBmcC9BRGYrcGR3SWNkeVA2QnV5dGxmSzJEMXh4SlFQZWE4U3AwaUVpUHFMR01aSXluM2Nsdwp4Mm9TYkpJd1ZzeEt6N2RybFErdUx2Qzl3Y3lPUktOZVpEd24xM0N6aTh5REQ2RmYvc0txeHM1dUQyc2w1Y0EwCmdPK3orMkdOeUh5YkhOTytodG93bnh0MjhuNHFKWmRnK2l5VXdHemxPTFBuVjlSUkRJYktNb1hPcUtRS3VZZWEKMm8rU2E4Rkp1anlvT2Uyc0t5UndTQk5xcjYyZnRTK0ZWSHFxZUpqUlhLbjk0UzRMMXB3YjlxTGdQMmZpTjVsVApadk4va1dkZnMxd2RXVElWUVNaZE92TmhhZGp4b0Y5TWlsSGFnSXg3Nlo1TVgvaU1lalBvdngzakNNcnN1YVRLCktnSGt6eTRLU3dJREFRQUJvNEhITUlIRU1BNEdBMVVkRHdFQi93UUVBd0lGb0RBZEJnTlZIU1VFRmpBVUJnZ3IKQmdFRkJRY0RBUVlJS3dZQkJRVUhBd0l3REFZRFZSMFRBUUgvQkFJd0FEQWRCZ05WSFE0RUZnUVU5cXI4N3RsZApyTGJPdGxMUEYvT0xBN1QvcEVFd0h3WURWUjBqQkJnd0ZvQVVpbkFQc1JrQ3pPenZ6N3ZwWmdQdUhUNGt3QTR3ClJRWURWUjBSQkQ0d1BJSUthemh6TFdWMFkyUXdNWUlLYXpoekxXVjBZMlF3TW9JS2F6aHpMV1YwWTJRd000Y0UKZndBQUFZY0VyQjhEYkljRXJCOERiWWNFckI4RGJqQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFlb28rL0NVYQpTa2hkVEY0ekJLa3ExREs0cFFaVEdhQUNHNEUvWUUwNXFNWS9QcTlpam5nNGtRdFB0d2lXaE5WN1JZWGl5QnhjCitIMTBDc3JVSTQrTFVjVjI0T1d5UFA2Q09yY2sycDBDZUhTL0E0ZEhYaEhReC8rZFRoUGxWcno1RzdlblhKRE0KaTlhZGxOR21BSWVlZEE4ekNENlVvbHFOOVdrZ29jTWw0ckdFZDJ3WFZMcFA5ZzhybGlyNVJrSy9seHFmQ1dBWgpBeDZPejJTYTNEbEVGdXpNdGxYejBobnRPdGpBdUJ6eEdIdlJVMllDdlcyL3pDUTJTQ0ZodkJXMGtPVCtiUVc1CkkrVTZGeVpCSU1XQlBPQmZsNm03M2pkNjdiSzRreVJXTEhQUnl0T2w1N3RMdlljOEgybFBQbS9VS3BWYkx5NjkKdXBuNHhOZUhaYXZ5ckE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
  etcd-ca: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR4RENDQXF5Z0F3SUJBZ0lVSW02eEIzNlN2dXE1TDhUaks5cHV5bjJHWEp3d0RRWUpLb1pJaHZjTkFRRUwKQlFBd1p6RUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeERUQUxCZ05WQkFvVEJHVjBZMlF4RmpBVUJnTlZCQXNURFVWMFkyUWdVMlZqZFhKcGRIa3hEVEFMCkJnTlZCQU1UQkdWMFkyUXdJQmNOTWpJd01USXlNRGd4TXpBd1doZ1BNakV5TVRFeU1qa3dPREV6TURCYU1HY3gKQ3pBSkJnTlZCQVlUQWtOT01SQXdEZ1lEVlFRSUV3ZENaV2xxYVc1bk1SQXdEZ1lEVlFRSEV3ZENaV2xxYVc1bgpNUTB3Q3dZRFZRUUtFd1JsZEdOa01SWXdGQVlEVlFRTEV3MUZkR05rSUZObFkzVnlhWFI1TVEwd0N3WURWUVFECkV3UmxkR05rTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF1cDRWVEQzS1JaRWgKcXA2TW0wTXF3amFrVkFKTFJ0YlFjd3FLNWsvQ2s4MEFjTDUyOGl6YldSdGRXcDVpNk9td241M3BGNGdpZG9EYQphOUpadEF4ZUl0RmNkbExxRzZrdjFCU3pyVVlMMXZyOFZNckRZd0VrYW9RdlZ3cHFrZDJiR3pUd21oVnJXZ3AxCmMrMjcwSWI1L2NVa25mWmtubEVTcWlyQzI5Z09oZnh0OFNrc1FTSUNtcXhuajFDVnltL3dML3AwMDUzNE5BNjAKeXk5aDdkZjU1R0ZFbjdLaytzOEdkbUVmL3ludXVsT1VUY25mTXppeWVoQW5uUStZMjZMWGJzSWw3eHg3YzRpZgpManFPN3d1Qm5WS3M2WllENzI0V1Z0QUY0VWllL1NqRXVabE5GWGNIdTg0Ly9jNHBLL1Avb0dxNklUaVZYWUJyClY1TW1jdTRPV3dJREFRQUJvMll3WkRBT0JnTlZIUThCQWY4RUJBTUNBUVl3RWdZRFZSMFRBUUgvQkFnd0JnRUIKL3dJQkFqQWRCZ05WSFE0RUZnUVVpbkFQc1JrQ3pPenZ6N3ZwWmdQdUhUNGt3QTR3SHdZRFZSMGpCQmd3Rm9BVQppbkFQc1JrQ3pPenZ6N3ZwWmdQdUhUNGt3QTR3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUpmNWJwd2FJYjFTCmtiRUcyVDlRb3d4WU52VGRYSGllbzkwazlPSEFqN3A3RGdzekk0alUwUnkxOHN4c1h0aW5TMCtNU3U5L2d1VHYKZEprK3c4TnhyNHNZZEt3N2VSVVpUbUREQ2l0VldkY0JHNk14Y1BTTDJaQnVJMi8wOTRnN0ZNd2ZIc09lVEdHZgpScVVrV1lTRjRRbU9iRTZwNTA3QWlxRlZqMEhzUHRmTTdpQjZ3ZXRyYzlTVzlZd3R5Tm9PVFhnZEdDdDc5akNBCllUTG9TaHFxcGRvUWEwd0hzYWZqSDd5N2VIZEdRRmZtSWo2RVFQU1ZRSFhQUmhFOXVadDgxbDByeENseUQxa3kKOEhVYTJpOFpHblF0cVJxd3JORHRHeEdlYUdMbCtNYkZVb1N4SW9nTTNaK2x0a2NNbUVZK3hxc3dBbVlMUTJnTwpNMUtoRVJxT1JsMD0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
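
(Optional) To confirm the base64 values decode back to valid certificates, you can pipe one of the variables through openssl, for example:

[root@k8s-master01 ~]# echo "${ETCD_CA}" | base64 -d | openssl x509 -noout -subject -dates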

[root@k8s-master01 ~]# grep -E "(.*etcd_ca:.*|.*etcd_cert:.*|.*etcd_key:.*)" calico-etcd.yaml 
  etcd_ca: ""   # "/calico-secrets/etcd-ca"
  etcd_cert: "" # "/calico-secrets/etcd-cert"
  etcd_key: ""  # "/calico-secrets/etcd-key"

[root@k8s-master01 ~]# sed -i 's#etcd_ca: ""#etcd_ca: "/calico-secrets/etcd-ca"#g; s#etcd_cert: ""#etcd_cert: "/calico-secrets/etcd-cert"#g; s#etcd_key: "" #etcd_key: "/calico-secrets/etcd-key" #g' calico-etcd.yaml

[root@k8s-master01 ~]# grep -E "(.*etcd_ca:.*|.*etcd_cert:.*|.*etcd_key:.*)" calico-etcd.yaml 
  etcd_ca: "/calico-secrets/etcd-ca"   # "/calico-secrets/etcd-ca"
  etcd_cert: "/calico-secrets/etcd-cert" # "/calico-secrets/etcd-cert"
  etcd_key: "/calico-secrets/etcd-key"  # "/calico-secrets/etcd-key"

# Change this to your own Pod subnet
[root@k8s-master01 ~]# POD_SUBNET="192.168.0.0/12"

# Note: the step below changes the CALICO_IPV4POOL_CIDR entry in calico-etcd.yaml to your own Pod subnet, i.e. replaces 192.168.x.x/16 with your cluster's Pod CIDR, and uncomments it:

[root@k8s-master01 ~]# grep -E "(.*CALICO_IPV4POOL_CIDR.*|.*192.168.0.0.*)" calico-etcd.yaml 
            # - name: CALICO_IPV4POOL_CIDR
            #   value: "192.168.0.0/16"

[root@k8s-master01 ~]# sed -i 's@# - name: CALICO_IPV4POOL_CIDR@- name: CALICO_IPV4POOL_CIDR@g; s@#   value: "192.168.0.0/16"@  value: '"${POD_SUBNET}"'@g' calico-etcd.yaml

[root@k8s-master01 ~]# grep -E "(.*CALICO_IPV4POOL_CIDR.*|.*192.168.0.0.*)" calico-etcd.yaml 
            - name: CALICO_IPV4POOL_CIDR
              value: 192.168.0.0/12

[root@k8s-master01 ~]# grep "image:" calico-etcd.yaml
          image: docker.io/calico/cni:v3.15.3
          image: docker.io/calico/pod2daemon-flexvol:v3.15.3
          image: docker.io/calico/node:v3.15.3
          image: docker.io/calico/kube-controllers:v3.15.3

Download the Calico images and push them to Harbor

[root@k8s-master01 ~]# cat download_calico_images.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-11
#FileName:      download_calico_images.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \\033[01;31m"
END='\033[0m'

images=$(awk -F "/"  '/image:/{print $NF}' calico-etcd.yaml)
HARBOR_DOMAIN=harbor.raymonds.cc

images_download(){
    ${COLOR}"开始下载Calico镜像"${END}
    for i in ${images};do 
        docker pull registry.cn-beijing.aliyuncs.com/raymond9/$i
        docker tag registry.cn-beijing.aliyuncs.com/raymond9/$i ${HARBOR_DOMAIN}/google_containers/$i
        docker rmi registry.cn-beijing.aliyuncs.com/raymond9/$i
        docker push ${HARBOR_DOMAIN}/google_containers/$i
    done
    ${COLOR}"Calico镜像下载完成"${END}
}

images_download

[root@k8s-master01 ~]# bash download_calico_images.sh 

[root@k8s-master01 ~]# docker images|grep calico
harbor.raymonds.cc/google_containers/node                v3.15.3             d45bf977dfbf        16 months ago       262MB
harbor.raymonds.cc/google_containers/pod2daemon-flexvol  v3.15.3             963564fb95ed        16 months ago       22.8MB
harbor.raymonds.cc/google_containers/cni                 v3.15.3             ca5564c06ea0        16 months ago       110MB
harbor.raymonds.cc/google_containers/kube-controllers    v3.15.3             0cb2976cbb7d        16 months ago       52.9MB

[root@k8s-master01 ~]# sed -ri 's@(.*image:) docker.io/calico(/.*)@\1 harbor.raymonds.cc/google_containers\2@g' calico-etcd.yaml

[root@k8s-master01 ~]# grep "image:" calico-etcd.yaml
          image: harbor.raymonds.cc/google_containers/cni:v3.15.3
          image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.15.3
          image: harbor.raymonds.cc/google_containers/node:v3.15.3
          image: harbor.raymonds.cc/google_containers/kube-controllers:v3.15.3

[root@k8s-master01 ~]# kubectl apply -f calico-etcd.yaml
secret/calico-etcd-secrets created
configmap/calico-config created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created

#Check the Pod status
[root@k8s-master01 ~]# kubectl get pod -n kube-system |grep calico
calico-kube-controllers-5d8579866c-lb9jt   1/1     Running   0          32s
calico-node-2bn8v                          1/1     Running   0          32s
calico-node-bz8gh                          1/1     Running   0          32s
calico-node-jbvm7                          1/1     Running   0          32s
calico-node-mch4p                          1/1     Running   0          32s
calico-node-qpxd7                          1/1     Running   0          32s
calico-node-v7v4h                          1/1     Running   0          32s

#Check the node status
[root@k8s-master01 ~]# kubectl get nodes 
NAME                         STATUS   ROLES    AGE     VERSION
k8s-master01.example.local   Ready    <none>   4m23s   v1.21.8
k8s-master02.example.local   Ready    <none>   4m20s   v1.21.8
k8s-master03.example.local   Ready    <none>   4m20s   v1.21.8
k8s-node01.example.local     Ready    <none>   2m36s   v1.21.8
k8s-node02.example.local     Ready    <none>   2m36s   v1.21.8
k8s-node03.example.local     Ready    <none>   2m36s   v1.21.8
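
If a calico-node Pod or a node stays NotReady, waiting on the DaemonSet rollout and checking the container logs usually reveals the cause, for example:

[root@k8s-master01 ~]# kubectl rollout status ds/calico-node -n kube-system
[root@k8s-master01 ~]# kubectl logs -n kube-system -l k8s-app=calico-node -c calico-node --tail=20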

11. Installing CoreDNS

[root@k8s-master01 ~]# cat coredns.yaml 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
  - apiGroups:
    - ""
    resources:
    - endpoints
    - services
    - pods
    - namespaces
    verbs:
    - list
    - watch
  - apiGroups:
    - discovery.k8s.io
    resources:
    - endpointslices
    verbs:
    - list
    - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
          max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
         podAntiAffinity:
           preferredDuringSchedulingIgnoredDuringExecution:
           - weight: 100
             podAffinityTerm:
               labelSelector:
                 matchExpressions:
                   - key: k8s-app
                     operator: In
                     values: ["kube-dns"]
               topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: registry.aliyuncs.com/google_containers/coredns:1.8.3
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 192.168.0.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP

#If you changed the k8s Service CIDR, set the CoreDNS Service IP to the 10th IP of that Service subnet; the manifest above still has 192.168.0.10, so for this cluster change it to 10.96.0.10
[root@k8s-master01 ~]# sed -i "s#192.168.0.10#10.96.0.10#g" coredns.yaml

Install CoreDNS

[root@k8s-master01 ~]# grep "image:" coredns.yaml 
        image: registry.aliyuncs.com/google_containers/coredns:1.8.3

[root@k8s-master01 ~]# cat download_coredns_images.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-11
#FileName:      download_coredns_images.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \\033[01;31m"
END='\033[0m'

images=$(awk -F "/"  '/image:/{print $NF}' coredns.yaml)
HARBOR_DOMAIN=harbor.raymonds.cc

images_download(){
    ${COLOR}"开始下载Coredns镜像"${END}
    for i in ${images};do 
        docker pull registry.aliyuncs.com/google_containers/$i
        docker tag registry.aliyuncs.com/google_containers/$i ${HARBOR_DOMAIN}/google_containers/$i
        docker rmi registry.aliyuncs.com/google_containers/$i
        docker push ${HARBOR_DOMAIN}/google_containers/$i
    done
    ${COLOR}"Coredns镜像下载完成"${END}
}

images_download

[root@k8s-master01 ~]# bash download_coredns_images.sh

[root@k8s-master01 ~]# docker images |grep coredns
harbor.raymonds.cc/google_containers/coredns       1.8.3               bfe3a36ebd25        19 months ago       45.2MB

[root@k8s-master01 ~]# sed -ri 's@(.*image:) registry.aliyuncs.com(/.*)@\1 harbor.raymonds.cc\2@g' coredns.yaml 

[root@k8s-master01 ~]# grep "image:" coredns.yaml 
        image: harbor.raymonds.cc/google_containers/coredns:1.8.3

[root@k8s-master01 ~]# kubectl  create -f coredns.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created

#Check the status
[root@k8s-master01 ~]# kubectl get pod -n kube-system |grep coredns
coredns-7cd47cd6db-gz9vb                   1/1     Running   0          14s
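
(Optional) In-cluster DNS resolution can be verified with a disposable Pod, assuming a busybox image is pullable from your nodes (Docker Hub or your Harbor):

[root@k8s-master01 ~]# kubectl run dns-test -it --rm --restart=Never --image=busybox:1.28 -- nslookup kubernetes.default.svc.cluster.local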

On Ubuntu the following problem can appear:

root@k8s-master01:~# kubectl get pod -A -o wide|grep coredns
NAMESPACE     NAME                                      READY   STATUS             RESTARTS   AGE   IP              NODE                         NOMINATED NODE   READINESS GATES
kube-system   coredns-847c895554-9jqq5                  0/1     CrashLoopBackOff   1          8s    192.171.30.65   k8s-master02.example.local   <none>           <none>

#Ubuntu runs a local DNS cache (systemd-resolved), which breaks CoreDNS resolution
#For details see the official docs: https://coredns.io/plugins/loop/#troubleshooting
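#You can confirm the cause on the node: with systemd-resolved, /etc/resolv.conf points at the local stub 127.0.0.53, a loopback address, so CoreDNS's forward plugin ends up querying CoreDNS itself and the loop plugin aborts.

root@k8s-master01:~# cat /etc/resolv.conf

#The page above also suggests pointing the kubelet's resolvConf at /run/systemd/resolve/resolv.conf as an alternative to removing the loop plugin, as done below.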

root@k8s-master01:~# kubectl edit -n kube-system cm coredns
...
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
          max_concurrent 1000
        }
        cache 30
        loop #delete this loop plugin line to avoid the internal forwarding loop
        reload
        loadbalance
    }

root@k8s-master01:~# kubectl get pod -A -o wide |grep coredns
NAMESPACE     NAME                                      READY   STATUS             RESTARTS   AGE    IP               NODE                         NOMINATED NODE   READINESS GATES
kube-system   coredns-847c895554-r9tsd                  0/1     CrashLoopBackOff   4          3m4s   192.170.21.195   k8s-node03.example.local     <none>           <none>

root@k8s-master01:~# kubectl delete pod coredns-847c895554-r9tsd -n kube-system 
pod "coredns-847c895554-r9tsd" deleted

root@k8s-master01:~# kubectl get pod -A -o wide |grep coredns
NAMESPACE     NAME                                      READY   STATUS    RESTARTS   AGE   IP                NODE                         NOMINATED NODE   READINESS GATES
kube-system   coredns-847c895554-cqwl5                  1/1     Running   0          13s   192.167.195.130   k8s-node02.example.local     <none>           <none>
#It is working now

12. Installing Metrics Server

In newer Kubernetes versions, system resource metrics are collected by metrics-server, which reports memory, disk, CPU and network usage for nodes and Pods.
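
Once metrics-server is installed (below), these statistics are exposed through the metrics.k8s.io aggregated API, which is what kubectl top consumes; the API can also be queried directly, for example:

[root@k8s-master01 ~]# kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes"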

Install metrics-server

[root@k8s-master01 ~]# cat components.yaml 
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
  - apiGroups:
      - metrics.k8s.io
    resources:
      - pods
      - nodes
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - nodes
      - nodes/stats
      - namespaces
      - configmaps
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
    - name: https
      port: 443
      protocol: TCP
      targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
        - args:
            - --cert-dir=/tmp
            - --secure-port=4443
            - --metric-resolution=30s
            - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /livez
              port: https
              scheme: HTTPS
            periodSeconds: 10
          name: metrics-server
          ports:
            - containerPort: 4443
              name: https
              protocol: TCP
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /readyz
              port: https
              scheme: HTTPS
            periodSeconds: 10
          securityContext:
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 1000
          volumeMounts:
            - mountPath: /tmp
              name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
        - emptyDir: {}
          name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100

Modify the following:

[root@k8s-master01 ~]# vim components.yaml 
...
    spec:
      containers:
        - args:
            - --cert-dir=/tmp
            - --secure-port=4443
            - --metric-resolution=30s
            - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
#add the following lines
            - --kubelet-insecure-tls
            - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem #note: in the binary install the certificate file is front-proxy-ca.pem
            - --requestheader-username-headers=X-Remote-User
            - --requestheader-group-headers=X-Remote-Group
            - --requestheader-extra-headers-prefix=X-Remote-Extra-
...
          volumeMounts:
            - mountPath: /tmp
              name: tmp-dir
#add the following lines
            - name: ca-ssl
              mountPath: /etc/kubernetes/pki
...
      volumes:
        - emptyDir: {}
          name: tmp-dir
#add the following lines
        - name: ca-ssl
          hostPath:
            path: /etc/kubernetes/pki
...
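
Because metrics-server mounts /etc/kubernetes/pki from the host, front-proxy-ca.pem must exist on every node the Pod can be scheduled to. A quick check (assuming passwordless SSH between the hosts):

[root@k8s-master01 ~]# for i in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03;do ssh $i "ls /etc/kubernetes/pki/front-proxy-ca.pem";done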

Download the image and change the image address

[root@k8s-master01 ~]# grep "image:" components.yaml
          image: registry.aliyuncs.com/google_containers/metrics-server:v0.4.1

[root@k8s-master01 ~]# cat download_metrics_images.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-11
#FileName:      download_metrics_images.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \\033[01;31m"
END='\033[0m'

images=$(awk -F "/"  '/image:/{print $NF}' components.yaml)
HARBOR_DOMAIN=harbor.raymonds.cc

images_download(){
    ${COLOR}"开始下载Metrics镜像"${END}
    for i in ${images};do 
        docker pull registry.aliyuncs.com/google_containers/$i
        docker tag registry.aliyuncs.com/google_containers/$i ${HARBOR_DOMAIN}/google_containers/$i
        docker rmi registry.aliyuncs.com/google_containers/$i
        docker push ${HARBOR_DOMAIN}/google_containers/$i
    done
    ${COLOR}"Metrics镜像下载完成"${END}
}

images_download

[root@k8s-master01 ~]# bash download_metrics_images.sh

[root@k8s-master01 ~]# docker images |grep metrics
harbor.raymonds.cc/google_containers/metrics-server               v0.4.1              9759a41ccdf0        14 months ago       60.5MB

[root@k8s-master01 ~]# sed -ri 's@(.*image:) registry.aliyuncs.com(/.*)@\1 harbor.raymonds.cc\2@g' components.yaml
[root@k8s-master01 ~]# grep "image:" components.yaml 
          image: harbor.raymonds.cc/google_containers/metrics-server:v0.4.1

Install metrics-server

[root@k8s-master01 ~]# kubectl apply -f components.yaml
serviceaccount/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
service/metrics-server created
deployment.apps/metrics-server created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created

Check the status

[root@k8s-master01 ~]# kubectl get pod -n kube-system |grep metrics
metrics-server-7869ccfd68-lgdtm            1/1     Running   0          15s
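
kubectl top only works once the aggregated API reports Available, which can be checked with:

[root@k8s-master01 ~]# kubectl get apiservice v1beta1.metrics.k8s.io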

[root@k8s-master01 ~]# kubectl top node 
W0305 16:31:00.921693   30314 top_node.go:119] Using json format to get metrics. Next release will switch to protocol-buffers, switch early by passing --use-protocol-buffers flag
NAME                         CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
k8s-master01.example.local   730m         36%    1614Mi          42%       
k8s-master02.example.local   184m         9%     1426Mi          37%       
k8s-master03.example.local   468m         23%    1465Mi          38%       
k8s-node01.example.local     61m          3%     663Mi           17%       
k8s-node02.example.local     60m          3%     660Mi           17%       
k8s-node03.example.local     70m          3%     679Mi           17%       

[root@k8s-master01 ~]# kubectl top node --use-protocol-buffers
NAME                         CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
k8s-master01.example.local   730m         36%    1614Mi          42%       
k8s-master02.example.local   184m         9%     1426Mi          37%       
k8s-master03.example.local   468m         23%    1465Mi          38%       
k8s-node01.example.local     61m          3%     663Mi           17%       
k8s-node02.example.local     60m          3%     660Mi           17%       
k8s-node03.example.local     70m          3%     679Mi           17% 
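
Pod-level metrics are available the same way, for example:

[root@k8s-master01 ~]# kubectl top pod -n kube-system --use-protocol-buffers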

13. Installing Dashboard

13.1 Deploying Dashboard

Dashboard presents the various resources in the cluster; it can also be used to view Pod logs in real time and to run commands inside containers.

[root@k8s-master01 ~]# cat recommended.yaml 
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.2.0 
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.6 
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

[root@k8s-master01 ~]# vim recommended.yaml 
...
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort #add this line
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30005 #add this line
  selector:
    k8s-app: kubernetes-dashboard
...

[root@k8s-master01 ~]# grep "image:" recommended.yaml
          image: kubernetesui/dashboard:v2.2.0 
          image: kubernetesui/metrics-scraper:v1.0.6

Download the images and push them to Harbor

[root@k8s-master01 ~]# cat download_dashboard_images.sh
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-11
#FileName:      download_dashboard_images.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \\033[01;31m"
END='\033[0m'

images=$(awk -F "/"  '/image:/{print $NF}' recommended.yaml)
HARBOR_DOMAIN=harbor.raymonds.cc

images_download(){
    ${COLOR}"开始下载Dashboard镜像"${END}
    for i in ${images};do 
        docker pull registry.aliyuncs.com/google_containers/$i
        docker tag registry.aliyuncs.com/google_containers/$i ${HARBOR_DOMAIN}/google_containers/$i
        docker rmi registry.aliyuncs.com/google_containers/$i
        docker push ${HARBOR_DOMAIN}/google_containers/$i
    done
    ${COLOR}"Dashboard镜像下载完成"${END}
}

images_download

[root@k8s-master01 ~]# bash download_dashboard_images.sh

[root@k8s-master01 ~]# sed -ri 's@(.*image:) kubernetesui(/.*)@\1 harbor.raymonds.cc/google_containers\2@g' recommended.yaml

[root@k8s-master01 ~]# grep "image:" recommended.yaml
          image: harbor.raymonds.cc/google_containers/dashboard:v2.2.0 
          image: harbor.raymonds.cc/google_containers/metrics-scraper:v1.0.6 
          
[root@k8s-master01 ~]# kubectl create -f recommended.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created         
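
A quick check that the Dashboard Pods have come up (optional):

[root@k8s-master01 ~]# kubectl get pod -n kubernetes-dashboard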

Create the administrator user (admin.yaml)

[root@k8s-master01 ~]# vim admin.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding 
metadata: 
  name: admin-user
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system

[root@k8s-master01 ~]# kubectl apply -f admin.yaml 
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created

13.2 Logging in to the Dashboard

Add the following startup parameters to the Google Chrome launcher (shortcut) to work around the certificate error that otherwise blocks access to the Dashboard; see Figure 1-1:

--test-type --ignore-certificate-errors

(Figure 1-1: Google Chrome startup parameter configuration)
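
On Linux, the same flags can also be passed on the command line when launching Chrome (a minimal sketch, assuming the google-chrome binary is on the PATH; adjust the URL to your Dashboard address):

google-chrome --test-type --ignore-certificate-errors https://172.31.3.101:30005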

[root@k8s-master01 ~]# kubectl get svc kubernetes-dashboard -n kubernetes-dashboard
NAME                   TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   10.106.189.113   <none>        443:30005/TCP   18s

Access the Dashboard at https://172.31.3.101:30005; see Figure 1-2.

(Figure 1-2: Dashboard login options)

13.2.1 Token login

View the token value:

[root@k8s-master01 ~]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Name:         admin-user-token-9dmsd
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: 7d0d71d5-8454-40c1-877f-63e56e4fceda

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1411 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6InhpRVlPQzVJRER0TUR2OURobHI4Wnh0Z192eVo1SndMOGdfaEprNFg2RmMifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTlkbXNkIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI3ZDBkNzFkNS04NDU0LTQwYzEtODc3Zi02M2U1NmU0ZmNlZGEiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.AfUDqSA8YtMo7UJdQ_NMMU5BylS_fbf7jb_vTuYKsqVlxVjWe_1AgnaicyG0pO4UnL6_vkzqY3MigTqwlyuYKXrDb58F4MwjzHjMKMLHlesjo9WkzMptdq83fIU_8FQ731TROGaZsXGuBu1zOppiWqag-43d0Lqv2BBVl70-6-F5BAJ_XM5NSbFz7slxIUjbWJ4szauNCnUhy8z89bH4JIwVCD_lqvsC0rvCM8kgEaHHv9qIYL1uFfK8Y5bFy7BMXWHhJo5VwRvQ6-8Nz4bXgfDKWeBgovrnkR71WrgGtK0LZHPYZZo-GrxkVn4ixb0AOdgYxruXgkjs1otwoNvoig
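
If only the raw token is needed (for example in a script), it can be extracted directly; a minimal sketch for v1.21, where the ServiceAccount still references a token Secret:

[root@k8s-master01 ~]# kubectl -n kube-system get secret $(kubectl -n kube-system get sa admin-user -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 -d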

Enter the token value into the Token field and click Sign in to access the Dashboard; see Figure 1-3:

(Figure 1-3)

13.2.2 Logging in to the Dashboard with a kubeconfig file

[root@k8s-master01 ~]# cp /etc/kubernetes/admin.kubeconfig kubeconfig

Append the admin-user token obtained above to the user entry (the token: field at the end of the file below):

[root@k8s-master01 ~]# vim kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ1RENDQXN5Z0F3SUJBZ0lVTExMRWFmWUZtT256NFFZOHMzcWNOTTFFWUFFd0RRWUpLb1pJaHZjTkFRRUwKQlFBd2R6RUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeEV6QVJCZ05WQkFvVENrdDFZbVZ5Ym1WMFpYTXhHakFZQmdOVkJBc1RFVXQxWW1WeWJtVjBaWE10CmJXRnVkV0ZzTVJNd0VRWURWUVFERXdwcmRXSmxjbTVsZEdWek1DQVhEVEl5TURFeE5qRXpORE13TUZvWUR6SXgKTWpFeE1qSXpNVE0wTXpBd1dqQjNNUXN3Q1FZRFZRUUdFd0pEVGpFUU1BNEdBMVVFQ0JNSFFtVnBhbWx1WnpFUQpNQTRHQTFVRUJ4TUhRbVZwYW1sdVp6RVRNQkVHQTFVRUNoTUtTM1ZpWlhKdVpYUmxjekVhTUJnR0ExVUVDeE1SClMzVmlaWEp1WlhSbGN5MXRZVzUxWVd3eEV6QVJCZ05WQkFNVENtdDFZbVZ5Ym1WMFpYTXdnZ0VpTUEwR0NTcUcKU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLQW9JQkFRQzV1S3F2S1RsK3V3SHg3cGZLSGtHd0o0cFoxOXVkZ0xpZApjY0xBemFjSldoNTlIbnVJSzQ4SWQyVXQzenNNbmpWUktkMzR0NUNFQldkMmNQeVFveGl3ck5EdkNySnJBNmdoCkdTY0VMa0dpSldnYzNjN0lKSXlhM3d3akxITVBCbHp3RC80aitqWFFwTTltWElWeE5ndVk0dW1NeStXYzNBTGwKdE8yMEllUzVzTDlOWi9yc0F4MU8wOWtlc3ZYYXl4cWVXTXRJUStKQ1lqUzNETk95R1M1WERwTkRSaExRdUxJUApRUktHVGVvVm1vL0FvNHlIVFcyL0JJSXFJN1p6OGdMRUNWZlFPV3E0Q2JTMWRTbkJJYUZVc3RKRjNoMEd3UWRuCnc4NHBmV25DRlEzMkhFN0N2SVdMckcweFcyTmc3djhyWGIrdGZHQ2FSVEtLREVZQjNzU0RBZ01CQUFHalpqQmsKTUE0R0ExVWREd0VCL3dRRUF3SUJCakFTQmdOVkhSTUJBZjhFQ0RBR0FRSC9BZ0VDTUIwR0ExVWREZ1FXQkJRbApsNW1MUVlLdWw5SGJRNFplc1lKMGc5TDIrekFmQmdOVkhTTUVHREFXZ0JRbGw1bUxRWUt1bDlIYlE0WmVzWUowCmc5TDIrekFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBVHgyVUZSY0xFNzl1YXVpSTRGajBoNldSVndkTHhxMWsKZk90QWRuL0F5bmhHcjVoV09UVW5OS05OMG14NFM1WHZQTmptdVNHRGFCcjJtb3NjSU9pYmVDbHdUNWFSRG56YwpzS3loc2ZhNi9CcTVYNHhuMjdld0dvWjNuaXNSdExOQllSWHNjWTdHZ2U4c1V4eXlPdGdjNTRVbWRWYnJPN1VMCkRJV3VlYVdtT2FxOUxvNzlRWTdGQlFteEZab1lFeDY4ODMxNVZMNEY2bC83cVVKZ1FhOXBVV2Qwb0RDeExEaEwKUFhnZkEyakNBZmVpQVl6RFh3T1BwaURqN3lYSmZQVGlCSXFEQS9lYmYzOXFiTXhGMmtGdTdkOXNNaXNIYXZabgpsYUJqbHlCYTRBQ0d1eE9xTzlLaFc1cG04ZkE5NlBDRmh0eERPUmtURTVmNkcrZHBqWmpUNnc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    server: https://172.31.3.188:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQvRENDQXVTZ0F3SUJBZ0lVTTJNRVRqTnFFRFVlVzkxQWVacUxJVE5rNTlRd0RRWUpLb1pJaHZjTkFRRUwKQlFBd2R6RUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeEV6QVJCZ05WQkFvVENrdDFZbVZ5Ym1WMFpYTXhHakFZQmdOVkJBc1RFVXQxWW1WeWJtVjBaWE10CmJXRnVkV0ZzTVJNd0VRWURWUVFERXdwcmRXSmxjbTVsZEdWek1DQVhEVEl5TURFeE5qRXpOVEF3TUZvWUR6SXgKTWpFeE1qSXpNVE0xTURBd1dqQjJNUXN3Q1FZRFZRUUdFd0pEVGpFUU1BNEdBMVVFQ0JNSFFtVnBhbWx1WnpFUQpNQTRHQTFVRUJ4TUhRbVZwYW1sdVp6RVhNQlVHQTFVRUNoTU9jM2x6ZEdWdE9tMWhjM1JsY25NeEdqQVlCZ05WCkJBc1RFVXQxWW1WeWJtVjBaWE10YldGdWRXRnNNUTR3REFZRFZRUURFd1ZoWkcxcGJqQ0NBU0l3RFFZSktvWkkKaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNd3M0U0pjOXlxMGI5OTRoUUc4Sis3a2wwenQ4dmxVY3k5RwpZMnhGbWFqU3dsYmFEOG13YmtqU05BckdrSjF4TC9Pd1FkTWxYUTJMT3dnSERqTERRUzlLa2QwZ2FWY2M3RjdvCm8xZGE1TEJWQW5uSzVzWUFwSjJ5ZHpZcFFqc3IwZkFEdjNkS3d2OWIwaXZkZCt1cGQ0cWU2cFVmK0IxalozV1IKTVpSSnFmN2hCWTdoR3BUU09ZR2dlTGFDUXFNTDBhMzJmVVZHaHJ3WmFveWVQSzBab2dTSi9HVHRmQTltWnFEaQorOW4xa1pwQlBhN2xCd3h1eng4T1hweUpwWmZYSEh0Zis2MTNoVDV6RnkxZUpQQnZHQkhnMXhVZXNmT0xDazZ5Cm9penFOSjYxaVk1Y0plTWU5U2NvR2VXQ0xPNGV1eU14MVRjMzVvUUlsMzlqUEhkVzdkOENBd0VBQWFOL01IMHcKRGdZRFZSMFBBUUgvQkFRREFnV2dNQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CQmdnckJnRUZCUWNEQWpBTQpCZ05WSFJNQkFmOEVBakFBTUIwR0ExVWREZ1FXQkJRSWdua25wWW5xRzFKNmpUY3ROZHhnYXRtZCt6QWZCZ05WCkhTTUVHREFXZ0JRbGw1bUxRWUt1bDlIYlE0WmVzWUowZzlMMit6QU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUEKUUJXVCs0TmZNT3ZDRyt6ZTBzZjZTRWNSNGR5c2d6N2wrRUJHWGZRc015WWZ2V1IzdlVqSXFDbVhkdGdRVGVoQQpCenFjdXRnU0NvQWROM05oamF5eWZ2WWx5TGp1dFN4L1llSFM2N2IxMG5oY3FQV2ZRdiswWnB3dW1Tblp1R2JzCm8xdDF4aUhRRFFJeGxnNnZ6NjV2TXM0RDhYMGIrNkZlYVE2QVhJU0FFNENla0V6aTBGVjFFUUZuV2FOU24yT1AKNERoR2VsajJHRWpValNybDNQY0JnWG1Za1hMSHMvMFB5a3JjVnI0WWtudHJ4Wkp1WWd6cURTS1NJQk91WkpXVwpabkZXb0x1aWZEWXJZVjI1WXUzVXoyY2JYSUxDaVRvc1BRUDBhU3hMV25vMXJlV0VZWVFnNTdHbHBWVkxHcXVWCjNTRndWQjJwTE1NKy9WYi9JRGJWa0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBekN6aElsejNLclJ2MzNpRkFid243dVNYVE8zeStWUnpMMFpqYkVXWnFOTENWdG9QCnliQnVTTkkwQ3NhUW5YRXY4N0JCMHlWZERZczdDQWNPTXNOQkwwcVIzU0JwVnh6c1h1aWpWMXJrc0ZVQ2Vjcm0KeGdDa25iSjNOaWxDT3l2UjhBTy9kMHJDLzF2U0s5MTM2NmwzaXA3cWxSLzRIV05uZFpFeGxFbXAvdUVGanVFYQpsTkk1Z2FCNHRvSkNvd3ZScmZaOVJVYUd2Qmxxako0OHJSbWlCSW44Wk8xOEQyWm1vT0w3MmZXUm1rRTlydVVICkRHN1BIdzVlbkltbGw5Y2NlMS83clhlRlBuTVhMVjRrOEc4WUVlRFhGUjZ4ODRzS1RyS2lMT28wbnJXSmpsd2wKNHg3MUp5Z1o1WUlzN2g2N0l6SFZOemZtaEFpWGYyTThkMWJ0M3dJREFRQUJBb0lCQVFDSmZWOU5xSlM0cVREOApwMGZKMTA1OHpHb21YOFhTcUUrNGNnblppelRpUHFxbm1jZ3Y1U01lM280MUEybTIyOVdTb0FwemlTR1VVVUc3Ck1pVVpnZXFQVWdQUGlGZm5WWTdHaXBvVDVSMUNzTHd1RDdnL2RZZGt1aDBVMTh2RjFNaFdlKytmQVRVMmlEcUwKVjJPOXlpeTVxRElIb2JPTzlyVmdzaGxVNWhZWGozTzY0UHdhanltSlVCNjZkK25RYVNnVXdtNGFMNzdVOCtyeApTQlFkOG16Vy8xMGgxQ3RXMkozYVcxbGwyaDJvZTlEUGVmWHhwUElsamhWbkZBRzZQQkhvb1Bna0hDOXM3OWJnCkpPck1IcGxneVRmeGxRNi82VU9wcEd0ZjlzOElsdnVMQkd0bDQvMEt6UVo2L2VRcXJONCsyelFOSkhVUmM1YXgKNVBvOVd5YmhBb0dCQU5qVU1jeG03N0VnbHFKWk5KWDFtVHJ3STFrMDhLR1J6UlZDMU83ZzhIWUZ3ekgveWJuNgpVTlUraTFqMDJMdXB2SVhFQ1ZRblJIcy81Tk00TXFqOE9xQ0l5L0pkUWFCeCtwcUQ3TlhJcnZhaEkyMzI0WE1ICjRuQzRzZHc0Rm5oWlNJTTg1d0VnU3hkVG1wNzBCdlNPckFwOGVsT2wzbG4yOWljb0pGaE56OFY3QW9HQkFQRVAKZk8vME9yb3JrWjhzSk5YY2RvM0FNenRKVzZxMlorWXBXcVBlaTlDNnBxZE9GMmhFMERXdy9uSG4vaDRiL2hZZwpUVmJscUxkYUtTSUZGVE9FUWkxREFieDdSZ3U2SHdvL1ZnRlpaYWNwM016YUlkMDYwOXBnR2drYW5MLzJ4MkI1ClVoMjNrK0RsYmlZTEFLcU5WbmcrL1pBTFpTOGx2cWJLT0JHYWhPSHRBb0dBUzY2TkR6Wml0V1dWam1jcWxxa1oKNmR1Rnl3NVNhMkt6dlpjTk1hL3IzcFlXVXE1Z1gvekNHQnh6a1FJdFlCdFh4U3p1d0tQUUlHRGw0dCs3dHdZTApCSnVhN0NhbTBIVFlMdlNiUnVkOFFuTnVKV1RGdmx2aktzc2NzYXdXRTcyK05LaWVUT05Uc25tby81QlhtU2J2Clg5Mmc2Tzk5VTlPQ2lacFdUVWdqbkY4Q2dZQkx1RnU4Vy9FZWpaVCtkTFZWWUJ6MVJjeFI4U2NVSnB2WVZtRWMKWEVsNjFVYUlBeVdqSVFwdDh4eloxcytoMFpVc2loVUJHTDY0YVYvR1NlWncramgzVXpiMlo1cUhFSDJ6a0ZXSgpzdlVWWHpiMk9nYXRJVTl1cHdWR21zOW1GVFJuZjNSbDFVWmtQRzB2RWdHeGtSZjZTWDhJZ2l2VWRYeS9rNEd0Ck5lWkx1UUtCZ0NjNkdseE9TNDhhRWRlSmZtQVR2OXY5eVhHWW1Ta2w5YnRpV25iL2dud1RFOHlTaTFPdXY5a3EKNllaellwNmNQN0FUNUQvc29yb25pTFRsanlOUXJ3bUh1WjhKUkJFbzExc3dMZlRoTlB2R2ZHSEFBRTJ6eDZBMQpQZXhQd2lwczhOVFl6ZG5JczA0VkZ2YVI0V2lidUFXZmxCRUFiUUVtYnhnM1A2MmYwbnBvCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
    token: eyJhbGciOiJSUzI1NiIsImtpZCI6InhpRVlPQzVJRER0TUR2OURobHI4Wnh0Z192eVo1SndMOGdfaEprNFg2RmMifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTlkbXNkIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI3ZDBkNzFkNS04NDU0LTQwYzEtODc3Zi02M2U1NmU0ZmNlZGEiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.AfUDqSA8YtMo7UJdQ_NMMU5BylS_fbf7jb_vTuYKsqVlxVjWe_1AgnaicyG0pO4UnL6_vkzqY3MigTqwlyuYKXrDb58F4MwjzHjMKMLHlesjo9WkzMptdq83fIU_8FQ731TROGaZsXGuBu1zOppiWqag-43d0Lqv2BBVl70-6-F5BAJ_XM5NSbFz7slxIUjbWJ4szauNCnUhy8z89bH4JIwVCD_lqvsC0rvCM8kgEaHHv9qIYL1uFfK8Y5bFy7BMXWHhJo5VwRvQ6-8Nz4bXgfDKWeBgovrnkR71WrgGtK0LZHPYZZo-GrxkVn4ixb0AOdgYxruXgkjs1otwoNvoig
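
Alternatively, instead of pasting the token by hand, kubectl config can write it into the user entry (a sketch; <admin-user-token> stands for the token value shown above):

[root@k8s-master01 ~]# kubectl config set-credentials kubernetes-admin --kubeconfig=kubeconfig --token=<admin-user-token>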


14. Cluster Verification

Install busybox

cat<<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: busybox:1.28
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF
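
Before running the checks below, make sure the Pod is Ready (an optional quick check):

[root@k8s-master01 ~]# kubectl wait --for=condition=Ready pod/busybox --timeout=120s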
  1. Pods must be able to resolve Services.

  2. Pods must be able to resolve Services in other namespaces.

  3. Every node must be able to reach the kubernetes Service on port 443 and the kube-dns Service on port 53.

  4. Pod-to-Pod communication must work:

  a) within the same namespace

  b) across namespaces

  c) across nodes

Verify DNS resolution

[root@k8s-master01 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   3h9m

#Pods must be able to resolve Services
[root@k8s-master01 ~]# kubectl exec  busybox -n default -- nslookup kubernetes
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local

#Pods must be able to resolve Services in other namespaces
[root@k8s-master01 ~]# kubectl exec  busybox -n default -- nslookup kube-dns.kube-system
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kube-dns.kube-system
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Every node must be able to reach the kubernetes Service on port 443 and the kube-dns Service on port 53:

[root@k8s-master01 ~]# telnet 10.96.0.1 443
Trying 10.96.0.1...
Connected to 10.96.0.1.
Escape character is '^]'.

[root@k8s-master02 ~]# telnet 10.96.0.1 443
[root@k8s-master03 ~]# telnet 10.96.0.1 443

[root@k8s-node01 ~]# telnet 10.96.0.1 443
[root@k8s-node02 ~]# telnet 10.96.0.1 443
[root@k8s-node03 ~]# telnet 10.96.0.1 443

[root@k8s-master01 ~]# kubectl get svc -n kube-system
NAME             TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                  AGE
kube-dns         ClusterIP   10.96.0.10       <none>        53/UDP,53/TCP,9153/TCP   23h
metrics-server   ClusterIP   10.109.243.246   <none>        443/TCP                  23h

[root@k8s-master01 ~]# telnet 10.96.0.10 53
Trying 10.96.0.10...
Connected to 10.96.0.10.
Escape character is '^]'.
Connection closed by foreign host.

[root@k8s-master02 ~]# telnet 10.96.0.10 53
[root@k8s-master03 ~]# telnet 10.96.0.10 53

[root@k8s-node01 ~]# telnet 10.96.0.10 53
[root@k8s-node02 ~]# telnet 10.96.0.10 53
[root@k8s-node03 ~]# telnet 10.96.0.10 53

[root@k8s-master01 ~]# curl 10.96.0.10:53
curl: (52) Empty reply from server

[root@k8s-master02 ~]# curl 10.96.0.10:53
[root@k8s-master03 ~]# curl 10.96.0.10:53

[root@k8s-node01 ~]# curl 10.96.0.10:53
[root@k8s-node02 ~]# curl 10.96.0.10:53
[root@k8s-node03 ~]# curl 10.96.0.10:53

Pod-to-Pod communication must work:

[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide
NAME                                       READY   STATUS    RESTARTS   AGE   IP               NODE                         NOMINATED NODE   READINESS GATES
calico-kube-controllers-5d8579866c-lb9jt   1/1     Running   1          23h   172.31.3.102     k8s-master02.example.local   <none>           <none>
calico-node-2bn8v                          1/1     Running   1          23h   172.31.3.101     k8s-master01.example.local   <none>           <none>
calico-node-bz8gh                          1/1     Running   1          23h   172.31.3.102     k8s-master02.example.local   <none>           <none>
calico-node-jbvm7                          1/1     Running   1          23h   172.31.3.112     k8s-node02.example.local     <none>           <none>
calico-node-mch4p                          1/1     Running   3          23h   172.31.3.113     k8s-node03.example.local     <none>           <none>
calico-node-qpxd7                          1/1     Running   2          23h   172.31.3.111     k8s-node01.example.local     <none>           <none>
calico-node-v7v4h                          1/1     Running   1          23h   172.31.3.103     k8s-master03.example.local   <none>           <none>
coredns-7cd47cd6db-gz9vb                   1/1     Running   1          23h   192.170.21.194   k8s-node03.example.local     <none>           <none>
metrics-server-7869ccfd68-lgdtm            1/1     Running   1          23h   192.165.109.66   k8s-master03.example.local   <none>           <none>

[root@k8s-master01 ~]# kubectl get pod  -o wide
NAME      READY   STATUS    RESTARTS   AGE     IP              NODE                         NOMINATED NODE   READINESS GATES
busybox   1/1     Running   0          3m54s   192.162.55.65   k8s-master01.example.local   <none>           <none>

[root@k8s-master01 ~]# kubectl exec -it busybox
error: you must specify at least one command for the container
[root@k8s-master01 ~]# kubectl exec -it busybox -- sh
/ # ping 192.162.55.65
PING 192.162.55.65 (192.162.55.65): 56 data bytes
64 bytes from 192.162.55.65: seq=0 ttl=64 time=0.032 ms
64 bytes from 192.162.55.65: seq=1 ttl=64 time=0.045 ms
64 bytes from 192.162.55.65: seq=2 ttl=64 time=0.060 ms
64 bytes from 192.162.55.65: seq=3 ttl=64 time=0.056 ms
64 bytes from 192.162.55.65: seq=4 ttl=64 time=0.088 ms
^C
--- 192.162.55.65 ping statistics ---
5 packets transmitted, 5 packets received, 0% packet loss
round-trip min/avg/max = 0.032/0.056/0.088 ms
/ # exit

[root@k8s-master01 ~]# kubectl create deploy nginx --image=nginx --replicas=3
deployment.apps/nginx created

[root@k8s-master01 ~]# kubectl get deploy
NAME    READY   UP-TO-DATE   AVAILABLE   AGE
nginx   3/3     3            3           93s
[root@k8s-master01 ~]# kubectl get pod  -o wide|grep nginx
nginx-6799fc88d8-j8hbq   1/1     Running   0          64s    192.162.55.66     k8s-master01.example.local   <none>           <none>
nginx-6799fc88d8-r5snm   1/1     Running   0          64s    192.165.109.67    k8s-master03.example.local   <none>           <none>
nginx-6799fc88d8-wd9bx   1/1     Running   0          64s    192.167.195.131   k8s-node02.example.local     <none>           <none>
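
To confirm cross-node Pod-to-Pod connectivity, ping one of the nginx Pods that landed on another node from busybox (a sketch; substitute a Pod IP from your own output):

[root@k8s-master01 ~]# kubectl exec -it busybox -- ping -c 3 192.167.195.131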

[root@k8s-master01 ~]# kubectl delete deploy nginx
deployment.apps "nginx" deleted
[root@k8s-master01 ~]# kubectl delete pod busybox
pod "busybox" deleted

15. Key Production Configurations

Docker parameter configuration:

vim /etc/docker/daemon.json
{  
    "registry-mirrors": [ #docker镜像加速
    "https://registry.docker-cn.com",
    "http://hub-mirror.c.163.com",
    "https://docker.mirrors.ustc.edu.cn"
    ],
    "exec-opts": ["native.cgroupdriver=systemd"], #k8s需要systemd启动docker
    "max-concurrent-downloads": 10, #并发下载线程数
    "max-concurrent-uploads": 5, #并发上传线程数
    "log-opts": {
        "max-size": "300m", #docker日志文件最大300m
        "max-file": "2" #最大2个文件
    },
    "live-restore": true #docker服务重启,容器不会重启
}
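
Note that /etc/docker/daemon.json must be valid JSON, so remove the # annotations above before saving, then restart Docker (with live-restore enabled, running containers are not interrupted):

systemctl restart docker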

controller-manager parameter configuration:

[root@k8s-master01 ~]# vim /lib/systemd/system/kube-controller-manager.service
# --feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \ #enables automatic certificate issuance during bootstrap; it defaults to true in recent versions, so it does not need to be set explicitly
      --cluster-signing-duration=876000h0m0s \ #controls the validity period of the certificates signed by the cluster CA

[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do scp -o StrictHostKeyChecking=no /lib/systemd/system/kube-controller-manager.service $NODE:/lib/systemd/system/; done
kube-controller-manager.service                                                                              100% 1113   670.4KB/s   00:00    
kube-controller-manager.service                                                                              100% 1113     1.0MB/s   00:00

[root@k8s-master01 ~]# systemctl daemon-reload && systemctl restart kube-controller-manager
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl restart kube-controller-manager
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl restart kube-controller-manager

10-kubelet.conf parameter configuration:

[root@k8s-master01 ~]# vim /etc/systemd/system/kubelet.service.d/10-kubelet.conf
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384    --image-pull-progress-deadline=30m"

#--tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384  #restricts the TLS cipher suites used by the kubelet
#--image-pull-progress-deadline=30m  #if an image pull makes no progress before this deadline, the pull is cancelled; this docker-specific flag only takes effect when the container runtime is docker

[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03; do scp -o StrictHostKeyChecking=no /etc/systemd/system/kubelet.service.d/10-kubelet.conf $NODE:/etc/systemd/system/kubelet.service.d/ ;done 

kubelet-conf.yml parameter configuration:

[root@k8s-master01 ~]# vim /etc/kubernetes/kubelet-conf.yml
#add the following configuration
rotateServerCertificates: true
allowedUnsafeSysctls: #allows containers to set kernel parameters; this is a security risk, so enable only what is actually needed
  - "net.core*"
  - "net.ipv4.*"
kubeReserved: #resources reserved for Kubernetes components
  cpu: "1"
  memory: 1Gi
  ephemeral-storage: 10Gi
systemReserved: #resources reserved for the operating system
  cpu: "1"
  memory: 1Gi
  ephemeral-storage: 10Gi

#rotateServerCertificates: true  #when the serving certificate is about to expire, the kubelet automatically requests a new one from kube-apiserver and rotates it; this requires the RotateKubeletServerCertificate feature gate and approval of the resulting CertificateSigningRequest objects
  
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03; do scp /etc/kubernetes/kubelet-conf.yml $NODE:/etc/kubernetes/ ;done

[root@k8s-master01 ~]# systemctl daemon-reload && systemctl restart kubelet
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl restart kubelet
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl restart kubelet
[root@k8s-node01 ~]# systemctl daemon-reload && systemctl restart kubelet
[root@k8s-node02 ~]# systemctl daemon-reload && systemctl restart kubelet
[root@k8s-node03 ~]# systemctl daemon-reload && systemctl restart kubelet
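
With rotateServerCertificates enabled, new serving-certificate CSRs may show up as Pending after the kubelets restart and must be approved manually (a sketch; replace <csr-name> with a name listed by the first command):

[root@k8s-master01 ~]# kubectl get csr
[root@k8s-master01 ~]# kubectl certificate approve <csr-name>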

Add labels:

[root@k8s-master01 ~]# kubectl get nodes
NAME                         STATUS   ROLES    AGE   VERSION
k8s-master01.example.local   Ready    <none>   23h   v1.21.8
k8s-master02.example.local   Ready    <none>   23h   v1.21.8
k8s-master03.example.local   Ready    <none>   23h   v1.21.8
k8s-node01.example.local     Ready    <none>   23h   v1.21.8
k8s-node02.example.local     Ready    <none>   23h   v1.21.8
k8s-node03.example.local     Ready    <none>   23h   v1.21.8

[root@k8s-master01 ~]# kubectl get node --show-labels
NAME                         STATUS   ROLES    AGE   VERSION   LABELS
k8s-master01.example.local   Ready    <none>   23h   v1.21.8   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-master01.example.local,kubernetes.io/os=linux,node.kubernetes.io/node=
k8s-master02.example.local   Ready    <none>   23h   v1.21.8   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-master02.example.local,kubernetes.io/os=linux,node.kubernetes.io/node=
k8s-master03.example.local   Ready    <none>   23h   v1.21.8   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-master03.example.local,kubernetes.io/os=linux,node.kubernetes.io/node=
k8s-node01.example.local     Ready    <none>   23h   v1.21.8   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-node01.example.local,kubernetes.io/os=linux,node.kubernetes.io/node=
k8s-node02.example.local     Ready    <none>   23h   v1.21.8   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-node02.example.local,kubernetes.io/os=linux,node.kubernetes.io/node=
k8s-node03.example.local     Ready    <none>   23h   v1.21.8   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-node03.example.local,kubernetes.io/os=linux,node.kubernetes.io/node=

[root@k8s-master01 ~]# kubectl label node k8s-master01.example.local node-role.kubernetes.io/control-plane='' node-role.kubernetes.io/master=''
node/k8s-master01.example.local labeled

[root@k8s-master01 ~]# kubectl get nodes
NAME                         STATUS   ROLES                  AGE   VERSION
k8s-master01.example.local   Ready    control-plane,master   23h   v1.21.8
k8s-master02.example.local   Ready    <none>                 23h   v1.21.8
k8s-master03.example.local   Ready    <none>                 23h   v1.21.8
k8s-node01.example.local     Ready    <none>                 23h   v1.21.8
k8s-node02.example.local     Ready    <none>                 23h   v1.21.8
k8s-node03.example.local     Ready    <none>                 23h   v1.21.8

[root@k8s-master01 ~]# kubectl label node k8s-master02.example.local node-role.kubernetes.io/control-plane='' node-role.kubernetes.io/master='' 
node/k8s-master02.example.local labeled
[root@k8s-master01 ~]# kubectl label node k8s-master03.example.local node-role.kubernetes.io/control-plane='' node-role.kubernetes.io/master='' 
node/k8s-master03.example.local labeled

[root@k8s-master01 ~]# kubectl get nodes
NAME                         STATUS   ROLES                  AGE   VERSION
k8s-master01.example.local   Ready    control-plane,master   23h   v1.21.8
k8s-master02.example.local   Ready    control-plane,master   23h   v1.21.8
k8s-master03.example.local   Ready    control-plane,master   23h   v1.21.8
k8s-node01.example.local     Ready    <none>                 23h   v1.21.8
k8s-node02.example.local     Ready    <none>                 23h   v1.21.8
k8s-node03.example.local     Ready    <none>                 23h   v1.21.8
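
Optionally, the worker nodes can be labeled in the same way so that kubectl get nodes shows a role for them as well (purely cosmetic; shown here for node01 only):

[root@k8s-master01 ~]# kubectl label node k8s-node01.example.local node-role.kubernetes.io/worker=''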

Installation summary:

1. kubeadm

2. Binary installation

3. Automated installation

  a) Ansible

    i. Master node installation does not need to be automated.

    ii. Adding worker (Node) nodes can be handled with a playbook.

4. Details to pay attention to during installation

  a) The detailed configuration described above

  b) In production, etcd must be on a disk separate from the system disk, and that disk must be an SSD.

  c) The Docker data disk should also be separate from the system disk; use an SSD if possible.