a33. Ansible Production Practice -- Installing Kubernetes v1.21 from Binary Packages -- Cluster Deployment (Part 3)


14. Install Calico

14.1 Install Calico

[root@ansible-server ansible]# mkdir -p roles/calico/{tasks,vars,templates}
[root@ansible-server ansible]# cd roles/calico
[root@ansible-server calico]# ls
tasks  templates  vars

# Set HARBOR_DOMAIN below to your own Harbor domain, and change POD_SUBNET to your planned Pod network segment
[root@ansible-server calico]# vim vars/main.yml
HARBOR_DOMAIN: harbor.raymonds.cc
POD_SUBNET: 192.168.0.0/12

[root@ansible-server calico]# cat templates/calico-etcd.yaml.j2
---
# Source: calico/templates/calico-etcd-secrets.yaml
# The following contains k8s Secrets for use with a TLS enabled etcd cluster.
# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: calico-etcd-secrets
  namespace: kube-system
data:
  # Populate the following with etcd TLS configuration if desired, but leave blank if
  # not using TLS for etcd.
  # The keys below should be uncommented and the values populated with the base64
  # encoded contents of each file that would be associated with the TLS data.
  # Example command for encoding a file contents: cat <file> | base64 -w 0
  # etcd-key: null
  # etcd-cert: null
  # etcd-ca: null
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # Configure this with the location of your etcd cluster.
  etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"
  # If you're using TLS enabled etcd uncomment the following.
  # You must also populate the Secret below with these files.
  etcd_ca: ""   # "/calico-secrets/etcd-ca"
  etcd_cert: "" # "/calico-secrets/etcd-cert"
  etcd_key: ""  # "/calico-secrets/etcd-key"
  # Typha is disabled.
  typha_service_name: "none"
  # Configure the backend to use.
  calico_backend: "bird"
  # Configure the MTU to use for workload interfaces and tunnels.
  # - If Wireguard is enabled, set to your network MTU - 60
  # - Otherwise, if VXLAN or BPF mode is enabled, set to your network MTU - 50
  # - Otherwise, if IPIP is enabled, set to your network MTU - 20
  # - Otherwise, if not using any encapsulation, set to your network MTU.
  veth_mtu: "1440"

  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "etcd_endpoints": "__ETCD_ENDPOINTS__",
          "etcd_key_file": "__ETCD_KEY_FILE__",
          "etcd_cert_file": "__ETCD_CERT_FILE__",
          "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
          "mtu": __CNI_MTU__,
          "ipam": {
              "type": "calico-ipam"
          },
          "policy": {
              "type": "k8s"
          },
          "kubernetes": {
              "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        },
        {
          "type": "bandwidth",
          "capabilities": {"bandwidth": true}
        }
      ]
    }

---
# Source: calico/templates/calico-kube-controllers-rbac.yaml

# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
rules:
  # Pods are monitored for changing labels.
  # The node controller monitors Kubernetes nodes.
  # Namespace and serviceaccount labels are used for policy.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
      - serviceaccounts
    verbs:
      - watch
      - list
      - get
  # Watch for changes to Kubernetes NetworkPolicies.
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
- kind: ServiceAccount
  name: calico-kube-controllers
  namespace: kube-system
---

---
# Source: calico/templates/calico-node-rbac.yaml
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-node
rules:
  # The CNI plugin needs to get pods, nodes, and namespaces.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - endpoints
      - services
    verbs:
      # Used to discover service IPs for advertisement.
      - watch
      - list
  # Pod CIDR auto-detection on kubeadm needs access to config maps.
  - apiGroups: [""]
    resources:
      - configmaps
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - nodes/status
    verbs:
      # Needed for clearing NodeNetworkUnavailable flag.
      - patch

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
- kind: ServiceAccount
  name: calico-node
  namespace: kube-system

---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Make sure calico-node gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: calico-node
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      priorityClassName: system-node-critical
      initContainers:
        # This container installs the CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: registry.cn-beijing.aliyuncs.com/dotbalo/cni:v3.15.3
          command: ["/install-cni.sh"]
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # CNI MTU Config variable
            - name: CNI_MTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Prevents the container from sleeping forever.
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
            - mountPath: /calico-secrets
              name: etcd-certs
          securityContext:
            privileged: true
        # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
        # to communicate with Felix over the Policy Sync API.
        - name: flexvol-driver
          image: registry.cn-beijing.aliyuncs.com/dotbalo/pod2daemon-flexvol:v3.15.3
          volumeMounts:
          - name: flexvol-driver-host
            mountPath: /host/driver
          securityContext:
            privileged: true
      containers:
        # Runs calico-node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: registry.cn-beijing.aliyuncs.com/dotbalo/node:v3.15.3
          env:
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            # Set noderef for node controller.
            - name: CALICO_K8S_NODE_REF
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "k8s,bgp"
            # Auto-detect the BGP IP address.
            - name: IP
              value: "autodetect"
            # Enable IPIP
            - name: CALICO_IPV4POOL_IPIP
              value: "Always"
            # Enable or Disable VXLAN on the default IP pool.
            - name: CALICO_IPV4POOL_VXLAN
              value: "Never"
            # Set MTU for tunnel device used if ipip is enabled
            - name: FELIX_IPINIPMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Set MTU for the VXLAN tunnel device.
            - name: FELIX_VXLANMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Set MTU for the Wireguard tunnel device.
            - name: FELIX_WIREGUARDMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            # no effect. This should fall within `--cluster-cidr`.
            # - name: CALICO_IPV4POOL_CIDR
            #   value: "192.168.0.0/16"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to "info"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "info"
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          livenessProbe:
            exec:
              command:
              - /bin/calico-node
              - -felix-live
              - -bird-live
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
              - /bin/calico-node
              - -felix-ready
              - -bird-ready
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
            - mountPath: /calico-secrets
              name: etcd-certs
            - name: policysync
              mountPath: /var/run/nodeagent
      volumes:
        # Used by calico-node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # Mount in the etcd TLS secrets with mode 400.
        # See https://kubernetes.io/docs/concepts/configuration/secret/
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
            defaultMode: 0400
        # Used to create per-pod Unix Domain Sockets
        - name: policysync
          hostPath:
            type: DirectoryOrCreate
            path: /var/run/nodeagent
        # Used to install Flex Volume Driver
        - name: flexvol-driver-host
          hostPath:
            type: DirectoryOrCreate
            path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system

---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      serviceAccountName: calico-kube-controllers
      priorityClassName: system-cluster-critical
      # The controllers must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      containers:
        - name: calico-kube-controllers
          image: registry.cn-beijing.aliyuncs.com/dotbalo/kube-controllers:v3.15.3
          env:
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            # Choose which controllers to run.
            - name: ENABLED_CONTROLLERS
              value: policy,namespace,serviceaccount,workloadendpoint,node
          volumeMounts:
            # Mount in the etcd TLS secrets.
            - mountPath: /calico-secrets
              name: etcd-certs
          readinessProbe:
            exec:
              command:
              - /usr/bin/check-status
              - -r
      volumes:
        # Mount in the etcd TLS secrets with mode 400.
        # See https://kubernetes.io/docs/concepts/configuration/secret/
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
            defaultMode: 0400

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system

---
# Source: calico/templates/calico-typha.yaml

---
# Source: calico/templates/configure-canal.yaml

---
# Source: calico/templates/kdd-crds.yaml

# Modify the following content
[root@ansible-server calico]# grep "etcd_endpoints:.*" templates/calico-etcd.yaml.j2 
  etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"

[root@ansible-server calico]# sed -i 's#etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"#etcd_endpoints: "{% for i in groups.etcd %}https://{{ hostvars[i].ansible_default_ipv4.address }}:2379{% if not loop.last %},{% endif %}{% endfor %}"#g' templates/calico-etcd.yaml.j2  

[root@ansible-server calico]# grep "etcd_endpoints:.*" templates/calico-etcd.yaml.j2
  etcd_endpoints: "{% for i in groups.etcd %}https://{{ hostvars[i].ansible_default_ipv4.address }}:2379{% if not loop.last %},{% endif %}{% endfor %}"
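
For reference, when Ansible renders this Jinja2 expression, the line expands to a comma-separated HTTPS endpoint list. A sketch of the rendered result, assuming three etcd hosts at the hypothetical addresses 172.31.3.108-110:

  etcd_endpoints: "https://172.31.3.108:2379,https://172.31.3.109:2379,https://172.31.3.110:2379"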

[root@ansible-server calico]# vim tasks/calico_file.yml
- name: copy calico-etcd.yaml file
  template:
    src: calico-etcd.yaml.j2
    dest: /root/calico-etcd.yaml
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico]# vim tasks/config.yml
- name: get ETCD_KEY key
  shell:
    cmd: cat /etc/kubernetes/pki/etcd/etcd-key.pem | base64 | tr -d '\n'
  register: ETCD_KEY
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd-key:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '# (etcd-key:) null'
    replace: '\1 {{ ETCD_KEY.stdout }}'
  when:
    - ansible_hostname=="k8s-master01"
- name: get ETCD_CERT key
  shell:
    cmd: cat /etc/kubernetes/pki/etcd/etcd.pem | base64 | tr -d '\n'
  register: ETCD_CERT
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd-cert:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '# (etcd-cert:) null'
    replace: '\1 {{ ETCD_CERT.stdout }}'
  when:
    - ansible_hostname=="k8s-master01"
- name: get ETCD_CA key
  shell:
    cmd: cat /etc/kubernetes/pki/etcd/etcd-ca.pem | base64 | tr -d '\n'
  when:
    - ansible_hostname=="k8s-master01"
  register: ETCD_CA
- name: Modify the ".*etcd-ca:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '# (etcd-ca:) null'
    replace: '\1 {{ ETCD_CA.stdout }}'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd_ca:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '(etcd_ca:) ""'
    replace: '\1 "/calico-secrets/etcd-ca"'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd_cert:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '(etcd_cert:) ""'
    replace: '\1 "/calico-secrets/etcd-cert"'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*etcd_key:.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '(etcd_key:) ""'
    replace: '\1 "/calico-secrets/etcd-key"'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*CALICO_IPV4POOL_CIDR.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '# (- name: CALICO_IPV4POOL_CIDR)'
    replace: '\1'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the ".*192.168.0.0.*" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '#   (value:) "192.168.0.0/16"'
    replace: '  \1 "{{ POD_SUBNET }}"'
  when:
    - ansible_hostname=="k8s-master01"
- name: Modify the "image:" line
  replace:
    path: /root/calico-etcd.yaml
    regexp: '(.*image:) docker.io/calico(/.*)'
    replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico]# vim tasks/download_images.yml
- name: get calico version
  shell:
    chdir: /root
    cmd: awk -F "/"  '/image:/{print $NF}' calico-etcd.yaml
  register: CALICO_VERSION
  when:
    - ansible_hostname=="k8s-master01"
- name: download calico image
  shell: |
    {% for i in CALICO_VERSION.stdout_lines %}
      docker pull registry.cn-beijing.aliyuncs.com/raymond9/{{ i }}
      docker tag registry.cn-beijing.aliyuncs.com/raymond9/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
      docker rmi registry.cn-beijing.aliyuncs.com/raymond9/{{ i }}
      docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
    {% endfor %}
  when:
    - ansible_hostname=="k8s-master01"
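
These pull/tag/push steps assume the first master can reach the Aliyun mirror registry and is already authenticated against Harbor; if the push fails with an authentication error, a one-time login with the standard Docker CLI is needed first (credentials are whatever your Harbor uses):

[root@k8s-master01 ~]# docker login harbor.raymonds.cc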

[root@ansible-server calico]# vim tasks/install_calico.yml
- name: install calico
  shell:
    chdir: /root
    cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig apply -f calico-etcd.yaml"
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server calico]# vim tasks/main.yml
- include: calico_file.yml
- include: config.yml
- include: download_images.yml
- include: install_calico.yml

[root@ansible-server calico]# cd ../../
[root@ansible-server ansible]# tree roles/calico
roles/calico
├── tasks
│   ├── calico_file.yml
│   ├── config.yml
│   ├── download_images.yml
│   ├── install_calico.yml
│   └── main.yml
├── templates
│   └── calico-etcd.yaml.j2
└── vars
    └── main.yml

3 directories, 7 files

[root@ansible-server ansible]# vim calico_role.yml 
---
- hosts: master:etcd

  roles:
    - role: calico

[root@ansible-server ansible]# ansible-playbook calico_role.yml 
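
If the Calico pods fail to come up, the rendered manifest on the first master can be spot-checked for the etcd substitutions before digging further (a sketch, assuming the master01 inventory pattern used throughout this series):

[root@ansible-server ansible]# ansible master01 -m shell -a "grep -E 'etcd_endpoints|etcd_(ca|cert|key)|CALICO_IPV4POOL_CIDR' /root/calico-etcd.yaml"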

14.2 Verify Calico

[root@k8s-master01 ~]# kubectl get pod -n kube-system |grep calico
calico-kube-controllers-5d8579866c-2wfnf   1/1     Running   0          33s
calico-node-4q8tk                          1/1     Running   0          33s
calico-node-8gmqf                          1/1     Running   0          33s
calico-node-ff9wk                          1/1     Running   0          33s
calico-node-jpmfd                          1/1     Running   0          33s
calico-node-prdbh                          1/1     Running   0          33s
calico-node-zq9ks                          1/1     Running   0          33s

[root@k8s-master01 ~]# kubectl get nodes
NAME                         STATUS   ROLES    AGE     VERSION
k8s-master01.example.local   Ready    <none>   5m26s   v1.21.8
k8s-master02.example.local   Ready    <none>   5m26s   v1.21.8
k8s-master03.example.local   Ready    <none>   5m26s   v1.21.8
k8s-node01.example.local     Ready    <none>   2m19s   v1.21.8
k8s-node02.example.local     Ready    <none>   2m19s   v1.21.8
k8s-node03.example.local     Ready    <none>   2m19s   v1.21.8

15. Install CoreDNS

15.1 Install CoreDNS

[root@ansible-server ansible]# mkdir -p roles/coredns/{tasks,templates,vars}
[root@ansible-server ansible]# cd roles/coredns/
[root@ansible-server coredns]# ls
tasks  templates  vars

# Change CLUSTERDNS below to the 10th IP address of your planned Service network segment, and set HARBOR_DOMAIN to your own Harbor domain
[root@ansible-server coredns]# vim vars/main.yml
CLUSTERDNS: 10.96.0.10
HARBOR_DOMAIN: harbor.raymonds.cc
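
As a quick sanity check, the 10th IP of the planned Service CIDR can be computed rather than counted by hand; a minimal sketch, assuming python3 is available and the Service subnet planned in an earlier part of this series is 10.96.0.0/12:

[root@ansible-server coredns]# python3 -c "import ipaddress; print(ipaddress.ip_network('10.96.0.0/12')[10])"
10.96.0.10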

[root@ansible-server coredns]# cat templates/coredns.yaml.j2 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
  - apiGroups:
    - ""
    resources:
    - endpoints
    - services
    - pods
    - namespaces
    verbs:
    - list
    - watch
  - apiGroups:
    - discovery.k8s.io
    resources:
    - endpointslices
    verbs:
    - list
    - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
          max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
         podAntiAffinity:
           preferredDuringSchedulingIgnoredDuringExecution:
           - weight: 100
             podAffinityTerm:
               labelSelector:
                 matchExpressions:
                   - key: k8s-app
                     operator: In
                     values: ["kube-dns"]
               topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: registry.aliyuncs.com/google_containers/coredns:1.8.3
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 192.168.0.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP

[root@ansible-server coredns]# vim templates/coredns.yaml.j2
...
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
          max_concurrent 1000
        }
        cache 30
        loop ## delete this loop plugin line to avoid internal forwarding loops
        reload
        loadbalance
    }
...
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: {{ CLUSTERDNS }} # change this
...

[root@ansible-server coredns]# vim tasks/coredns_file.yml
- name: copy coredns.yaml file
  template:
    src: coredns.yaml.j2
    dest: /root/coredns.yaml
  when:
    - ansible_hostname=="k8s-master01"

[root@ansible-server coredns]# vim tasks/config.yml
- name: Modify the "image:" line
  replace:
    path: /root/coredns.yaml
    regexp: '(.*image:) registry.aliyuncs.com(/.*)'
    replace: '\1 {{ HARBOR_DOMAIN }}\2'

[root@ansible-server coredns]# vim tasks/download_images.yml
- name: get coredns version
  shell:
    chdir: /root
    cmd: awk -F "/"  '/image:/{print $NF}' coredns.yaml
  register: COREDNS_VERSION
- name: download coredns image
  shell: |
    {% for i in COREDNS_VERSION.stdout_lines %}
      docker pull registry.aliyuncs.com/google_containers/{{ i }}
      docker tag registry.aliyuncs.com/google_containers/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
      docker rmi registry.aliyuncs.com/google_containers/{{ i }}
      docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
    {% endfor %}

[root@ansible-server coredns]# vim tasks/install_coredns.yml
- name: install coredns
  shell:
    chdir: /root
    cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig apply -f coredns.yaml"

[root@ansible-server coredns]# vim tasks/main.yml
- include: coredns_file.yml
- include: config.yml
- include: download_images.yml
- include: install_coredns.yml

[root@ansible-server coredns]# cd ../../
[root@ansible-server ansible]# tree roles/coredns/
roles/coredns/
├── tasks
│   ├── config.yml
│   ├── coredns_file.yml
│   ├── download_images.yml
│   ├── install_coredns.yml
│   └── main.yml
├── templates
│   └── coredns.yaml.j2
└── vars
    └── main.yml

3 directories, 7 files

[root@ansible-server ansible]# vim coredns_role.yml
---
- hosts: master01

  roles:
    - role: coredns

[root@ansible-server ansible]# ansible-playbook coredns_role.yml

15.2 Verify CoreDNS

[root@k8s-master01 ~]# kubectl get pod -n kube-system |grep coredns
coredns-7cd47cd6db-gz9vb                   1/1     Running   0          14s
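
To confirm that resolution actually works end to end, a throwaway pod can query the cluster DNS (a sketch; it assumes busybox:1.28 is pullable from your nodes):

[root@k8s-master01 ~]# kubectl run dns-test --image=busybox:1.28 --restart=Never --rm -it -- nslookup kubernetes.default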

16. Install Metrics

16.1 Install Metrics

[root@ansible-server ansible]# mkdir -p roles/metrics/{files,vars,tasks}
[root@ansible-server ansible]# cd roles/metrics/
[root@ansible-server metrics]# ls
files  tasks  vars

# Set HARBOR_DOMAIN below to your own Harbor domain
[root@ansible-server metrics]# vim vars/main.yml
HARBOR_DOMAIN: harbor.raymonds.cc
 
[root@ansible-server metrics]# cat files/components.yaml 
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
  - apiGroups:
      - metrics.k8s.io
    resources:
      - pods
      - nodes
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - nodes
      - nodes/stats
      - namespaces
      - configmaps
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
    - name: https
      port: 443
      protocol: TCP
      targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
        - args:
            - --cert-dir=/tmp
            - --secure-port=4443
            - --metric-resolution=30s
            - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /livez
              port: https
              scheme: HTTPS
            periodSeconds: 10
          name: metrics-server
          ports:
            - containerPort: 4443
              name: https
              protocol: TCP
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /readyz
              port: https
              scheme: HTTPS
            periodSeconds: 10
          securityContext:
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 1000
          volumeMounts:
            - mountPath: /tmp
              name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
        - emptyDir: {}
          name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100

[root@ansible-server metrics]# vim files/components.yaml
...
    spec:
      containers:
        - args:
            - --cert-dir=/tmp
            - --secure-port=4443
            - --metric-resolution=30s
            - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
# Add the following lines
            - --kubelet-insecure-tls
            - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
            - --requestheader-username-headers=X-Remote-User
            - --requestheader-group-headers=X-Remote-Group
            - --requestheader-extra-headers-prefix=X-Remote-Extra-
...
          volumeMounts:
            - mountPath: /tmp
              name: tmp-dir
# Add the following lines
            - name: ca-ssl
              mountPath: /etc/kubernetes/pki
...
      volumes:
        - emptyDir: {}
          name: tmp-dir
# Add the following lines
        - name: ca-ssl
          hostPath:
            path: /etc/kubernetes/pki
...
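
The flags added above assume /etc/kubernetes/pki/front-proxy-ca.pem already exists on every node that may schedule metrics-server (it was distributed in an earlier part of this series). A quick ad-hoc check, assuming master and node inventory groups matching the earlier parts:

[root@ansible-server metrics]# ansible 'master:node' -m shell -a 'ls -l /etc/kubernetes/pki/front-proxy-ca.pem'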

[root@ansible-server metrics]# vim tasks/metrics_file.yml
- name: copy components.yaml file
  copy:
    src: components.yaml
    dest: /root/components.yaml

[root@ansible-server metrics]# vim tasks/config.yml
- name: Modify the "image:" line
  replace:
    path: /root/components.yaml
    regexp: '(.*image:) k8s.gcr.io/metrics-server(/.*)'
    replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'

[root@ansible-server metrics]# vim tasks/download_images.yml
- name: get metrics version
  shell:
    chdir: /root
    cmd: awk -F "/"  '/image:/{print $NF}' components.yaml
  register: METRICS_VERSION
- name: download metrics image
  shell: |
    {% for i in METRICS_VERSION.stdout_lines %}
      docker pull registry.aliyuncs.com/google_containers/{{ i }}
      docker tag registry.aliyuncs.com/google_containers/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
      docker rmi registry.aliyuncs.com/google_containers/{{ i }}
      docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
    {% endfor %}

[root@ansible-server metrics]# vim tasks/install_metrics.yml
- name: install metrics
  shell:
    chdir: /root
    cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig apply -f components.yaml"

[root@ansible-server metrics]# vim tasks/main.yml 
- include: metrics_file.yml
- include: config.yml
- include: download_images.yml
- include: install_metrics.yml

[root@ansible-server metrics]# cd ../../
[root@ansible-server ansible]# tree roles/metrics/
roles/metrics/
├── files
│   └── components.yaml
├── tasks
│   ├── config.yml
│   ├── download_images.yml
│   ├── install_metrics.yml
│   ├── main.yml
│   └── metrics_file.yml
└── vars
    └── main.yml

3 directories, 7 files

[root@ansible-server ansible]# vim metrics_role.yml
---
- hosts: master01

  roles:
    - role: metrics

[root@ansible-server ansible]# ansible-playbook metrics_role.yml

16.2 Verify Metrics

[root@k8s-master01 ~]# kubectl get pod -n kube-system |grep metrics
metrics-server-7869ccfd68-tlbjx            1/1     Running   0          11s
      
[root@k8s-master01 ~]# kubectl top node --use-protocol-buffers
NAME                         CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
k8s-master01.example.local   662m         33%    1566Mi          41%       
k8s-master02.example.local   160m         8%     1408Mi          36%       
k8s-master03.example.local   295m         14%    1441Mi          37%       
k8s-node01.example.local     102m         5%     693Mi           18%       
k8s-node02.example.local     103m         5%     651Mi           17%       
k8s-node03.example.local     93m          4%     642Mi           16% 
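
Pod-level metrics become available shortly after node metrics; the same flag applies:

[root@k8s-master01 ~]# kubectl top pod -n kube-system --use-protocol-buffers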

17. Install Dashboard

17.1 Install Dashboard

[root@ansible-server ansible]# mkdir -p roles/dashboard/{tasks,vars,files,templates}
[root@ansible-server ansible]# cd roles/dashboard/
[root@ansible-server dashboard]# ls
files  tasks  templates  vars

# Set HARBOR_DOMAIN below to your own Harbor domain
[root@ansible-server dashboard]# vim vars/main.yml
HARBOR_DOMAIN: harbor.raymonds.cc
NODEPORT: 30005

[root@ansible-server dashboard]# cat templates/recommended.yaml.j2
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.2.0 
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.6 
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

[root@ansible-server dashboard]# vim templates/recommended.yaml.j2
...
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort # add this line
  ports:
    - port: 443
      targetPort: 8443
      nodePort: {{ NODEPORT }} # add this line
  selector:
    k8s-app: kubernetes-dashboard
...

[root@ansible-server dashboard]# vim files/admin.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding 
metadata: 
  name: admin-user
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system

[root@ansible-server dashboard]# vim tasks/dashboard_file.yml
- name: copy recommended.yaml file
  template:
    src: recommended.yaml.j2
    dest: /root/recommended.yaml
- name: copy admin.yaml file
  copy:
    src: admin.yaml
    dest: /root/admin.yaml

[root@ansible-server dashboard]# vim tasks/config.yml
- name: Modify the "image:" line
  replace:
    path: /root/recommended.yaml
    regexp: '(.*image:) kubernetesui(/.*)'
    replace: '\1 {{ HARBOR_DOMAIN }}/google_containers\2'

[root@ansible-server dashboard]# vim tasks/download_images.yml
- name: get dashboard version
  shell:
    chdir: /root
    cmd: awk -F "/"  '/image:/{print $NF}' recommended.yaml
  register: DASHBOARD_VERSION
- name: download dashboard image
  shell: |
    {% for i in DASHBOARD_VERSION.stdout_lines %}
      docker pull registry.aliyuncs.com/google_containers/{{ i }}
      docker tag registry.aliyuncs.com/google_containers/{{ i }} {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
      docker rmi registry.aliyuncs.com/google_containers/{{ i }}
      docker push {{ HARBOR_DOMAIN }}/google_containers/{{ i }}
    {% endfor %}

[root@ansible-server dashboard]# vim tasks/install_dashboard.yml
- name: install dashboard
  shell:
    chdir: /root
    cmd: "kubectl --kubeconfig=/etc/kubernetes/admin.kubeconfig apply -f recommended.yaml -f admin.yaml"

[root@ansible-server dashboard]# vim tasks/main.yml
- include: dashboard_file.yml
- include: config.yml
- include: download_images.yml
- include: install_dashboard.yml

[root@ansible-server dashboard]# cd ../../
[root@ansible-server ansible]# tree roles/dashboard/
roles/dashboard/
├── files
│   └── admin.yaml
├── tasks
│   ├── config.yml
│   ├── dashboard_file.yml
│   ├── download_images.yml
│   ├── install_dashboard.yml
│   └── main.yml
├── templates
│   └── recommended.yaml.j2
└── vars
    └── main.yml

4 directories, 8 files

[root@ansible-server ansible]# vim dashboard_role.yml
---
- hosts: master01

  roles:
    - role: dashboard

[root@ansible-server ansible]# ansible-playbook dashboard_role.yml
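
Before opening a browser, the NodePort exposure can be confirmed from any master:

[root@k8s-master01 ~]# kubectl get svc -n kubernetes-dashboard kubernetes-dashboard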

17.2 Log in to Dashboard

https://172.31.3.101:30005
View the token value:

[root@k8s-master01 ~]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Name:         admin-user-token-gv2wt
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: 6a9ac08f-672c-4c51-81a1-d1a48539aa1e

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1411 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IkFpTHJHZUppeEV4MTMzUXVtNFBFNm9Vck5TOGhiaWo5a0pyVnFIRWlrcUEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWd2Mnd0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI2YTlhYzA4Zi02NzJjLTRjNTEtODFhMS1kMWE0ODUzOWFhMWUiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.oTSKkV7x9Nlaoe-TJni1uKyax2-FwDv47CuIzQbl5tmibmcEJ1vmio9V5eY4kwwoWlDOVYj4MVQ192OWyMVyoNkYOEv8_ZvxqK1xxpicPDDz64-qUYKKI4PbGDMsO40djeiJR-8PKM1D4LmxBsaBR-QjPq9rwG6iKJ2poOr-GTgoODTCCn7DTShEuKNtPQxDf7rGP3Ofccc-C1jBcd7M07am-6qtWsg8GFEwhbk4HXTZLyThQGe99Tth74OxQFu1izUzbN_JnE8HM9jjf1sqgFeAXVoxx6hitTKah8KQplfta5OCLEhxc9Q82rpeD7RjNUyrc1AN7CfLcXS-dKvJfw
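
An equivalent one-liner that prints just the decoded token (plain kubectl with jsonpath, no extra tooling):

[root@k8s-master01 ~]# kubectl -n kube-system get secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') -o jsonpath='{.data.token}' | base64 -d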
