22. Installing Kubernetes v1.21 with kubeadm -- Cluster Deployment (Part 3)


13.1 Deploying the Dashboard

[root@k8s-master01 ~]# cat recommended.yaml 
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.2.0 
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.6 
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}
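
The manifest above matches the upstream Dashboard v2.2.0 recommended.yaml; if it is not already on disk, it can be fetched directly (assuming internet access):

wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.2.0/aio/deploy/recommended.yaml

Next, change the Service type to NodePort and pin a fixed node port so the Dashboard is reachable from outside the cluster: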

[root@k8s-master01 ~]# vim recommended.yaml 
...
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort # add this line
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30005 # add this line
  selector:
    k8s-app: kubernetes-dashboard
...
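
Alternatively, if the Service has already been created, the same change can be applied in place with a strategic-merge patch instead of editing the manifest; a sketch:

kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort","ports":[{"port":443,"targetPort":8443,"nodePort":30005}]}}'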

[root@k8s-master01 ~]# grep "image:" recommended.yaml
          image: kubernetesui/dashboard:v2.2.0 
          image: kubernetesui/metrics-scraper:v1.0.6

Download the images and push them to Harbor:

[root@k8s-master01 ~]# cat download_dashboard_images.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-11
#FileName:      download_dashboard_images.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \\033[01;31m"
END='\033[0m'

images=$(awk -F "/"  '/image:/{print $NF}' recommended.yaml)
HARBOR_DOMAIN=harbor.raymonds.cc

images_download(){
    ${COLOR}"Downloading Dashboard images"${END}
    for i in ${images};do 
        docker pull registry.aliyuncs.com/google_containers/$i
        docker tag registry.aliyuncs.com/google_containers/$i ${HARBOR_DOMAIN}/google_containers/$i
        docker rmi registry.aliyuncs.com/google_containers/$i
        docker push ${HARBOR_DOMAIN}/google_containers/$i
    done
    ${COLOR}"Dashboard images downloaded"${END}
}

images_download

[root@k8s-master01 ~]# bash download_dashboard_images.sh 

[root@k8s-master01 ~]# sed -ri 's@(.*image:) kubernetesui(/.*)@\1 harbor.raymonds.cc/google_containers\2@g' recommended.yaml 
[root@k8s-master01 ~]# grep "image:" recommended.yaml
          image: harbor.raymonds.cc/google_containers/dashboard:v2.2.0 
          image: harbor.raymonds.cc/google_containers/metrics-scraper:v1.0.6 

[root@k8s-master01 ~]# kubectl  create -f recommended.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created

Create the admin user (admin.yaml):

[root@k8s-master01 ~]# vim admin.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding 
metadata: 
  name: admin-user
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system

[root@k8s-master01 ~]# kubectl apply -f admin.yaml 
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created

13.2 Logging in to the Dashboard

Add the following flags to the Google Chrome launcher to work around the self-signed-certificate error that otherwise blocks access to the Dashboard (see Figure 1-1):

--test-type --ignore-certificate-errors

[Figure 1-1]

[root@k8s-master01 ~]# kubectl get svc kubernetes-dashboard -n kubernetes-dashboard
NAME                   TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   10.101.181.206   <none>        443:30005/TCP   66s

Access the Dashboard at https://172.31.3.101:30005 (see Figure 1-2).

[Figure 1-2: Dashboard login options]

13.2.1 Logging in with a token

Retrieve the token value:

[root@k8s-master01 ~]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Name:         admin-user-token-k728l
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: d983856d-c896-44cb-812f-69215864f845

Type:  kubernetes.io/service-account-token

Data
====
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6InhHSlloc2pJTWNxdVVhSlNrR0UwY3pLd1JLSk1IeThBcDVPWjZ4dW5OMU0ifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWs3MjhsIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkOTgzODU2ZC1jODk2LTQ0Y2ItODEyZi02OTIxNTg2NGY4NDUiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.Gn6BORdZG1SO29erk9vapnijsaJdLEg7dsiZhqRxM5fB6jscA3Q6aHdBVpRA1IhIgGcx-naEPu-YRCCKfuVh3ZYWUVpLEgQAfKISeUYb6nXQVz1RygPhjGHZgc7XjiIT5sXqIPs6WmteQ3lZkXEr6U__E93nany4bsr9jr1XbfCICLeriVsjNyR6KyCR1F03KmNm_zP-BRApq5JjZhE-nEnWWrLMTJGXMaqjfi7I7uF4584wj20RA28ktOLEZV7BP3XjjS4jJzLR2Vl3BihME8H5YjCDxOh1yO7QUlarh06aRSmHoQkqx1YnVa_eS5bYFW7daI9SOaZ0cSMRbVnsrA
ca.crt:     1066 bytes

Paste the token into the Token field and click Sign in to access the Dashboard (see Figure 1-3).

[Figure 1-3]
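
The same token can also be extracted in one step with jsonpath instead of being copied out of the describe output; a sketch:

kubectl -n kube-system get secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') -o jsonpath='{.data.token}' | base64 -d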

13.2.2 Logging in to the Dashboard with a kubeconfig file

[root@k8s-master01 ~]# cp /etc/kubernetes/admin.conf kubeconfig

Edit the copy and append the admin-user token (retrieved in the previous section) as a token: field under the user entry; it appears as the last line of the file below:

[root@k8s-master01 ~]# vim kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1ETXdOREE0TVRjeE5sb1hEVE15TURNd01UQTRNVGN4Tmxvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTXpWCnNGam02Qjk5cUlCOFVSZ3A3Nm9vZUJjT2kyWHZidzR6WFk1QmJQU1J4bFRPZUJrdEQ1SXAwNnFnMzlpb00rbm0KdUIvb0VuRzhmVkxQZi93QmtwcWlERTArSFNhTzFGU2JhSlJNZXVKN2J2STdNR3RkRnFzdHo0amZ2ak9uZUpaRQo1K0g4MmV4aXFLK0pkRENYVENMT0doK1YyQWROcXgySmo1bk1QYi9JU0hoQ0lPSUVvRVRJV0w5Wm41bUtlMnhJCjlFcWJaRlZQcVVWOG1zLzArNGs4eG1sQ1R0M09HZEd3UUtXSC94dE5zenFzVHY1RGM3Zmg4eXN6a3JVNVFHOWsKdDRnTjVWc2w1YmxuTE5YSDlodFVQTjVPY0hyOEEvNFczU08xVkVubEQyelVPN2pJdXB1RjhjZWNvM2dXcVJBYwpob2IvdGNkUTM2c2hza0ZGbFhVQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZJZmpuRGtYOUNLK0dTMms5amZTRVA1RjR3Y2hNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFDRFAydFp0R0lHK1hmVGs3Y01EeXdxdm1XdnlVYUFCdlNoYk5sZTA3OUIyeDhoSE1RegppOXZtUTVNU3hZcnQwL0ZDZXNPT2wraUk4aFFOeDV2M3E3MTJRWXZnLy95R0pGL0N3a1ltbmc5UUFRRktYR1NsCnRJMXIwZWVuTTNCVEpzS1VlaEVqMnZUa0tXWTlUcDFMbUw2M1Q1QUU1SWJTTnFRU2M4MjBIck1pRERMNFR3Q2UKdThXSnoxZ1lGek1ZRUcyeDBxaXZPZUxuSjFtZTY0Y1gybVRGc1FmcGt5TEhmd1hLcWJqbElzSW5Edk5qaC9Kdwp5cy9UZGFRSlFBTmxxWmV2M09LcU9jNUtJbFE3ZTZRaWt5UVZQQ200SmROU1JZSU9pUXQ5VStBQUhWZ1NMN2V6ClJGaFUxVUpPekVFVTVTVVoxdWdaN3pOY0dYWmVKUFpZaHpPSwotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    server: https://172.31.3.188:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJYXFaUzIrTitIZFl3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TWpBek1EUXdPREUzTVRaYUZ3MHlNekF6TURRd09ERTNNVGhhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXByUDhyMkxKL0hYcHJEQ04Kb24zSHI5M0pNd2c0b1Y3ZGV5b0x0ekJlK2JVV2JNYlk5dWFRTU9KZkZrdmEyNmtUN3R6cmw2R1pNRzNTYlVveQpsNU9vajJkYkRrK3pYM01mZmhNVTVIdGdFYTcvcmx1VDJXWC9rSy9FTVJPSVBKOTBqdmNZMkhZTTNwN0lyaXg0CkZQOGloOFJNdUowTEJENjRyOE9tSDMwZzVFdy8yY1lra0JlaVdndU83aG1ISjJKVFVjVCsyRHREZFpxUjg0MTYKMzBEY2ZrTVBSMUlhalIrRXV5SUlIMjI1NElsSWRXcnBBQmcrQmRlU1dEQ0FHQnpLRnBOSmwrUDFqYURBV2wxTQpCN3BGOEFTNUVwd1lUNkcwWGt3R1NMRmpxc3pUelowZWhxa2tkNmZDUmVyMGtSRmpwVHdkREppdHUzUXE4MmR0CmRUbDZWd0lEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JTSDQ1dzVGL1FpdmhrdHBQWTMwaEQrUmVNSApJVEFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBUXluMDhJQS9GNUJlNVZaNzFyL0tkOXU1eERWREsvYkhvSHQ4CmRSYVR1Tm1rVzM2SkhBQXVhTk5GRjhBNENZSWhwTTJSaDR2ZDhTUXFIQlpNTnZXWlJqTGFXaWpDeE5rQjJJVWcKbU95QWtsanQ1c1daZ1h6U25oQ3EwSTdtUG9FVElmNzkyZnEzNks5UFVQSldRczlOQmdLV0srQ01nbGx3THJiKwpaZHFveUJrcWY2S2ZnQWlNVEJFSWFaTFNFU1I5cHk1VFdjNjh1WktVUmVtMElRVDZsTGoyR3A4cWY0SFZ0OG1PCnd5UUlhSlVOeVlCekUvV0pUeFU1SSsybHlhSWlCNjM5eVc2bEJGcC9IMi95ZWZmUXFvYnhaYnlqTG0yQldPbkwKUWZVSktzb0R6ZUk3WHF2ZXVUMmVhUzNxZTRjbDl6Z1Qrb2taM0prbkEzdzZ3QWhUWFE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBcHJQOHIyTEovSFhwckRDTm9uM0hyOTNKTXdnNG9WN2RleW9MdHpCZStiVVdiTWJZCjl1YVFNT0pmRmt2YTI2a1Q3dHpybDZHWk1HM1NiVW95bDVPb2oyZGJEayt6WDNNZmZoTVU1SHRnRWE3L3JsdVQKMldYL2tLL0VNUk9JUEo5MGp2Y1kySFlNM3A3SXJpeDRGUDhpaDhSTXVKMExCRDY0cjhPbUgzMGc1RXcvMmNZawprQmVpV2d1TzdobUhKMkpUVWNUKzJEdERkWnFSODQxNjMwRGNma01QUjFJYWpSK0V1eUlJSDIyNTRJbElkV3JwCkFCZytCZGVTV0RDQUdCektGcE5KbCtQMWphREFXbDFNQjdwRjhBUzVFcHdZVDZHMFhrd0dTTEZqcXN6VHpaMGUKaHFra2Q2ZkNSZXIwa1JGanBUd2RESml0dTNRcTgyZHRkVGw2VndJREFRQUJBb0lCQURUSjBCMTF5Z0tCRUVMdgppUnBJZkVCcm1mZm9CWTdNTEpTQTV2N1dlV3pYZEVkejNHYXE4a1FZbXdYRGRZV1RqL1ZXemQvUlUySXJsTFA1CkIvWkowdG0rWCs0elBOamY2cndRck5BQlpaV1Q5ZkV2bkt6K0NLUzNsQWY2YUd6U0RhWHNCMWtjWU16cU9TQTIKZ3J3YllzTGRYWElrZ01XU1d3ZUZqQWh5cGtURWdWWTJmRkpkdG84TGNhV1hkek1ySElkNzVLTitiT0hNL3JQdQpkMHEwR2VoQmozaGNSK0hmN25wcWlrKzZGTTloZTFQY00zYXNlR1FYcDlTMDdIaWZBZTloYXloREhtdW4xdEhMCmFGVWRRTE5kU09GdFJuNEtFbUhIVVFnM1M4ZDJQZ2xUSEdSRWFDSlE5a3VEVXVZc0FqU1BmTTdGUkJnaFpIUjQKakVOQ0c5a0NnWUVBd0hxNTY0NjBTTHRqUXpNbG5DV2xkdDBMdHk1N2dUek1IRnlhdXNZbWZ4ZWZKVjBYVUJBUwpXQUZ4ZVlicDJGTUhMbktGZXR3UGI3NVlVYk9FVlBKUEZUVHlkTHNlVkRzZGpuU2N5ekpxTXpkWDN5dkMzZmFtCjQvU29WTTF1dVV4UTdtWThMbWZDRkdaUzRBM2wwMjN2RUc5M24xNTBZMldlM0k2bTRESG1nWE1DZ1lFQTNiZVkKT1ltbGRSSENzcDdzelkzUkM3NUFONWp2VlJMelBOOGlSc3pqbGwvZjU2dTlvWjVyNVNtVjJwZUdBaXkxR2JZRwoyd3BxQzliU3pXQVFyVEhmeUowTTYrSnVQSkYxS2R2MGFPVlVWL0RqZUZTN1dpZzQvaFNzL3lPL2M4QUxzcTl3Cjgvb2gvQnkzVTRjSXRzbEZSNFZ2TmV2TFJDbWcvd2M1VHRYSG1vMENnWUEyMm9ManE2NmJUN1B4TWNGVko3RnAKRjRQQU42SUE2Y2V0aVlLYkdkOWN0dW1tNTBtK2ltWC82S0ZqSmR5OHhxUzBTdlRaVjUrYWs5OXRvVUJrU1EzYQptRlZFaVJNU3BDby9MK0tEVWlITDZNakZGQjZielRBd3BNZllLeGVUL1RqUFNCbDhqQnRXWHZiZFhYaVM4YkNBCnVaT1l1bjFtMms0VXd4NnhvK2J5R3dLQmdHbGdWTzBqeW91MWdUKzc0UGhoS3JYV0dsZFNUSTIyai95aVlId2QKN1IwUFBDK3pwRkVvSmtlb1dCeW9YM3l4R3ZHNVhkREZSbG1TSkdhUC9kMFRFelVaejRaYys5KzFrbVh1c0Y4YQpmOWU4R2FDYlJPQTR0YjJzdmZ3VlZXMENIWTB6YlEvSUZsYm9ud2tINk5CWElLSTVmUDIzcjhnWnNQY0V5K3Y0CkZ3SVJBb0dBYlpudFRKSTg2ekV6aUY5VnpPK2hXdFpPNVpMMEpKOGNKaGNSWFRZQ2d2MkZkeUVSd2d2SFRzdy8Kc0s0cVI5TytPVGdnMU5TekEwZ0lqemJJamtCazA1VXhwVWRZSUhIcUFodlB5WC9qZVlrdnlrck92aFlzaXlmUwpxZGtITng1dkQzclE5T2c2R3l2Z2hPVzE4dG1yVFdCV1plVSt0ZnRoblRHZU1kQkE0TWM9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
    token: eyJhbGciOiJSUzI1NiIsImtpZCI6InhHSlloc2pJTWNxdVVhSlNrR0UwY3pLd1JLSk1IeThBcDVPWjZ4dW5OMU0ifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWs3MjhsIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkOTgzODU2ZC1jODk2LTQ0Y2ItODEyZi02OTIxNTg2NGY4NDUiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.Gn6BORdZG1SO29erk9vapnijsaJdLEg7dsiZhqRxM5fB6jscA3Q6aHdBVpRA1IhIgGcx-naEPu-YRCCKfuVh3ZYWUVpLEgQAfKISeUYb6nXQVz1RygPhjGHZgc7XjiIT5sXqIPs6WmteQ3lZkXEr6U__E93nany4bsr9jr1XbfCICLeriVsjNyR6KyCR1F03KmNm_zP-BRApq5JjZhE-nEnWWrLMTJGXMaqjfi7I7uF4584wj20RA28ktOLEZV7BP3XjjS4jJzLR2Vl3BihME8H5YjCDxOh1yO7QUlarh06aRSmHoQkqx1YnVa_eS5bYFW7daI9SOaZ0cSMRbVnsrA
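
Instead of pasting the token by hand in vim, it can also be appended with kubectl config set-credentials, which merges new fields into an existing user entry; a sketch:

TOKEN=$(kubectl -n kube-system get secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') -o jsonpath='{.data.token}' | base64 -d)
kubectl config set-credentials kubernetes-admin --token="${TOKEN}" --kubeconfig=kubeconfig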


14. Required configuration changes

Change kube-proxy to ipvs mode. Because the ipvs configuration was commented out when the cluster was initialized, it has to be modified manually:

Run on the master01 node:

[root@k8s-master01 ~]# curl 127.0.0.1:10249/proxyMode
iptables

[root@k8s-master01 ~]# kubectl edit cm kube-proxy -n kube-system
    mode: "ipvs" # change from "" to "ipvs"
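
The same change can also be made non-interactively; a sketch, assuming the field still holds the empty default mode: "":

kubectl -n kube-system get cm kube-proxy -o yaml | sed 's/mode: ""/mode: "ipvs"/' | kubectl apply -f -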

Update the kube-proxy Pods:

[root@k8s-master01 ~]# kubectl patch daemonset kube-proxy -p "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"date\":\"`date +'%s'`\"}}}}}" -n kube-system
daemonset.apps/kube-proxy patched

Verify the kube-proxy mode:

[root@k8s-master01 ~]# curl 127.0.0.1:10249/proxyMode
ipvs
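
kube-proxy runs on every node, so the same check can be repeated cluster-wide; a sketch, assuming password-less SSH between the nodes:

for i in 172.31.3.102 172.31.3.103 172.31.3.108 172.31.3.109 172.31.3.110; do
    echo -n "$i: "; ssh $i 'curl -s 127.0.0.1:10249/proxyMode'; echo
done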

[root@k8s-master01 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.17.0.1:30005 rr
  -> 192.169.111.141:8443         Masq    1      0          0         
TCP  172.31.3.101:30005 rr
  -> 192.169.111.141:8443         Masq    1      0          0         
TCP  192.162.55.64:30005 rr
  -> 192.169.111.141:8443         Masq    1      0          0         
TCP  10.96.0.1:443 rr
  -> 172.31.3.101:6443            Masq    1      0          0         
  -> 172.31.3.102:6443            Masq    1      0          0         
  -> 172.31.3.103:6443            Masq    1      0          0         
TCP  10.96.0.10:53 rr
  -> 192.170.21.193:53            Masq    1      0          0         
  -> 192.170.21.194:53            Masq    1      0          0         
TCP  10.96.0.10:9153 rr
  -> 192.170.21.193:9153          Masq    1      0          0         
  -> 192.170.21.194:9153          Masq    1      0          0         
TCP  10.99.167.144:443 rr
  -> 192.167.195.132:4443         Masq    1      0          0         
TCP  10.101.88.7:8000 rr
  -> 192.169.111.140:8000         Masq    1      0          0         
TCP  10.106.189.113:443 rr
  -> 192.169.111.141:8443         Masq    1      0          0         
TCP  127.0.0.1:30005 rr
  -> 192.169.111.141:8443         Masq    1      0          0         
UDP  10.96.0.10:53 rr
  -> 192.170.21.193:53            Masq    1      0          0         
  -> 192.170.21.194:53            Masq    1      0          0

15. Notes

Note: in a kubeadm-installed cluster, certificates are valid for one year by default. On the master nodes, kube-apiserver, kube-scheduler, kube-controller-manager, and etcd all run as containers, which can be confirmed with kubectl get po -n kube-system.
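
The remaining lifetime of those certificates can be checked with kubeadm on a master node:

kubeadm certs check-expiration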

Unlike a binary installation:

The kubelet configuration files are /etc/sysconfig/kubelet and /var/lib/kubelet/config.yaml; after changing them, the kubelet process must be restarted.

[root@k8s-master01 ~]# ls /etc/sysconfig/kubelet
/etc/sysconfig/kubelet
[root@k8s-master01 ~]# ls /var/lib/kubelet/config.yaml 
/var/lib/kubelet/config.yaml
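
For example, after editing /var/lib/kubelet/config.yaml, restart the kubelet so the change takes effect:

systemctl restart kubelet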

The configuration files of the other components live in the /etc/kubernetes/manifests directory, e.g. kube-apiserver.yaml. When one of these YAML files changes, the kubelet automatically reloads the configuration, i.e. it restarts the Pod. Do not create additional copies of these files here, because the kubelet treats every file in this directory as a static Pod manifest.

[root@k8s-master01 ~]# ls /etc/kubernetes/manifests
etcd.yaml  kube-apiserver.yaml  kube-controller-manager.yaml  kube-scheduler.yaml
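
This directory is the kubelet's staticPodPath, which can be confirmed in its config file (the expected value, assuming the default kubeadm layout, is shown as a comment):

grep staticPodPath /var/lib/kubelet/config.yaml
# staticPodPath: /etc/kubernetes/manifests
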
[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide
NAME                                                 READY   STATUS    RESTARTS   AGE     IP                NODE                         NOMINATED NODE   READINESS GATES
calico-kube-controllers-5d8579866c-fmqvq             1/1     Running   0          29m     172.31.3.109      k8s-node02.example.local     <none>           <none>
calico-node-4x957                                    1/1     Running   0          29m     172.31.3.101      k8s-master01.example.local   <none>           <none>
calico-node-m9wll                                    1/1     Running   0          29m     172.31.3.103      k8s-master03.example.local   <none>           <none>
calico-node-ph8vb                                    1/1     Running   0          29m     172.31.3.109      k8s-node02.example.local     <none>           <none>
calico-node-pzf57                                    1/1     Running   0          29m     172.31.3.110      k8s-node03.example.local     <none>           <none>
calico-node-qmn42                                    1/1     Running   0          29m     172.31.3.108      k8s-node01.example.local     <none>           <none>
calico-node-t89ws                                    1/1     Running   0          29m     172.31.3.102      k8s-master02.example.local   <none>           <none>
coredns-78db7484ff-9zfrs                             1/1     Running   0          54m     192.169.111.129   k8s-node01.example.local     <none>           <none>
coredns-78db7484ff-m6hhp                             1/1     Running   0          54m     192.170.21.193    k8s-node03.example.local     <none>           <none>
etcd-k8s-master01.example.local                      1/1     Running   0          54m     172.31.3.101      k8s-master01.example.local   <none>           <none>
etcd-k8s-master02.example.local                      1/1     Running   0          50m     172.31.3.102      k8s-master02.example.local   <none>           <none>
etcd-k8s-master03.example.local                      1/1     Running   0          43m     172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-apiserver-k8s-master01.example.local            1/1     Running   0          54m     172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-apiserver-k8s-master02.example.local            1/1     Running   0          50m     172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-apiserver-k8s-master03.example.local            1/1     Running   0          43m     172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-controller-manager-k8s-master01.example.local   1/1     Running   1          54m     172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-controller-manager-k8s-master02.example.local   1/1     Running   0          50m     172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-controller-manager-k8s-master03.example.local   1/1     Running   0          43m     172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-proxy-4hsmt                                     1/1     Running   0          3m2s    172.31.3.109      k8s-node02.example.local     <none>           <none>
kube-proxy-5nshs                                     1/1     Running   0          3m      172.31.3.110      k8s-node03.example.local     <none>           <none>
kube-proxy-d9v8g                                     1/1     Running   0          3m11s   172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-proxy-gnqgd                                     1/1     Running   0          3m24s   172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-proxy-snqwb                                     1/1     Running   0          3m14s   172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-proxy-twpdb                                     1/1     Running   0          3m32s   172.31.3.108      k8s-node01.example.local     <none>           <none>
kube-scheduler-k8s-master01.example.local            1/1     Running   1          54m     172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-scheduler-k8s-master02.example.local            1/1     Running   0          50m     172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-scheduler-k8s-master03.example.local            1/1     Running   0          43m     172.31.3.103      k8s-master03.example.local   <none>           <none>
metrics-server-9787b55bd-5gjbf                       1/1     Running   0          24m     192.167.195.129   k8s-node02.example.local     <none>           <none>

[root@k8s-master01 ~]# kubectl get pod -A -o  wide
NAMESPACE              NAME                                                 READY   STATUS    RESTARTS   AGE     IP                NODE                         NOMINATED NODE   READINESS GATES
kube-system            calico-kube-controllers-5d8579866c-fmqvq             1/1     Running   0          29m     172.31.3.109      k8s-node02.example.local     <none>           <none>
kube-system            calico-node-4x957                                    1/1     Running   0          29m     172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            calico-node-m9wll                                    1/1     Running   0          29m     172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            calico-node-ph8vb                                    1/1     Running   0          29m     172.31.3.109      k8s-node02.example.local     <none>           <none>
kube-system            calico-node-pzf57                                    1/1     Running   0          29m     172.31.3.110      k8s-node03.example.local     <none>           <none>
kube-system            calico-node-qmn42                                    1/1     Running   0          29m     172.31.3.108      k8s-node01.example.local     <none>           <none>
kube-system            calico-node-t89ws                                    1/1     Running   0          29m     172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            coredns-78db7484ff-9zfrs                             1/1     Running   0          54m     192.169.111.129   k8s-node01.example.local     <none>           <none>
kube-system            coredns-78db7484ff-m6hhp                             1/1     Running   0          54m     192.170.21.193    k8s-node03.example.local     <none>           <none>
kube-system            etcd-k8s-master01.example.local                      1/1     Running   0          54m     172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            etcd-k8s-master02.example.local                      1/1     Running   0          50m     172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            etcd-k8s-master03.example.local                      1/1     Running   0          44m     172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            kube-apiserver-k8s-master01.example.local            1/1     Running   0          54m     172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            kube-apiserver-k8s-master02.example.local            1/1     Running   0          50m     172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            kube-apiserver-k8s-master03.example.local            1/1     Running   0          44m     172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            kube-controller-manager-k8s-master01.example.local   1/1     Running   1          54m     172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            kube-controller-manager-k8s-master02.example.local   1/1     Running   0          50m     172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            kube-controller-manager-k8s-master03.example.local   1/1     Running   0          44m     172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            kube-proxy-4hsmt                                     1/1     Running   0          3m27s   172.31.3.109      k8s-node02.example.local     <none>           <none>
kube-system            kube-proxy-5nshs                                     1/1     Running   0          3m25s   172.31.3.110      k8s-node03.example.local     <none>           <none>
kube-system            kube-proxy-d9v8g                                     1/1     Running   0          3m36s   172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            kube-proxy-gnqgd                                     1/1     Running   0          3m49s   172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            kube-proxy-snqwb                                     1/1     Running   0          3m39s   172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            kube-proxy-twpdb                                     1/1     Running   0          3m57s   172.31.3.108      k8s-node01.example.local     <none>           <none>
kube-system            kube-scheduler-k8s-master01.example.local            1/1     Running   1          54m     172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            kube-scheduler-k8s-master02.example.local            1/1     Running   0          50m     172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            kube-scheduler-k8s-master03.example.local            1/1     Running   0          44m     172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            metrics-server-9787b55bd-5gjbf                       1/1     Running   0          25m     192.167.195.129   k8s-node02.example.local     <none>           <none>
kubernetes-dashboard   dashboard-metrics-scraper-6cd7688f4b-ghl2w           1/1     Running   0          8m51s   192.169.111.130   k8s-node01.example.local     <none>           <none>
kubernetes-dashboard   kubernetes-dashboard-7bd4bf94d8-d9hq7                1/1     Running   0          8m51s   192.170.21.194    k8s-node03.example.local     <none>           <none>

After a kubeadm installation, the master nodes do not allow Pods to be scheduled by default. This can be opened up as follows:

View the Taints:

[root@k8s-master01 ~]# kubectl  describe node -l node-role.kubernetes.io/master=  | grep Taints
Taints:             node-role.kubernetes.io/master:NoSchedule
Taints:             node-role.kubernetes.io/master:NoSchedule
Taints:             node-role.kubernetes.io/master:NoSchedule

Remove the Taint:

[root@k8s-master01 ~]# kubectl  taint node  -l node-role.kubernetes.io/master node-role.kubernetes.io/master:NoSchedule-
node/k8s-master01 untainted
node/k8s-master02 untainted
node/k8s-master03 untainted

[root@k8s-master01 ~]# kubectl  describe node -l node-role.kubernetes.io/master=  | grep Taints
Taints:             <none>
Taints:             <none>
Taints:             <none>
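
If the masters should stop accepting workloads again later, the taint can be re-added; a sketch:

kubectl taint node -l node-role.kubernetes.io/master= node-role.kubernetes.io/master=:NoSchedule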

The kube-proxy configuration is stored in a ConfigMap in the kube-system namespace and can be changed with

kubectl edit cm kube-proxy -n kube-system

After the change is complete, kube-proxy can be restarted with a patch:

kubectl patch daemonset kube-proxy -p "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"date\":\"`date +'%s'`\"}}}}}" -n kube-system

16. Cluster verification

#List the pods in all namespaces of the cluster
[root@k8s-master01 ~]# kubectl get pod --all-namespaces
NAMESPACE              NAME                                                 READY   STATUS    RESTARTS   AGE
kube-system            calico-kube-controllers-5d8579866c-fmqvq             1/1     Running   0          30m
kube-system            calico-node-4x957                                    1/1     Running   0          30m
kube-system            calico-node-m9wll                                    1/1     Running   0          30m
kube-system            calico-node-ph8vb                                    1/1     Running   0          30m
kube-system            calico-node-pzf57                                    1/1     Running   0          30m
kube-system            calico-node-qmn42                                    1/1     Running   0          30m
kube-system            calico-node-t89ws                                    1/1     Running   0          30m
kube-system            coredns-78db7484ff-9zfrs                             1/1     Running   0          55m
kube-system            coredns-78db7484ff-m6hhp                             1/1     Running   0          55m
kube-system            etcd-k8s-master01.example.local                      1/1     Running   0          55m
kube-system            etcd-k8s-master02.example.local                      1/1     Running   0          51m
kube-system            etcd-k8s-master03.example.local                      1/1     Running   0          45m
kube-system            kube-apiserver-k8s-master01.example.local            1/1     Running   0          55m
kube-system            kube-apiserver-k8s-master02.example.local            1/1     Running   0          51m
kube-system            kube-apiserver-k8s-master03.example.local            1/1     Running   0          45m
kube-system            kube-controller-manager-k8s-master01.example.local   1/1     Running   1          55m
kube-system            kube-controller-manager-k8s-master02.example.local   1/1     Running   0          51m
kube-system            kube-controller-manager-k8s-master03.example.local   1/1     Running   0          45m
kube-system            kube-proxy-4hsmt                                     1/1     Running   0          4m24s
kube-system            kube-proxy-5nshs                                     1/1     Running   0          4m22s
kube-system            kube-proxy-d9v8g                                     1/1     Running   0          4m33s
kube-system            kube-proxy-gnqgd                                     1/1     Running   0          4m46s
kube-system            kube-proxy-snqwb                                     1/1     Running   0          4m36s
kube-system            kube-proxy-twpdb                                     1/1     Running   0          4m54s
kube-system            kube-scheduler-k8s-master01.example.local            1/1     Running   1          55m
kube-system            kube-scheduler-k8s-master02.example.local            1/1     Running   0          51m
kube-system            kube-scheduler-k8s-master03.example.local            1/1     Running   0          45m
kube-system            metrics-server-9787b55bd-5gjbf                       1/1     Running   0          26m
kubernetes-dashboard   dashboard-metrics-scraper-6cd7688f4b-ghl2w           1/1     Running   0          9m48s
kubernetes-dashboard   kubernetes-dashboard-7bd4bf94d8-d9hq7                1/1     Running   0          9m48s

#View the CPU and memory usage of all pods
[root@k8s-master01 ~]# kubectl top pod --use-protocol-buffers -n kube-system
NAME                                                 CPU(cores)   MEMORY(bytes)   
calico-kube-controllers-5d8579866c-fmqvq             2m           18Mi            
calico-node-4x957                                    22m          58Mi            
calico-node-m9wll                                    23m          59Mi            
calico-node-ph8vb                                    23m          61Mi            
calico-node-pzf57                                    23m          55Mi            
calico-node-qmn42                                    26m          54Mi            
calico-node-t89ws                                    18m          57Mi            
coredns-78db7484ff-9zfrs                             2m           16Mi            
coredns-78db7484ff-m6hhp                             2m           13Mi            
etcd-k8s-master01.example.local                      30m          84Mi            
etcd-k8s-master02.example.local                      25m          94Mi            
etcd-k8s-master03.example.local                      28m          76Mi            
kube-apiserver-k8s-master01.example.local            41m          315Mi           
kube-apiserver-k8s-master02.example.local            37m          426Mi           
kube-apiserver-k8s-master03.example.local            38m          307Mi           
kube-controller-manager-k8s-master01.example.local   12m          62Mi            
kube-controller-manager-k8s-master02.example.local   1m           25Mi            
kube-controller-manager-k8s-master03.example.local   1m           26Mi            
kube-proxy-4hsmt                                     11m          20Mi            
kube-proxy-5nshs                                     1m           18Mi            
kube-proxy-d9v8g                                     10m          20Mi            
kube-proxy-gnqgd                                     6m           20Mi            
kube-proxy-snqwb                                     7m           18Mi            
kube-proxy-twpdb                                     1m           20Mi            
kube-scheduler-k8s-master01.example.local            2m           24Mi            
kube-scheduler-k8s-master02.example.local            2m           27Mi            
kube-scheduler-k8s-master03.example.local            2m           25Mi            
metrics-server-9787b55bd-5gjbf                       2m           16Mi 

#View the services
[root@k8s-master01 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   57m
[root@k8s-master01 ~]# kubectl get svc -n kube-system
NAME             TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                  AGE
kube-dns         ClusterIP   10.96.0.10       <none>        53/UDP,53/TCP,9153/TCP   57m
metrics-server   ClusterIP   10.103.246.114   <none>        443/TCP                  27m

[root@k8s-master01 ~]# telnet 10.96.0.1 443
Trying 10.96.0.1...
Connected to 10.96.0.1.
Escape character is '^]'.
quit
Connection closed by foreign host.

[root@k8s-master01 ~]# telnet 10.96.0.10 53
Trying 10.96.0.10...
Connected to 10.96.0.10.
Escape character is '^]'.
Connection closed by foreign host.
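
Beyond checking that the DNS port accepts TCP connections, name resolution itself can be verified with a throwaway pod; a sketch, assuming the nodes can pull busybox:1.28 from Docker Hub:

kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default.svc.cluster.local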

[root@k8s-master01 ~]# kubectl get pod --all-namespaces -o wide
NAMESPACE              NAME                                                 READY   STATUS    RESTARTS   AGE     IP                NODE                         NOMINATED NODE   READINESS GATES
kube-system            calico-kube-controllers-5d8579866c-fmqvq             1/1     Running   0          32m     172.31.3.109      k8s-node02.example.local     <none>           <none>
kube-system            calico-node-4x957                                    1/1     Running   0          32m     172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            calico-node-m9wll                                    1/1     Running   0          32m     172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            calico-node-ph8vb                                    1/1     Running   0          32m     172.31.3.109      k8s-node02.example.local     <none>           <none>
kube-system            calico-node-pzf57                                    1/1     Running   0          32m     172.31.3.110      k8s-node03.example.local     <none>           <none>
kube-system            calico-node-qmn42                                    1/1     Running   0          32m     172.31.3.108      k8s-node01.example.local     <none>           <none>
kube-system            calico-node-t89ws                                    1/1     Running   0          32m     172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            coredns-78db7484ff-9zfrs                             1/1     Running   0          57m     192.169.111.129   k8s-node01.example.local     <none>           <none>
kube-system            coredns-78db7484ff-m6hhp                             1/1     Running   0          57m     192.170.21.193    k8s-node03.example.local     <none>           <none>
kube-system            etcd-k8s-master01.example.local                      1/1     Running   0          57m     172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            etcd-k8s-master02.example.local                      1/1     Running   0          53m     172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            etcd-k8s-master03.example.local                      1/1     Running   0          47m     172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            kube-apiserver-k8s-master01.example.local            1/1     Running   0          57m     172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            kube-apiserver-k8s-master02.example.local            1/1     Running   0          53m     172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            kube-apiserver-k8s-master03.example.local            1/1     Running   0          47m     172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            kube-controller-manager-k8s-master01.example.local   1/1     Running   1          57m     172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            kube-controller-manager-k8s-master02.example.local   1/1     Running   0          53m     172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            kube-controller-manager-k8s-master03.example.local   1/1     Running   0          47m     172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            kube-proxy-4hsmt                                     1/1     Running   0          6m21s   172.31.3.109      k8s-node02.example.local     <none>           <none>
kube-system            kube-proxy-5nshs                                     1/1     Running   0          6m19s   172.31.3.110      k8s-node03.example.local     <none>           <none>
kube-system            kube-proxy-d9v8g                                     1/1     Running   0          6m30s   172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            kube-proxy-gnqgd                                     1/1     Running   0          6m43s   172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            kube-proxy-snqwb                                     1/1     Running   0          6m33s   172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            kube-proxy-twpdb                                     1/1     Running   0          6m51s   172.31.3.108      k8s-node01.example.local     <none>           <none>
kube-system            kube-scheduler-k8s-master01.example.local            1/1     Running   1          57m     172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            kube-scheduler-k8s-master02.example.local            1/1     Running   0          53m     172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            kube-scheduler-k8s-master03.example.local            1/1     Running   0          47m     172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            metrics-server-9787b55bd-5gjbf                       1/1     Running   0          28m     192.167.195.129   k8s-node02.example.local     <none>           <none>
kubernetes-dashboard   dashboard-metrics-scraper-6cd7688f4b-ghl2w           1/1     Running   0          11m     192.169.111.130   k8s-node01.example.local     <none>           <none>
kubernetes-dashboard   kubernetes-dashboard-7bd4bf94d8-d9hq7                1/1     Running   0          11m     192.170.21.194    k8s-node03.example.local     <none>           <none>

[root@k8s-master01 ~]# kubectl get pod -n kubernetes-dashboard
NAME                                         READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-6cd7688f4b-ghl2w   1/1     Running   0          12m
kubernetes-dashboard-7bd4bf94d8-d9hq7        1/1     Running   0          12m

[root@k8s-master01 ~]# kubectl get svc -n kubernetes-dashboard
NAME                        TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.108.144.19    <none>        8000/TCP        12m
kubernetes-dashboard        NodePort    10.101.181.206   <none>        443:30005/TCP   12m

Create containers in k8s and test the internal network:

[root@k8s-master01 ~]# kubectl run net-test1 --image=alpine sleep 500000
pod/net-test1 created
[root@k8s-master01 ~]# kubectl run net-test2 --image=alpine sleep 500000
pod/net-test2 created

[root@k8s-master01 ~]# kubectl get pod  -o wide
NAME        READY   STATUS    RESTARTS   AGE     IP                NODE                       NOMINATED NODE   READINESS GATES
net-test1   1/1     Running   0          2m18s   192.167.195.130   k8s-node02.example.local   <none>           <none>
net-test2   1/1     Running   0          2m10s   192.170.21.195    k8s-node03.example.local   <none>           <none>

[root@k8s-master01 ~]# kubectl exec -it net-test1 sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # ping -c4 192.170.21.195
PING 192.170.21.195 (192.170.21.195): 56 data bytes
64 bytes from 192.170.21.195: seq=0 ttl=62 time=0.464 ms
64 bytes from 192.170.21.195: seq=1 ttl=62 time=0.455 ms
64 bytes from 192.170.21.195: seq=2 ttl=62 time=0.635 ms
64 bytes from 192.170.21.195: seq=3 ttl=62 time=0.512 ms

--- 192.170.21.195 ping statistics ---
4 packets transmitted, 4 packets received, 0% packet loss
round-trip min/avg/max = 0.455/0.516/0.635 ms
/ # exit
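
Cross-namespace service discovery can be checked from inside the test pod as well; a sketch, relying on the nslookup applet in alpine's busybox:

kubectl exec -it net-test1 -- nslookup kubernetes-dashboard.kubernetes-dashboard.svc.cluster.local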