14. Installing Kubernetes v1.20 with kubeadm -- Cluster Deployment (Part 3)

13. Dashboard Deployment

The Dashboard displays the various resources in the cluster; it can also be used to view Pod logs in real time and to execute commands inside containers.
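For reference, the same two operations are available directly from kubectl; these are the standard equivalents (angle brackets are placeholders):

kubectl logs -f <pod> -n <namespace>           # stream a Pod's logs
kubectl exec -it <pod> -n <namespace> -- sh    # open a shell inside a container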

13.1 Deploying the Dashboard

The recommended.yaml used here is the upstream Dashboard v2.0.4 manifest (https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.4/aio/deploy/recommended.yaml):

[root@k8s-master01 ~]# cat recommended.yaml 
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.4
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.4
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
            - mountPath: /tmp
              name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

Expose the Dashboard outside the cluster by changing the kubernetes-dashboard Service to type NodePort:

[root@k8s-master01 ~]# vim recommended.yaml 
...
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort # add this line
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30005 # add this line
  selector:
    k8s-app: kubernetes-dashboard
...
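Alternatively, once the Service exists, the same change can be applied without editing the manifest (a sketch using kubectl patch; adjust the nodePort if 30005 is already taken):

kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort","ports":[{"port":443,"targetPort":8443,"nodePort":30005}]}}'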

[root@k8s-master01 ~]# grep "image:" recommended.yaml 
          image: kubernetesui/dashboard:v2.0.4
          image: kubernetesui/metrics-scraper:v1.0.4

Download the images and push them to Harbor:

[root@k8s-master01 ~]# cat download_dashboard_images.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-11
#FileName:      download_dashboard_images.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \\033[01;31m"
END='\033[0m'

images=$(awk -F "/"  '/image:/{print $NF}' recommended.yaml)
HARBOR_DOMAIN=harbor.raymonds.cc

images_download(){
    ${COLOR}"开始下载Dashboard镜像"${END}
    for i in ${images};do 
        docker pull registry.aliyuncs.com/google_containers/$i
        docker tag registry.aliyuncs.com/google_containers/$i ${HARBOR_DOMAIN}/google_containers/$i
        docker rmi registry.aliyuncs.com/google_containers/$i
        docker push ${HARBOR_DOMAIN}/google_containers/$i
    done
    ${COLOR}"Dashboard镜像下载完成"${END}
}

images_download

[root@k8s-master01 ~]# bash download_dashboard_images.sh 
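The script pushes the retagged images to Harbor and leaves the local tags in place, so a quick sanity check (not part of the original transcript) is:

docker images | grep google_containers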

[root@k8s-master01 ~]# sed -ri 's@(.*image:) kubernetesui(/.*)@\1 harbor.raymonds.cc/google_containers\2@g' recommended.yaml 
[root@k8s-master01 ~]# grep "image:" recommended.yaml 
          image: harbor.raymonds.cc/google_containers/dashboard:v2.0.4
          image: harbor.raymonds.cc/google_containers/metrics-scraper:v1.0.4

[root@k8s-master01 ~]# kubectl  create -f recommended.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created

Create an administrator user via admin.yaml:

[root@k8s-master01 ~]# vim admin.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding 
metadata: 
  name: admin-user
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system

[root@k8s-master01 ~]# kubectl apply -f admin.yaml 
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created

13.2 Logging in to the Dashboard

Add the following launch parameters to the Google Chrome startup shortcut to work around the certificate error that otherwise blocks access to the Dashboard; see Figure 1-1:

--test-type --ignore-certificate-errors
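On Linux, for example, the flags can be passed directly on the command line (a sketch; the binary name and path vary by platform):

google-chrome --test-type --ignore-certificate-errors https://172.31.3.101:30005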

[root@k8s-master01 ~]# kubectl get svc kubernetes-dashboard -n kubernetes-dashboard
NAME                   TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   10.106.189.113   <none>        443:30005/TCP   18s

Access the Dashboard at https://172.31.3.101:30005; see Figure 1-2 (Dashboard login options).

13.2.1 Token login

Retrieve the token value:

[root@k8s-master01 ~]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Name:         admin-user-token-6bvhm
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: c6ae76a2-322c-483a-9db1-eaf102859165

Type:  kubernetes.io/service-account-token

Data
====
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IldlTXE5S29BbW1YYVdJNWljRnBGamVEX1E0YV9xRVU5UWM1Ykh0dGQ0UkkifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTZidmhtIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJjNmFlNzZhMi0zMjJjLTQ4M2EtOWRiMS1lYWYxMDI4NTkxNjUiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.Y_VSCPM9F3L00v5XBGcT6JwpnLoAfrKUw8ufOpOhH5AhuFcXctRwAWvZDzGPOH3mH0GaIiyi1G7GOlZRRnJJVy5C7I3VKnE4mZzMScBvKCEheU40Y6x28CvkZDTmuaaDgSWrm3cfjAvTEJIg45TrtaN25at79GB27_A1LJ3JQUHY59OpG6YUbnFWjW899bCUN99lmYTMGe9M5cjY2RCufuyEam296QEz6b23tyEHdMCcPDJJH6IEDf2I4XhA5e5GWqfdkX1qX5XZ21MRyXXXTSVYqeLvvdNvQS3MxLlNaB5my0WcruRihydkC_n1UamgzXBu-XWfM4QWwk3gzsQ9yg
ca.crt:     1066 bytes
namespace:  11 bytes
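If you only need the raw token string, e.g. for scripting, a jsonpath query is equivalent to the describe output above (a sketch):

kubectl -n kube-system get secret $(kubectl -n kube-system get secret | awk '/admin-user/{print $1}') -o jsonpath='{.data.token}' | base64 -d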

Paste the token value into the Token field and click Sign in to access the Dashboard; see Figure 1-3.

13.2.2 Logging in with a kubeconfig file

[root@k8s-master01 ~]# cp /etc/kubernetes/admin.conf kubeconfig

[root@k8s-master01 ~]# vim kubeconfig 
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1ERXhNVEV6TURJMU1Gb1hEVE15TURFd09URXpNREkxTUZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBT0JoCkQxV3h3Myt3bE9WNU02MEtPYjlEZmo1U09EREZBYjdMTGd3ZXgrK3d3eEVHcnFpaGUxVmVLZnlIMHJmTnEvakEKVHArekxyVXhRNHdzNEw3Z29Na2tJcDc3aXRqOHc1VWJXYUh0c3IwMkp1VVBQQzZiWktieG5hTmFXTldTNjRBegpORFhzeSszU3dxcTNyU3h4WkloTS9ubVZRTEZKL21OanU5MUNVWE03ak9jcXhaMUI2QitSbzhSdHFpRStZUlhFCm1JS1ZCeWhpUXhQWE53VEcwN0NKMnY5WnduNmlxK2VUMUdNbVFsZ0Z1M0pqQm9NUTFteWhYODM3QTNTdXVQNDkKYU1HKzd2YTh5TFFkMWltZEZjSVpDcmNHU2FMekR5SDFmMUQ3ZTM4Qm01MTd4S1ZZZkJQQkNBSjROb3VKQmVXSgpPN1lLK2RFb1liaURHWVBtdWxNQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZQVUlaLzVqSSs0WHQ3b1FROC9USU5RQ1gxbXNNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFBRnVkbFNQUTNTM0VXdUZ0YnhiekVsc2IyR2F2NUQzd1VDWStBdlZWRWhzcmZvYzlqKwp5REEwdjZSamEvS3VRWUpjMG9vVkN5cTkveHVyenZyOU9DS3ZwejBDZDJHWkYyeFFFcDZ6QlMvM3A5VUh5YnU3Cm9Kb0E2S0h4OTd0KzVzaWQyamQ4U29qUGNwSGdzZloySmxJckc3ckJpMktuSTZFSlprdWxjMlVIN09kY2RJWmwKTXpkMWFlVG5xdHlsVkZYSDN6ZkNCTTJyZ045d0RqSHphNjUyMkFRZVQ2ODN0ZTZXRWIxeWwvVEdVUld0RFhmKwpQbXV6b3g5eGpwSFJoVDZlcVYwelVHVGZJUlI3WmRIb3p2TzNRVlhtYmNUdDQxVFFsaDRIMHBkQ2p6dmZLTDA0CnNHMmRIaFRBL0wzUlc0RXlDY2NPQ0o2bWNiT1hyZzNOUnhxWQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    server: https://172.31.3.188:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURFekNDQWZ1Z0F3SUJBZ0lJUnQ3eHBrbVg3cjh3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TWpBeE1URXhNekF5TlRCYUZ3MHlNekF4TVRFeE16QXlOVEphTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQWxKeHBjcmlNTjh0aGg3ODEKT2FMckxmZmRtU3BRYUdSTmJQZjZUdTMwK1hCMEtUTXloR2EwRC83TWtaajZ5MjAzM0R5SEtpSUlhY2d3QXBnYQpjZE9TcHhwaitsd2pRSy9rN3M3QVJLcVExY2VueUtiaXp0RGMweCt2dGFXN0djcVlQSkpvU2dqWWxuZ0FWSmh4CnlWZDI3R3I2SEVWRFFMSVlra2tqWnFSTzI0U0ZoMDlUK2JCZlhSRGVZaHk1UW1qem5lc0VWbk1nUkdSVElnNTgKYjFBRHR1d1VTZ3BQNTFITTlKWHZtSTBqUytqSXBJNllYQUtodlpLbnhLRjh2d1lpZnhlZDV4ZjhNNVJHWnJEMQpGbFZ5NWQ5ZUNjV2dpQ0tNYVgvdzM4b2pTbE5OZGFwUzlzQXVObXNnbHlMT0MrWVh1TlBNRWZHbDdmeG5yUWl2ClV1dkFMUUlEQVFBQm8wZ3dSakFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0h3WURWUjBqQkJnd0ZvQVU5UWhuL21NajdoZTN1aEJEejlNZzFBSmZXYXd3RFFZSktvWklodmNOQVFFTApCUUFEZ2dFQkFGU0tBZm9kYi9xSVJmdWIrcXlLdkhEeDFuNEtoNEVWQ2M5ZlRPZG1NdHBHU2tUbCtMbmptc0pNClpncWdTYUtMY0xYWS9JWTVsd3N3OXRsbzBwSElyMUNxYXBYa3M5WDZiSjJzc0pFdGN5ODFocXJSd2pqYzQzdEoKZUp0QkhsNWpvV2tkV0ZCMXpsRVhyWEYwdmU0ckRueVdWL04zSTV3bzVUYXpRMTRZRmZ0c2RVYlYwNXdXa0F6cgo5YWtLd25pWWRVZTRjdlpwNkFMb01uQVJXa29La1h0elI1SElJUFhaTGlHWnEwWGpHMWdpODBvR01ZZXlWb1ZCCnRUMmt1MElJNmhIbzh3VXNJdWlDT3EyQjRMWFpobW9DQU5kcnFDc0FUaXRjTll0bGdkM1RtQUx4ZmpMMkN1cWUKL1lieXZORWhndnh4dFlwN2lJWE9jZks1RDF3VSthdz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBbEp4cGNyaU1OOHRoaDc4MU9hTHJMZmZkbVNwUWFHUk5iUGY2VHUzMCtYQjBLVE15CmhHYTBELzdNa1pqNnkyMDMzRHlIS2lJSWFjZ3dBcGdhY2RPU3B4cGorbHdqUUsvazdzN0FSS3FRMWNlbnlLYmkKenREYzB4K3Z0YVc3R2NxWVBKSm9TZ2pZbG5nQVZKaHh5VmQyN0dyNkhFVkRRTElZa2tralpxUk8yNFNGaDA5VAorYkJmWFJEZVloeTVRbWp6bmVzRVZuTWdSR1JUSWc1OGIxQUR0dXdVU2dwUDUxSE05Slh2bUkwalMraklwSTZZClhBS2h2WktueEtGOHZ3WWlmeGVkNXhmOE01UkdackQxRmxWeTVkOWVDY1dnaUNLTWFYL3czOG9qU2xOTmRhcFMKOXNBdU5tc2dseUxPQytZWHVOUE1FZkdsN2Z4bnJRaXZVdXZBTFFJREFRQUJBb0lCQURrV0tHK1lNc3pRQktRWApzRU4yc083VWt6eGVBOHRHRkhQeWdpWEZ4Ti80OGJaTjQyNzI0TjV3RzNjbWs5aUhHUGt5Q3g0Rk9zUWYwVWw5CjBsSzlXazEwbHNrNmtaUXN2VDE3RUdLUVB0alFQRVNZenZGeFRCS1J6blp4dG9DKzBXSWJQNUtJK1dJN3NLek8KYm85UVdPK1NYSWQxbDlNSFZ1Y0N6MldEWW9OeU85bmFobWdzSWpIRnRqVEo5NWQ2cWRmWDNHZXBSRHA0em5EaQprTVFJMWRBdTg1TE9HMVZyd2lMRUxPa2JVOW5hNGdJS1VIVmY5RW90SndXVzI2K2kxS1JNYVJJVmlkbDVqTm1aCnZwM3JVOUM3L253c01pVktMMTF2MW8wdGptc2gzbkxnTVNEcEJtUE5pTGcxR3AxK0FPYVBXVFNDVEJZTDdOOG8KNGJxcEw0VUNnWUVBeEVpSWhKMzNMS0FTTHBGY3NtZ2RKUDBZOWRwZzZmcHlvOXA4NlpuejYxZXpUVkhyZ0p1SQptc09tTXQ0eHRINGVJbHhRYklWSGNJWC9iZis0aCtkUFJ0Q1ExRUdUTWRaaW9qSkJCd2JhRS9xd0YwMjZpRkRnCm9TZFpiemhFbk5BWmV5NjI1Skp2QXdRdldIanRPRHRNdDQ0dWZmYndGRDErZEtQc3JobkQzWThDZ1lFQXdkTHUKdGJTWDZYUFovTndHaXl6TnBrWHZST0hzNU1TaGFiVW9ibmxMbWxsL3gwUS9WQVkxdmhhakFiQ2t2WUk0T3VrUgowZWl2Wmx1bVNrazFJTlB5VXBNQ1dHR1lVTGJlWURidXhnZDlZd3Z1SWZQRmpwWU1RR0FRcE1SangzTCtMMzlQClplRW9lRmF3ZzdIVTgrYWVWWU9jTk5aaHYvbHhadUM5MzRkSW9JTUNnWUVBb3ZiRndiV1ZYb3VZRE9uTFdLUncKYmlGazg5cFgxR3VIZXRzUUVyTXJmUjNYVkQ3TGxIK05yMUQ1VUFxQ29pU0R5R3QwcW1VTnB6TFptKzVRdXlVbApBTnB4SklrOU9JZVNaSy9zcFhUZTR1K2orL1VoQmNTQWU4dzd5TWVpejc5SEtLcmtWbW50bVVlRU42Uk83L3pyCitRb25ONVlxUmVPNGRnY1Rub2p0d2FrQ2dZQTZYeVVHMGdtQ0JDTGROUUkvZmRHOVJvaUZqU2pEeUxmMzF0Z0QKVlVKQWpMMmZyRjBLR0FpdFk3SFp1M0lScEpyOG10NkVBZmg0OGhjRmZrQ2l6MUhHTG9IaFRoc0tDOWl5enpoZgpxVGZJMFhuNC9hbzhnOUhTdlZ1bDA0TmRPTE4yYUhmbjdjUTdZWmd0UVN3cC9BVXBLY2FzWHZmM1VjOG1OWDdaClI2dkdzd0tCZ1FDd2VBcmptSVV1ejV5cXhkREszbElQd0VsQTlGU3lMTjROU0owNTVrQ2tkTUZMS0xpcUZ0Y2UKSXBrWWhIbXNRc28yRTZwTStHQ0dIMU81YWVRSFNSZWh2SkRZeGdEMVhYaHA5UjRNdHpjTmw2U3cwcTQ4MVNZZQplNVp5Zk9CcWVDbzdOQmZ0dS9ua0tZTDFCTUNMS1hOM0JYNkVpQ0JPUjlSUDJHeEh6S3FBa2c9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
    token: eyJhbGciOiJSUzI1NiIsImtpZCI6IldlTXE5S29BbW1YYVdJNWljRnBGamVEX1E0YV9xRVU5UWM1Ykh0dGQ0UkkifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTZidmhtIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJjNmFlNzZhMi0zMjJjLTQ4M2EtOWRiMS1lYWYxMDI4NTkxNjUiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.Y_VSCPM9F3L00v5XBGcT6JwpnLoAfrKUw8ufOpOhH5AhuFcXctRwAWvZDzGPOH3mH0GaIiyi1G7GOlZRRnJJVy5C7I3VKnE4mZzMScBvKCEheU40Y6x28CvkZDTmuaaDgSWrm3cfjAvTEJIg45TrtaN25at79GB27_A1LJ3JQUHY59OpG6YUbnFWjW899bCUN99lmYTMGe9M5cjY2RCufuyEam296QEz6b23tyEHdMCcPDJJH6IEDf2I4XhA5e5GWqfdkX1qX5XZ21MRyXXXTSVYqeLvvdNvQS3MxLlNaB5my0WcruRihydkC_n1UamgzXBu-XWfM4QWwk3gzsQ9yg
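The token: line above was appended by hand under the kubernetes-admin user entry; the same edit can be scripted (a sketch reusing the admin-user secret from 13.2.1; note the four-space indent):

TOKEN=$(kubectl -n kube-system get secret $(kubectl -n kube-system get secret | awk '/admin-user/{print $1}') -o jsonpath='{.data.token}' | base64 -d)
echo "    token: $TOKEN" >> kubeconfig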

On the login page, select Kubeconfig, choose this file, and click Sign in.

14. Required Configuration Changes

Switch kube-proxy to ipvs mode. Because the ipvs configuration was commented out when the cluster was initialized, it has to be changed by hand:

Run on the master01 node:

[root@k8s-master01 ~]# curl 127.0.0.1:10249/proxyMode
iptables

[root@k8s-master01 ~]# kubectl edit cm kube-proxy -n kube-system
    mode: "ipvs"

Roll the kube-proxy Pods to pick up the change:

[root@k8s-master01 ~]# kubectl patch daemonset kube-proxy -p "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"date\":\"`date +'%s'`\"}}}}}" -n kube-system
daemonset.apps/kube-proxy patched

Verify the kube-proxy mode:

[root@k8s-master01 ~]# curl 127.0.0.1:10249/proxyMode
ipvs

[root@k8s-master01 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.17.0.1:30005 rr
  -> 192.169.111.141:8443         Masq    1      0          0         
TCP  172.31.3.101:30005 rr
  -> 192.169.111.141:8443         Masq    1      0          0         
TCP  192.162.55.64:30005 rr
  -> 192.169.111.141:8443         Masq    1      0          0         
TCP  10.96.0.1:443 rr
  -> 172.31.3.101:6443            Masq    1      0          0         
  -> 172.31.3.102:6443            Masq    1      0          0         
  -> 172.31.3.103:6443            Masq    1      0          0         
TCP  10.96.0.10:53 rr
  -> 192.170.21.193:53            Masq    1      0          0         
  -> 192.170.21.194:53            Masq    1      0          0         
TCP  10.96.0.10:9153 rr
  -> 192.170.21.193:9153          Masq    1      0          0         
  -> 192.170.21.194:9153          Masq    1      0          0         
TCP  10.99.167.144:443 rr
  -> 192.167.195.132:4443         Masq    1      0          0         
TCP  10.101.88.7:8000 rr
  -> 192.169.111.140:8000         Masq    1      0          0         
TCP  10.106.189.113:443 rr
  -> 192.169.111.141:8443         Masq    1      0          0         
TCP  127.0.0.1:30005 rr
  -> 192.169.111.141:8443         Masq    1      0          0         
UDP  10.96.0.10:53 rr
  -> 192.170.21.193:53            Masq    1      0          0         
  -> 192.170.21.194:53            Masq    1      0          0

15. Notes

Note: in a kubeadm-installed cluster, the certificates are valid for one year by default. On the master nodes, kube-apiserver, kube-scheduler, kube-controller-manager, and etcd all run as containers; you can see them with kubectl get po -n kube-system.
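The remaining validity of all kubeadm-managed certificates can be checked at any time; in v1.20 this subcommand is no longer under alpha:

kubeadm certs check-expiration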

Unlike a binary installation:

The kubelet configuration files are /etc/sysconfig/kubelet and /var/lib/kubelet/config.yaml; after changing them, restart the kubelet process.

[root@k8s-master01 ~]# ls /etc/sysconfig/kubelet
/etc/sysconfig/kubelet
[root@k8s-master01 ~]# ls /var/lib/kubelet/config.yaml 
/var/lib/kubelet/config.yaml

The manifests for the other components live in the /etc/kubernetes/manifests directory, for example kube-apiserver.yaml. When one of these yaml files is modified, kubelet automatically reloads the configuration, i.e. it restarts the corresponding Pod. Do not create duplicate files in this directory.

[root@k8s-master01 ~]# ls /etc/kubernetes/manifests
etcd.yaml  kube-apiserver.yaml  kube-controller-manager.yaml  kube-scheduler.yaml
[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide
NAME                                                 READY   STATUS    RESTARTS   AGE   IP                NODE                         NOMINATED NODE   READINESS GATES
calico-kube-controllers-55bfb655fc-bgdlr             1/1     Running   1          16h   192.167.195.130   k8s-node02.example.local     <none>           <none>
calico-node-2bggs                                    1/1     Running   1          16h   172.31.3.102      k8s-master02.example.local   <none>           <none>
calico-node-2rgfb                                    1/1     Running   1          16h   172.31.3.101      k8s-master01.example.local   <none>           <none>
calico-node-449ws                                    1/1     Running   1          16h   172.31.3.110      k8s-node03.example.local     <none>           <none>
calico-node-4p9t5                                    1/1     Running   1          16h   172.31.3.103      k8s-master03.example.local   <none>           <none>
calico-node-bljzq                                    1/1     Running   1          16h   172.31.3.108      k8s-node01.example.local     <none>           <none>
calico-node-cbv29                                    1/1     Running   1          16h   172.31.3.109      k8s-node02.example.local     <none>           <none>
coredns-5ffd5c4586-rvsm4                             1/1     Running   1          18h   192.170.21.194    k8s-node03.example.local     <none>           <none>
coredns-5ffd5c4586-xzrwx                             1/1     Running   1          18h   192.170.21.193    k8s-node03.example.local     <none>           <none>
etcd-k8s-master01.example.local                      1/1     Running   1          18h   172.31.3.101      k8s-master01.example.local   <none>           <none>
etcd-k8s-master02.example.local                      1/1     Running   1          18h   172.31.3.102      k8s-master02.example.local   <none>           <none>
etcd-k8s-master03.example.local                      1/1     Running   1          18h   172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-apiserver-k8s-master01.example.local            1/1     Running   1          18h   172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-apiserver-k8s-master02.example.local            1/1     Running   1          18h   172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-apiserver-k8s-master03.example.local            1/1     Running   1          18h   172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-controller-manager-k8s-master01.example.local   1/1     Running   2          18h   172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-controller-manager-k8s-master02.example.local   1/1     Running   1          18h   172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-controller-manager-k8s-master03.example.local   1/1     Running   1          18h   172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-proxy-6k8vv                                     1/1     Running   0          88s   172.31.3.108      k8s-node01.example.local     <none>           <none>
kube-proxy-flt2l                                     1/1     Running   0          75s   172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-proxy-ftqqm                                     1/1     Running   0          42s   172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-proxy-m9h72                                     1/1     Running   0          96s   172.31.3.110      k8s-node03.example.local     <none>           <none>
kube-proxy-mjssk                                     1/1     Running   0          54s   172.31.3.109      k8s-node02.example.local     <none>           <none>
kube-proxy-zz2sl                                     1/1     Running   0          61s   172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-scheduler-k8s-master01.example.local            1/1     Running   2          18h   172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-scheduler-k8s-master02.example.local            1/1     Running   1          18h   172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-scheduler-k8s-master03.example.local            1/1     Running   1          18h   172.31.3.103      k8s-master03.example.local   <none>           <none>
metrics-server-5b7c76b46c-2tkz6                      1/1     Running   0          92m   192.167.195.132   k8s-node02.example.local     <none>           <none>

[root@k8s-master01 ~]# kubectl get pod -A -o  wide
NAMESPACE              NAME                                                 READY   STATUS    RESTARTS   AGE     IP                NODE                         NOMINATED NODE   READINESS GATES
kube-system            calico-kube-controllers-55bfb655fc-bgdlr             1/1     Running   1          16h     192.167.195.130   k8s-node02.example.local     <none>           <none>
kube-system            calico-node-2bggs                                    1/1     Running   1          16h     172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            calico-node-2rgfb                                    1/1     Running   1          16h     172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            calico-node-449ws                                    1/1     Running   1          16h     172.31.3.110      k8s-node03.example.local     <none>           <none>
kube-system            calico-node-4p9t5                                    1/1     Running   1          16h     172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            calico-node-bljzq                                    1/1     Running   1          16h     172.31.3.108      k8s-node01.example.local     <none>           <none>
kube-system            calico-node-cbv29                                    1/1     Running   1          16h     172.31.3.109      k8s-node02.example.local     <none>           <none>
kube-system            coredns-5ffd5c4586-rvsm4                             1/1     Running   1          18h     192.170.21.194    k8s-node03.example.local     <none>           <none>
kube-system            coredns-5ffd5c4586-xzrwx                             1/1     Running   1          18h     192.170.21.193    k8s-node03.example.local     <none>           <none>
kube-system            etcd-k8s-master01.example.local                      1/1     Running   1          18h     172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            etcd-k8s-master02.example.local                      1/1     Running   1          18h     172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            etcd-k8s-master03.example.local                      1/1     Running   1          18h     172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            kube-apiserver-k8s-master01.example.local            1/1     Running   1          18h     172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            kube-apiserver-k8s-master02.example.local            1/1     Running   1          18h     172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            kube-apiserver-k8s-master03.example.local            1/1     Running   1          18h     172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            kube-controller-manager-k8s-master01.example.local   1/1     Running   2          18h     172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            kube-controller-manager-k8s-master02.example.local   1/1     Running   1          18h     172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            kube-controller-manager-k8s-master03.example.local   1/1     Running   1          18h     172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            kube-proxy-6k8vv                                     1/1     Running   0          2m12s   172.31.3.108      k8s-node01.example.local     <none>           <none>
kube-system            kube-proxy-flt2l                                     1/1     Running   0          119s    172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            kube-proxy-ftqqm                                     1/1     Running   0          86s     172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            kube-proxy-m9h72                                     1/1     Running   0          2m20s   172.31.3.110      k8s-node03.example.local     <none>           <none>
kube-system            kube-proxy-mjssk                                     1/1     Running   0          98s     172.31.3.109      k8s-node02.example.local     <none>           <none>
kube-system            kube-proxy-zz2sl                                     1/1     Running   0          105s    172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            kube-scheduler-k8s-master01.example.local            1/1     Running   2          18h     172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            kube-scheduler-k8s-master02.example.local            1/1     Running   1          18h     172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            kube-scheduler-k8s-master03.example.local            1/1     Running   1          18h     172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            metrics-server-5b7c76b46c-2tkz6                      1/1     Running   0          93m     192.167.195.132   k8s-node02.example.local     <none>           <none>
kubernetes-dashboard   dashboard-metrics-scraper-575d79bd97-l25f4           1/1     Running   0          21m     192.169.111.140   k8s-node01.example.local     <none>           <none>
kubernetes-dashboard   kubernetes-dashboard-68965ddf9f-c5f7g                1/1     Running   0          21m     192.169.111.141   k8s-node01.example.local     <none>           <none>

After a kubeadm install, the master nodes do not allow Pods to be scheduled on them by default; this can be opened up as follows:

Check the taints:

[root@k8s-master01 ~]# kubectl  describe node -l node-role.kubernetes.io/master=  | grep Taints
Taints:             node-role.kubernetes.io/master:NoSchedule
Taints:             node-role.kubernetes.io/master:NoSchedule
Taints:             node-role.kubernetes.io/master:NoSchedule

Remove the taint:

[root@k8s-master01 ~]# kubectl  taint node  -l node-role.kubernetes.io/master node-role.kubernetes.io/master:NoSchedule-
node/k8s-master01 untainted
node/k8s-master02 untainted
node/k8s-master03 untainted

[root@k8s-master01 ~]# kubectl  describe node -l node-role.kubernetes.io/master=  | grep Taints
Taints:             <none>
Taints:             <none>
Taints:             <none>

The kube-proxy configuration lives in a ConfigMap in the kube-system namespace and can be changed with

kubectl edit cm kube-proxy -n kube-system

After the change, restart kube-proxy by patching the DaemonSet:

kubectl patch daemonset kube-proxy -p "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"date\":\"`date +'%s'`\"}}}}}" -n kube-system

16. Cluster Verification

#List Pods in all namespaces
[root@k8s-master01 ~]# kubectl get pod --all-namespaces
NAMESPACE              NAME                                                 READY   STATUS    RESTARTS   AGE
kube-system            calico-kube-controllers-6fdd497b59-xtldf             1/1     Running   1          3d22h
kube-system            calico-node-4dbgm                                    1/1     Running   1          3d22h
kube-system            calico-node-4vqrz                                    1/1     Running   1          3d22h
kube-system            calico-node-6fgtr                                    1/1     Running   2          3d22h
kube-system            calico-node-c5g75                                    1/1     Running   1          3d22h
kube-system            calico-node-vnqgf                                    1/1     Running   1          3d22h
kube-system            calico-node-x8hcl                                    1/1     Running   1          3d22h
kube-system            coredns-5ffd5c4586-nwqgd                             1/1     Running   1          3d22h
kube-system            coredns-5ffd5c4586-z8rs8                             1/1     Running   1          3d22h
kube-system            etcd-k8s-master01.example.local                      1/1     Running   1          3d22h
kube-system            etcd-k8s-master02.example.local                      1/1     Running   1          3d22h
kube-system            etcd-k8s-master03.example.local                      1/1     Running   1          3d22h
kube-system            kube-apiserver-k8s-master01.example.local            1/1     Running   1          3d22h
kube-system            kube-apiserver-k8s-master02.example.local            1/1     Running   1          3d22h
kube-system            kube-apiserver-k8s-master03.example.local            1/1     Running   1          3d22h
kube-system            kube-controller-manager-k8s-master01.example.local   1/1     Running   2          3d22h
kube-system            kube-controller-manager-k8s-master02.example.local   1/1     Running   1          3d22h
kube-system            kube-controller-manager-k8s-master03.example.local   1/1     Running   1          3d22h
kube-system            kube-proxy-7xnlg                                     1/1     Running   1          3d22h
kube-system            kube-proxy-pq2jk                                     1/1     Running   1          3d22h
kube-system            kube-proxy-sbsfn                                     1/1     Running   1          3d22h
kube-system            kube-proxy-vkqc8                                     1/1     Running   1          3d22h
kube-system            kube-proxy-xp24c                                     1/1     Running   1          3d22h
kube-system            kube-proxy-zk5n4                                     1/1     Running   1          3d22h
kube-system            kube-scheduler-k8s-master01.example.local            1/1     Running   2          3d22h
kube-system            kube-scheduler-k8s-master02.example.local            1/1     Running   1          3d22h
kube-system            kube-scheduler-k8s-master03.example.local            1/1     Running   1          3d22h
kube-system            metrics-server-dd9ddfbb-blwb8                        1/1     Running   2          3d22h
kubernetes-dashboard   dashboard-metrics-scraper-6d5db67fb7-fscb9           1/1     Running   1          3d22h
kubernetes-dashboard   kubernetes-dashboard-6b5967f475-g9qmj                1/1     Running   2          3d22h

#Show CPU and memory usage for all Pods (requires metrics-server)
[root@k8s-master01 ~]# kubectl top pod -n kube-system
NAME                                                 CPU(cores)   MEMORY(bytes)   
calico-kube-controllers-6fdd497b59-xtldf             2m           18Mi            
calico-node-4dbgm                                    18m          105Mi           
calico-node-4vqrz                                    22m          102Mi           
calico-node-6fgtr                                    18m          66Mi            
calico-node-c5g75                                    18m          103Mi           
calico-node-vnqgf                                    16m          110Mi           
calico-node-x8hcl                                    20m          105Mi           
coredns-5ffd5c4586-nwqgd                             2m           13Mi            
coredns-5ffd5c4586-z8rs8                             2m           13Mi            
etcd-k8s-master01.example.local                      25m          78Mi            
etcd-k8s-master02.example.local                      31m          80Mi            
etcd-k8s-master03.example.local                      24m          78Mi            
kube-apiserver-k8s-master01.example.local            32m          229Mi           
kube-apiserver-k8s-master02.example.local            33m          239Mi           
kube-apiserver-k8s-master03.example.local            37m          242Mi           
kube-controller-manager-k8s-master01.example.local   1m           21Mi            
kube-controller-manager-k8s-master02.example.local   13m          54Mi            
kube-controller-manager-k8s-master03.example.local   1m           22Mi            
kube-proxy-7xnlg                                     1m           24Mi            
kube-proxy-pq2jk                                     1m           21Mi            
kube-proxy-sbsfn                                     4m           23Mi            
kube-proxy-vkqc8                                     1m           24Mi            
kube-proxy-xp24c                                     1m           24Mi            
kube-proxy-zk5n4                                     1m           22Mi            
kube-scheduler-k8s-master01.example.local            2m           20Mi            
kube-scheduler-k8s-master02.example.local            2m           18Mi            
kube-scheduler-k8s-master03.example.local            2m           21Mi            
metrics-server-dd9ddfbb-blwb8                        3m           18Mi 

#List Services
[root@k8s-master01 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   3d22h
[root@k8s-master01 ~]# kubectl get svc -n kube-system
NAME             TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)                  AGE
kube-dns         ClusterIP   10.96.0.10    <none>        53/UDP,53/TCP,9153/TCP   3d22h
metrics-server   ClusterIP   10.104.2.18   <none>        443/TCP                  3d22h

[root@k8s-master01 ~]# telnet 10.96.0.1 443
Trying 10.96.0.1...
Connected to 10.96.0.1.
Escape character is '^]'.
quit
Connection closed by foreign host.

[root@k8s-master01 ~]# telnet 10.96.0.10 53
Trying 10.96.0.10...
Connected to 10.96.0.10.
Escape character is '^]'.
Connection closed by foreign host.
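A port answering only shows the Service is reachable; to confirm CoreDNS actually resolves cluster names, query it directly (a sketch using dig from the bind-utils package; nslookup works as well):

dig @10.96.0.10 kubernetes.default.svc.cluster.local +short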

[root@k8s-master01 ~]# kubectl get pod --all-namespaces -o wide
NAMESPACE              NAME                                                 READY   STATUS    RESTARTS   AGE     IP                NODE                         NOMINATED NODE   READINESS GATES
kube-system            calico-kube-controllers-6fdd497b59-xtldf             1/1     Running   1          3d22h   172.31.3.110      k8s-node03.example.local     <none>           <none>
kube-system            calico-node-4dbgm                                    1/1     Running   1          3d22h   172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            calico-node-4vqrz                                    1/1     Running   1          3d22h   172.31.3.110      k8s-node03.example.local     <none>           <none>
kube-system            calico-node-6fgtr                                    1/1     Running   2          3d22h   172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            calico-node-c5g75                                    1/1     Running   1          3d22h   172.31.3.108      k8s-node01.example.local     <none>           <none>
kube-system            calico-node-vnqgf                                    1/1     Running   1          3d22h   172.31.3.109      k8s-node02.example.local     <none>           <none>
kube-system            calico-node-x8hcl                                    1/1     Running   1          3d22h   172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            coredns-5ffd5c4586-nwqgd                             1/1     Running   1          3d22h   192.169.111.131   k8s-node01.example.local     <none>           <none>
kube-system            coredns-5ffd5c4586-z8rs8                             1/1     Running   1          3d22h   192.167.195.132   k8s-node02.example.local     <none>           <none>
kube-system            etcd-k8s-master01.example.local                      1/1     Running   1          3d22h   172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            etcd-k8s-master02.example.local                      1/1     Running   1          3d22h   172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            etcd-k8s-master03.example.local                      1/1     Running   1          3d22h   172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            kube-apiserver-k8s-master01.example.local            1/1     Running   1          3d22h   172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            kube-apiserver-k8s-master02.example.local            1/1     Running   1          3d22h   172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            kube-apiserver-k8s-master03.example.local            1/1     Running   1          3d22h   172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            kube-controller-manager-k8s-master01.example.local   1/1     Running   2          3d22h   172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            kube-controller-manager-k8s-master02.example.local   1/1     Running   1          3d22h   172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            kube-controller-manager-k8s-master03.example.local   1/1     Running   1          3d22h   172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            kube-proxy-7xnlg                                     1/1     Running   1          3d22h   172.31.3.109      k8s-node02.example.local     <none>           <none>
kube-system            kube-proxy-pq2jk                                     1/1     Running   1          3d22h   172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            kube-proxy-sbsfn                                     1/1     Running   1          3d22h   172.31.3.108      k8s-node01.example.local     <none>           <none>
kube-system            kube-proxy-vkqc8                                     1/1     Running   1          3d22h   172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            kube-proxy-xp24c                                     1/1     Running   1          3d22h   172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            kube-proxy-zk5n4                                     1/1     Running   1          3d22h   172.31.3.110      k8s-node03.example.local     <none>           <none>
kube-system            kube-scheduler-k8s-master01.example.local            1/1     Running   2          3d22h   172.31.3.101      k8s-master01.example.local   <none>           <none>
kube-system            kube-scheduler-k8s-master02.example.local            1/1     Running   1          3d22h   172.31.3.102      k8s-master02.example.local   <none>           <none>
kube-system            kube-scheduler-k8s-master03.example.local            1/1     Running   1          3d22h   172.31.3.103      k8s-master03.example.local   <none>           <none>
kube-system            metrics-server-dd9ddfbb-blwb8                        1/1     Running   2          3d22h   192.170.21.194    k8s-node03.example.local     <none>           <none>
kubernetes-dashboard   dashboard-metrics-scraper-6d5db67fb7-fscb9           1/1     Running   1          3d22h   192.167.195.131   k8s-node02.example.local     <none>           <none>
kubernetes-dashboard   kubernetes-dashboard-6b5967f475-g9qmj                1/1     Running   2          3d22h   192.169.111.132   k8s-node01.example.local     <none>           <none>

[root@k8s-master01 ~]# kubectl get pod -n kubernetes-dashboard
NAME                                         READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-6d5db67fb7-fscb9   1/1     Running   1          3d22h
kubernetes-dashboard-6b5967f475-g9qmj        1/1     Running   2          3d22h

[root@k8s-master01 ~]# kubectl get svc -n kubernetes-dashboard
NAME                        TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.99.15.144     <none>        8000/TCP        3d22h
kubernetes-dashboard        NodePort    10.106.152.125   <none>        443:30005/TCP   3d22h

Create test containers in the cluster and verify the internal network:

[root@k8s-master01 ~]# kubectl run net-test1 --image=alpine sleep 500000
pod/net-test1 created
[root@k8s-master01 ~]# kubectl run net-test2 --image=alpine sleep 500000
pod/net-test2 created

[root@k8s-master01 ~]# kubectl get pod  -o wide
NAME        READY   STATUS    RESTARTS   AGE   IP                NODE                       NOMINATED NODE   READINESS GATES
net-test1   1/1     Running   0          30s   192.169.111.142   k8s-node01.example.local   <none>           <none>
net-test2   1/1     Running   0          25s   192.167.195.133   k8s-node02.example.local   <none>           <none>

[root@k8s-master01 ~]# kubectl exec -it net-test1 sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # ping -c4 192.167.195.133
PING 192.167.195.133 (192.167.195.133): 56 data bytes
64 bytes from 192.167.195.133: seq=0 ttl=62 time=0.491 ms
64 bytes from 192.167.195.133: seq=1 ttl=62 time=0.677 ms
64 bytes from 192.167.195.133: seq=2 ttl=62 time=0.408 ms
64 bytes from 192.167.195.133: seq=3 ttl=62 time=0.443 ms

--- 192.167.195.133 ping statistics ---
4 packets transmitted, 4 packets received, 0% packet loss
round-trip min/avg/max = 0.408/0.504/0.677 ms
/ # exit
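As a final check, confirm that Service DNS also works from inside a Pod (alpine's BusyBox provides nslookup):

kubectl exec -it net-test1 -- nslookup kubernetes.default.svc.cluster.local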