3.14 Initializing the Highly Available Master from a Configuration File
Create the kubeadm-config.yaml configuration file on the Master01 node as follows:
Master01: (Note: if this is not a highly available cluster, change kubeapi.raymonds.cc:6443 to Master01's own address, and make sure kubernetesVersion matches the kubeadm version installed on your server, which you can check with: kubeadm version)
Note
In the file below, the host network CIDR, the podSubnet CIDR, and the serviceSubnet CIDR must not overlap.
[root@k8s-master01 ~]# kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"25", GitVersion:"v1.25.0", GitCommit:"a866cbe2e5bbaa01cfd5e969aa3e033f3282a8a2", GitTreeState:"clean", BuildDate:"2022-08-23T17:43:25Z", GoVersion:"go1.19", Compiler:"gc", Platform:"linux/amd64"}
#Dump the default configuration to a file
[root@k8s-master01 ~]# kubeadm config print init-defaults > kubeadm-config.yaml
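#Optionally, the kubelet and kube-proxy component configurations can be included in the dump as well; --component-configs is a standard kubeadm flag, and the output filename here is just an example
[root@k8s-master01 ~]# kubeadm config print init-defaults --component-configs KubeProxyConfiguration,KubeletConfiguration > kubeadm-config-full.yaml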
#The modified configuration file
[root@k8s-master01 ~]# cat kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 172.31.3.101 #master01's IP address
bindPort: 6443
nodeRegistration:
criSocket: unix:///var/run/containerd/containerd.sock #container runtime
imagePullPolicy: IfNotPresent
name: k8s-master01.example.local #master01's hostname
taints:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiServer:
certSANs:
- kubeapi.raymonds.cc #VIP address
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: kubeapi.raymonds.cc:6443 #backend address proxied by haproxy
controllerManager: {}
dns: {}
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: harbor.raymonds.cc/google_containers #Harbor registry address; if you do not have Harbor, change this to "registry.aliyuncs.com/google_containers"
kind: ClusterConfiguration
kubernetesVersion: v1.25.0 #set the version number
networking:
dnsDomain: cluster.local #DNS domain
podSubnet: 192.168.0.0/12 #Pod CIDR
serviceSubnet: 10.96.0.0/12 #Service CIDR
scheduler: {}
Migrate the kubeadm configuration file to the current schema:
[root@k8s-master01 ~]# kubeadm config migrate --old-config kubeadm-config.yaml --new-config new.yaml
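#Optionally, a plain diff shows exactly what the migration changed
[root@k8s-master01 ~]# diff kubeadm-config.yaml new.yaml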
[root@k8s-master01 ~]# cat new.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 172.31.3.101
bindPort: 6443
nodeRegistration:
criSocket: unix:///var/run/containerd/containerd.sock
imagePullPolicy: IfNotPresent
name: k8s-master01.example.local
taints:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiServer:
certSANs:
- kubeapi.raymonds.cc
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: kubeapi.raymonds.cc:6443
controllerManager: {}
dns: {}
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: harbor.raymonds.cc/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.25.0
networking:
dnsDomain: cluster.local
podSubnet: 192.168.0.0/12
serviceSubnet: 10.96.0.0/12
scheduler: {}
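Before the real initialization, the merged configuration can optionally be sanity-checked with a dry run; --dry-run is a standard kubeadm flag and leaves the host untouched:
[root@k8s-master01 ~]# kubeadm init --config /root/new.yaml --dry-run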
Initialize the Master01 node. Initialization generates the certificates and configuration files under the /etc/kubernetes directory; afterwards the other Master nodes just need to join Master01:
#If the cluster has been initialized before, reset it with the commands below and then initialize again
#Run on both the masters and the nodes
kubeadm reset -f
rm -rf /etc/cni/net.d/
rm -rf $HOME/.kube/config
reboot
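#kubeadm reset does not flush iptables or IPVS rules; if a previous initialization left rules behind, they can be cleared manually (optional; requires the ipvsadm tool)
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
ipvsadm --clear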
[root@k8s-master01 ~]# kubeadm init --config /root/new.yaml --upload-certs
...
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join kubeapi.raymonds.cc:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:1563ff1330b12d780b7215d7f2909b0d01de2b17353743b700489f5434cee3b7 \
--control-plane --certificate-key 06df38a4dfeb8abcb8839a4621e442dee61edcfa47480494ee19bc11039b2857
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join kubeapi.raymonds.cc:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:1563ff1330b12d780b7215d7f2909b0d01de2b17353743b700489f5434cee3b7
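Note that the bootstrap token above has a 24-hour TTL and, as the output says, the uploaded certificates are deleted after two hours. If either has expired by the time you join a node, fresh values can be generated with standard kubeadm commands:
[root@k8s-master01 ~]# kubeadm token create --print-join-command
[root@k8s-master01 ~]# kubeadm init phase upload-certs --upload-certs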
Generate the kubeconfig file that authorizes the kubectl command (repeat of step 4.10):
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01.example.local NotReady control-plane 36s v1.25.0
Highly available masters, see 4.12:
#Add master02 and master03
kubeadm join kubeapi.raymonds.cc:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:1563ff1330b12d780b7215d7f2909b0d01de2b17353743b700489f5434cee3b7 \
--control-plane --certificate-key 06df38a4dfeb8abcb8839a4621e442dee61edcfa47480494ee19bc11039b2857
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01.example.local NotReady control-plane 2m1s v1.25.0
k8s-master02.example.local NotReady control-plane 36s v1.25.0
k8s-master03.example.local NotReady control-plane 5s v1.25.0
Highly available nodes, see 4.13 (all nodes will report NotReady until the network plugin is deployed in section 3.15):
kubeadm join kubeapi.raymonds.cc:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:1563ff1330b12d780b7215d7f2909b0d01de2b17353743b700489f5434cee3b7
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01.example.local NotReady control-plane 3m31s v1.25.0
k8s-master02.example.local NotReady control-plane 2m6s v1.25.0
k8s-master03.example.local NotReady control-plane 95s v1.25.0
k8s-node01.example.local NotReady <none> 45s v1.25.0
k8s-node02.example.local NotReady <none> 26s v1.25.0
k8s-node03.example.local NotReady <none> 9s v1.25.0
3.15 Deploying the Calico Network Component
docs.projectcalico.org/maintenance…
Calico installation: docs.projectcalico.org/getting-sta…
[root@k8s-master01 ~]# curl https://docs.projectcalico.org/manifests/calico.yaml -O
[root@k8s-master01 ~]# POD_SUBNET=`cat /etc/kubernetes/manifests/kube-controller-manager.yaml | grep cluster-cidr= | awk -F= '{print $NF}'`
[root@k8s-master01 ~]# echo $POD_SUBNET
192.168.0.0/12
[root@k8s-master01 ~]# grep -E "(.*CALICO_IPV4POOL_CIDR.*|.*192.168.0.0.*)" calico.yaml
# - name: CALICO_IPV4POOL_CIDR
# value: "192.168.0.0/16"
[root@k8s-master01 ~]# sed -i 's@# - name: CALICO_IPV4POOL_CIDR@- name: CALICO_IPV4POOL_CIDR@g; s@# value: "192.168.0.0/16"@ value: '"${POD_SUBNET}"'@g' calico.yaml
[root@k8s-master01 ~]# grep -E "(.*CALICO_IPV4POOL_CIDR.*|.*192.168.0.0.*)" calico.yaml
- name: CALICO_IPV4POOL_CIDR
value: 192.168.0.0/12
[root@k8s-master01 ~]# grep "image:" calico.yaml
image: docker.io/calico/cni:v3.24.1
image: docker.io/calico/cni:v3.24.1
image: docker.io/calico/node:v3.24.1
image: docker.io/calico/node:v3.24.1
image: docker.io/calico/kube-controllers:v3.24.1
Download the Calico images and push them to Harbor:
#Note: skip the script below if you do not have Harbor
[root@k8s-master01 ~]# cat download_calico_images.sh
#!/bin/bash
#
#**********************************************************************************************
#Author: Raymond
#QQ: 88563128
#Date: 2022-01-11
#FileName: download_calico_images.sh
#URL: raymond.blog.csdn.net
#Description: The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \033[01;31m"
END='\033[0m'
images=$(awk -F "/" '/image:/{print $NF}' calico.yaml |uniq)
HARBOR_DOMAIN=harbor.raymonds.cc
images_download(){
${COLOR}"开始下载Calico镜像"${END}
for i in ${images};do
nerdctl pull registry.cn-beijing.aliyuncs.com/raymond9/$i
nerdctl tag registry.cn-beijing.aliyuncs.com/raymond9/$i ${HARBOR_DOMAIN}/google_containers/$i
nerdctl rmi registry.cn-beijing.aliyuncs.com/raymond9/$i
nerdctl push ${HARBOR_DOMAIN}/google_containers/$i
done
${COLOR}"Calico镜像下载完成"${END}
}
images_download
[root@k8s-master01 ~]# bash download_calico_images.sh
root@k8s-master01:~# nerdctl images | grep 3.24.1
harbor.raymonds.cc/google_containers/cni v3.24.1 21df750b80ba About a minute ago linux/amd64 188.4 MiB 83.3 MiB
harbor.raymonds.cc/google_containers/kube-controllers v3.24.1 b65317537174 About a minute ago linux/amd64 68.1 MiB 29.7 MiB
harbor.raymonds.cc/google_containers/node v3.24.1 135054e0bc90 About a minute ago linux/amd64 221.5 MiB 76.5 MiB
[root@k8s-master01 ~]# sed -ri 's@(.*image:) docker.io/calico(/.*)@\1 harbor.raymonds.cc/google_containers\2@g' calico.yaml
[root@k8s-master01 ~]# grep "image:" calico.yaml
image: harbor.raymonds.cc/google_containers/cni:v3.24.1
image: harbor.raymonds.cc/google_containers/cni:v3.24.1
image: harbor.raymonds.cc/google_containers/node:v3.24.1
image: harbor.raymonds.cc/google_containers/node:v3.24.1
image: harbor.raymonds.cc/google_containers/kube-controllers:v3.24.1
#Note: if you do not have Harbor, run the command below instead
[root@k8s-master01 ~]# sed -ri 's@(.*image:) docker.io/calico(/.*)@\1 registry.cn-beijing.aliyuncs.com/raymond9\2@g' calico.yaml
[root@k8s-master01 ~]# kubectl apply -f calico.yaml
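#Optionally wait for the calico-node DaemonSet rollout to finish before checking the Pods
[root@k8s-master01 ~]# kubectl -n kube-system rollout status daemonset calico-node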
#Check the Pod status
[root@k8s-master01 ~]# kubectl get pod -n kube-system |grep calico
calico-kube-controllers-5477499cbc-txrnf 1/1 Running 0 36s
calico-node-7kbrp 1/1 Running 0 36s
calico-node-7z76n 1/1 Running 0 36s
calico-node-hr5mj 1/1 Running 0 36s
calico-node-hsldl 1/1 Running 0 36s
calico-node-ntb4c 1/1 Running 0 36s
calico-node-wd78c 1/1 Running 0 36s
#Check the cluster status
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01.example.local Ready control-plane 10m v1.25.0
k8s-master02.example.local Ready control-plane 9m19s v1.25.0
k8s-master03.example.local Ready control-plane 8m48s v1.25.0
k8s-node01.example.local Ready <none> 7m58s v1.25.0
k8s-node02.example.local Ready <none> 7m39s v1.25.0
k8s-node03.example.local Ready <none> 7m22s v1.25.0
Test application orchestration and service access, see 4.15:
[root@k8s-master01 ~]# kubectl create deployment demoapp --image=registry.cn-hangzhou.aliyuncs.com/raymond9/demoapp:v1.0 --replicas=3
deployment.apps/demoapp created
[root@k8s-master01 ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
demoapp-c4787f9fc-926r7 1/1 Running 0 8s 192.169.111.132 k8s-node01.example.local <none> <none>
demoapp-c4787f9fc-9qghz 1/1 Running 0 8s 192.170.21.193 k8s-node03.example.local <none> <none>
demoapp-c4787f9fc-xzz2z 1/1 Running 0 8s 192.167.195.129 k8s-node02.example.local <none> <none>
[root@k8s-master01 ~]# curl 192.169.111.132
raymond demoapp v1.0 !! ClientIP: 192.162.55.64, ServerName: demoapp-c4787f9fc-926r7, ServerIP: 192.169.111.132!
[root@k8s-master01 ~]# curl 192.170.21.193
raymond demoapp v1.0 !! ClientIP: 192.162.55.64, ServerName: demoapp-c4787f9fc-9qghz, ServerIP: 192.170.21.193!
[root@k8s-master01 ~]# curl 192.167.195.129
raymond demoapp v1.0 !! ClientIP: 192.162.55.64, ServerName: demoapp-c4787f9fc-xzz2z, ServerIP: 192.167.195.129!
#Use the following commands to find the NodePort assigned to the demoapp Service; in the PORT(S) column the format is <service port>:<NodePort> (the NodePort is allocated from the default 30000-32767 range), which allows access from outside the cluster
[root@k8s-master01 ~]# kubectl create service nodeport demoapp --tcp=80:80
service/demoapp created
[root@k8s-master01 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
demoapp NodePort 10.100.33.92 <none> 80:32184/TCP 6s
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 11m
[root@k8s-master01 ~]# curl 10.100.33.92
raymond demoapp v1.0 !! ClientIP: 192.162.55.64, ServerName: demoapp-c4787f9fc-926r7, ServerIP: 192.169.111.132!
[root@k8s-master01 ~]# curl 10.100.33.92
raymond demoapp v1.0 !! ClientIP: 192.162.55.64, ServerName: demoapp-c4787f9fc-9qghz, ServerIP: 192.170.21.193!
[root@k8s-master01 ~]# curl 10.100.33.92
raymond demoapp v1.0 !! ClientIP: 192.162.55.64, ServerName: demoapp-c4787f9fc-xzz2z, ServerIP: 192.167.195.129!
#From outside the cluster, the application in demoapp can be accessed at "http://NodeIP:32184", for example by browsing to "http://<kubernetes-node>:32184".
[root@rocky8 ~]# curl http://172.31.3.101:32184
raymond demoapp v1.0 !! ClientIP: 192.162.55.64, ServerName: demoapp-c4787f9fc-9qghz, ServerIP: 192.170.21.193!
[root@rocky8 ~]# curl http://172.31.3.101:32184
raymond demoapp v1.0 !! ClientIP: 192.162.55.64, ServerName: demoapp-c4787f9fc-xzz2z, ServerIP: 192.167.195.129!
[root@rocky8 ~]# curl http://172.31.3.101:32184
raymond demoapp v1.0 !! ClientIP: 192.162.55.64, ServerName: demoapp-c4787f9fc-926r7, ServerIP: 192.169.111.132!
#Scale up
[root@k8s-master01 ~]# kubectl scale deployment demoapp --replicas 5
deployment.apps/demoapp scaled
[root@k8s-master01 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
demoapp-c4787f9fc-926r7 1/1 Running 0 3m3s
demoapp-c4787f9fc-9qghz 1/1 Running 0 3m3s
demoapp-c4787f9fc-g27kz 1/1 Running 0 6s
demoapp-c4787f9fc-xzz2z 1/1 Running 0 3m3s
demoapp-c4787f9fc-zwlnn 1/1 Running 0 6s
#Scale down
[root@k8s-master01 ~]# kubectl scale deployment demoapp --replicas 2
deployment.apps/demoapp scaled
#The Pods can be seen terminating
[root@k8s-master01 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
demoapp-c4787f9fc-926r7 1/1 Running 0 3m20s
demoapp-c4787f9fc-9qghz 1/1 Terminating 0 3m20s
demoapp-c4787f9fc-g27kz 1/1 Terminating 0 23s
demoapp-c4787f9fc-xzz2z 1/1 Running 0 3m20s
demoapp-c4787f9fc-zwlnn 1/1 Terminating 0 23s
#Check again; the scale-down has completed
[root@k8s-master01 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
demoapp-c4787f9fc-926r7 1/1 Running 0 3m53s
demoapp-c4787f9fc-xzz2z 1/1 Running 0 3m53s
3.16 Deploying Metrics Server
In recent Kubernetes versions, system resource collection is handled by metrics-server, which reports the CPU and memory usage of nodes and Pods.
[root@k8s-master01 ~]# wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
Copy front-proxy-ca.crt from the Master01 node to all Node nodes:
[root@k8s-master01 ~]# for i in k8s-node01 k8s-node02 k8s-node03;do scp -o StrictHostKeyChecking=no /etc/kubernetes/pki/front-proxy-ca.crt $i:/etc/kubernetes/pki/front-proxy-ca.crt ; done
Modify the following content:
[root@k8s-master01 ~]# vim components.yaml
...
spec:
containers:
- args:
- --cert-dir=/tmp
- --secure-port=4443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
#Add the lines below
- --kubelet-insecure-tls
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt #for kubeadm the certificate file is front-proxy-ca.crt
- --requestheader-username-headers=X-Remote-User
- --requestheader-group-headers=X-Remote-Group
- --requestheader-extra-headers-prefix=X-Remote-Extra-
...
volumeMounts:
- mountPath: /tmp
name: tmp-dir
#Add the lines below
- name: ca-ssl
mountPath: /etc/kubernetes/pki
...
volumes:
- emptyDir: {}
name: tmp-dir
#Add the lines below
- name: ca-ssl
hostPath:
path: /etc/kubernetes/pki
...
Download the image and change the image address:
#Note: skip the script below if you do not have Harbor
[root@k8s-master01 ~]# grep "image:" components.yaml
image: k8s.gcr.io/metrics-server/metrics-server:v0.6.1
[root@k8s-master01 ~]# cat download_metrics_images.sh
#!/bin/bash
#
#**********************************************************************************************
#Author: Raymond
#QQ: 88563128
#Date: 2022-01-11
#FileName: download_metrics_images.sh
#URL: raymond.blog.csdn.net
#Description: The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \033[01;31m"
END='\033[0m'
images=$(awk -F "/" '/image:/{print $NF}' components.yaml)
HARBOR_DOMAIN=harbor.raymonds.cc
images_download(){
${COLOR}"开始下载Metrics镜像"${END}
for i in ${images};do
nerdctl pull registry.aliyuncs.com/google_containers/$i
nerdctl tag registry.aliyuncs.com/google_containers/$i ${HARBOR_DOMAIN}/google_containers/$i
nerdctl rmi registry.aliyuncs.com/google_containers/$i
nerdctl push ${HARBOR_DOMAIN}/google_containers/$i
done
${COLOR}"Metrics镜像下载完成"${END}
}
images_download
[root@k8s-master01 ~]# bash download_metrics_images.sh
root@k8s-master01:~# nerdctl images |grep metrics
harbor.raymonds.cc/google_containers/metrics-server v0.6.1 5ddc6458eb95 23 seconds ago linux/amd64 69.3 MiB 26.8 MiB
[root@k8s-master01 ~]# sed -ri 's@(.*image:) k8s.gcr.io/metrics-server(/.*)@\1 harbor.raymonds.cc/google_containers\2@g' components.yaml
[root@k8s-master01 ~]# grep "image:" components.yaml
image: harbor.raymonds.cc/google_containers/metrics-server:v0.6.1
#Note: if you do not have Harbor, run the command below instead
[root@k8s-master01 ~]# sed -ri 's@(.*image:) k8s.gcr.io/metrics-server(/.*)@\1 registry.aliyuncs.com/google_containers\2@g' components.yaml
[root@k8s-master01 ~]# kubectl apply -f components.yaml
Check the status:
[root@k8s-master01 ~]# kubectl get pod -n kube-system |grep metrics
metrics-server-6dcf48c9dc-mxkw7 1/1 Running 0 32s
[root@k8s-master01 ~]# kubectl top node
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
k8s-master01.example.local 252m 12% 1314Mi 34%
k8s-master02.example.local 220m 11% 986Mi 25%
k8s-master03.example.local 195m 9% 1002Mi 26%
k8s-node01.example.local 110m 5% 695Mi 18%
k8s-node02.example.local 84m 4% 645Mi 16%
k8s-node03.example.local 105m 5% 652Mi 17%
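Pod-level metrics are available as well once metrics-server is running, for example:
[root@k8s-master01 ~]# kubectl top pod -n kube-system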
3.17 Deploying the Dashboard
The Dashboard displays the various resources in the cluster; it can also be used to view Pod logs in real time and to execute commands inside containers.
Check which Kubernetes versions each Dashboard release is compatible with; the compatibility matrix shows that Dashboard v2.7.0 supports Kubernetes 1.25.
[root@k8s-master01 ~]# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
[root@k8s-master01 ~]# vim recommended.yaml
...
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
type: NodePort #Add this line
ports:
- port: 443
targetPort: 8443
nodePort: 30005 #Add this line
selector:
k8s-app: kubernetes-dashboard
...
[root@k8s-master01 ~]# grep "image:" recommended.yaml
image: kubernetesui/dashboard:v2.7.0
image: kubernetesui/metrics-scraper:v1.0.8
#Note: skip the script below if you do not have Harbor
[root@k8s-master01 ~]# cat download_dashboard_images.sh
#!/bin/bash
#
#**********************************************************************************************
#Author: Raymond
#QQ: 88563128
#Date: 2022-01-11
#FileName: download_dashboard_images.sh
#URL: raymond.blog.csdn.net
#Description: The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \033[01;31m"
END='\033[0m'
images=$(awk -F "/" '/image:/{print $NF}' recommended.yaml)
HARBOR_DOMAIN=harbor.raymonds.cc
images_download(){
${COLOR}"开始下载Dashboard镜像"${END}
for i in ${images};do
nerdctl pull registry.aliyuncs.com/google_containers/$i
nerdctl tag registry.aliyuncs.com/google_containers/$i ${HARBOR_DOMAIN}/google_containers/$i
nerdctl rmi registry.aliyuncs.com/google_containers/$i
nerdctl push ${HARBOR_DOMAIN}/google_containers/$i
done
${COLOR}"Dashboard镜像下载完成"${END}
}
images_download
[root@k8s-master01 ~]# bash download_dashboard_images.sh
root@k8s-master01:~# nerdctl images | grep -E "(dashboard|metrics-scraper)"
harbor.raymonds.cc/google_containers/dashboard v2.7.0 2e500d29e9d5 29 seconds ago linux/amd64 245.8 MiB 72.3 MiB
harbor.raymonds.cc/google_containers/metrics-scraper v1.0.8 76049887f07a 20 seconds ago linux/amd64 41.8 MiB 18.8 MiB
[root@k8s-master01 ~]# sed -ri 's@(.*image:) kubernetesui(/.*)@\1 harbor.raymonds.cc/google_containers\2@g' recommended.yaml
[root@k8s-master01 ~]# grep "image:" recommended.yaml
image: harbor.raymonds.cc/google_containers/dashboard:v2.7.0
image: harbor.raymonds.cc/google_containers/metrics-scraper:v1.0.8
#Note: if you do not have Harbor, run the command below instead
[root@k8s-master01 ~]# sed -ri 's@(.*image:) kubernetesui(/.*)@\1 registry.aliyuncs.com/google_containers\2@g' recommended.yaml
[root@k8s-master01 ~]# kubectl apply -f recommended.yaml
[root@k8s-master01 ~]# kubectl get pod -n kubernetes-dashboard
NAME READY STATUS RESTARTS AGE
dashboard-metrics-scraper-764c76b989-p5jdr 1/1 Running 0 9s
kubernetes-dashboard-865c67b459-xfv9j 1/1 Running 0 9s
Create the administrator user manifest admin.yaml:
[root@k8s-master01 ~]# cat > admin.yaml <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
EOF
[root@k8s-master01 ~]# kubectl apply -f admin.yaml
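#Optionally confirm that the ServiceAccount and its ClusterRoleBinding were created
[root@k8s-master01 ~]# kubectl -n kubernetes-dashboard get sa admin-user
[root@k8s-master01 ~]# kubectl get clusterrolebinding admin-user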
3.17.1 Logging in to the Dashboard
Add the following startup parameters to the Google Chrome launcher to work around the certificate error that otherwise blocks access to the Dashboard, see Figure 1-1:
--test-type --ignore-certificate-errors
Figure 1-1: Google Chrome configuration
[root@k8s-master01 ~]# kubectl get svc kubernetes-dashboard -n kubernetes-dashboard
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes-dashboard NodePort 10.96.173.161 <none> 443:30005/TCP 2m3s
Access the Dashboard at https://172.31.3.101:30005, see Figure 1-2.
Figure 1-2: Dashboard login options
3.17.2 Logging in with a Token
Create a token:
[root@k8s-master01 ~]# kubectl -n kubernetes-dashboard create token admin-user
eyJhbGciOiJSUzI1NiIsImtpZCI6InBWZzRyck5xcWljTjdIQi0ydFhjTDRSYlQyVC1TSk1KQUU2X0oyMng4ZGsifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjYzNDEzODc1LCJpYXQiOjE2NjM0MTAyNzUsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiY2FiYzNmYjMtZTE5ZS00YmY1LWEzNjMtYTA5OGFlMzY2N2Q4In19LCJuYmYiOjE2NjM0MTAyNzUsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.QhG3el-uMJH3gJjbDBmUkwjRpesqZxaOaTv2wupLlc8wFcBM0S4G7YpCtRl9twMbx2weUNJqEIleKC8zwY1JnVpgDxbPYz7FOg-gCE7FWEwGscFRhbS3fPMd5cv6l-gzSSUoPEuFotZad0yHXYsrSVxaopKoVxMO6MqSbchdZRssdjCDPhtwDps17aSDprt6QIS4_Tdk_9INLpAH4I4lZBCsnltorU8H93NntTA06t3l-fysHgYmh7puLWIKBwYw9f43n7JFUbLeSRg1a8nxOgTJYLsr3xbG41KPts9_1WHvPOoBTlvAXGOihIkxwsiYJglkT_BpSpGHJx7YaKBv7g
Paste the token value into the Token field and click Sign in to access the Dashboard, see Figure 1-3:
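Tokens created this way are short-lived (the kubectl default is one hour). For testing, a longer duration can be requested with the standard --duration flag, though the API server may cap the value:
[root@k8s-master01 ~]# kubectl -n kubernetes-dashboard create token admin-user --duration=24h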
3.17.3 Logging in to the Dashboard with a kubeconfig File
[root@k8s-master01 ~]# cp /etc/kubernetes/admin.conf kubeconfig
root@k8s-master01:~# vim kubeconfig
...
#Add the token at the very bottom
token: eyJhbGciOiJSUzI1NiIsImtpZCI6InBWZzRyck5xcWljTjdIQi0ydFhjTDRSYlQyVC1TSk1KQUU2X0oyMng4ZGsifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjYzNDEzODc1LCJpYXQiOjE2NjM0MTAyNzUsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiY2FiYzNmYjMtZTE5ZS00YmY1LWEzNjMtYTA5OGFlMzY2N2Q4In19LCJuYmYiOjE2NjM0MTAyNzUsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.QhG3el-uMJH3gJjbDBmUkwjRpesqZxaOaTv2wupLlc8wFcBM0S4G7YpCtRl9twMbx2weUNJqEIleKC8zwY1JnVpgDxbPYz7FOg-gCE7FWEwGscFRhbS3fPMd5cv6l-gzSSUoPEuFotZad0yHXYsrSVxaopKoVxMO6MqSbchdZRssdjCDPhtwDps17aSDprt6QIS4_Tdk_9INLpAH4I4lZBCsnltorU8H93NntTA06t3l-fysHgYmh7puLWIKBwYw9f43n7JFUbLeSRg1a8nxOgTJYLsr3xbG41KPts9_1WHvPOoBTlvAXGOihIkxwsiYJglkT_BpSpGHJx7YaKBv7g
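For reference, the token line belongs under the user entry in the users section, roughly like this (abbreviated sketch; the certificate data fields are elided):
users:
- name: kubernetes-admin
  user:
    client-certificate-data: ...
    client-key-data: ...
    token: eyJhbGciOiJSUzI1NiIsImtpZCI6...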
4. Some Required Configuration Changes
Change kube-proxy to ipvs mode. Because the ipvs configuration was commented out when the cluster was initialized, it has to be changed manually:
Run on the master01 node:
[root@k8s-master01 ~]# curl 127.0.0.1:10249/proxyMode
iptables
[root@k8s-master01 ~]# kubectl edit cm kube-proxy -n kube-system
...
mode: "ipvs"
Update the kube-proxy Pods:
[root@k8s-master01 ~]# kubectl patch daemonset kube-proxy -p "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"date\":\"`date +'%s'`\"}}}}}" -n kube-system
daemonset.apps/kube-proxy patched
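The patch bumps an annotation on the Pod template, which triggers a rolling restart of the kube-proxy DaemonSet; you can wait for it to finish:
[root@k8s-master01 ~]# kubectl rollout status daemonset kube-proxy -n kube-system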
Verify the kube-proxy mode:
[root@k8s-master01 ~]# curl 127.0.0.1:10249/proxyMode
ipvs
[root@k8s-master01 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 172.31.3.101:30005 rr
-> 192.170.21.196:8443 Masq 1 0 0
TCP 172.31.3.101:32184 rr
-> 192.167.195.129:80 Masq 1 0 0
-> 192.169.111.132:80 Masq 1 0 0
TCP 192.162.55.64:30005 rr
-> 192.170.21.196:8443 Masq 1 0 0
TCP 192.162.55.64:32184 rr
-> 192.167.195.129:80 Masq 1 0 0
-> 192.169.111.132:80 Masq 1 0 0
TCP 10.96.0.1:443 rr
-> 172.31.3.101:6443 Masq 1 0 0
-> 172.31.3.102:6443 Masq 1 0 0
-> 172.31.3.103:6443 Masq 1 1 0
TCP 10.96.0.10:53 rr
-> 192.169.111.129:53 Masq 1 0 0
-> 192.169.111.131:53 Masq 1 0 0
TCP 10.96.0.10:9153 rr
-> 192.169.111.129:9153 Masq 1 0 0
-> 192.169.111.131:9153 Masq 1 0 0
TCP 10.98.38.228:8000 rr
-> 192.167.195.131:8000 Masq 1 0 0
TCP 10.99.151.204:443 rr
-> 192.170.21.195:4443 Masq 1 0 0
TCP 10.99.239.87:443 rr
-> 192.170.21.196:8443 Masq 1 0 0
TCP 10.100.33.92:80 rr
-> 192.167.195.129:80 Masq 1 0 0
-> 192.169.111.132:80 Masq 1 0 0
UDP 10.96.0.10:53 rr
-> 192.169.111.129:53 Masq 1 0 0
-> 192.169.111.131:53 Masq 1 0 0