1. Upgrading a kubeadm-installed cluster
To upgrade a k8s cluster you must first upgrade kubeadm itself to the target k8s version; in other words, kubeadm is the admission ticket for the upgrade.
1.1 Upgrade preparation
On every k8s master node, upgrade the control-plane components: bring kube-controller-manager, kube-apiserver, kube-scheduler and kube-proxy up to the new version.
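Each master below is upgraded with the same sequence: take it out of the HAProxy backend, install the new kubeadm/kubelet/kubectl packages, run the kubeadm upgrade, restart kubelet, and put it back into the backend. A minimal sketch of that sequence for one master (a summary of the steps performed later; the k8s-lb host, the HAProxy socket path and the CentOS package names are the ones used in this environment):
#run on the master being upgraded (example: master01 / 172.31.3.101)
[root@k8s-master01 ~]# ssh root@k8s-lb "echo 'disable server kubernetes-6443/172.31.3.101' | socat stdio /var/lib/haproxy/haproxy.sock"
[root@k8s-master01 ~]# yum -y install kubeadm-1.23.5 kubelet-1.23.5 kubectl-1.23.5
[root@k8s-master01 ~]# kubeadm upgrade apply v1.23.5
[root@k8s-master01 ~]# systemctl daemon-reload && systemctl restart kubelet
[root@k8s-master01 ~]# ssh root@k8s-lb "echo 'enable server kubernetes-6443/172.31.3.101' | socat stdio /var/lib/haproxy/haproxy.sock"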
1.1.1 Verify the current k8s master version
[root@k8s-master01 ~]# kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"22", GitVersion:"v1.22.8", GitCommit:"7061dbbf75f9f82e8ab21f9be7e8ffcaae8e0d44", GitTreeState:"clean", BuildDate:"2022-03-16T14:08:54Z", GoVersion:"go1.16.15", Compiler:"gc", Platform:"linux/amd64"}
1.1.2 Verify the current k8s node versions
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01.example.local Ready control-plane,master 116m v1.22.8
k8s-master02.example.local Ready control-plane,master 111m v1.22.8
k8s-master03.example.local Ready control-plane,master 106m v1.22.8
k8s-node01.example.local Ready <none> 110m v1.22.8
k8s-node02.example.local Ready <none> 109m v1.22.8
k8s-node03.example.local Ready <none> 103m v1.22.8
1.2 Upgrade the k8s master nodes
Upgrade each k8s master node in turn.
1.2.1 View the upgrade plan
[root@k8s-master01 ~]# kubeadm upgrade plan
[upgrade/config] Making sure the configuration is correct:
[upgrade/config] Reading configuration from the cluster...
[upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[preflight] Running pre-flight checks.
[upgrade] Running cluster health checks
[upgrade] Fetching available versions to upgrade to
[upgrade/versions] Cluster version: v1.22.8
[upgrade/versions] kubeadm version: v1.22.8
I0417 13:53:21.710378 11061 version.go:255] remote version is much newer: v1.23.5; falling back to: stable-1.22
[upgrade/versions] Target version: v1.22.8
[upgrade/versions] Latest version in the v1.22 series: v1.22.8
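Note: the plan can only propose v1.22.8 here because kubeadm itself is still v1.22.8; kubeadm never offers a target newer than its own version. After kubeadm has been upgraded to 1.23.5 in the next step, re-running the plan will list the v1.23 series:
[root@k8s-master01 ~]# kubeadm upgrade plan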
1.2.2 Upgrade each k8s master node
master01
#CentOS
[root@k8s-master01 ~]# yum list kubeadm.x86_64 --showduplicates | grep 1.23
kubeadm.x86_64 1.23.0-0 kubernetes
kubeadm.x86_64 1.23.1-0 kubernetes
kubeadm.x86_64 1.23.2-0 kubernetes
kubeadm.x86_64 1.23.3-0 kubernetes
kubeadm.x86_64 1.23.4-0 kubernetes
kubeadm.x86_64 1.23.5-0 kubernetes
#Ubuntu
root@k8s-master01:~# apt-cache madison kubeadm |grep 1.23.*
kubeadm | 1.23.5-00 | https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
kubeadm | 1.23.4-00 | https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
kubeadm | 1.23.3-00 | https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
kubeadm | 1.23.2-00 | https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
kubeadm | 1.23.1-00 | https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
kubeadm | 1.23.0-00 | https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial/main amd64 Packages
#Install socat on ha01 and ha02
#CentOS
[root@k8s-ha01 ~]# yum -y install socat
#Ubuntu
root@k8s-ha01:~# apt -y install socat
#Take master01 offline
[root@k8s-master01 ~]# ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/172.31.3.101' | socat stdio /var/lib/haproxy/haproxy.sock"
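To confirm the node really left the rotation, the HAProxy runtime API can be queried over the same admin socket; a quick sketch (kubernetes-6443 is the backend name configured in this setup):
[root@k8s-master01 ~]# ssh root@k8s-lb "echo 'show stat' | socat stdio /var/lib/haproxy/haproxy.sock" | grep kubernetes-6443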
#CentOS
[root@k8s-master01 ~]# yum -y install kubeadm-1.23.5 kubelet-1.23.5 kubectl-1.23.5
#Ubuntu
root@k8s-master01:~# apt -y install kubeadm=1.23.5-00 kubelet=1.23.5-00 kubectl=1.23.5-00
[root@k8s-master01 ~]# kubeadm config images list --kubernetes-version v1.23.5
k8s.gcr.io/kube-apiserver:v1.23.5
k8s.gcr.io/kube-controller-manager:v1.23.5
k8s.gcr.io/kube-scheduler:v1.23.5
k8s.gcr.io/kube-proxy:v1.23.5
k8s.gcr.io/pause:3.6
k8s.gcr.io/etcd:3.5.1-0
k8s.gcr.io/coredns/coredns:v1.8.6
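If the hosts can reach a public mirror directly, the required images could alternatively be pre-pulled with kubeadm itself; a sketch (the script below is still used here because the images are additionally pushed to the private Harbor registry):
[root@k8s-master01 ~]# kubeadm config images pull --kubernetes-version v1.23.5 --image-repository registry.aliyuncs.com/google_containers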
[root@k8s-master01 ~]# cat download_kubeadm_images_1.23.sh
#!/bin/bash
#
#**********************************************************************************************
#Author: Raymond
#QQ: 88563128
#Date: 2022-01-11
#FileName: download_kubeadm_images.sh
#URL: raymond.blog.csdn.net
#Description: The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \\033[01;31m"
END='\033[0m'
KUBEADM_VERSION=1.23.5
images=$(kubeadm config images list --kubernetes-version=v${KUBEADM_VERSION} | awk -F "/" '{print $NF}')
HARBOR_DOMAIN=harbor.raymonds.cc
images_download(){
    ${COLOR}"Start downloading the kubeadm images"${END}
    for i in ${images};do
        docker pull registry.aliyuncs.com/google_containers/$i
        docker tag registry.aliyuncs.com/google_containers/$i ${HARBOR_DOMAIN}/google_containers/$i
        docker rmi registry.aliyuncs.com/google_containers/$i
        docker push ${HARBOR_DOMAIN}/google_containers/$i
    done
    ${COLOR}"kubeadm images downloaded"${END}
}
images_download
[root@k8s-master01 ~]# bash download_kubeadm_images_1.23.sh
[root@k8s-master01 ~]# kubeadm upgrade apply v1.23.5
[upgrade/config] Making sure the configuration is correct:
[upgrade/config] Reading configuration from the cluster...
[upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[preflight] Running pre-flight checks.
[upgrade] Running cluster health checks
[upgrade/version] You have chosen to change the cluster version to "v1.23.5"
[upgrade/versions] Cluster version: v1.22.8
[upgrade/versions] kubeadm version: v1.23.5
[upgrade/confirm] Are you sure you want to proceed with the upgrade? [y/N]: y
[upgrade/prepull] Pulling images required for setting up a Kubernetes cluster
[upgrade/prepull] This might take a minute or two, depending on the speed of your internet connection
[upgrade/prepull] You can also perform this action in beforehand using 'kubeadm config images pull'
[upgrade/apply] Upgrading your Static Pod-hosted control plane to version "v1.23.5"...
Static pod: kube-apiserver-k8s-master01 hash: 489e9154ef8128df4858434cc8fd3927
Static pod: kube-controller-manager-k8s-master01 hash: b85ddaba73fafbdf37841e4ad554efb4
Static pod: kube-scheduler-k8s-master01 hash: 2c44ce0d2e6a12fcbf71a13a6944ed2d
[upgrade/etcd] Upgrading to TLS for etcd
Static pod: etcd-k8s-master01 hash: e145c79ad0635c11ac565c7cc57230a4
[upgrade/staticpods] Preparing for "etcd" upgrade
[upgrade/staticpods] Renewing etcd-server certificate
[upgrade/staticpods] Renewing etcd-peer certificate
[upgrade/staticpods] Renewing etcd-healthcheck-client certificate
[upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/etcd.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2022-04-17-14-04-58/etcd.yaml"
[upgrade/staticpods] Waiting for the kubelet to restart the component
[upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s)
Static pod: etcd-k8s-master01 hash: e145c79ad0635c11ac565c7cc57230a4
...
Static pod: etcd-k8s-master01 hash: 43aef8b4c68218e91249b4059c42d251
[apiclient] Found 3 Pods for label selector component=etcd
[upgrade/staticpods] Component "etcd" upgraded successfully!
[upgrade/etcd] Waiting for etcd to become available
[upgrade/staticpods] Writing new Static Pod manifests to "/etc/kubernetes/tmp/kubeadm-upgraded-manifests3737724865"
[upgrade/staticpods] Preparing for "kube-apiserver" upgrade
[upgrade/staticpods] Renewing apiserver certificate
[upgrade/staticpods] Renewing apiserver-kubelet-client certificate
[upgrade/staticpods] Renewing front-proxy-client certificate
[upgrade/staticpods] Renewing apiserver-etcd-client certificate
[upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-apiserver.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2022-04-17-14-04-58/kube-apiserver.yaml"
[upgrade/staticpods] Waiting for the kubelet to restart the component
[upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s)
Static pod: kube-apiserver-k8s-master01 hash: 489e9154ef8128df4858434cc8fd3927
...
Static pod: kube-apiserver-k8s-master01 hash: 30c526514d5ac6a8e1e45cf6f0fb0c36
[apiclient] Found 3 Pods for label selector component=kube-apiserver
[upgrade/staticpods] Component "kube-apiserver" upgraded successfully!
[upgrade/staticpods] Preparing for "kube-controller-manager" upgrade
[upgrade/staticpods] Renewing controller-manager.conf certificate
[upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-controller-manager.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2022-04-17-14-04-58/kube-controller-manager.yaml"
[upgrade/staticpods] Waiting for the kubelet to restart the component
[upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s)
Static pod: kube-controller-manager-k8s-master01 hash: b85ddaba73fafbdf37841e4ad554efb4
...
Static pod: kube-controller-manager-k8s-master01 hash: c3eca32165c5048ce60fc85cf5a49d7d
[apiclient] Found 3 Pods for label selector component=kube-controller-manager
[upgrade/staticpods] Component "kube-controller-manager" upgraded successfully!
[upgrade/staticpods] Preparing for "kube-scheduler" upgrade
[upgrade/staticpods] Renewing scheduler.conf certificate
[upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-scheduler.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2022-04-17-14-04-58/kube-scheduler.yaml"
[upgrade/staticpods] Waiting for the kubelet to restart the component
[upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s)
Static pod: kube-scheduler-k8s-master01 hash: 2c44ce0d2e6a12fcbf71a13a6944ed2d
...
Static pod: kube-scheduler-k8s-master01 hash: d15b9e0d5bd877e1dd4e311d907c52a1
[apiclient] Found 3 Pods for label selector component=kube-scheduler
[upgrade/staticpods] Component "kube-scheduler" upgraded successfully!
[upgrade/postupgrade] Applying label node-role.kubernetes.io/control-plane='' to Nodes with label node-role.kubernetes.io/master='' (deprecated)
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.23" in namespace kube-system with the configuration for the kubelets in the cluster
NOTE: The "kubelet-config-1.23" naming of the kubelet ConfigMap is deprecated. Once the UnversionedKubeletConfigMap feature gate graduates to Beta the default name will become just "kubelet-config". Kubeadm upgrade will handle this transition transparently.
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
[upgrade/successful] SUCCESS! Your cluster was upgraded to "v1.23.5". Enjoy!
[upgrade/kubelet] Now that your control plane is upgraded, please proceed with upgrading your kubelets if you haven't already done so.
[root@k8s-master01 ~]# systemctl daemon-reload && systemctl restart kubelet
#Bring master01 back online
[root@k8s-master01 ~]# ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/172.31.3.101' | socat stdio /var/lib/haproxy/haproxy.sock"
master02
#Take master02 offline
[root@k8s-master01 ~]# ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/172.31.3.102' | socat stdio /var/lib/haproxy/haproxy.sock"
#CentOS
[root@k8s-master02 ~]# yum -y install kubeadm-1.23.5 kubelet-1.23.5 kubectl-1.23.5
#Ubuntu
root@k8s-master02:~# apt -y install kubeadm=1.23.5-00 kubelet=1.23.5-00 kubectl=1.23.5-00
[root@k8s-master02 ~]# cat download_kubeadm_images_1.23-2.sh
#!/bin/bash
#
#**********************************************************************************************
#Author: Raymond
#QQ: 88563128
#Date: 2022-01-11
#FileName: download_kubeadm_images.sh
#URL: raymond.blog.csdn.net
#Description: The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \\033[01;31m"
END='\033[0m'
KUBEADM_VERSION=1.23.5
images=$(kubeadm config images list --kubernetes-version=v${KUBEADM_VERSION} | awk -F "/" '{print $NF}')
HARBOR_DOMAIN=harbor.raymonds.cc
images_download(){
    ${COLOR}"Start downloading the kubeadm images"${END}
    for i in ${images};do
        docker pull ${HARBOR_DOMAIN}/google_containers/$i
    done
    ${COLOR}"kubeadm images downloaded"${END}
}
images_download
[root@k8s-master02 ~]# bash download_kubeadm_images_1.23-2.sh
[root@k8s-master02 ~]# kubeadm upgrade apply v1.23.5
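For the second and third control-plane nodes, the upstream kubeadm upgrade documentation also offers 'kubeadm upgrade node', which reuses the target version already stored in the cluster and skips the interactive confirmation; either command works here. A sketch of the alternative:
[root@k8s-master02 ~]# kubeadm upgrade node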
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl restart kubelet
#Bring master02 back online
[root@k8s-master01 ~]# ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/172.31.3.102' | socat stdio /var/lib/haproxy/haproxy.sock"
master03
#Take master03 offline
[root@k8s-master01 ~]# ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/172.31.3.103' | socat stdio /var/lib/haproxy/haproxy.sock"
#CentOS
[root@k8s-master03 ~]# yum -y install kubeadm-1.23.5 kubelet-1.23.5 kubectl-1.23.5
#Ubuntu
root@k8s-master03:~# apt -y install kubeadm=1.23.5-00 kubelet=1.23.5-00 kubectl=1.23.5-00
[root@k8s-master03 ~]# bash download_kubeadm_images_1.23-2.sh
[root@k8s-master03 ~]# kubeadm upgrade apply v1.23.5
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl restart kubelet
#Bring master03 back online
[root@k8s-master01 ~]# ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/172.31.3.103' | socat stdio /var/lib/haproxy/haproxy.sock"
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01.example.local Ready control-plane,master 140m v1.23.5
k8s-master02.example.local Ready control-plane,master 135m v1.23.5
k8s-master03.example.local Ready control-plane,master 130m v1.23.5
k8s-node01.example.local Ready <none> 134m v1.22.8
k8s-node02.example.local Ready <none> 133m v1.22.8
k8s-node03.example.local Ready <none> 127m v1.22.8
[root@k8s-master01 ~]# kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.5", GitCommit:"c285e781331a3785a7f436042c65c5641ce8a9e9", GitTreeState:"clean", BuildDate:"2022-03-16T15:57:37Z", GoVersion:"go1.17.8", Compiler:"gc", Platform:"linux/amd64"}
[root@k8s-master01 ~]# kubectl version
Client Version: version.Info{Major:"1", Minor:"22", GitVersion:"v1.22.8", GitCommit:"7061dbbf75f9f82e8ab21f9be7e8ffcaae8e0d44", GitTreeState:"clean", BuildDate:"2022-03-16T14:10:06Z", GoVersion:"go1.16.15", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.5", GitCommit:"c285e781331a3785a7f436042c65c5641ce8a9e9", GitTreeState:"clean", BuildDate:"2022-03-16T15:52:18Z", GoVersion:"go1.17.8", Compiler:"gc", Platform:"linux/amd64"}
[root@k8s-master01 ~]# kubelet --version
Kubernetes v1.23.5
1.3 Upgrade Calico
[root@k8s-master01 ~]# curl https://docs.projectcalico.org/manifests/calico-etcd.yaml -O
[root@k8s-master01 ~]# vim calico-etcd.yaml
...
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: OnDelete    #change this: with OnDelete, calico is not rolled out automatically; a pod is only upgraded after the old pod is deleted
  template:
    metadata:
      labels:
        k8s-app: calico-node
...
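Once the modified calico-etcd.yaml has been applied (further below), the strategy can be verified directly from the DaemonSet; a sketch:
[root@k8s-master01 ~]# kubectl get ds calico-node -n kube-system -o jsonpath='{.spec.updateStrategy.type}{"\n"}'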
Modify the following places in calico-etcd.yaml.
[root@k8s-master01 ~]# grep "etcd_endpoints:.*" calico-etcd.yaml
etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"
[root@k8s-master01 ~]# sed -i 's#etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"#etcd_endpoints: "https://172.31.3.101:2379,https://172.31.3.102:2379,https://172.31.3.103:2379"#g' calico-etcd.yaml
[root@k8s-master01 ~]# grep "etcd_endpoints:.*" calico-etcd.yaml
etcd_endpoints: "https://172.31.3.101:2379,https://172.31.3.102:2379,https://172.31.3.103:2379"
[root@k8s-master01 ~]# grep -E "(.*etcd-key:.*|.*etcd-cert:.*|.*etcd-ca:.*)" calico-etcd.yaml
# etcd-key: null
# etcd-cert: null
# etcd-ca: null
[root@k8s-master01 ~]# ETCD_KEY=`cat /etc/kubernetes/pki/etcd/server.key | base64 | tr -d '\n'`
[root@k8s-master01 ~]# ETCD_CERT=`cat /etc/kubernetes/pki/etcd/server.crt | base64 | tr -d '\n'`
[root@k8s-master01 ~]# ETCD_CA=`cat /etc/kubernetes/pki/etcd/ca.crt | base64 | tr -d '\n'`
[root@k8s-master01 ~]# sed -i "s@# etcd-key: null@etcd-key: ${ETCD_KEY}@g; s@# etcd-cert: null@etcd-cert: ${ETCD_CERT}@g; s@# etcd-ca: null@etcd-ca: ${ETCD_CA}@g" calico-etcd.yaml
[root@k8s-master01 ~]# grep -E "(.*etcd-key:.*|.*etcd-cert:.*|.*etcd-ca:.*)" calico-etcd.yaml
etcd-key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdzFyRE5lanVxbnVDRVBEa01xdjlmcVNWeHhmSzQvQ0wrY2Z3YnY5WHovSS9ScHZmCmNtRjdUM0wzYnBDNS9JMjVuVlZ6N2plb2JUZnN2RFhDWjVIQml1MkYrSzR1SHdna3dtREJYWjZzSUd6clNSS1oKRTdldWhxN1F3SWVqOWtTcTN2RjA5d1NsbWJJeFJaNTZmMXgvYXVMcnFpNUZnanBmWUhMWit3MVdBcUM3WWtVWgoraHoxN3pFRjVnaWlYWE9ucGR1c2VCZDZoVHd1T2JwNmg0clpEb0IrY1VMMXplOTNPTFJGbDBpL2ppRzFzMGw2CmlUaUgwU3d1SFlXQ1NXNWZ5NzBjVDFseUduWG5jNUFwOGRYZWt5dnZYRFBzNEVnVVcvbUczWlpoalAreC9DWTcKNGp6M2dRZVFHYitSMUdRb1djdlh4UGZneGsxK2IwK01ZUjFmVXdJREFRQUJBb0lCQUYwWTQrN05FdkFyNjlBbQovSmtwWGFUOHltUVc4cG11Q1FjQVFaU2tHelQrUFNscEh4TmpZV0I3YVc5SGlWclNMNkxMRm5Sd0VkUDYwdGJlCng4YVRyNmlGaVZMNXJ3RWE0R25Cc21Uck9SdzZ5K1lHOXV4dW5MMlNrZWt1dXZTaHhNeDZSVU55ODNoTGN5KzYKVnFaYmJsMkJ4czFUUDh6UUJLUHlGKytNYTNEVVV1RnhNZ0h3cnFOYVA5bzdOaW9ycTFzZTdHV2F1dHNXSW1HZwpLZjJDREU5SVlHbGQyd1pnWCtoVWhQOU1UcWVKVGJrQVJiUEVsS1BLUDBSeUxaNi9tcE42K0VONGovS0NHeW9PCmNmTzUrazlpUlpwYytldnhpelZkNXNyWER2azBlK1pyU3Y2eUhtV0hpaUxCMm9PN0hkMUVKbjhQa09scE1ISjcKU0hoTzBBRUNnWUVBN29DMFZicmVQNXhZRjUyalI2RUtDUkVLR0lWcExrTTBuNUpSRUdpNElXcWNyWEJ1eGhIYwpQWVgzdnJKRWM5Nm8va28rZTRVNk94cWFISjZRSGNVODFCRTcyc0d0RkY2WHlnNlRkSjRJd2ZIenpKNjlDK2JtCmRhSlNqbG1UeE9GOEhNSkpjdUt3RGRxOFlLNlRHZzN0MXJTcVNtczMzV1BxdG9zbW5Takp0cThDZ1lFQTBhK3kKTGxIWWl5U2NVSG0vY3hYWEZSVkNUWjJqaWo0MzF4MXNkNFE3amc0MlQ0dmNCUElsUmJGQjdRYjVLallZUjZkYQp2cGJMV0hLbE1OblUrM1dvcnJnNjNWNlZwa3lCck1VcnNSemlQaUIxM1lXVENsZjUwdDJERVZ5dS9aWDZPc2FuCjY4MDJwRFc0YnhKcmNPam9aM3BjUm9Fcy96N0RGKzArZStseWlwMENnWUVBdXR2WGJmdDBPUDR5L24yZytYT3cKT3g1QWZLbTVtR2RMQ1dKSFpNWEd6VmVMM1U3ald3ZVBPQnlIMTc0dloyQ2hvbWxrdnIzSXU1bkIrSDQ2aHppSwp5ZE9ldzJ0T1FWRkROeWxvV2N1ZkxPUjFrSEVseC9kbHcvQWpJaWdJWUE0UmdTNnZBUFdkM1p6c1RnczRjUWRNCnVoVGQvbVEyWnB2cnZvMFMrYnFGSHowQ2dZRUFnVnN3UXQ3L0JhZktQdU04dGxTczRUYkNObnVmWGpOUDQ0Y2wKV1AzY2Q2QlE1UFhVLzhBYU9rcEY3Mkd6Nk5TQ1dnSG1PMWx2ak5yOUNZdjRsa0JabFovVndLY1BEdzUzbVF2eQpEa3RSVHg1YldCT0ZTSVpKZWtwcEJ4YjBaVUJXcEZmVlUrUy9ac0kxUzJCRG85NHJNVnNNL2ZuR3RwZ1RadmxXCjZMNTFpUWtDZ1lBUkVRSElYTmhlYW1RSFE1TEpicEZzMFltSzRVZDluL2w1Vng1MEdUbG0vUEx5VlBWWU9TUWUKenYyYS96RHY2dVJ6ZGROU0tpSkFMVUJDZG5RSDRraklBWGg3NDBTQXNGUDZraW4zNm11RDB4RTlEODBOMlNyMgpDL3hQWHdINWp0Ry9jUkdHZGU4SGdjQTg4NkFKYkMyenlxYURpY3h1ejRQcll4Z2dPNG9iTmc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
etcd-cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURRVENDQWltZ0F3SUJBZ0lJR2VNdy9ETExMK2t3RFFZSktvWklodmNOQVFFTEJRQXdFakVRTUE0R0ExVUUKQXhNSFpYUmpaQzFqWVRBZUZ3MHlNakF4TWpZeE16TTFOVGRhRncweU16QXhNall4TXpNMU5UZGFNQmN4RlRBVApCZ05WQkFNVERHczRjeTF0WVhOMFpYSXdNVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DCmdnRUJBTU5hd3pYbzdxcDdnaER3NURLci9YNmtsY2NYeXVQd2kvbkg4RzcvVjgveVAwYWIzM0poZTA5eTkyNlEKdWZ5TnVaMVZjKzQzcUcwMzdMdzF3bWVSd1lydGhmaXVMaDhJSk1KZ3dWMmVyQ0JzNjBrU21STzNyb2F1ME1DSApvL1pFcXQ3eGRQY0VwWm15TVVXZWVuOWNmMnJpNjZvdVJZSTZYMkJ5MmZzTlZnS2d1MkpGR2ZvYzllOHhCZVlJCm9sMXpwNlhickhnWGVvVThMam02ZW9lSzJRNkFmbkZDOWMzdmR6aTBSWmRJdjQ0aHRiTkplb2s0aDlFc0xoMkYKZ2tsdVg4dTlIRTlaY2hwMTUzT1FLZkhWM3BNcjcxd3o3T0JJRkZ2NWh0MldZWXovc2Z3bU8rSTg5NEVIa0JtLwprZFJrS0ZuTDE4VDM0TVpOZm05UGpHRWRYMU1DQXdFQUFhT0JsVENCa2pBT0JnTlZIUThCQWY4RUJBTUNCYUF3CkhRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUdDQ3NHQVFVRkJ3TUNNQjhHQTFVZEl3UVlNQmFBRk1Ha3dwKy8KZGdBTjRHdnRDT0ZrbE5OSnBCSkNNRUFHQTFVZEVRUTVNRGVDREdzNGN5MXRZWE4wWlhJd01ZSUpiRzlqWVd4bwpiM04waHdTc0h3Tmxod1IvQUFBQmh4QUFBQUFBQUFBQUFBQUFBQUFBQUFBQk1BMEdDU3FHU0liM0RRRUJDd1VBCkE0SUJBUUFMOE53a0I5aUprc0RnOGp6dFRTTjB4U3pyaXQyK242M2QrV0dGS3l1K2d6Z2pTLzZaOXhYQkpZN3YKL2c1SEZrUnpxTmJXTDdoV0dtY1ZPUGJpQmNpZnJtcmpFNUFMdzhPNmZBVGg2V3RtaVN4RlRwa1Nhc3R5OW82RApJcGlmYzhSTS8rSS9EVWdTQXQ3ZzFucUJodjlxdnFSRWNiM1J1SmRYWTJjNi90LzNZb3gzTUFmVzNJaUVDNUorCkNTSXl2UUtmUDlBWVlXK2F4Y1dQelhXNzEwUVdNTnozZXVQMzJqZENkanBzbFVLNldpaHJQYjdnaURTdDdFVFYKWk5EeEh4NUp3WXlpYmFxbGQzQUlicFhNRmxnY2NubWttM0pwWnIrTUI4bGlGYThHZlU5L005N1ZueXFZN0huNgpDNkdXTWlJNWFvc0lGaE9INUJ3NFFNa0NzSXlvCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
etcd-ca: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM0VENDQWNtZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFTTVJBd0RnWURWUVFERXdkbGRHTmsKTFdOaE1CNFhEVEl5TURFeU5qRXpNelUxTjFvWERUTXlNREV5TkRFek16VTFOMW93RWpFUU1BNEdBMVVFQXhNSApaWFJqWkMxallUQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU5OcnNlMnd1aG45CnVKazdpOEZLN1A5SHU1YlIwOEY5R0VQTEtBelg0NHdKSE45QjZ1MTBrZmNoL0dlRTBremErQjhJejQ1UjRrdmUKbVpVYS9XbmVXdjNxRm1odURiTEdvU1B1Ykl1aXh1aTkwdllzbjBaRUJCV2dMbWdRNkdHNnd5OWFNbG55VGlWYworOTdSNTg2b3dMVGRTU3NiNjd2c0w0U2U0U2lXOHdTQTQ2K3FXSEJKNHc5Q2s2QXljam9vbDBMbXREVkJ1QlpqCjlNeWdDbUE4M3lkTnV4eUhDSGJpM2FRdkovVUNyQnoyNk5zYTVha1NlMlRQNGJ1US9PWjBIYnhsNUE5NXIyeGgKNkM1NGx3cHFLeTkxb2craWQ1ZlZMRFVVdDR0d1pvd0dITnZxMWRrRnI3VjA2SDJjdXo0eXlMajQ0a0xPNk9LMgo4OGplaWhBREhiY0NBd0VBQWFOQ01FQXdEZ1lEVlIwUEFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CCkFmOHdIUVlEVlIwT0JCWUVGTUdrd3ArL2RnQU40R3Z0Q09Ga2xOTkpwQkpDTUEwR0NTcUdTSWIzRFFFQkN3VUEKQTRJQkFRQTI5SWlldmxmdk9MSmFCOGlNR3ZLTmVXMExGZkhkTGRoeUl3T2dDeThjaHNaTVR5SjhXdzFrUUVKUgozbTY0MGVHK2UvOTE0QmE5Wk5iL0NQMkN1eHA2MkQrVWl0U0FjS001NWtTNURuVEVrcURwbVdETjdLTjhISk1QCkcwdlRXYnNrVTVicXJqb0JVQVNPNUsxeDl4WENSUDU2elBVZ3E5QTY4SmM4N1Mya29PNk56Mm53ZE9zc042TW0KRzFNQmdHQ2lqQXB3MDZJM2NuT1ExcFFhVk1RNVovT0tDSEoyTFFFUFJISVZqb2E4clBBcmNyYXFGUnpPeTk4agpOc3FxcWYvNDhMamVwZDZvOFlZc08zRng2M3c2YmhaOG94WDFxT090WTRlQ0pPeWRTZkRMR21tYkMrc1ozZlJiCjU0RkVLQ1RWKzhqQjBYNmZJYjl2OHg3WU5MNFgKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
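As a sanity check, the embedded base64 blob can be decoded and compared against the original certificate; the diff should print nothing if they match (a sketch using the etcd CA as an example):
[root@k8s-master01 ~]# diff <(grep 'etcd-ca:' calico-etcd.yaml | awk '{print $2}' | base64 -d) /etc/kubernetes/pki/etcd/ca.crt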
[root@k8s-master01 ~]# grep -E "(.*etcd_ca:.*|.*etcd_cert:.*|.*etcd_key:.*)" calico-etcd.yaml
etcd_ca: "" # "/calico-secrets/etcd-ca"
etcd_cert: "" # "/calico-secrets/etcd-cert"
etcd_key: "" # "/calico-secrets/etcd-key"
[root@k8s-master01 ~]# sed -i 's#etcd_ca: ""#etcd_ca: "/calico-secrets/etcd-ca"#g; s#etcd_cert: ""#etcd_cert: "/calico-secrets/etcd-cert"#g; s#etcd_key: "" #etcd_key: "/calico-secrets/etcd-key" #g' calico-etcd.yaml
[root@k8s-master01 ~]# grep -E "(.*etcd_ca:.*|.*etcd_cert:.*|.*etcd_key:.*)" calico-etcd.yaml
etcd_ca: "/calico-secrets/etcd-ca" # "/calico-secrets/etcd-ca"
etcd_cert: "/calico-secrets/etcd-cert" # "/calico-secrets/etcd-cert"
etcd_key: "/calico-secrets/etcd-key" # "/calico-secrets/etcd-key"
[root@k8s-master01 ~]# POD_SUBNET=`cat /etc/kubernetes/manifests/kube-controller-manager.yaml | grep cluster-cidr= | awk -F= '{print $NF}'`
[root@k8s-master01 ~]# echo $POD_SUBNET
192.168.0.0/12
# Note: the next step changes the CALICO_IPV4POOL_CIDR value in calico-etcd.yaml to your own Pod subnet, i.e. replaces 192.168.x.x/16 with your cluster's Pod CIDR, and uncomments the entry:
[root@k8s-master01 ~]# grep -E "(.*CALICO_IPV4POOL_CIDR.*|.*192.168.0.0.*)" calico-etcd.yaml
# - name: CALICO_IPV4POOL_CIDR
# value: "192.168.0.0/16"
[root@k8s-master01 ~]# sed -i 's@# - name: CALICO_IPV4POOL_CIDR@- name: CALICO_IPV4POOL_CIDR@g; s@# value: "192.168.0.0/16"@ value: '"${POD_SUBNET}"'@g' calico-etcd.yaml
[root@k8s-master01 ~]# grep -E "(.*CALICO_IPV4POOL_CIDR.*|.*192.168.0.0.*)" calico-etcd.yaml
- name: CALICO_IPV4POOL_CIDR
value: 192.168.0.0/12
[root@k8s-master01 ~]# grep "image:" calico-etcd.yaml
image: docker.io/calico/cni:v3.22.2
image: docker.io/calico/pod2daemon-flexvol:v3.22.2
image: docker.io/calico/node:v3.22.2
image: docker.io/calico/kube-controllers:v3.22.2
Download the Calico images and push them to Harbor.
[root@k8s-master01 ~]# cat download_calico_images.sh
#!/bin/bash
#
#**********************************************************************************************
#Author: Raymond
#QQ: 88563128
#Date: 2022-01-11
#FileName: download_calico_images.sh
#URL: raymond.blog.csdn.net
#Description: The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \\033[01;31m"
END='\033[0m'
images=$(awk -F "/" '/image:/{print $NF}' calico-etcd.yaml)
HARBOR_DOMAIN=harbor.raymonds.cc
images_download(){
    ${COLOR}"Start downloading the Calico images"${END}
    for i in ${images};do
        docker pull registry.cn-beijing.aliyuncs.com/raymond9/$i
        docker tag registry.cn-beijing.aliyuncs.com/raymond9/$i ${HARBOR_DOMAIN}/google_containers/$i
        docker rmi registry.cn-beijing.aliyuncs.com/raymond9/$i
        docker push ${HARBOR_DOMAIN}/google_containers/$i
    done
    ${COLOR}"Calico images downloaded"${END}
}
images_download
[root@k8s-master01 ~]# bash download_calico_images.sh
[root@k8s-master01 ~]# sed -ri 's@(.*image:) docker.io/calico(/.*)@\1 harbor.raymonds.cc/google_containers\2@g' calico-etcd.yaml
[root@k8s-master01 ~]# grep "image:" calico-etcd.yaml
image: harbor.raymonds.cc/google_containers/cni:v3.22.2
image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2
image: harbor.raymonds.cc/google_containers/node:v3.22.2
image: harbor.raymonds.cc/google_containers/kube-controllers:v3.22.2
[root@k8s-master01 ~]# kubectl apply -f calico-etcd.yaml
secret/calico-etcd-secrets configured
configmap/calico-config unchanged
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers unchanged
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers unchanged
clusterrole.rbac.authorization.k8s.io/calico-node unchanged
clusterrolebinding.rbac.authorization.k8s.io/calico-node unchanged
daemonset.apps/calico-node configured
serviceaccount/calico-node unchanged
deployment.apps/calico-kube-controllers configured
serviceaccount/calico-kube-controllers unchanged
Warning: policy/v1beta1 PodDisruptionBudget is deprecated in v1.21+, unavailable in v1.25+; use policy/v1 PodDisruptionBudget
poddisruptionbudget.policy/calico-kube-controllers unchanged
#calico-etcd.yaml still uses the deprecated policy/v1beta1 PodDisruptionBudget (see the warning above); change the apiVersion to policy/v1 and apply again
[root@k8s-master01 ~]# vim calico-etcd.yaml
...
apiVersion: policy/v1
...
[root@k8s-master01 ~]# kubectl apply -f calico-etcd.yaml
secret/calico-etcd-secrets unchanged
configmap/calico-config unchanged
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers unchanged
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers unchanged
clusterrole.rbac.authorization.k8s.io/calico-node unchanged
clusterrolebinding.rbac.authorization.k8s.io/calico-node unchanged
daemonset.apps/calico-node configured
serviceaccount/calico-node unchanged
deployment.apps/calico-kube-controllers unchanged
serviceaccount/calico-kube-controllers unchanged
poddisruptionbudget.policy/calico-kube-controllers configured
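The DaemonSet template now references the v3.22.2 images even though no pod has been restarted yet; this can be confirmed without touching any pod (a sketch):
[root@k8s-master01 ~]# kubectl get ds calico-node -n kube-system -o jsonpath='{range .spec.template.spec.containers[*]}{.image}{"\n"}{end}'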
#Take master01 offline
[root@k8s-master01 ~]# ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/172.31.3.101' | socat stdio /var/lib/haproxy/haproxy.sock"
[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide |grep calico |grep master01
calico-node-bptdl 1/1 Running 0 88s 172.31.3.101 k8s-master01 <none> <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-bptdl -n kube-system -o yaml |grep "image:"
image: harbor.raymonds.cc/google_containers/node:v3.21.4
image: harbor.raymonds.cc/google_containers/cni:v3.21.4
- image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4
image: harbor.raymonds.cc/google_containers/node:v3.21.4
image: harbor.raymonds.cc/google_containers/cni:v3.21.4
image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4
#the images have not been upgraded yet (updateStrategy is OnDelete, so the pod has to be deleted first)
[root@k8s-master01 ~]# kubectl delete pod calico-node-bptdl -n kube-system
pod "calico-node-bptdl" deleted
[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide |grep calico |grep master01
calico-node-qn9nd 1/1 Running 0 33s 172.31.3.101 k8s-master01 <none> <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-qn9nd -n kube-system -o yaml |grep "image:"
image: harbor.raymonds.cc/google_containers/node:v3.22.2
image: harbor.raymonds.cc/google_containers/cni:v3.22.2
- image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2
image: calico/node:v3.22.2
image: calico/cni:v3.22.2
image: calico/pod2daemon-flexvol:v3.22.2
#Bring master01 back online
[root@k8s-master01 ~]# ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/172.31.3.101' | socat stdio /var/lib/haproxy/haproxy.sock"
#Take master02 offline
[root@k8s-master01 ~]# ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/172.31.3.102' | socat stdio /var/lib/haproxy/haproxy.sock"
[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide |grep calico |grep master02
calico-node-cqbdc 1/1 Running 1 (67m ago) 42h 172.31.3.102 k8s-master02.example.local <none> <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-cqbdc -n kube-system -o yaml |grep "image:"
image: harbor.raymonds.cc/google_containers/node:v3.21.4
image: harbor.raymonds.cc/google_containers/cni:v3.21.4
- image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4
image: harbor.raymonds.cc/google_containers/node:v3.21.4
image: harbor.raymonds.cc/google_containers/cni:v3.21.4
image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4
[root@k8s-master01 ~]# kubectl delete pod calico-node-cqbdc -n kube-system
pod "calico-node-cqbdc" deleted
[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide |grep calico |grep master02
calico-node-t5ghr 0/1 Init:0/2 0 5s 172.31.3.102 k8s-master02.example.local <none> <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-t5ghr -n kube-system -o yaml |grep "image:"
image: harbor.raymonds.cc/google_containers/node:v3.22.2
image: harbor.raymonds.cc/google_containers/cni:v3.22.2
- image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2
image: harbor.raymonds.cc/google_containers/node:v3.22.2
image: harbor.raymonds.cc/google_containers/cni:v3.22.2
image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2
#Bring master02 back online
[root@k8s-master01 ~]# ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/172.31.3.102' | socat stdio /var/lib/haproxy/haproxy.sock"
#Take master03 offline
[root@k8s-master01 ~]# ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'disable server kubernetes-6443/172.31.3.103' | socat stdio /var/lib/haproxy/haproxy.sock"
[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide |grep calico |grep master03
calico-node-ns2p4 1/1 Running 1 (68m ago) 42h 172.31.3.103 k8s-master03.example.local <none> <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-ns2p4 -n kube-system -o yaml |grep "image:"
image: harbor.raymonds.cc/google_containers/node:v3.21.4
image: harbor.raymonds.cc/google_containers/cni:v3.21.4
- image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4
image: harbor.raymonds.cc/google_containers/node:v3.21.4
image: harbor.raymonds.cc/google_containers/cni:v3.21.4
image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4
[root@k8s-master01 ~]# kubectl delete pod calico-node-ns2p4 -n kube-system
pod "calico-node-ns2p4" deleted
[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide |grep calico |grep master03
calico-node-wxlfn 0/1 Init:0/2 0 3s 172.31.3.103 k8s-master03.example.local <none> <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-wxlfn -n kube-system -o yaml |grep "image:"
image: harbor.raymonds.cc/google_containers/node:v3.22.2
image: harbor.raymonds.cc/google_containers/cni:v3.22.2
- image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2
image: harbor.raymonds.cc/google_containers/node:v3.22.2
image: harbor.raymonds.cc/google_containers/cni:v3.22.2
image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2
#Bring master03 back online
[root@k8s-master01 ~]# ssh -o StrictHostKeyChecking=no root@k8s-lb "echo 'enable server kubernetes-6443/172.31.3.103' | socat stdio /var/lib/haproxy/haproxy.sock"
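At this point the calico-node pods on the three masters should already report the v3.22.2 image, while the worker pods still show the old tag; a one-liner sketch that lists the image per node:
[root@k8s-master01 ~]# kubectl get pod -n kube-system -l k8s-app=calico-node -o jsonpath='{range .items[*]}{.spec.nodeName}{"\t"}{.spec.containers[0].image}{"\n"}{end}'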
1.4 Upgrade the k8s worker nodes
[root@k8s-master01 ~]# kubectl drain k8s-node01.example.local --delete-emptydir-data --force --ignore-daemonsets
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready control-plane,master 42h v1.23.5
k8s-master02.example.local Ready control-plane,master 42h v1.23.5
k8s-master03.example.local Ready control-plane,master 42h v1.23.5
k8s-node01.example.local Ready,SchedulingDisabled <none> 42h v1.22.8
k8s-node02.example.local Ready <none> 42h v1.22.8
k8s-node03.example.local Ready <none> 42h v1.22.8
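The drain → upgrade packages → restart kubelet → uncordon cycle shown here for node01 is repeated for node02 and node03 below; on clusters with many workers it could be scripted, roughly like this (a sketch assuming CentOS workers and passwordless SSH from master01):
for NODE in k8s-node01.example.local k8s-node02.example.local k8s-node03.example.local;do
    kubectl drain ${NODE} --delete-emptydir-data --force --ignore-daemonsets
    ssh root@${NODE} "yum -y install kubeadm-1.23.5 kubelet-1.23.5 && systemctl daemon-reload && systemctl restart kubelet"
    kubectl uncordon ${NODE}
done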
#CentOS
[root@k8s-node01 ~]# yum -y install kubeadm-1.23.5 kubelet-1.23.5
#Ubuntu
root@k8s-node01:~# apt -y install kubeadm=1.23.5-00 kubelet=1.23.5-00
[root@k8s-node01 ~]# systemctl daemon-reload && systemctl restart kubelet
[root@k8s-master01 ~]# kubectl get pod -A -o wide|grep calico |grep node01
kube-system calico-node-l2grz 1/1 Running 1 (76m ago) 42h 172.31.3.108 k8s-node01.example.local <none> <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-l2grz -n kube-system -o yaml |grep "image:"
image: harbor.raymonds.cc/google_containers/node:v3.21.4
image: harbor.raymonds.cc/google_containers/cni:v3.21.4
- image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4
image: harbor.raymonds.cc/google_containers/node:v3.21.4
image: harbor.raymonds.cc/google_containers/cni:v3.21.4
image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4
[root@k8s-master01 ~]# kubectl delete pod calico-node-l2grz -n kube-system
pod "calico-node-l2grz" deleted
[root@k8s-master01 ~]# kubectl get pod -A -o wide|grep calico |grep node01
kube-system calico-node-4wjvh 0/1 Init:0/2 0 5s 172.31.3.108 k8s-node01.example.local <none> <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-4wjvh -n kube-system -o yaml |grep "image:"
image: harbor.raymonds.cc/google_containers/node:v3.22.2
image: harbor.raymonds.cc/google_containers/cni:v3.22.2
- image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2
image: harbor.raymonds.cc/google_containers/node:v3.22.2
image: harbor.raymonds.cc/google_containers/cni:v3.22.2
image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2
[root@k8s-master01 ~]# kubectl uncordon k8s-node01.example.local
node/k8s-node01.example.local uncordoned
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready control-plane,master 42h v1.23.5
k8s-master02.example.local Ready control-plane,master 42h v1.23.5
k8s-master03.example.local Ready control-plane,master 42h v1.23.5
k8s-node01.example.local Ready <none> 42h v1.23.5
k8s-node02.example.local Ready <none> 42h v1.22.8
k8s-node03.example.local Ready <none> 42h v1.22.8
[root@k8s-master01 ~]# kubectl drain k8s-node02.example.local --delete-emptydir-data --force --ignore-daemonsets
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready control-plane,master 42h v1.23.5
k8s-master02.example.local Ready control-plane,master 42h v1.23.5
k8s-master03.example.local Ready control-plane,master 42h v1.23.5
k8s-node01.example.local Ready <none> 42h v1.23.5
k8s-node02.example.local Ready,SchedulingDisabled <none> 42h v1.22.8
k8s-node03.example.local Ready <none> 42h v1.22.8
#CentOS
[root@k8s-node02 ~]# yum -y install kubeadm-1.23.5 kubelet-1.23.5
#Ubuntu
root@k8s-node02:~# apt -y install kubeadm=1.23.5-00 kubelet=1.23.5-00
[root@k8s-node02 ~]# systemctl daemon-reload && systemctl restart kubelet
[root@k8s-master01 ~]# kubectl get pod -A -o wide|grep calico |grep node02 | tail -n1
kube-system calico-node-hsmgt 1/1 Running 1 (81m ago) 42h 172.31.3.109 k8s-node02.example.local <none> <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-hsmgt -n kube-system -o yaml| grep "image:"
image: harbor.raymonds.cc/google_containers/node:v3.21.4
image: harbor.raymonds.cc/google_containers/cni:v3.21.4
- image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4
image: harbor.raymonds.cc/google_containers/node:v3.21.4
image: harbor.raymonds.cc/google_containers/cni:v3.21.4
image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4
[root@k8s-master01 ~]# kubectl delete pod calico-node-hsmgt -n kube-system
pod "calico-node-hsmgt" deleted
[root@k8s-master01 ~]# kubectl get pod -A -o wide|grep calico |grep node02 | tail -n1
kube-system calico-node-lhflp 0/1 Init:0/2 0 3s 172.31.3.109 k8s-node02.example.local <none> <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-lhflp -n kube-system -o yaml| grep "image:"
image: harbor.raymonds.cc/google_containers/node:v3.22.2
image: harbor.raymonds.cc/google_containers/cni:v3.22.2
- image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2
image: harbor.raymonds.cc/google_containers/node:v3.22.2
image: harbor.raymonds.cc/google_containers/cni:v3.22.2
image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2
[root@k8s-master01 ~]# kubectl uncordon k8s-node02.example.local
node/k8s-node02.example.local uncordoned
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready control-plane,master 42h v1.23.5
k8s-master02.example.local Ready control-plane,master 42h v1.23.5
k8s-master03.example.local Ready control-plane,master 42h v1.23.5
k8s-node01.example.local Ready <none> 42h v1.23.5
k8s-node02.example.local Ready <none> 42h v1.23.5
k8s-node03.example.local Ready <none> 42h v1.22.8
[root@k8s-master01 ~]# kubectl drain k8s-node03.example.local --delete-emptydir-data --force --ignore-daemonsets
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready control-plane,master 42h v1.23.5
k8s-master02.example.local Ready control-plane,master 42h v1.23.5
k8s-master03.example.local Ready control-plane,master 42h v1.23.5
k8s-node01.example.local Ready <none> 42h v1.23.5
k8s-node02.example.local Ready <none> 42h v1.23.5
k8s-node03.example.local Ready,SchedulingDisabled <none> 42h v1.22.8
#CentOS
[root@k8s-node03 ~]# yum -y install kubeadm-1.23.5 kubelet-1.23.5
#Ubuntu
root@k8s-node03:~# apt -y install kubeadm=1.23.5-00 kubelet=1.23.5-00
[root@k8s-node03 ~]# systemctl daemon-reload && systemctl restart kubelet
[root@k8s-master01 ~]# kubectl get pod -A -o wide|grep calico |grep node03 | tail -n1
kube-system calico-node-snkfc 1/1 Running 1 (84m ago) 42h 172.31.3.110 k8s-node03.example.local <none> <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-snkfc -n kube-system -o yaml| grep "image:"
image: harbor.raymonds.cc/google_containers/node:v3.21.4
image: harbor.raymonds.cc/google_containers/cni:v3.21.4
- image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4
image: harbor.raymonds.cc/google_containers/node:v3.21.4
image: harbor.raymonds.cc/google_containers/cni:v3.21.4
image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4
[root@k8s-master01 ~]# kubectl delete pod calico-node-snkfc -n kube-system
pod "calico-node-snkfc" deleted
[root@k8s-master01 ~]# kubectl get pod -A -o wide|grep calico |grep node03 | tail -n1
kube-system calico-node-htjsq 0/1 Init:0/2 0 4s 172.31.3.110 k8s-node03.example.local <none> <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-htjsq -n kube-system -o yaml| grep "image:"
image: harbor.raymonds.cc/google_containers/node:v3.22.2
image: harbor.raymonds.cc/google_containers/cni:v3.22.2
- image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2
image: harbor.raymonds.cc/google_containers/node:v3.22.2
image: harbor.raymonds.cc/google_containers/cni:v3.22.2
image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.22.2
[root@k8s-master01 ~]# kubectl uncordon k8s-node03.example.local
node/k8s-node03.example.local uncordoned
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready control-plane,master 42h v1.23.5
k8s-master02.example.local Ready control-plane,master 42h v1.23.5
k8s-master03.example.local Ready control-plane,master 42h v1.23.5
k8s-node01.example.local Ready <none> 42h v1.23.5
k8s-node02.example.local Ready <none> 42h v1.23.5
k8s-node03.example.local Ready <none> 42h v1.23.5
1.5 Upgrade metrics-server
[root@k8s-master01 ~]# wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
#Modify the file as follows
[root@k8s-master01 ~]# vim components.yaml
...
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
#add the lines below
        - --kubelet-insecure-tls
        - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
        - --requestheader-username-headers=X-Remote-User
        - --requestheader-group-headers=X-Remote-Group
        - --requestheader-extra-headers-prefix=X-Remote-Extra-
...
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
#add the lines below
        - name: ca-ssl
          mountPath: /etc/kubernetes/pki
...
      volumes:
      - emptyDir: {}
        name: tmp-dir
#add the lines below
      - name: ca-ssl
        hostPath:
          path: /etc/kubernetes/pki
...
[root@k8s-master01 ~]# grep "image:" components.yaml
image: k8s.gcr.io/metrics-server/metrics-server:v0.6.1
[root@k8s-master01 ~]# cat download_metrics_images.sh
#!/bin/bash
#
#**********************************************************************************************
#Author: Raymond
#QQ: 88563128
#Date: 2022-01-11
#FileName: download_metrics_images.sh
#URL: raymond.blog.csdn.net
#Description: The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \\033[01;31m"
END='\033[0m'
images=$(awk -F "/" '/image:/{print $NF}' components.yaml)
HARBOR_DOMAIN=harbor.raymonds.cc
images_download(){
    ${COLOR}"Start downloading the Metrics images"${END}
    for i in ${images};do
        docker pull registry.aliyuncs.com/google_containers/$i
        docker tag registry.aliyuncs.com/google_containers/$i ${HARBOR_DOMAIN}/google_containers/$i
        docker rmi registry.aliyuncs.com/google_containers/$i
        docker push ${HARBOR_DOMAIN}/google_containers/$i
    done
    ${COLOR}"Metrics images downloaded"${END}
}
images_download
[root@k8s-master01 ~]# bash download_metrics_images.sh
[root@k8s-master01 ~]# docker images |grep metrics
harbor.raymonds.cc/google_containers/metrics-server v0.6.1 f73640fb5061 8 weeks ago 64.3MB
[root@k8s-master01 ~]# sed -ri 's@(.*image:) k8s.gcr.io/metrics-server(/.*)@\1 harbor.raymonds.cc/google_containers\2@g' components.yaml
[root@k8s-master01 ~]# grep "image:" components.yaml
image: harbor.raymonds.cc/google_containers/metrics-server:v0.6.1
[root@k8s-master01 ~]# kubectl apply -f components.yaml
serviceaccount/metrics-server unchanged
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader unchanged
clusterrole.rbac.authorization.k8s.io/system:metrics-server configured
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader unchanged
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator unchanged
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server unchanged
service/metrics-server unchanged
deployment.apps/metrics-server configured
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io unchanged
Check the status.
[root@k8s-master01 ~]# kubectl get pod -n kube-system |grep metrics
metrics-server-8d5598b6f-z7n6s 1/1 Running 0 32s
[root@k8s-master01 ~]# kubectl top node
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
k8s-master01 113m 5% 2034Mi 56%
k8s-master02.example.local 172m 8% 1659Mi 45%
k8s-master03.example.local 180m 9% 1719Mi 47%
k8s-node01.example.local 70m 3% 1002Mi 27%
k8s-node02.example.local 83m 4% 942Mi 26%
k8s-node03.example.local 69m 3% 849Mi 23%
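Besides kubectl top, the aggregated API registration itself can be checked; the v1beta1.metrics.k8s.io APIService applied above should report AVAILABLE as True:
[root@k8s-master01 ~]# kubectl get apiservice v1beta1.metrics.k8s.io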
1.6 Upgrade Dashboard
Official GitHub address: github.com/kubernetes/…
The latest Dashboard release can be found on the official GitHub page.
[root@k8s-master01 ~]# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.5.1/aio/deploy/recommended.yaml
[root@k8s-master01 ~]# vim recommended.yaml
...
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort #add this line
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30005 #add this line
  selector:
    k8s-app: kubernetes-dashboard
...
[root@k8s-master01 ~]# grep "image:" recommended.yaml
image: kubernetesui/dashboard:v2.5.1
image: kubernetesui/metrics-scraper:v1.0.7
[root@k8s-master01 ~]# cat download_dashboard_images.sh
#!/bin/bash
#
#**********************************************************************************************
#Author: Raymond
#QQ: 88563128
#Date: 2022-01-11
#FileName: download_dashboard_images.sh
#URL: raymond.blog.csdn.net
#Description: The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \\033[01;31m"
END='\033[0m'
images=$(awk -F "/" '/image:/{print $NF}' recommended.yaml)
HARBOR_DOMAIN=harbor.raymonds.cc
images_download(){
    ${COLOR}"Start downloading the Dashboard images"${END}
    for i in ${images};do
        docker pull registry.aliyuncs.com/google_containers/$i
        docker tag registry.aliyuncs.com/google_containers/$i ${HARBOR_DOMAIN}/google_containers/$i
        docker rmi registry.aliyuncs.com/google_containers/$i
        docker push ${HARBOR_DOMAIN}/google_containers/$i
    done
    ${COLOR}"Dashboard images downloaded"${END}
}
images_download
[root@k8s-master01 ~]# bash download_dashboard_images.sh
[root@k8s-master01 ~]# docker images |grep -E "(dashboard|metrics-scraper)"
harbor.raymonds.cc/google_containers/dashboard v2.5.1 7fff914c4a61 5 weeks ago 243MB
harbor.raymonds.cc/google_containers/metrics-scraper v1.0.7 7801cfc6d5c0 10 months ago 34.4MB
[root@k8s-master01 ~]# sed -ri 's@(.*image:) kubernetesui(/.*)@\1 harbor.raymonds.cc/google_containers\2@g' recommended.yaml
[root@k8s-master01 ~]# grep "image:" recommended.yaml
image: harbor.raymonds.cc/google_containers/dashboard:v2.5.1
image: harbor.raymonds.cc/google_containers/metrics-scraper:v1.0.7
[root@k8s-master01 ~]# kubectl apply -f recommended.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
Create the administrator user manifest admin.yaml.
[root@k8s-master01 ~]# vim admin.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
[root@k8s-master01 ~]# kubectl apply -f admin.yaml
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
[root@k8s-master01 ~]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Name: admin-user-token-dfj8p
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name: admin-user
kubernetes.io/service-account.uid: 1c9b4c06-82ce-4d4d-bf0d-76b00651927b
Type: kubernetes.io/service-account-token
Data
====
ca.crt: 1099 bytes
namespace: 11 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6Imp0NC0wNGpkcFJFM2NZWDVMSU1KMDhBN3RfM1FOTFJJMzZJVEZja0NGQzAifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWRmajhwIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIxYzliNGMwNi04MmNlLTRkNGQtYmYwZC03NmIwMDY1MTkyN2IiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.BvzSLGX7W6zl4daAS_sZ86s0Y6NsCM_eYTJ7OKH8NARIdqc2WsaGEc7Z91T170sAKK-jq9HNSta7choPfwxT7dx9a2R8D0LU-gx1-jP3SoIbALfvXJUyRHWAtc-kPY53hhbD6XgKJEzsG9MV01knQqBpJn2wKeTlaZxU6SeHTEhe3dFkFiAS_8PiIOcju6J2qViBQLraVNxYND1MVGfTJ4oIs74_lP6BAb_gShWgtvcoKV5alW_MAY5Vz9HLDv3-LNYYP0HLG-No_CZIUMxLF0V3dRrFsU9vayMJlcUp9Pu8JydrILvYzcg8k5kUnoW5mkzYH5y6iND5_Ub3gW5J6g
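With the token above, the dashboard should now be reachable from a browser at https://<any-node-IP>:30005, the NodePort configured earlier. A quick check that the Service exposes that port (a sketch):
[root@k8s-master01 ~]# kubectl get svc kubernetes-dashboard -n kubernetes-dashboard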