19. K8s Operations: Cluster Upgrade -- Upgrading Kubernetes v1.20 Installed from Binary Packages

2. Upgrading Kubernetes Installed from Binary Packages

First, back up the YAML manifests on master01:

[root@k8s-master01 ~]# mkdir bak
[root@k8s-master01 ~]# mv *.yaml bak/
[root@k8s-master01 ~]# ls bak/
admin.yaml  bootstrap.secret.yaml  calico-etcd.yaml  components.yaml  coredns.yaml  recommended.yaml
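
Optionally, take a timestamped snapshot of the cluster configuration as well before touching any binaries. This is only a sketch; the paths assume the binary install layout used in this series, so adjust them to your environment:

# optional pre-upgrade snapshot of configs and unit files
BACKUP_DIR=~/upgrade-backup-$(date +%F)
mkdir -p "${BACKUP_DIR}"
cp -a /etc/kubernetes "${BACKUP_DIR}/kubernetes"
cp -a /usr/lib/systemd/system/kube-*.service "${BACKUP_DIR}/" 2>/dev/null
cp -a /etc/systemd/system/kubelet.service.d "${BACKUP_DIR}/" 2>/dev/null
ls "${BACKUP_DIR}"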

2.1 Upgrade etcd

github.com/kubernetes/…

To demonstrate the etcd upgrade, Kubernetes 1.22 is used as the target version here; check the changelog of the release you are actually upgrading to and decide whether etcd needs to be upgraded at all. Upgrade steps:

1. Back up the etcd data

[root@k8s-etcd01 ~]# export ETCDCTL_API=3

[root@k8s-etcd01 ~]# etcdctl --endpoints="172.31.3.108:2379,172.31.3.109:2379,172.31.3.110:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint status --write-out=table
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|     ENDPOINT      |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| 172.31.3.108:2379 | a9fef56ff96ed75c |  3.4.13 |  9.7 MB |      true |      false |        19 |     160698 |             160698 |        |
| 172.31.3.109:2379 | 8319ef09e8b3d277 |  3.4.13 |  9.7 MB |     false |      false |        19 |     160698 |             160698 |        |
| 172.31.3.110:2379 | 209a1f57c506dba2 |  3.4.13 |  9.5 MB |     false |      false |        19 |     160698 |             160698 |        |
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+

#Back up etcd
[root@k8s-etcd01 ~]# etcdctl --endpoints="172.31.3.108:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem snapshot save etcd_bak.db
{"level":"info","ts":1643011518.0839436,"caller":"snapshot/v3_snapshot.go:119","msg":"created temporary db file","path":"etcd_bak.db.part"}
{"level":"info","ts":"2022-01-24T16:05:18.092+0800","caller":"clientv3/maintenance.go:200","msg":"opened snapshot stream; downloading"}
{"level":"info","ts":1643011518.0921938,"caller":"snapshot/v3_snapshot.go:127","msg":"fetching snapshot","endpoint":"172.31.3.108:2379"}
{"level":"info","ts":"2022-01-24T16:05:18.281+0800","caller":"clientv3/maintenance.go:208","msg":"completed snapshot read; closing"}
{"level":"info","ts":1643011518.2902994,"caller":"snapshot/v3_snapshot.go:142","msg":"fetched snapshot","endpoint":"172.31.3.108:2379","size":"9.7 MB","took":0.206099745}
{"level":"info","ts":1643011518.2903671,"caller":"snapshot/v3_snapshot.go:152","msg":"saved","path":"etcd_bak.db"}
Snapshot saved at etcd_bak.db

[root@k8s-etcd01 ~]# ls
anaconda-ks.cfg  etcd_bak.db  etcd-v3.4.13-linux-amd64.tar.gz
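
Before moving on, it is worth sanity-checking the snapshot file. etcdctl 3.4 provides a snapshot status subcommand (in 3.5 the same check moves to etcdutl); a minimal sketch:

# verify the snapshot is readable and print its hash, revision and size
export ETCDCTL_API=3
etcdctl snapshot status etcd_bak.db --write-out=table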

2. Download the new etcd release

[root@k8s-etcd01 ~]# wget https://github.com/etcd-io/etcd/releases/download/v3.5.0/etcd-v3.5.0-linux-amd64.tar.gz
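
The release page also publishes a SHA256SUMS file, so the tarball can be verified before it is copied to the other members. A quick sketch, assuming the standard etcd release artifacts:

# verify the downloaded tarball against the published checksums
wget https://github.com/etcd-io/etcd/releases/download/v3.5.0/SHA256SUMS
grep etcd-v3.5.0-linux-amd64.tar.gz SHA256SUMS | sha256sum -c -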

3. Stop etcd (one member at a time, starting with etcd02)

[root@k8s-etcd02 ~]# systemctl stop etcd

[root@k8s-etcd01 ~]# etcdctl --endpoints="172.31.3.108:2379,172.31.3.109:2379,172.31.3.110:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint health
{"level":"warn","ts":"2022-01-24T16:09:27.111+0800","caller":"clientv3/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"endpoint://client-a4b03c27-22c8-4a34-843f-cb45439cb810/172.31.3.109:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: all SubConns are in TransientFailure, latest connection error: connection error: desc = \"transport: Error while dialing dial tcp 172.31.3.109:2379: connect: connection refused\""}
172.31.3.108:2379 is healthy: successfully committed proposal: took = 31.172694ms
172.31.3.110:2379 is healthy: successfully committed proposal: took = 31.840346ms
172.31.3.109:2379 is unhealthy: failed to commit proposal: context deadline exceeded
Error: unhealthy cluster

4. Replace etcd and etcdctl

[root@k8s-etcd01 ~]# tar xf etcd-v3.5.0-linux-amd64.tar.gz

[root@k8s-etcd01 ~]# cd etcd-v3.5.0-linux-amd64
[root@k8s-etcd01 etcd-v3.5.0-linux-amd64]# ls
Documentation  etcd  etcdctl  etcdutl  README-etcdctl.md  README-etcdutl.md  README.md  READMEv2-etcdctl.md

[root@k8s-etcd02 ~]# which etcd
/usr/local/bin/etcd

[root@k8s-etcd02 ~]# cd /usr/local/bin/
[root@k8s-etcd02 bin]# ls
etcd  etcdctl
[root@k8s-etcd02 bin]# mkdir bak
[root@k8s-etcd02 bin]# ls
bak  etcd  etcdctl
[root@k8s-etcd02 bin]# cp -r etcd* bak/
[root@k8s-etcd02 bin]# ls bak/
etcd  etcdctl

[root@k8s-etcd01 etcd-v3.5.0-linux-amd64]# scp etcd etcdctl k8s-etcd02:/usr/local/bin/
etcd                                                                                                         100%   22MB 125.1MB/s   00:00    
etcdctl                                                                                                      100%   17MB 127.0MB/s   00:0

[root@k8s-etcd02 bin]# ls
bak  etcd  etcdctl
[root@k8s-etcd02 bin]# etcdctl version
etcdctl version: 3.5.0
API version: 3.5

5. Start etcd again

[root@k8s-etcd02 bin]# systemctl restart etcd

[root@k8s-etcd01 etcd-v3.5.0-linux-amd64]# tail -f /var/log/messages
Jan 24 16:21:43 k8s-etcd01 etcd: the local etcd version 3.4.13 is not up-to-date
Jan 24 16:21:43 k8s-etcd01 etcd: member 8319ef09e8b3d277 has a higher version 3.5.0
Jan 24 16:21:47 k8s-etcd01 etcd: the local etcd version 3.4.13 is not up-to-date
Jan 24 16:21:47 k8s-etcd01 etcd: member 8319ef09e8b3d277 has a higher version 3.5.0

[root@k8s-etcd01 etcd-v3.5.0-linux-amd64]# etcdctl --endpoints="172.31.3.108:2379,172.31.3.109:2379,172.31.3.110:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint health
172.31.3.108:2379 is healthy: successfully committed proposal: took = 13.54818ms
172.31.3.110:2379 is healthy: successfully committed proposal: took = 13.949242ms
172.31.3.109:2379 is healthy: successfully committed proposal: took = 13.726865ms

Upgrade etcd03

[root@k8s-etcd03 ~]# systemctl stop etcd

[root@k8s-etcd01 etcd-v3.5.0-linux-amd64]# etcdctl --endpoints="172.31.3.108:2379,172.31.3.109:2379,172.31.3.110:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint health
{"level":"warn","ts":"2022-01-24T16:24:27.731+0800","caller":"clientv3/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"endpoint://client-140e9a95-515a-46da-9d15-0bd2f3db0668/172.31.3.110:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: all SubConns are in TransientFailure, latest connection error: connection error: desc = \"transport: Error while dialing dial tcp 172.31.3.110:2379: connect: connection refused\""}
172.31.3.108:2379 is healthy: successfully committed proposal: took = 11.766067ms
172.31.3.109:2379 is healthy: successfully committed proposal: took = 11.671741ms
172.31.3.110:2379 is unhealthy: failed to commit proposal: context deadline exceeded
Error: unhealthy cluster

[root@k8s-etcd01 etcd-v3.5.0-linux-amd64]# scp etcd etcdctl k8s-etcd03:/usr/local/bin/
etcd                                                                                                         100%   22MB 109.4MB/s   00:00    
etcdctl                                                                                                      100%   17MB 137.5MB/s   00:00 

[root@k8s-etcd03 ~]# systemctl restart etcd

[root@k8s-etcd01 etcd-v3.5.0-linux-amd64]# etcdctl --endpoints="172.31.3.108:2379,172.31.3.109:2379,172.31.3.110:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint health
172.31.3.108:2379 is healthy: successfully committed proposal: took = 13.231244ms
172.31.3.110:2379 is healthy: successfully committed proposal: took = 13.904006ms
172.31.3.109:2379 is healthy: successfully committed proposal: took = 13.758387ms

Upgrade etcd01

[root@k8s-etcd01 etcd-v3.5.0-linux-amd64]# systemctl stop etcd

[root@k8s-etcd01 etcd-v3.5.0-linux-amd64]# cp etcd etcdctl /usr/local/bin/
cp: overwrite ‘/usr/local/bin/etcd’? y
cp: overwrite ‘/usr/local/bin/etcdctl’? y

[root@k8s-etcd01 etcd-v3.5.0-linux-amd64]# systemctl restart etcd

[root@k8s-etcd01 etcd-v3.5.0-linux-amd64]# etcdctl --endpoints="172.31.3.108:2379,172.31.3.109:2379,172.31.3.110:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint health
172.31.3.109:2379 is healthy: successfully committed proposal: took = 18.256612ms
172.31.3.110:2379 is healthy: successfully committed proposal: took = 18.421631ms
172.31.3.108:2379 is healthy: successfully committed proposal: took = 19.588729ms

[root@k8s-etcd01 etcd-v3.5.0-linux-amd64]# etcdctl --endpoints="172.31.3.108:2379,172.31.3.109:2379,172.31.3.110:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint status --write-out=table
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|     ENDPOINT      |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| 172.31.3.108:2379 | a9fef56ff96ed75c |   3.5.0 |  9.7 MB |     false |      false |        20 |     166066 |             166066 |        |
| 172.31.3.109:2379 | 8319ef09e8b3d277 |   3.5.0 |  9.7 MB |      true |      false |        20 |     166066 |             166066 |        |
| 172.31.3.110:2379 | 209a1f57c506dba2 |   3.5.0 |  9.5 MB |     false |      false |        20 |     166066 |             166066 |        |
+-------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+

2.2 Upgrade the master nodes

Upgrade master01

#Install socat on ha01 and ha02
#CentOS
[root@k8s-ha01 ~]# yum -y install socat

#Ubuntu
[root@k8s-ha01 ~]# apt -y install socat
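
socat is only used here to talk to the HAProxy admin socket. Assuming haproxy.cfg exposes a stats socket at /var/lib/haproxy/haproxy.sock with admin level (as configured earlier in this series), you can confirm the runtime API responds before relying on it to drain masters; a small sketch:

# list backend servers and their admin/operational state via the HAProxy runtime API
echo "show servers state" | socat stdio /var/lib/haproxy/haproxy.sock
# or a condensed view of just the kubernetes-6443 backend
echo "show stat" | socat stdio /var/lib/haproxy/haproxy.sock | grep kubernetes-6443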

#Take master01 offline in the load balancer
[root@k8s-master01 ~]# ssh -o StrictHostKeyChecking=no root@k8s-lb 'echo "disable server kubernetes-6443/172.31.3.101" | socat stdio /var/lib/haproxy/haproxy.sock'

[root@k8s-master01 ~]# wget https://dl.k8s.io/v1.22.6/kubernetes-server-linux-amd64.tar.gz

[root@k8s-master01 ~]# tar xf kubernetes-server-linux-amd64.tar.gz

[root@k8s-master01 ~]# cd kubernetes/
[root@k8s-master01 kubernetes]# ls
addons  kubernetes-src.tar.gz  LICENSES  server
[root@k8s-master01 kubernetes]# cd server/bin/

[root@k8s-master01 bin]# ls
apiextensions-apiserver  kube-apiserver.docker_tag           kube-controller-manager.tar  kube-proxy             kube-scheduler.docker_tag
kubeadm                  kube-apiserver.tar                  kubectl                      kube-proxy.docker_tag  kube-scheduler.tar
kube-aggregator          kube-controller-manager             kubectl-convert              kube-proxy.tar         mounter
kube-apiserver           kube-controller-manager.docker_tag  kubelet                      kube-scheduler

[root@k8s-master01 bin]# systemctl stop kube-apiserver

[root@k8s-master01 bin]# which kube-apiserver
/usr/local/bin/kube-apiserver

[root@k8s-master01 bin]# cd /usr/local/bin/
[root@k8s-master01 bin]# ls
cfssl  cfssljson  kube-apiserver  kube-controller-manager  kubectl  kubelet  kube-proxy  kube-scheduler
[root@k8s-master01 bin]# mkdir bak
[root@k8s-master01 bin]# cp kube* bak/
[root@k8s-master01 bin]# ls bak/
kube-apiserver  kube-controller-manager  kubectl  kubelet  kube-proxy  kube-scheduler

[root@k8s-master01 ~]# cd /root/kubernetes/server/bin/

[root@k8s-master01 bin]# cp kube-apiserver /usr/local/bin/
cp: overwrite ‘/usr/local/bin/kube-apiserver’? y

[root@k8s-master01 bin]# systemctl restart kube-apiserver

[root@k8s-master01 bin]# systemctl status kube-apiserver
● kube-apiserver.service - Kubernetes API Server
   Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; enabled; vendor preset: disabled)
   Active: active (running) since Mon 2022-01-24 16:59:09 CST; 34s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 2909 (kube-apiserver)
    Tasks: 10
   Memory: 268.1M
   CGroup: /system.slice/kube-apiserver.service
           └─2909 /usr/local/bin/kube-apiserver --v=2 --logtostderr=true --allow-privileged=true --bind-address=0.0.0.0 --secure-port=6443 -...

Jan 24 16:59:12 k8s-master01.example.local kube-apiserver[2909]: I0124 16:59:12.251056    2909 storage_rbac.go:236] created clusterrole...oller
Jan 24 16:59:12 k8s-master01.example.local kube-apiserver[2909]: I0124 16:59:12.257310    2909 healthz.go:257] poststarthook/rbac/boots...eadyz
Jan 24 16:59:12 k8s-master01.example.local kube-apiserver[2909]: [-]poststarthook/rbac/bootstrap-roles failed: not finished
Jan 24 16:59:12 k8s-master01.example.local kube-apiserver[2909]: I0124 16:59:12.332019    2909 storage_rbac.go:266] created clusterrole...oller
Jan 24 16:59:12 k8s-master01.example.local kube-apiserver[2909]: I0124 16:59:12.355031    2909 healthz.go:257] poststarthook/rbac/boots...eadyz
Jan 24 16:59:12 k8s-master01.example.local kube-apiserver[2909]: [-]poststarthook/rbac/bootstrap-roles failed: not finished
Jan 24 16:59:12 k8s-master01.example.local kube-apiserver[2909]: I0124 16:59:12.391243    2909 storage_rbac.go:266] created clusterrole...oller
Jan 24 16:59:12 k8s-master01.example.local kube-apiserver[2909]: W0124 16:59:12.476694    2909 lease.go:233] Resetting endpoints for ma....103]
Jan 24 16:59:12 k8s-master01.example.local kube-apiserver[2909]: I0124 16:59:12.477951    2909 controller.go:611] quota admission added...oints
Jan 24 16:59:12 k8s-master01.example.local kube-apiserver[2909]: I0124 16:59:12.489294    2909 controller.go:611] quota admission added...8s.io
Hint: Some lines were ellipsized, use -l to show in full.

[root@k8s-master01 bin]# kube-apiserver --version
Kubernetes v1.22.6

[root@k8s-master01 bin]# systemctl stop kube-controller-manager kube-scheduler

[root@k8s-master01 bin]# cp kube-controller-manager kube-scheduler /usr/local/bin/
cp: overwrite ‘/usr/local/bin/kube-controller-manager’? y
cp: overwrite ‘/usr/local/bin/kube-scheduler’? y

[root@k8s-master01 bin]# systemctl restart kube-controller-manager kube-scheduler

[root@k8s-master01 bin]# systemctl status kube-controller-manager kube-scheduler
● kube-controller-manager.service - Kubernetes Controller Manager
   Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)
   Active: active (running) since Mon 2022-01-24 17:01:55 CST; 49s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 5615 (kube-controller)
    Tasks: 7
   Memory: 22.0M
   CGroup: /system.slice/kube-controller-manager.service
           └─5615 /usr/local/bin/kube-controller-manager --v=2 --logtostderr=true --address=127.0.0.1 --root-ca-file=/etc/kubernetes/pki/ca....

Jan 24 17:01:56 k8s-master01.example.local kube-controller-manager[5615]: I0124 17:01:56.836177    5615 dynamic_cafile_content.go:117] "...pem"
Jan 24 17:01:56 k8s-master01.example.local kube-controller-manager[5615]: W0124 17:01:56.836195    5615 authorization.go:193] No authori...ork.
Jan 24 17:01:56 k8s-master01.example.local kube-controller-manager[5615]: I0124 17:01:56.836230    5615 controllermanager.go:186] Versio...22.6
Jan 24 17:01:56 k8s-master01.example.local kube-controller-manager[5615]: I0124 17:01:56.836750    5615 tlsconfig.go:178] "Loaded client CA"...
Jan 24 17:01:56 k8s-master01.example.local kube-controller-manager[5615]: I0124 17:01:56.836877    5615 tlsconfig.go:200] "Loaded serving ce...
Jan 24 17:01:56 k8s-master01.example.local kube-controller-manager[5615]: I0124 17:01:56.836976    5615 named_certificates.go:53] "Loaded SN...
Jan 24 17:01:56 k8s-master01.example.local kube-controller-manager[5615]: I0124 17:01:56.836993    5615 secure_serving.go:200] Serving s...0257
Jan 24 17:01:56 k8s-master01.example.local kube-controller-manager[5615]: I0124 17:01:56.837209    5615 leaderelection.go:248] attemptin...r...
Jan 24 17:01:56 k8s-master01.example.local kube-controller-manager[5615]: I0124 17:01:56.837486    5615 dynamic_cafile_content.go:155] "...pem"
Jan 24 17:01:56 k8s-master01.example.local kube-controller-manager[5615]: I0124 17:01:56.837554    5615 tlsconfig.go:240] "Starting Dyna...ler"

● kube-scheduler.service - Kubernetes Scheduler
   Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; enabled; vendor preset: disabled)
   Active: active (running) since Mon 2022-01-24 17:01:55 CST; 49s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 5616 (kube-scheduler)
    Tasks: 9
   Memory: 17.3M
   CGroup: /system.slice/kube-scheduler.service
           └─5616 /usr/local/bin/kube-scheduler --v=2 --logtostderr=true --address=127.0.0.1 --leader-elect=true --kubeconfig=/etc/kubernete...

Jan 24 17:01:57 k8s-master01.example.local kube-scheduler[5616]: E0124 17:01:57.671777    5616 reflector.go:138] k8s.io/client-go/infor...ource
Jan 24 17:01:58 k8s-master01.example.local kube-scheduler[5616]: E0124 17:01:58.124841    5616 reflector.go:138] k8s.io/client-go/infor...ource
Jan 24 17:01:59 k8s-master01.example.local kube-scheduler[5616]: E0124 17:01:59.482607    5616 reflector.go:138] k8s.io/client-go/infor...ource
Jan 24 17:02:00 k8s-master01.example.local kube-scheduler[5616]: E0124 17:02:00.780605    5616 reflector.go:138] k8s.io/client-go/infor...ource
Jan 24 17:02:04 k8s-master01.example.local kube-scheduler[5616]: E0124 17:02:04.985034    5616 reflector.go:138] k8s.io/client-go/infor...ource
Jan 24 17:02:05 k8s-master01.example.local kube-scheduler[5616]: E0124 17:02:05.902557    5616 reflector.go:138] k8s.io/client-go/infor...ource
Jan 24 17:02:14 k8s-master01.example.local kube-scheduler[5616]: E0124 17:02:14.080397    5616 reflector.go:138] k8s.io/client-go/infor...ource
Jan 24 17:02:14 k8s-master01.example.local kube-scheduler[5616]: E0124 17:02:14.239113    5616 reflector.go:138] k8s.io/client-go/infor...ource
Jan 24 17:02:38 k8s-master01.example.local kube-scheduler[5616]: E0124 17:02:38.982779    5616 reflector.go:138] k8s.io/client-go/infor...ource
Jan 24 17:02:39 k8s-master01.example.local kube-scheduler[5616]: E0124 17:02:39.322300    5616 reflector.go:138] k8s.io/client-go/infor...ource
Hint: Some lines were ellipsized, use -l to show in full.

[root@k8s-master01 bin]# kube-controller-manager --version
Kubernetes v1.22.6
[root@k8s-master01 bin]# kube-scheduler --version
Kubernetes v1.22.6

[root@k8s-master01 bin]# systemctl stop kube-proxy

[root@k8s-master01 bin]# cp kube-proxy /usr/local/bin/
cp: overwrite ‘/usr/local/bin/kube-proxy’? y

[root@k8s-master01 bin]# systemctl restart kube-proxy
[root@k8s-master01 bin]# systemctl status kube-proxy
● kube-proxy.service - Kubernetes Kube Proxy
   Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Mon 2022-01-24 17:05:15 CST; 6s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 8709 (kube-proxy)
    Tasks: 7
   Memory: 11.9M
   CGroup: /system.slice/kube-proxy.service
           └─8709 /usr/local/bin/kube-proxy --config=/etc/kubernetes/kube-proxy.conf --v=2

Jan 24 17:05:15 k8s-master01.example.local kube-proxy[8709]: I0124 17:05:15.781290    8709 service.go:301] Service kubernetes-dashboar... ports
Jan 24 17:05:15 k8s-master01.example.local kube-proxy[8709]: I0124 17:05:15.781436    8709 service.go:301] Service default/kubernetes ... ports
Jan 24 17:05:15 k8s-master01.example.local kube-proxy[8709]: I0124 17:05:15.781517    8709 service.go:301] Service kube-system/kube-dn... ports
Jan 24 17:05:15 k8s-master01.example.local kube-proxy[8709]: I0124 17:05:15.781579    8709 service.go:301] Service kube-system/metrics... ports
Jan 24 17:05:15 k8s-master01.example.local kube-proxy[8709]: I0124 17:05:15.781590    8709 service.go:301] Service kubernetes-dashboar... ports
Jan 24 17:05:15 k8s-master01.example.local kube-proxy[8709]: I0124 17:05:15.877404    8709 shared_informer.go:247] Caches are synced f...config
Jan 24 17:05:15 k8s-master01.example.local kube-proxy[8709]: I0124 17:05:15.877513    8709 proxier.go:1018] "Not syncing ipvs rules un...aster"
Jan 24 17:05:15 k8s-master01.example.local kube-proxy[8709]: I0124 17:05:15.877544    8709 proxier.go:1018] "Not syncing ipvs rules un...aster"
Jan 24 17:05:17 k8s-master01.example.local kube-proxy[8709]: E0124 17:05:17.320727    8709 reflector.go:138] k8s.io/client-go/informer...source
Jan 24 17:05:19 k8s-master01.example.local kube-proxy[8709]: E0124 17:05:19.438765    8709 reflector.go:138] k8s.io/client-go/informer...source
Hint: Some lines were ellipsized, use -l to show in full.

[root@k8s-master01 bin]# kube-proxy --version
Kubernetes v1.22.6

[root@k8s-master01 bin]# cp kubectl /usr/local/bin/
cp: overwrite ‘/usr/local/bin/kubectl’? y

[root@k8s-master01 bin]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE                         ERROR
scheduler            Healthy   ok                              
controller-manager   Healthy   ok                              
etcd-1               Healthy   {"health":"true","reason":""}   
etcd-0               Healthy   {"health":"true","reason":""}   
etcd-2               Healthy   {"health":"true","reason":""}   
[root@k8s-master01 bin]# kubectl cluster-info 
Kubernetes control plane is running at https://172.31.3.188:6443
CoreDNS is running at https://172.31.3.188:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
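
Besides kubectl get cs (which is deprecated), the apiserver's aggregated health endpoints give a more detailed view of the component you just upgraded; a quick check:

# detailed readiness/liveness of the apiserver, including post-start hooks
kubectl get --raw='/readyz?verbose'
kubectl get --raw='/livez?verbose'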

Upgrade kubelet

[root@k8s-master01 bin]# cd

[root@k8s-master01 ~]# vim download_pause_images_3.5.sh
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-11
#FileName:      download_pause_images.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \\033[01;31m"
END='\033[0m'

PAUSE_VERSION=3.5
HARBOR_DOMAIN=harbor.raymonds.cc

images_download(){
    ${COLOR}"开始下载Pause镜像"${END}
        docker pull registry.aliyuncs.com/google_containers/pause:${PAUSE_VERSION}
        docker tag registry.aliyuncs.com/google_containers/pause:${PAUSE_VERSION} ${HARBOR_DOMAIN}/google_containers/pause:${PAUSE_VERSION}
        docker rmi registry.aliyuncs.com/google_containers/pause:${PAUSE_VERSION}
        docker push ${HARBOR_DOMAIN}/google_containers/pause:${PAUSE_VERSION}
    ${COLOR}"Pause镜像下载完成"${END}
}

images_download

[root@k8s-master01 ~]# bash download_pause_images_3.5.sh

[root@k8s-master01 ~]# systemctl stop kubelet

[root@k8s-master01 ~]# vim /etc/systemd/system/kubelet.service.d/10-kubelet.conf
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml --pod-infra-container-image=harbor.raymonds.cc/google_containers/pause:3.5" #把harbor仓库改成自己的私有仓库地址
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' "
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS
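
After editing the drop-in it is easy to mistype the pause image path or forget which file systemd reads. A quick way to confirm the drop-in is in place and the image is the one you expect (a small sketch; run systemctl daemon-reload before restarting, as done below):

# show the effective kubelet unit including the 10-kubelet.conf drop-in
systemctl cat kubelet | grep -E 'pod-infra-container-image|ExecStart'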

[root@k8s-master01 ~]# cd kubernetes/server/bin/

[root@k8s-master01 bin]# cp kubelet /usr/local/bin/
cp: overwrite ‘/usr/local/bin/kubelet’? y

[root@k8s-master01 bin]# systemctl daemon-reload && systemctl restart kubelet

[root@k8s-master01 bin]# kubelet --version
Kubernetes v1.22.6

[root@k8s-master01 bin]# kubectl get nodes
NAME                         STATUS   ROLES    AGE    VERSION
k8s-master01.example.local   Ready    <none>   5h8m   v1.22.6
k8s-master02.example.local   Ready    <none>   5h8m   v1.20.14
k8s-master03.example.local   Ready    <none>   5h8m   v1.20.14
k8s-node01.example.local     Ready    <none>   5h8m   v1.20.14
k8s-node02.example.local     Ready    <none>   5h8m   v1.20.14
k8s-node03.example.local     Ready    <none>   5h8m   v1.20.14

#Bring master01 back online in the load balancer
[root@k8s-master01 bin]# ssh -o StrictHostKeyChecking=no root@k8s-lb 'echo "enable server kubernetes-6443/172.31.3.101" | socat stdio /var/lib/haproxy/haproxy.sock'

Upgrade master02

#Take master02 offline in the load balancer
[root@k8s-master01 bin]# ssh -o StrictHostKeyChecking=no root@k8s-lb 'echo "disable server kubernetes-6443/172.31.3.102" | socat stdio /var/lib/haproxy/haproxy.sock'

[root@k8s-master02 ~]# systemctl stop kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubelet

[root@k8s-master01 bin]# scp kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubectl kubelet k8s-master02:/usr/local/bin
[root@k8s-master01 bin]# scp /etc/systemd/system/kubelet.service.d/10-kubelet.conf k8s-master02:/etc/systemd/system/kubelet.service.d/

[root@k8s-master02 ~]# systemctl daemon-reload && systemctl restart kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubelet

[root@k8s-master02 ~]# systemctl status kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubelet
● kube-apiserver.service - Kubernetes API Server
   Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; enabled; vendor preset: disabled)
   Active: active (running) since Wed 2022-02-09 19:08:07 CST; 37s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 10006 (kube-apiserver)
    Tasks: 8
   Memory: 275.6M
   CGroup: /system.slice/kube-apiserver.service
           └─10006 /usr/local/bin/kube-apiserver --v=2 --logtostderr=true --allow-privileged=true --bind-address=0.0.0.0 --secure-port=6443 ...

Feb 09 19:08:10 k8s-master02.example.local kube-apiserver[10006]: I0209 19:08:10.937264   10006 aggregator.go:234] Finished OpenAPI spe...221ms
Feb 09 19:08:11 k8s-master02.example.local kube-apiserver[10006]: I0209 19:08:11.014246   10006 healthz.go:257] poststarthook/rbac/boot...eadyz
Feb 09 19:08:11 k8s-master02.example.local kube-apiserver[10006]: [-]poststarthook/rbac/bootstrap-roles failed: not finished
Feb 09 19:08:11 k8s-master02.example.local kube-apiserver[10006]: I0209 19:08:11.124788   10006 healthz.go:257] poststarthook/rbac/boot...eadyz
Feb 09 19:08:11 k8s-master02.example.local kube-apiserver[10006]: [-]poststarthook/rbac/bootstrap-roles failed: not finished
Feb 09 19:08:11 k8s-master02.example.local kube-apiserver[10006]: I0209 19:08:11.215623   10006 healthz.go:257] poststarthook/rbac/boot...eadyz
Feb 09 19:08:11 k8s-master02.example.local kube-apiserver[10006]: [-]poststarthook/rbac/bootstrap-roles failed: not finished
Feb 09 19:08:30 k8s-master02.example.local kube-apiserver[10006]: I0209 19:08:30.484660   10006 cacher.go:799] cacher (*coordination.Le...nnel.
Feb 09 19:08:30 k8s-master02.example.local kube-apiserver[10006]: I0209 19:08:30.484699   10006 cacher.go:799] cacher (*coordination.Le...nnel.
Feb 09 19:08:30 k8s-master02.example.local kube-apiserver[10006]: I0209 19:08:30.484673   10006 cacher.go:799] cacher (*coordination.Le...nnel.

● kube-controller-manager.service - Kubernetes Controller Manager
   Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)
   Active: active (running) since Wed 2022-02-09 19:08:06 CST; 38s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 9717 (kube-controller)
    Tasks: 5
   Memory: 23.8M
   CGroup: /system.slice/kube-controller-manager.service
           └─9717 /usr/local/bin/kube-controller-manager --v=2 --logtostderr=true --address=127.0.0.1 --root-ca-file=/etc/kubernetes/pki/ca....

Feb 09 19:08:07 k8s-master02.example.local kube-controller-manager[9717]: I0209 19:08:07.011652    9717 dynamic_cafile_content.go:117] "...pem"
Feb 09 19:08:07 k8s-master02.example.local kube-controller-manager[9717]: W0209 19:08:07.011677    9717 authorization.go:193] No authori...ork.
Feb 09 19:08:07 k8s-master02.example.local kube-controller-manager[9717]: I0209 19:08:07.011692    9717 controllermanager.go:186] Versio...22.6
Feb 09 19:08:07 k8s-master02.example.local kube-controller-manager[9717]: I0209 19:08:07.012214    9717 tlsconfig.go:178] "Loaded client CA"...
Feb 09 19:08:07 k8s-master02.example.local kube-controller-manager[9717]: I0209 19:08:07.012336    9717 tlsconfig.go:200] "Loaded serving ce...
Feb 09 19:08:07 k8s-master02.example.local kube-controller-manager[9717]: I0209 19:08:07.012445    9717 named_certificates.go:53] "Loaded SN...
Feb 09 19:08:07 k8s-master02.example.local kube-controller-manager[9717]: I0209 19:08:07.012462    9717 secure_serving.go:200] Serving s...0257
Feb 09 19:08:07 k8s-master02.example.local kube-controller-manager[9717]: I0209 19:08:07.012703    9717 leaderelection.go:248] attemptin...r...
Feb 09 19:08:07 k8s-master02.example.local kube-controller-manager[9717]: I0209 19:08:07.012992    9717 dynamic_cafile_content.go:155] "...pem"
Feb 09 19:08:07 k8s-master02.example.local kube-controller-manager[9717]: I0209 19:08:07.013024    9717 tlsconfig.go:240] "Starting Dyna...ler"

● kube-scheduler.service - Kubernetes Scheduler
   Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; enabled; vendor preset: disabled)
   Active: active (running) since Wed 2022-02-09 19:08:06 CST; 38s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 9723 (kube-scheduler)
    Tasks: 8
   Memory: 18.6M
   CGroup: /system.slice/kube-scheduler.service
           └─9723 /usr/local/bin/kube-scheduler --v=2 --logtostderr=true --address=127.0.0.1 --leader-elect=true --kubeconfig=/etc/kubernete...

Feb 09 19:08:07 k8s-master02.example.local kube-scheduler[9723]: I0209 19:08:07.343444    9723 node_tree.go:65] Added node "k8s-node03....eTree
Feb 09 19:08:08 k8s-master02.example.local kube-scheduler[9723]: E0209 19:08:08.525353    9723 reflector.go:138] k8s.io/client-go/infor...ource
Feb 09 19:08:08 k8s-master02.example.local kube-scheduler[9723]: E0209 19:08:08.867445    9723 reflector.go:138] k8s.io/client-go/infor...ource
Feb 09 19:08:10 k8s-master02.example.local kube-scheduler[9723]: E0209 19:08:10.814517    9723 reflector.go:138] k8s.io/client-go/infor...ource
Feb 09 19:08:10 k8s-master02.example.local kube-scheduler[9723]: E0209 19:08:10.843123    9723 reflector.go:138] k8s.io/client-go/infor...ource
Feb 09 19:08:15 k8s-master02.example.local kube-scheduler[9723]: E0209 19:08:15.070085    9723 reflector.go:138] k8s.io/client-go/infor...ource
Feb 09 19:08:16 k8s-master02.example.local kube-scheduler[9723]: E0209 19:08:16.511051    9723 reflector.go:138] k8s.io/client-go/infor...ource
Feb 09 19:08:23 k8s-master02.example.local kube-scheduler[9723]: E0209 19:08:23.703674    9723 reflector.go:138] k8s.io/client-go/infor...ource
Feb 09 19:08:25 k8s-master02.example.local kube-scheduler[9723]: E0209 19:08:25.306890    9723 reflector.go:138] k8s.io/client-go/infor...ource
Feb 09 19:08:41 k8s-master02.example.local kube-scheduler[9723]: E0209 19:08:41.005867    9723 reflector.go:138] k8s.io/client-go/infor...ource

● kube-proxy.service - Kubernetes Kube Proxy
   Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Wed 2022-02-09 19:08:06 CST; 38s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 9729 (kube-proxy)
    Tasks: 5
   Memory: 13.5M
   CGroup: /system.slice/kube-proxy.service
           └─9729 /usr/local/bin/kube-proxy --config=/etc/kubernetes/kube-proxy.conf --v=2

Feb 09 19:08:06 k8s-master02.example.local kube-proxy[9729]: I0209 19:08:06.600188    9729 shared_informer.go:247] Caches are synced f...config
Feb 09 19:08:06 k8s-master02.example.local kube-proxy[9729]: I0209 19:08:06.600259    9729 proxier.go:1018] "Not syncing ipvs rules un...aster"
Feb 09 19:08:06 k8s-master02.example.local kube-proxy[9729]: I0209 19:08:06.600287    9729 proxier.go:1018] "Not syncing ipvs rules un...aster"
Feb 09 19:08:07 k8s-master02.example.local kube-proxy[9729]: E0209 19:08:07.843102    9729 reflector.go:138] k8s.io/client-go/informer...source
Feb 09 19:08:09 k8s-master02.example.local kube-proxy[9729]: E0209 19:08:09.449657    9729 reflector.go:138] k8s.io/client-go/informer...source
Feb 09 19:08:14 k8s-master02.example.local kube-proxy[9729]: E0209 19:08:14.550781    9729 reflector.go:138] k8s.io/client-go/informer...source
Feb 09 19:08:23 k8s-master02.example.local kube-proxy[9729]: E0209 19:08:23.963455    9729 reflector.go:138] k8s.io/client-go/informer...source
Feb 09 19:08:36 k8s-master02.example.local kube-proxy[9729]: I0209 19:08:36.498478    9729 proxier.go:1018] "Not syncing ipvs rules un...aster"
Feb 09 19:08:36 k8s-master02.example.local kube-proxy[9729]: I0209 19:08:36.498478    9729 proxier.go:1018] "Not syncing ipvs rules un...aster"
Feb 09 19:08:42 k8s-master02.example.local kube-proxy[9729]: E0209 19:08:42.989722    9729 reflector.go:138] k8s.io/client-go/informer...source

● kubelet.service - Kubernetes Kubelet
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
  Drop-In: /etc/systemd/system/kubelet.service.d
           └─10-kubelet.conf
   Active: active (running) since Wed 2022-02-09 19:08:06 CST; 38s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 9736 (kubelet)
    Tasks: 13
   Memory: 38.5M
   CGroup: /system.slice/kubelet.service
           └─9736 /usr/local/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/ku...

Feb 09 19:08:08 k8s-master02.example.local kubelet[9736]: I0209 19:08:08.148610    9736 reconciler.go:224] "operationExecutor.VerifyControll...
Feb 09 19:08:08 k8s-master02.example.local kubelet[9736]: I0209 19:08:08.148625    9736 reconciler.go:224] "operationExecutor.VerifyCo...a\") "
Feb 09 19:08:08 k8s-master02.example.local kubelet[9736]: I0209 19:08:08.148642    9736 reconciler.go:224] "operationExecutor.VerifyControll...
Feb 09 19:08:08 k8s-master02.example.local kubelet[9736]: I0209 19:08:08.148655    9736 reconciler.go:224] "operationExecutor.VerifyControll...
Feb 09 19:08:08 k8s-master02.example.local kubelet[9736]: I0209 19:08:08.148670    9736 reconciler.go:224] "operationExecutor.VerifyControll...
Feb 09 19:08:08 k8s-master02.example.local kubelet[9736]: I0209 19:08:08.148685    9736 reconciler.go:224] "operationExecutor.VerifyCo...a\") "
Feb 09 19:08:08 k8s-master02.example.local kubelet[9736]: I0209 19:08:08.148703    9736 reconciler.go:224] "operationExecutor.VerifyControll...
Feb 09 19:08:08 k8s-master02.example.local kubelet[9736]: I0209 19:08:08.148717    9736 reconciler.go:224] "operationExecutor.VerifyControll...
Feb 09 19:08:08 k8s-master02.example.local kubelet[9736]: I0209 19:08:08.148724    9736 reconciler.go:157] "Reconciler: start to sync state"
Feb 09 19:08:09 k8s-master02.example.local kubelet[9736]: I0209 19:08:09.893482    9736 prober_manager.go:255] "Failed to trigger a ma...iness"
Hint: Some lines were ellipsized, use -l to show in full.

[root@k8s-master02 ~]# kube-apiserver --version
Kubernetes v1.22.6
[root@k8s-master02 ~]# kube-controller-manager --version
Kubernetes v1.22.6
[root@k8s-master02 ~]# kube-scheduler --version
Kubernetes v1.22.6
[root@k8s-master02 ~]# kube-proxy --version
Kubernetes v1.22.6
[root@k8s-master02 ~]# kubectl version
Client Version: version.Info{Major:"1", Minor:"22", GitVersion:"v1.22.6", GitCommit:"f59f5c2fda36e4036b49ec027e556a15456108f0", GitTreeState:"clean", BuildDate:"2022-01-19T17:33:06Z", GoVersion:"go1.16.12", Compiler:"gc", Platform:"linux/amd64"}
The connection to the server localhost:8080 was refused - did you specify the right host or port?

[root@k8s-master02 ~]# kubelet --version
Kubernetes v1.22.6
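
The "localhost:8080 was refused" message only means kubectl on master02 has no kubeconfig in its default location; the server-side binaries are fine. If you also want kubectl to work on this node, point it at the admin kubeconfig. A sketch, assuming the /etc/kubernetes/admin.kubeconfig generated earlier in this series (adjust the path to wherever your admin kubeconfig lives):

# optional: let kubectl on master02 talk to the cluster
export KUBECONFIG=/etc/kubernetes/admin.kubeconfig
kubectl version --short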

[root@k8s-master01 bin]# kubectl get nodes
NAME                         STATUS   ROLES    AGE     VERSION
k8s-master01.example.local   Ready    <none>   5h14m   v1.22.6
k8s-master02.example.local   Ready    <none>   5h14m   v1.22.6
k8s-master03.example.local   Ready    <none>   5h14m   v1.20.14
k8s-node01.example.local     Ready    <none>   5h14m   v1.20.14
k8s-node02.example.local     Ready    <none>   5h14m   v1.20.14
k8s-node03.example.local     Ready    <none>   5h14m   v1.20.14

#Bring master02 back online in the load balancer
[root@k8s-master01 bin]# ssh -o StrictHostKeyChecking=no root@k8s-lb 'echo "enable server kubernetes-6443/172.31.3.102" | socat stdio /var/lib/haproxy/haproxy.sock'

Upgrade master03

#Take master03 offline in the load balancer
[root@k8s-master01 bin]# ssh -o StrictHostKeyChecking=no root@k8s-lb 'echo "disable server kubernetes-6443/172.31.3.103" | socat stdio /var/lib/haproxy/haproxy.sock'

[root@k8s-master03 ~]# systemctl stop kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubelet

[root@k8s-master01 bin]# scp kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubectl kubelet k8s-master03:/usr/local/bin
[root@k8s-master01 bin]# scp /etc/systemd/system/kubelet.service.d/10-kubelet.conf k8s-master03:/etc/systemd/system/kubelet.service.d/

[root@k8s-master03 ~]# systemctl daemon-reload && systemctl restart kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubelet
[root@k8s-master03 ~]# systemctl status kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubelet
● kube-apiserver.service - Kubernetes API Server
   Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; enabled; vendor preset: disabled)
   Active: active (running) since Wed 2022-02-09 19:12:41 CST; 11s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 14404 (kube-apiserver)
    Tasks: 8
   Memory: 277.6M
   CGroup: /system.slice/kube-apiserver.service
           └─14404 /usr/local/bin/kube-apiserver --v=2 --logtostderr=true --allow-privileged=true --bind-address=0.0.0.0 --secure-port=6443 ...

Feb 09 19:12:45 k8s-master03.example.local kube-apiserver[14404]: [-]poststarthook/rbac/bootstrap-roles failed: not finished
Feb 09 19:12:45 k8s-master03.example.local kube-apiserver[14404]: I0209 19:12:45.951483   14404 healthz.go:257] poststarthook/rbac/boot...eadyz
Feb 09 19:12:45 k8s-master03.example.local kube-apiserver[14404]: [-]poststarthook/rbac/bootstrap-roles failed: not finished
Feb 09 19:12:46 k8s-master03.example.local kube-apiserver[14404]: I0209 19:12:46.057166   14404 healthz.go:257] poststarthook/rbac/boot...eadyz
Feb 09 19:12:46 k8s-master03.example.local kube-apiserver[14404]: [-]poststarthook/rbac/bootstrap-roles failed: not finished
Feb 09 19:12:46 k8s-master03.example.local kube-apiserver[14404]: W0209 19:12:46.172251   14404 lease.go:233] Resetting endpoints for m....103]
Feb 09 19:12:46 k8s-master03.example.local kube-apiserver[14404]: I0209 19:12:46.174282   14404 controller.go:611] quota admission adde...oints
Feb 09 19:12:46 k8s-master03.example.local kube-apiserver[14404]: I0209 19:12:46.182564   14404 controller.go:611] quota admission adde...8s.io
Feb 09 19:12:46 k8s-master03.example.local kube-apiserver[14404]: I0209 19:12:46.493248   14404 available_controller.go:474] "changing ...ssed"
Feb 09 19:12:46 k8s-master03.example.local kube-apiserver[14404]: E0209 19:12:46.504247   14404 available_controller.go:524] v1beta1.me...again

● kube-controller-manager.service - Kubernetes Controller Manager
   Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)
   Active: active (running) since Wed 2022-02-09 19:12:41 CST; 11s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 14409 (kube-controller)
    Tasks: 5
   Memory: 22.4M
   CGroup: /system.slice/kube-controller-manager.service
           └─14409 /usr/local/bin/kube-controller-manager --v=2 --logtostderr=true --address=127.0.0.1 --root-ca-file=/etc/kubernetes/pki/ca...

Feb 09 19:12:42 k8s-master03.example.local kube-controller-manager[14409]: I0209 19:12:42.856769   14409 dynamic_cafile_content.go:117] ...pem"
Feb 09 19:12:42 k8s-master03.example.local kube-controller-manager[14409]: W0209 19:12:42.856802   14409 authorization.go:193] No author...ork.
Feb 09 19:12:42 k8s-master03.example.local kube-controller-manager[14409]: I0209 19:12:42.856818   14409 controllermanager.go:186] Versi...22.6
Feb 09 19:12:42 k8s-master03.example.local kube-controller-manager[14409]: I0209 19:12:42.857314   14409 tlsconfig.go:178] "Loaded client CA...
Feb 09 19:12:42 k8s-master03.example.local kube-controller-manager[14409]: I0209 19:12:42.859498   14409 tlsconfig.go:200] "Loaded serving c...
Feb 09 19:12:42 k8s-master03.example.local kube-controller-manager[14409]: I0209 19:12:42.859629   14409 named_certificates.go:53] "Loaded S...
Feb 09 19:12:42 k8s-master03.example.local kube-controller-manager[14409]: I0209 19:12:42.859653   14409 secure_serving.go:200] Serving ...0257
Feb 09 19:12:42 k8s-master03.example.local kube-controller-manager[14409]: I0209 19:12:42.859912   14409 leaderelection.go:248] attempti...r...
Feb 09 19:12:42 k8s-master03.example.local kube-controller-manager[14409]: I0209 19:12:42.860399   14409 dynamic_cafile_content.go:155] ...pem"
Feb 09 19:12:42 k8s-master03.example.local kube-controller-manager[14409]: I0209 19:12:42.860604   14409 tlsconfig.go:240] "Starting Dyn...ler"

● kube-scheduler.service - Kubernetes Scheduler
   Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; enabled; vendor preset: disabled)
   Active: active (running) since Wed 2022-02-09 19:12:41 CST; 11s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 14410 (kube-scheduler)
    Tasks: 7
   Memory: 19.4M
   CGroup: /system.slice/kube-scheduler.service
           └─14410 /usr/local/bin/kube-scheduler --v=2 --logtostderr=true --address=127.0.0.1 --leader-elect=true --kubeconfig=/etc/kubernet...

Feb 09 19:12:42 k8s-master03.example.local kube-scheduler[14410]: E0209 19:12:42.908656   14410 plugin.go:138] "getting namespace, assu...oard"
Feb 09 19:12:42 k8s-master03.example.local kube-scheduler[14410]: E0209 19:12:42.908684   14410 plugin.go:138] "getting namespace, assu...stem"
Feb 09 19:12:42 k8s-master03.example.local kube-scheduler[14410]: E0209 19:12:42.908692   14410 plugin.go:138] "getting namespace, assu...stem"
Feb 09 19:12:42 k8s-master03.example.local kube-scheduler[14410]: I0209 19:12:42.939998   14410 node_tree.go:65] Added node "k8s-master...eTree
Feb 09 19:12:42 k8s-master03.example.local kube-scheduler[14410]: I0209 19:12:42.940139   14410 node_tree.go:65] Added node "k8s-master...eTree
Feb 09 19:12:42 k8s-master03.example.local kube-scheduler[14410]: I0209 19:12:42.940163   14410 node_tree.go:65] Added node "k8s-master...eTree
Feb 09 19:12:42 k8s-master03.example.local kube-scheduler[14410]: I0209 19:12:42.940173   14410 node_tree.go:65] Added node "k8s-node01...eTree
Feb 09 19:12:42 k8s-master03.example.local kube-scheduler[14410]: I0209 19:12:42.940190   14410 node_tree.go:65] Added node "k8s-node02...eTree
Feb 09 19:12:42 k8s-master03.example.local kube-scheduler[14410]: I0209 19:12:42.940209   14410 node_tree.go:65] Added node "k8s-node03...eTree
Feb 09 19:12:43 k8s-master03.example.local kube-scheduler[14410]: I0209 19:12:43.000185   14410 leaderelection.go:248] attempting to ac...er...

● kube-proxy.service - Kubernetes Kube Proxy
   Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Wed 2022-02-09 19:12:41 CST; 11s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 14411 (kube-proxy)
    Tasks: 6
   Memory: 18.7M
   CGroup: /system.slice/kube-proxy.service
           └─14411 /usr/local/bin/kube-proxy --config=/etc/kubernetes/kube-proxy.conf --v=2

Feb 09 19:12:41 k8s-master03.example.local kube-proxy[14411]: I0209 19:12:41.589090   14411 shared_informer.go:247] Caches are synced ...config
Feb 09 19:12:41 k8s-master03.example.local kube-proxy[14411]: I0209 19:12:41.589132   14411 service.go:416] Adding new service port "k...43/TCP
Feb 09 19:12:41 k8s-master03.example.local kube-proxy[14411]: I0209 19:12:41.589150   14411 service.go:416] Adding new service port "d...43/TCP
Feb 09 19:12:41 k8s-master03.example.local kube-proxy[14411]: I0209 19:12:41.589156   14411 service.go:416] Adding new service port "k...53/UDP
Feb 09 19:12:41 k8s-master03.example.local kube-proxy[14411]: I0209 19:12:41.589161   14411 service.go:416] Adding new service port "k...53/TCP
Feb 09 19:12:41 k8s-master03.example.local kube-proxy[14411]: I0209 19:12:41.589165   14411 service.go:416] Adding new service port "k...53/TCP
Feb 09 19:12:41 k8s-master03.example.local kube-proxy[14411]: I0209 19:12:41.589170   14411 service.go:416] Adding new service port "k...43/TCP
Feb 09 19:12:41 k8s-master03.example.local kube-proxy[14411]: I0209 19:12:41.589175   14411 service.go:416] Adding new service port "k...00/TCP
Feb 09 19:12:41 k8s-master03.example.local kube-proxy[14411]: I0209 19:12:41.589243   14411 proxier.go:1039] "Stale service" protocol=....0.10"
Feb 09 19:12:41 k8s-master03.example.local kube-proxy[14411]: I0209 19:12:41.684172   14411 proxier.go:1474] "Opened local port" port=...tcp4)"

● kubelet.service - Kubernetes Kubelet
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
  Drop-In: /etc/systemd/system/kubelet.service.d
           └─10-kubelet.conf
   Active: active (running) since Wed 2022-02-09 19:12:41 CST; 11s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 14412 (kubelet)
    Tasks: 13
   Memory: 41.9M
   CGroup: /system.slice/kubelet.service
           └─14412 /usr/local/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/k...

Feb 09 19:12:43 k8s-master03.example.local kubelet[14412]: I0209 19:12:43.721223   14412 reconciler.go:224] "operationExecutor.VerifyControl...
Feb 09 19:12:43 k8s-master03.example.local kubelet[14412]: I0209 19:12:43.721264   14412 reconciler.go:224] "operationExecutor.VerifyC...b\") "
Feb 09 19:12:43 k8s-master03.example.local kubelet[14412]: I0209 19:12:43.721278   14412 reconciler.go:224] "operationExecutor.VerifyC...b\") "
Feb 09 19:12:43 k8s-master03.example.local kubelet[14412]: I0209 19:12:43.721297   14412 reconciler.go:224] "operationExecutor.VerifyControl...
Feb 09 19:12:43 k8s-master03.example.local kubelet[14412]: I0209 19:12:43.721312   14412 reconciler.go:224] "operationExecutor.VerifyControl...
Feb 09 19:12:43 k8s-master03.example.local kubelet[14412]: I0209 19:12:43.721326   14412 reconciler.go:224] "operationExecutor.VerifyControl...
Feb 09 19:12:43 k8s-master03.example.local kubelet[14412]: I0209 19:12:43.721339   14412 reconciler.go:224] "operationExecutor.VerifyControl...
Feb 09 19:12:43 k8s-master03.example.local kubelet[14412]: I0209 19:12:43.721352   14412 reconciler.go:224] "operationExecutor.VerifyControl...
Feb 09 19:12:43 k8s-master03.example.local kubelet[14412]: I0209 19:12:43.721359   14412 reconciler.go:157] "Reconciler: start to sync state"
Feb 09 19:12:45 k8s-master03.example.local kubelet[14412]: I0209 19:12:45.431962   14412 prober_manager.go:255] "Failed to trigger a m...iness"
Hint: Some lines were ellipsized, use -l to show in full.

[root@k8s-master03 ~]# kube-apiserver --version
Kubernetes v1.22.6
[root@k8s-master03 ~]# kube-controller-manager --version
Kubernetes v1.22.6
[root@k8s-master03 ~]# kube-scheduler --version
Kubernetes v1.22.6
[root@k8s-master03 ~]# kube-proxy --version
Kubernetes v1.22.6
[root@k8s-master03 ~]# kubectl version
Client Version: version.Info{Major:"1", Minor:"22", GitVersion:"v1.22.6", GitCommit:"f59f5c2fda36e4036b49ec027e556a15456108f0", GitTreeState:"clean", BuildDate:"2022-01-19T17:33:06Z", GoVersion:"go1.16.12", Compiler:"gc", Platform:"linux/amd64"}
The connection to the server localhost:8080 was refused - did you specify the right host or port?

[root@k8s-master03 ~]# kubelet --version
Kubernetes v1.22.6

[root@k8s-master01 bin]# kubectl get nodes
NAME                         STATUS   ROLES    AGE     VERSION
k8s-master01.example.local   Ready    <none>   5h18m   v1.22.6
k8s-master02.example.local   Ready    <none>   5h18m   v1.22.6
k8s-master03.example.local   Ready    <none>   5h18m   v1.22.6
k8s-node01.example.local     Ready    <none>   5h18m   v1.20.14
k8s-node02.example.local     Ready    <none>   5h18m   v1.20.14
k8s-node03.example.local     Ready    <none>   5h18m   v1.20.14

#Bring master03 back online in the load balancer
[root@k8s-master01 bin]# ssh -o StrictHostKeyChecking=no root@k8s-lb 'echo "enable server kubernetes-6443/172.31.3.103" | socat stdio /var/lib/haproxy/haproxy.sock'
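
master02 and master03 were upgraded with exactly the same sequence as master01: drain the node in HAProxy, stop the services, copy the new binaries and the kubelet drop-in, restart, then re-enable it. If you have more control-plane nodes, the per-node steps can be wrapped in a small helper. This is only a sketch under the assumptions of this environment (host names resolvable from master01, the HAProxy socket on k8s-lb, backend servers named by IP, and the v1.22.6 binaries already unpacked in /root/kubernetes/server/bin on master01):

#!/bin/bash
# upgrade_master.sh <node-name> <node-ip>  -- run from master01
NODE=$1; IP=$2
BIN=/root/kubernetes/server/bin
SVCS="kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubelet"

# take the node out of the load balancer
ssh root@k8s-lb "echo 'disable server kubernetes-6443/${IP}' | socat stdio /var/lib/haproxy/haproxy.sock"
# stop the control-plane services, replace the binaries and the kubelet drop-in, then restart
ssh root@${NODE} "systemctl stop ${SVCS}"
scp ${BIN}/{kube-apiserver,kube-controller-manager,kube-scheduler,kube-proxy,kubectl,kubelet} root@${NODE}:/usr/local/bin/
scp /etc/systemd/system/kubelet.service.d/10-kubelet.conf root@${NODE}:/etc/systemd/system/kubelet.service.d/
ssh root@${NODE} "systemctl daemon-reload && systemctl restart ${SVCS}"
ssh root@${NODE} "kubelet --version"
# put the node back into the load balancer
ssh root@k8s-lb "echo 'enable server kubernetes-6443/${IP}' | socat stdio /var/lib/haproxy/haproxy.sock"

Usage would be, for example: bash upgrade_master.sh k8s-master02 172.31.3.102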

2.3 Upgrade Calico

docs.projectcalico.org/maintenance…

Calico installation: docs.projectcalico.org/getting-sta…

[root@k8s-master01 bin]# cd

[root@k8s-master01 ~]# curl https://docs.projectcalico.org/manifests/calico-etcd.yaml -O

[root@k8s-master01 ~]# vim calico-etcd.yaml 
...
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: OnDelete #change this to OnDelete: Calico will then not roll out updates on its own; a calico-node pod is only updated after it is deleted (e.g. when kubelet is restarted). Also delete the two rollingUpdate lines that follow in the original manifest
  template:
    metadata:
      labels:
        k8s-app: calico-node
...
apiVersion: policy/v1 #change this to policy/v1
kind: PodDisruptionBudget
metadata:
...

#Modify the following places in calico-etcd.yaml

[root@k8s-master01 ~]# grep "etcd_endpoints:.*" calico-etcd.yaml
  etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"
[root@k8s-master01 ~]# sed -i 's#etcd_endpoints: "http://<ETCD_IP>:<ETCD_PORT>"#etcd_endpoints: "https://172.31.3.108:2379,https://172.31.3.109:2379,https://172.31.3.110:2379"#g' calico-etcd.yaml
[root@k8s-master01 ~]# grep "etcd_endpoints:.*" calico-etcd.yaml
  etcd_endpoints: "https://172.31.3.108:2379,https://172.31.3.109:2379,https://172.31.3.110:2379"

[root@k8s-master01 ~]# grep -E "(.*etcd-key:.*|.*etcd-cert:.*|.*etcd-ca:.*)" calico-etcd.yaml 
  # etcd-key: null
  # etcd-cert: null
  # etcd-ca: null

[root@k8s-master01 ~]# ETCD_KEY=`cat /etc/kubernetes/pki/etcd/etcd-key.pem | base64 | tr -d '\n'`
[root@k8s-master01 ~]# ETCD_CERT=`cat /etc/kubernetes/pki/etcd/etcd.pem | base64 | tr -d '\n'`
[root@k8s-master01 ~]# ETCD_CA=`cat /etc/kubernetes/pki/etcd/etcd-ca.pem | base64 | tr -d '\n'`

[root@k8s-master01 ~]# sed -i "s@# etcd-key: null@etcd-key: ${ETCD_KEY}@g; s@# etcd-cert: null@etcd-cert: ${ETCD_CERT}@g; s@# etcd-ca: null@etcd-ca: ${ETCD_CA}@g" calico-etcd.yaml

[root@k8s-master01 ~]# grep -E "(.*etcd-key:.*|.*etcd-cert:.*|.*etcd-ca:.*)" calico-etcd.yaml
  etcd-key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBcGh0ejhrbzRURnV4T2VVTDBoSWpFdHBmcC9BRGYrcGR3SWNkeVA2QnV5dGxmSzJECjF4eEpRUGVhOFNwMGlFaVBxTEdNWkl5bjNjbHd4Mm9TYkpJd1ZzeEt6N2RybFErdUx2Qzl3Y3lPUktOZVpEd24KMTNDemk4eURENkZmL3NLcXhzNXVEMnNsNWNBMGdPK3orMkdOeUh5YkhOTytodG93bnh0MjhuNHFKWmRnK2l5VQp3R3psT0xQblY5UlJESWJLTW9YT3FLUUt1WWVhMm8rU2E4Rkp1anlvT2Uyc0t5UndTQk5xcjYyZnRTK0ZWSHFxCmVKalJYS245NFM0TDFwd2I5cUxnUDJmaU41bFRadk4va1dkZnMxd2RXVElWUVNaZE92TmhhZGp4b0Y5TWlsSGEKZ0l4NzZaNU1YL2lNZWpQb3Z4M2pDTXJzdWFUS0tnSGt6eTRLU3dJREFRQUJBb0lCQUFlVi8yQ1VWU2ZmbENOeAp1MjEzbUpSMjFxR0R5NVVlN2ZNcCtJbENYa2hlL2Y2SXFobTcxL2lZbGtIblQzVWQ0em13Q2hwWmRoMGg0djJvCmNYajE0REZHbVRBTlQyTjZXTmtaODRDVFIvZ0lnZm9QNlQza2pyNldzM0dXVEIwRlpPazVhanRZQ0Y0S3Zoc1oKVjEzbW9hUURWTTRuT1c5TkxhVkdpdE1lUWV4L2YzV1ZSc2M2TWdaUlVvRGU5THR4bk5nb1hWZmVYcVpZbElzVQplSFJQb1JGYnpXYi9UdEduTnFRMzJkemtyYTNNWnFzd1R4QjdMMGNWUW0xTGxMUXQ1KzkvWnRLd3Zwa0w0QTUvCldwUEYvWGhSSTBBQ0dhUEo3YWNlRUlwUlRSellzbnQ0dlZHNHNob3Y3MEQrYjdLT1lhN1FyU2FmNUlLRVlydFkKV3pjM0tQa0NnWUVBd1dwQk41enFxTWllVWpVODhLVVVDTkhNdUxMSHp5TTZQQ29OZXMrWGNIY1U1L1kxZUV0TwpMd3Z6djd3QVR5UW92RU8ycldtNEF2RXRSaG1QUFc2YU52ZUpPc2FZNnlXaVJ2R0RiN2dzb093eW9DYVlKd08vCnF5MEVLM29qTy9XRVZhNFpyTUlXOUxNWEkwajlKeldpUWI4NytNaENJcVpoZnYvUUhuWW5VU1VDZ1lFQTI5c2cKRzFJZ1hXamVyNHhiTWdMVkFnOXk1K3g1NlQ1RTZWNE5vdUJUZUlhUStob1cvU0w2UFMyS2ZjLzJweXVweFd3egp3aVRXdSt2L1NIUTVudlMrRHAzU0J5U0NqMEJJalg3N2VXS2g0SW1Hd2NoVzV5WnVBM3BVS3paSnV2VXpIdUFNCnFRc0NnR0ZnZGo4Zm1qYWV6ZENOVTI2TUhSZTRNaUJ2cHhSUHFxOENnWUFQamxNMmZObG12OVB6K3JJdkRLZmkKMmJUa2VnU1dCVmhPdEhjbkZJRXltM0ZFQXNwa0pYSmhXRTIvY3doM1ZRb3RzaWlFSkFlWHZQd09Na29SLzg1SgpjM2xIRCtnR3FaMDJwWUFUd1RWZHNBR1dYZVJJNXdWSWFETjRwN2Nqd0doblY3eGE1N1ZlOHZSK2N3VmhYTy95CjU4V1VDYzgvNkMvWlBndm9GMHFzUFFLQmdBaHNjZU42RnhGZEprTVZucHpnN09aaVR5WEJzcjRVQzdIaFQ2WncKNytITFRoeTNDVEJ6dWFERWNPejNIZDB6MkJKZlhmQlBWd2JtT09hK3hVSm80Q3RSTXEzaFlUczUzRTNIa3IwSQo0V2puL0FqS3MwR3lBRDhUM2N1MkRjY2pBKzFuNmpSRDNybXFnWGFtWG9DYkhTU0huQktaUnJjS3BKMFBEeGdZCnVDQ3pBb0dBSjh0SXk1UHRya3lUN3ExZURJNTF1Q2YwWDhrRWJoeFZ1RC9oVW82SkFURkRnRG0vN0Z5UFNvMnAKSFZVaEtpZmtQNUVoYTBYTDMrK3VxOWhITXJvNHVuaksrZSs2Y3VrZkhOWkk4MFVHazBOWUY3WGd1VTdETlJ1aApHQ1dJRkNhcjB0TE9lK1pBRzJQaHFQMno4cXlmNVNEckk0bmJtUHlabjZPMVFYZ0Q1REU9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
  etcd-cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVKakNDQXc2Z0F3SUJBZ0lVVHNTUDBUVlZqaE9UZEFUNnlncFpXcERRb0dJd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1p6RUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeERUQUxCZ05WQkFvVEJHVjBZMlF4RmpBVUJnTlZCQXNURFVWMFkyUWdVMlZqZFhKcGRIa3hEVEFMCkJnTlZCQU1UQkdWMFkyUXdJQmNOTWpJd01USXlNRGd4TkRBd1doZ1BNakV5TVRFeU1qa3dPREUwTURCYU1HY3gKQ3pBSkJnTlZCQVlUQWtOT01SQXdEZ1lEVlFRSUV3ZENaV2xxYVc1bk1SQXdEZ1lEVlFRSEV3ZENaV2xxYVc1bgpNUTB3Q3dZRFZRUUtFd1JsZEdOa01SWXdGQVlEVlFRTEV3MUZkR05rSUZObFkzVnlhWFI1TVEwd0N3WURWUVFECkV3UmxkR05rTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUFwaHR6OGtvNFRGdXgKT2VVTDBoSWpFdHBmcC9BRGYrcGR3SWNkeVA2QnV5dGxmSzJEMXh4SlFQZWE4U3AwaUVpUHFMR01aSXluM2Nsdwp4Mm9TYkpJd1ZzeEt6N2RybFErdUx2Qzl3Y3lPUktOZVpEd24xM0N6aTh5REQ2RmYvc0txeHM1dUQyc2w1Y0EwCmdPK3orMkdOeUh5YkhOTytodG93bnh0MjhuNHFKWmRnK2l5VXdHemxPTFBuVjlSUkRJYktNb1hPcUtRS3VZZWEKMm8rU2E4Rkp1anlvT2Uyc0t5UndTQk5xcjYyZnRTK0ZWSHFxZUpqUlhLbjk0UzRMMXB3YjlxTGdQMmZpTjVsVApadk4va1dkZnMxd2RXVElWUVNaZE92TmhhZGp4b0Y5TWlsSGFnSXg3Nlo1TVgvaU1lalBvdngzakNNcnN1YVRLCktnSGt6eTRLU3dJREFRQUJvNEhITUlIRU1BNEdBMVVkRHdFQi93UUVBd0lGb0RBZEJnTlZIU1VFRmpBVUJnZ3IKQmdFRkJRY0RBUVlJS3dZQkJRVUhBd0l3REFZRFZSMFRBUUgvQkFJd0FEQWRCZ05WSFE0RUZnUVU5cXI4N3RsZApyTGJPdGxMUEYvT0xBN1QvcEVFd0h3WURWUjBqQkJnd0ZvQVVpbkFQc1JrQ3pPenZ6N3ZwWmdQdUhUNGt3QTR3ClJRWURWUjBSQkQ0d1BJSUthemh6TFdWMFkyUXdNWUlLYXpoekxXVjBZMlF3TW9JS2F6aHpMV1YwWTJRd000Y0UKZndBQUFZY0VyQjhEYkljRXJCOERiWWNFckI4RGJqQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFlb28rL0NVYQpTa2hkVEY0ekJLa3ExREs0cFFaVEdhQUNHNEUvWUUwNXFNWS9QcTlpam5nNGtRdFB0d2lXaE5WN1JZWGl5QnhjCitIMTBDc3JVSTQrTFVjVjI0T1d5UFA2Q09yY2sycDBDZUhTL0E0ZEhYaEhReC8rZFRoUGxWcno1RzdlblhKRE0KaTlhZGxOR21BSWVlZEE4ekNENlVvbHFOOVdrZ29jTWw0ckdFZDJ3WFZMcFA5ZzhybGlyNVJrSy9seHFmQ1dBWgpBeDZPejJTYTNEbEVGdXpNdGxYejBobnRPdGpBdUJ6eEdIdlJVMllDdlcyL3pDUTJTQ0ZodkJXMGtPVCtiUVc1CkkrVTZGeVpCSU1XQlBPQmZsNm03M2pkNjdiSzRreVJXTEhQUnl0T2w1N3RMdlljOEgybFBQbS9VS3BWYkx5NjkKdXBuNHhOZUhaYXZ5ckE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
  etcd-ca: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR4RENDQXF5Z0F3SUJBZ0lVSW02eEIzNlN2dXE1TDhUaks5cHV5bjJHWEp3d0RRWUpLb1pJaHZjTkFRRUwKQlFBd1p6RUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeERUQUxCZ05WQkFvVEJHVjBZMlF4RmpBVUJnTlZCQXNURFVWMFkyUWdVMlZqZFhKcGRIa3hEVEFMCkJnTlZCQU1UQkdWMFkyUXdJQmNOTWpJd01USXlNRGd4TXpBd1doZ1BNakV5TVRFeU1qa3dPREV6TURCYU1HY3gKQ3pBSkJnTlZCQVlUQWtOT01SQXdEZ1lEVlFRSUV3ZENaV2xxYVc1bk1SQXdEZ1lEVlFRSEV3ZENaV2xxYVc1bgpNUTB3Q3dZRFZRUUtFd1JsZEdOa01SWXdGQVlEVlFRTEV3MUZkR05rSUZObFkzVnlhWFI1TVEwd0N3WURWUVFECkV3UmxkR05rTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF1cDRWVEQzS1JaRWgKcXA2TW0wTXF3amFrVkFKTFJ0YlFjd3FLNWsvQ2s4MEFjTDUyOGl6YldSdGRXcDVpNk9td241M3BGNGdpZG9EYQphOUpadEF4ZUl0RmNkbExxRzZrdjFCU3pyVVlMMXZyOFZNckRZd0VrYW9RdlZ3cHFrZDJiR3pUd21oVnJXZ3AxCmMrMjcwSWI1L2NVa25mWmtubEVTcWlyQzI5Z09oZnh0OFNrc1FTSUNtcXhuajFDVnltL3dML3AwMDUzNE5BNjAKeXk5aDdkZjU1R0ZFbjdLaytzOEdkbUVmL3ludXVsT1VUY25mTXppeWVoQW5uUStZMjZMWGJzSWw3eHg3YzRpZgpManFPN3d1Qm5WS3M2WllENzI0V1Z0QUY0VWllL1NqRXVabE5GWGNIdTg0Ly9jNHBLL1Avb0dxNklUaVZYWUJyClY1TW1jdTRPV3dJREFRQUJvMll3WkRBT0JnTlZIUThCQWY4RUJBTUNBUVl3RWdZRFZSMFRBUUgvQkFnd0JnRUIKL3dJQkFqQWRCZ05WSFE0RUZnUVVpbkFQc1JrQ3pPenZ6N3ZwWmdQdUhUNGt3QTR3SHdZRFZSMGpCQmd3Rm9BVQppbkFQc1JrQ3pPenZ6N3ZwWmdQdUhUNGt3QTR3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUpmNWJwd2FJYjFTCmtiRUcyVDlRb3d4WU52VGRYSGllbzkwazlPSEFqN3A3RGdzekk0alUwUnkxOHN4c1h0aW5TMCtNU3U5L2d1VHYKZEprK3c4TnhyNHNZZEt3N2VSVVpUbUREQ2l0VldkY0JHNk14Y1BTTDJaQnVJMi8wOTRnN0ZNd2ZIc09lVEdHZgpScVVrV1lTRjRRbU9iRTZwNTA3QWlxRlZqMEhzUHRmTTdpQjZ3ZXRyYzlTVzlZd3R5Tm9PVFhnZEdDdDc5akNBCllUTG9TaHFxcGRvUWEwd0hzYWZqSDd5N2VIZEdRRmZtSWo2RVFQU1ZRSFhQUmhFOXVadDgxbDByeENseUQxa3kKOEhVYTJpOFpHblF0cVJxd3JORHRHeEdlYUdMbCtNYkZVb1N4SW9nTTNaK2x0a2NNbUVZK3hxc3dBbVlMUTJnTwpNMUtoRVJxT1JsMD0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
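
It is worth confirming that the embedded blobs decode back to valid PEM before applying the manifest; a quick check (assumes openssl is installed):

# decode the embedded CA and client certificate and print their subject and expiry
grep 'etcd-ca:' calico-etcd.yaml | awk '{print $2}' | base64 -d | openssl x509 -noout -subject -enddate
grep 'etcd-cert:' calico-etcd.yaml | awk '{print $2}' | base64 -d | openssl x509 -noout -subject -enddate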

[root@k8s-master01 ~]# grep -E "(.*etcd_ca:.*|.*etcd_cert:.*|.*etcd_key:.*)" calico-etcd.yaml 
  etcd_ca: ""   # "/calico-secrets/etcd-ca"
  etcd_cert: "" # "/calico-secrets/etcd-cert"
  etcd_key: ""  # "/calico-secrets/etcd-key"

[root@k8s-master01 ~]# sed -i 's#etcd_ca: ""#etcd_ca: "/calico-secrets/etcd-ca"#g; s#etcd_cert: ""#etcd_cert: "/calico-secrets/etcd-cert"#g; s#etcd_key: "" #etcd_key: "/calico-secrets/etcd-key" #g' calico-etcd.yaml

[root@k8s-master01 ~]# grep -E "(.*etcd_ca:.*|.*etcd_cert:.*|.*etcd_key:.*)" calico-etcd.yaml 
  etcd_ca: "/calico-secrets/etcd-ca"   # "/calico-secrets/etcd-ca"
  etcd_cert: "/calico-secrets/etcd-cert" # "/calico-secrets/etcd-cert"
  etcd_key: "/calico-secrets/etcd-key"  # "/calico-secrets/etcd-key"

# Change this to your own Pod subnet
[root@k8s-master01 ~]# POD_SUBNET="192.168.0.0/12"

# Note: the following step changes the subnet under CALICO_IPV4POOL_CIDR in calico-etcd.yaml to your own Pod subnet, i.e. it replaces 192.168.x.x/16 with your cluster's Pod CIDR and uncomments the setting:

[root@k8s-master01 ~]# grep -E "(.*CALICO_IPV4POOL_CIDR.*|.*192.168.0.0.*)" calico-etcd.yaml 
            # - name: CALICO_IPV4POOL_CIDR
            #   value: "192.168.0.0/16"

[root@k8s-master01 ~]# sed -i 's@# - name: CALICO_IPV4POOL_CIDR@- name: CALICO_IPV4POOL_CIDR@g; s@#   value: "192.168.0.0/16"@  value: '"${POD_SUBNET}"'@g' calico-etcd.yaml

[root@k8s-master01 ~]# grep -E "(.*CALICO_IPV4POOL_CIDR.*|.*192.168.0.0.*)" calico-etcd.yaml 
            - name: CALICO_IPV4POOL_CIDR
              value: 192.168.0.0/12

[root@k8s-master01 ~]# grep "image:" calico-etcd.yaml 
          image: docker.io/calico/cni:v3.21.4
          image: docker.io/calico/pod2daemon-flexvol:v3.21.4
          image: docker.io/calico/node:v3.21.4
          image: docker.io/calico/kube-controllers:v3.21.4

[root@k8s-master01 ~]# cat download_calico_images.sh
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-11
#FileName:      download_calico_images.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \\033[01;31m"
END='\033[0m'

images=$(awk -F "/"  '/image:/{print $NF}' calico-etcd.yaml)
HARBOR_DOMAIN=harbor.raymonds.cc

images_download(){
    ${COLOR}"Starting to download the Calico images"${END}
    for i in ${images};do 
        docker pull registry.cn-beijing.aliyuncs.com/raymond9/$i
        docker tag registry.cn-beijing.aliyuncs.com/raymond9/$i ${HARBOR_DOMAIN}/google_containers/$i
        docker rmi registry.cn-beijing.aliyuncs.com/raymond9/$i
        docker push ${HARBOR_DOMAIN}/google_containers/$i
    done
    ${COLOR}"Calico images downloaded"${END}
}

images_download

[root@k8s-master01 ~]# bash download_calico_images.sh

[root@k8s-master01 ~]# sed -ri 's@(.*image:) docker.io/calico(/.*)@\1 harbor.raymonds.cc/google_containers\2@g' calico-etcd.yaml 

[root@k8s-master01 ~]# grep "image:" calico-etcd.yaml
          image: harbor.raymonds.cc/google_containers/cni:v3.21.4
          image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4
          image: harbor.raymonds.cc/google_containers/node:v3.21.4
          image: harbor.raymonds.cc/google_containers/kube-controllers:v3.21.4

[root@k8s-master01 ~]# kubectl apply -f calico-etcd.yaml
secret/calico-etcd-secrets unchanged
configmap/calico-config configured
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers unchanged
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers unchanged
clusterrole.rbac.authorization.k8s.io/calico-node configured
clusterrolebinding.rbac.authorization.k8s.io/calico-node unchanged
daemonset.apps/calico-node configured
serviceaccount/calico-node unchanged
deployment.apps/calico-kube-controllers configured
serviceaccount/calico-kube-controllers unchanged
poddisruptionbudget.policy/calico-kube-controllers created

#Take master01 out of the haproxy backend
[root@k8s-master01 ~]# ssh -o StrictHostKeyChecking=no root@k8s-lb 'echo "disable server kubernetes-6443/172.31.3.101" | socat stdio /var/lib/haproxy/haproxy.sock'

[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide |grep calico |grep master01
calico-node-846qg                          1/1     Running   0          53m   172.31.3.101      k8s-master01.example.local   <none>           <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-846qg -n kube-system -o yaml |grep "image:"
    image: harbor.raymonds.cc/google_containers/node:v3.15.3
    image: harbor.raymonds.cc/google_containers/cni:v3.15.3
  - image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.15.3
    image: harbor.raymonds.cc/google_containers/node:v3.15.3
    image: harbor.raymonds.cc/google_containers/cni:v3.15.3
    image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.15.3
#The image has not been upgraded yet
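
The pods were not recreated by kubectl apply, so each calico-node pod has to be deleted by hand. If you want to confirm why, you can check the DaemonSet's update strategy first (a quick diagnostic, not part of the recorded session):

kubectl -n kube-system get ds calico-node -o jsonpath='{.spec.updateStrategy}{"\n"}'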

[root@k8s-master01 ~]# kubectl delete pod calico-node-846qg -n kube-system
pod "calico-node-846qg" deleted
[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide |grep calico |grep master01
calico-node-6nxzg                          1/1     Running   0          19s     172.31.3.101      k8s-master01.example.local   <none>           <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-6nxzg -n kube-system -o yaml |grep "image:"
    image: harbor.raymonds.cc/google_containers/node:v3.21.4
    image: harbor.raymonds.cc/google_containers/cni:v3.21.4
  - image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4
    image: harbor.raymonds.cc/google_containers/node:v3.21.4
    image: harbor.raymonds.cc/google_containers/cni:v3.21.4
    image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4

#Bring master01 back into the haproxy backend
[root@k8s-master01 ~]# ssh -o StrictHostKeyChecking=no root@k8s-lb 'echo "enable server kubernetes-6443/172.31.3.101" | socat stdio /var/lib/haproxy/haproxy.sock'

#Take master02 out of the haproxy backend
[root@k8s-master01 ~]# ssh -o StrictHostKeyChecking=no root@k8s-lb 'echo "disable server kubernetes-6443/172.31.3.102" | socat stdio /var/lib/haproxy/haproxy.sock'

[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide |grep calico |grep master02
calico-node-kdvcc                          1/1     Running   0          56m     172.31.3.102      k8s-master02.example.local   <none>           <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-kdvcc  -n kube-system -o yaml |grep "image:"
    image: harbor.raymonds.cc/google_containers/node:v3.15.3
    image: harbor.raymonds.cc/google_containers/cni:v3.15.3
  - image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.15.3
    image: harbor.raymonds.cc/google_containers/node:v3.15.3
    image: harbor.raymonds.cc/google_containers/cni:v3.15.3
    image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.15.3
[root@k8s-master01 ~]# kubectl delete pod calico-node-kdvcc  -n kube-system
pod "calico-node-kdvcc" deleted
[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide |grep calico |grep master02
calico-node-ds7br                          0/1     Init:0/2   0          4s      172.31.3.102      k8s-master02.example.local   <none>           <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-ds7br  -n kube-system -o yaml |grep "image:"
    image: harbor.raymonds.cc/google_containers/node:v3.21.4
    image: harbor.raymonds.cc/google_containers/cni:v3.21.4
  - image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4
    image: harbor.raymonds.cc/google_containers/node:v3.21.4
    image: harbor.raymonds.cc/google_containers/cni:v3.21.4
    image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4

#Bring master02 back into the haproxy backend
[root@k8s-master01 ~]# ssh -o StrictHostKeyChecking=no root@k8s-lb 'echo "enable server kubernetes-6443/172.31.3.102" | socat stdio /var/lib/haproxy/haproxy.sock'

#Take master03 out of the haproxy backend
[root@k8s-master01 ~]# ssh -o StrictHostKeyChecking=no root@k8s-lb 'echo "disable server kubernetes-6443/172.31.3.103" | socat stdio /var/lib/haproxy/haproxy.sock'

[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide |grep calico |grep master03
calico-node-fshtx                          1/1     Running   0          61m     172.31.3.103      k8s-master03.example.local   <none>           <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-fshtx  -n kube-system -o yaml |grep "image:"
    image: harbor.raymonds.cc/google_containers/node:v3.15.3
    image: harbor.raymonds.cc/google_containers/cni:v3.15.3
  - image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.15.3
    image: harbor.raymonds.cc/google_containers/node:v3.15.3
    image: harbor.raymonds.cc/google_containers/cni:v3.15.3
    image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.15.3
[root@k8s-master01 ~]# kubectl delete pod calico-node-fshtx  -n kube-system
pod "calico-node-fshtx" deleted
[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide |grep calico |grep master03
calico-node-7hqjq                          0/1     Init:0/2   0          3s      172.31.3.103      k8s-master03.example.local   <none>           <none>
[root@k8s-master01 ~]# kubectl get pod calico-node-7hqjq  -n kube-system -o yaml |grep "image:"
    image: harbor.raymonds.cc/google_containers/node:v3.21.4
    image: harbor.raymonds.cc/google_containers/cni:v3.21.4
  - image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4
    image: harbor.raymonds.cc/google_containers/node:v3.21.4
    image: harbor.raymonds.cc/google_containers/cni:v3.21.4
    image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4

#Bring master03 back into the haproxy backend
[root@k8s-master01 ~]# ssh -o StrictHostKeyChecking=no root@k8s-lb 'echo "enable server kubernetes-6443/172.31.3.103" | socat stdio /var/lib/haproxy/haproxy.sock'
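
The disable / delete-pod / enable sequence is identical for all three masters. If you want to automate it, a minimal sketch (assuming the same haproxy socket path, backend name and addressing used above, plus the stock k8s-app=calico-node label) could look like this:

#!/bin/bash
# Roll the calico-node pod on each master, taking that apiserver out of the
# haproxy backend while its calico pod is recreated.
MASTERS="172.31.3.101 172.31.3.102 172.31.3.103"
for ip in ${MASTERS}; do
    ssh -o StrictHostKeyChecking=no root@k8s-lb \
        "echo 'disable server kubernetes-6443/${ip}' | socat stdio /var/lib/haproxy/haproxy.sock"
    # find the calico-node pod running on this master and recreate it
    pod=$(kubectl -n kube-system get pod -o wide | awk -v ip="${ip}" '/calico-node/ && $0 ~ ip {print $1}')
    kubectl -n kube-system delete pod "${pod}"
    kubectl -n kube-system wait --for=condition=Ready pod -l k8s-app=calico-node --timeout=300s
    ssh -o StrictHostKeyChecking=no root@k8s-lb \
        "echo 'enable server kubernetes-6443/${ip}' | socat stdio /var/lib/haproxy/haproxy.sock"
done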

2.4 Upgrade node

#Take the worker node offline (drain it)
[root@k8s-master01 ~]# kubectl drain k8s-node01.example.local --delete-emptydir-data --force --ignore-daemonsets
node/k8s-node01.example.local cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/calico-node-whph4
evicting pod kubernetes-dashboard/kubernetes-dashboard-6c9dd9dbf5-7l8qd
pod/kubernetes-dashboard-6c9dd9dbf5-7l8qd evicted
node/k8s-node01.example.local evicted

[root@k8s-master01 ~]# kubectl get nodes
NAME                         STATUS                     ROLES    AGE   VERSION
k8s-master01.example.local   Ready                      <none>   78m   v1.22.6
k8s-master02.example.local   Ready                      <none>   78m   v1.22.6
k8s-master03.example.local   Ready                      <none>   78m   v1.22.6
k8s-node01.example.local     Ready,SchedulingDisabled   <none>   78m   v1.20.14
k8s-node02.example.local     Ready                      <none>   78m   v1.20.14
k8s-node03.example.local     Ready                      <none>   78m   v1.20.14

[root@k8s-node01 ~]# systemctl stop kubelet kube-proxy

[root@k8s-master01 ~]# cd kubernetes/server/bin/
[root@k8s-master01 bin]# scp kubelet kube-proxy k8s-node01:/usr/local/bin/
[root@k8s-master01 bin]# scp /etc/systemd/system/kubelet.service.d/10-kubelet.conf k8s-node01:/etc/systemd/system/kubelet.service.d/

[root@k8s-node01 ~]# systemctl daemon-reload && systemctl restart kubelet kube-proxy
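
Before moving on it is worth confirming on the node that both services actually came back up and that the new binaries are in place (a quick sanity check, not in the recorded session):

systemctl is-active kubelet kube-proxy
kubelet --version
kube-proxy --version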

[root@k8s-master01 bin]# kubectl get pod -n kube-system -o wide |grep node01
calico-node-q4gnl                          1/1     Running   0          5h29m   172.31.3.111     k8s-node01.example.local     <none>           <none>
[root@k8s-master01 bin]# kubectl get pod calico-node-q4gnl -n kube-system -o yaml|grep "image:"
    image: harbor.raymonds.cc/google_containers/node:v3.15.3
    image: harbor.raymonds.cc/google_containers/cni:v3.15.3
  - image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.15.3
    image: harbor.raymonds.cc/google_containers/node:v3.15.3
    image: harbor.raymonds.cc/google_containers/cni:v3.15.3
    image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.15.3
[root@k8s-master01 bin]# kubectl delete pod calico-node-q4gnl -n kube-system 
pod "calico-node-q4gnl" deleted
[root@k8s-master01 bin]# kubectl get pod -n kube-system -o wide |grep node01
calico-node-s5kst                          0/1     Init:0/2   0          5s      172.31.3.111     k8s-node01.example.local     <none>           <none>
[root@k8s-master01 bin]# kubectl get pod calico-node-s5kst -n kube-system -o yaml|grep "image:"
    image: harbor.raymonds.cc/google_containers/node:v3.21.4
    image: harbor.raymonds.cc/google_containers/cni:v3.21.4
  - image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4
    image: harbor.raymonds.cc/google_containers/node:v3.21.4
    image: harbor.raymonds.cc/google_containers/cni:v3.21.4
    image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4

[root@k8s-master01 bin]# kubectl uncordon k8s-node01.example.local
node/k8s-node01.example.local uncordoned
[root@k8s-master01 bin]# kubectl get nodes
NAME                         STATUS   ROLES    AGE     VERSION
k8s-master01.example.local   Ready    <none>   5h43m   v1.22.6
k8s-master02.example.local   Ready    <none>   5h43m   v1.22.6
k8s-master03.example.local   Ready    <none>   5h43m   v1.22.6
k8s-node01.example.local     Ready    <none>   5h43m   v1.22.6
k8s-node02.example.local     Ready    <none>   5h43m   v1.20.14
k8s-node03.example.local     Ready    <none>   5h43m   v1.20.14

[root@k8s-master01 bin]# kubectl drain k8s-node02.example.local --delete-emptydir-data --force --ignore-daemonsets
node/k8s-node02.example.local cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/calico-node-tmz9t
evicting pod kube-system/coredns-847c895554-l7nln
pod/coredns-847c895554-l7nln evicted
node/k8s-node02.example.local evicted

[root@k8s-master01 bin]# kubectl get nodes
NAME                         STATUS                     ROLES    AGE   VERSION
k8s-master01.example.local   Ready                      <none>   83m   v1.22.6
k8s-master02.example.local   Ready                      <none>   83m   v1.22.6
k8s-master03.example.local   Ready                      <none>   83m   v1.22.6
k8s-node01.example.local     Ready                      <none>   83m   v1.22.6
k8s-node02.example.local     Ready,SchedulingDisabled   <none>   83m   v1.20.14
k8s-node03.example.local     Ready                      <none>   83m   v1.20.14

[root@k8s-node02 ~]# systemctl stop kubelet kube-proxy

[root@k8s-master01 bin]# scp /etc/systemd/system/kubelet.service.d/10-kubelet.conf k8s-node02:/etc/systemd/system/kubelet.service.d/
[root@k8s-master01 bin]# scp kubelet kube-proxy k8s-node02:/usr/local/bin/

[root@k8s-node02 ~]# systemctl daemon-reload && systemctl restart kubelet kube-proxy

[root@k8s-master01 bin]# kubectl get pod -n kube-system -o wide |grep node02
calico-node-4z5d4                          1/1     Running   0          5h31m   172.31.3.112     k8s-node02.example.local     <none>           <none>
[root@k8s-master01 bin]# kubectl get pod calico-node-4z5d4 -n kube-system -o yaml |grep "image:"
    image: harbor.raymonds.cc/google_containers/node:v3.15.3
    image: harbor.raymonds.cc/google_containers/cni:v3.15.3
  - image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.15.3
    image: harbor.raymonds.cc/google_containers/node:v3.15.3
    image: harbor.raymonds.cc/google_containers/cni:v3.15.3
    image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.15.3
[root@k8s-master01 bin]# kubectl delete pod calico-node-4z5d4 -n kube-system 
pod "calico-node-4z5d4" deleted
[root@k8s-master01 bin]# kubectl get pod -n kube-system -o wide |grep node02
calico-node-2lvsx                          0/1     Init:0/2   0          3s      172.31.3.112     k8s-node02.example.local     <none>           <none>
[root@k8s-master01 bin]# kubectl get pod calico-node-2lvsx -n kube-system -o yaml|grep "image:"
    image: harbor.raymonds.cc/google_containers/node:v3.21.4
    image: harbor.raymonds.cc/google_containers/cni:v3.21.4
  - image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4
    image: harbor.raymonds.cc/google_containers/node:v3.21.4
    image: harbor.raymonds.cc/google_containers/cni:v3.21.4
    image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4

[root@k8s-master01 bin]# kubectl uncordon k8s-node02.example.local
node/k8s-node02.example.local uncordoned
[root@k8s-master01 bin]# kubectl get nodes
NAME                         STATUS   ROLES    AGE   VERSION
k8s-master01.example.local   Ready    <none>   85m   v1.22.6
k8s-master02.example.local   Ready    <none>   85m   v1.22.6
k8s-master03.example.local   Ready    <none>   85m   v1.22.6
k8s-node01.example.local     Ready    <none>   85m   v1.22.6
k8s-node02.example.local     Ready    <none>   84m   v1.22.6
k8s-node03.example.local     Ready    <none>   84m   v1.20.14

[root@k8s-master01 bin]# kubectl drain k8s-node03.example.local --delete-emptydir-data --force --ignore-daemonsets
node/k8s-node03.example.local cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/calico-node-qjlk8
evicting pod kube-system/calico-kube-controllers-7dd7f59c79-hjj7t
pod/calico-kube-controllers-7dd7f59c79-hjj7t evicted
node/k8s-node03.example.local evicted

[root@k8s-master01 bin]# kubectl get nodes
NAME                         STATUS                     ROLES    AGE   VERSION
k8s-master01.example.local   Ready                      <none>   85m   v1.22.6
k8s-master02.example.local   Ready                      <none>   85m   v1.22.6
k8s-master03.example.local   Ready                      <none>   85m   v1.22.6
k8s-node01.example.local     Ready                      <none>   85m   v1.22.6
k8s-node02.example.local     Ready                      <none>   85m   v1.22.6
k8s-node03.example.local     Ready,SchedulingDisabled   <none>   85m   v1.20.14

[root@k8s-node03 ~]# systemctl stop kubelet kube-proxy
[root@k8s-master01 bin]# scp /etc/systemd/system/kubelet.service.d/10-kubelet.conf k8s-node03:/etc/systemd/system/kubelet.service.d/
[root@k8s-master01 bin]# scp kubelet kube-proxy k8s-node03:/usr/local/bin/

[root@k8s-node03 ~]# systemctl daemon-reload && systemctl restart kubelet kube-proxy

[root@k8s-master01 bin]# kubectl get pod -n kube-system -o wide |grep node03
calico-node-89ft4                          1/1     Running   0          5h33m   172.31.3.113     k8s-node03.example.local     <none>           <none>
[root@k8s-master01 bin]# kubectl get pod calico-node-89ft4 -n kube-system -o yaml|grep "image:"
    image: harbor.raymonds.cc/google_containers/node:v3.15.3
    image: harbor.raymonds.cc/google_containers/cni:v3.15.3
  - image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.15.3
    image: harbor.raymonds.cc/google_containers/node:v3.15.3
    image: harbor.raymonds.cc/google_containers/cni:v3.15.3
    image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.15.3
[root@k8s-master01 bin]# kubectl delete pod calico-node-89ft4 -n kube-system 
pod "calico-node-89ft4" deleted
[root@k8s-master01 bin]# kubectl get pod -n kube-system -o wide |grep node03
calico-node-2pmjj                          0/1     Init:0/2   0          3s      172.31.3.113     k8s-node03.example.local     <none>           <none>
[root@k8s-master01 bin]# kubectl get pod calico-node-2pmjj -n kube-system -o yaml|grep "image:"
    image: harbor.raymonds.cc/google_containers/node:v3.21.4
    image: harbor.raymonds.cc/google_containers/cni:v3.21.4
  - image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4
    image: harbor.raymonds.cc/google_containers/node:v3.21.4
    image: harbor.raymonds.cc/google_containers/cni:v3.21.4
    image: harbor.raymonds.cc/google_containers/pod2daemon-flexvol:v3.21.4

[root@k8s-master01 bin]# kubectl uncordon k8s-node03.example.local
node/k8s-node03.example.local uncordoned
[root@k8s-master01 bin]# kubectl get nodes
NAME                         STATUS   ROLES    AGE   VERSION
k8s-master01.example.local   Ready    <none>   87m   v1.22.6
k8s-master02.example.local   Ready    <none>   87m   v1.22.6
k8s-master03.example.local   Ready    <none>   87m   v1.22.6
k8s-node01.example.local     Ready    <none>   87m   v1.22.6
k8s-node02.example.local     Ready    <none>   87m   v1.22.6
k8s-node03.example.local     Ready    <none>   87m   v1.22.6
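
All three workers were upgraded with the same drain / stop / copy-binaries / restart / uncordon sequence. A sketch of how that loop could be scripted from master01 (assuming passwordless SSH and the /root/kubernetes/server/bin layout used above):

#!/bin/bash
# Rolling worker upgrade: drain, replace kubelet/kube-proxy, restart, uncordon.
NODES="k8s-node01 k8s-node02 k8s-node03"
BIN_DIR=/root/kubernetes/server/bin
for node in ${NODES}; do
    kubectl drain ${node}.example.local --delete-emptydir-data --force --ignore-daemonsets
    ssh ${node} "systemctl stop kubelet kube-proxy"
    scp ${BIN_DIR}/kubelet ${BIN_DIR}/kube-proxy ${node}:/usr/local/bin/
    scp /etc/systemd/system/kubelet.service.d/10-kubelet.conf ${node}:/etc/systemd/system/kubelet.service.d/
    ssh ${node} "systemctl daemon-reload && systemctl restart kubelet kube-proxy"
    kubectl uncordon ${node}.example.local
done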

2.5 Upgrade coredns

github.com/coredns/cor…

[root@k8s-master01 bin]# cd

[root@k8s-master01 ~]# git clone https://github.com/coredns/deployment.git coredns-deployment

[root@k8s-master01 ~]# cd coredns-deployment/
[root@k8s-master01 coredns-deployment]# ls
charts  debian  docker  kubernetes  LICENSE  Makefile  README.md  systemd
[root@k8s-master01 coredns-deployment]# cd kubernetes/
[root@k8s-master01 kubernetes]# ls
CoreDNS-k8s_version.md  corefile-tool  FAQs.md    README.md    Scaling_CoreDNS.md
coredns.yaml.sed        deploy.sh      migration  rollback.sh  Upgrading_CoreDNS.md

[root@k8s-master01 kubernetes]# kubectl get cm,deploy -n kube-system 
NAME                                           DATA   AGE
configmap/calico-config                        8      52m
configmap/coredns                              1      2d
configmap/extension-apiserver-authentication   6      2d2h
configmap/kube-root-ca.crt                     1      2d2h

NAME                                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/calico-kube-controllers   1/1     1            1           52m
deployment.apps/coredns                   1/1     1            1           2d
deployment.apps/metrics-server            1/1     1            1           47h

[root@k8s-master01 kubernetes]# mkdir /root/coredns-bak

[root@k8s-master01 kubernetes]# kubectl get cm coredns -n kube-system -o yaml > /root/coredns-bak/coredns-cm_bak.yaml
[root@k8s-master01 kubernetes]# kubectl get deploy coredns -n kube-system -o yaml > /root/coredns-bak/coredns-dp_bak.yaml
[root@k8s-master01 kubernetes]# kubectl get clusterrole system:coredns -o yaml > /root/coredns-bak/cr.yaml
[root@k8s-master01 kubernetes]# kubectl get clusterrolebinding system:coredns -o yaml > /root/coredns-bak/crb.yaml
[root@k8s-master01 kubernetes]# ls /root/coredns-bak/
coredns-cm_bak.yaml  coredns-dp_bak.yaml  crb.yaml  cr.yaml
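
If the new CoreDNS deployment misbehaves, the files saved above give you a rollback path (a sketch; cluster-generated metadata such as resourceVersion/uid may need to be stripped from the backups before re-applying):

kubectl apply -f /root/coredns-bak/cr.yaml
kubectl apply -f /root/coredns-bak/crb.yaml
kubectl apply -f /root/coredns-bak/coredns-cm_bak.yaml
kubectl apply -f /root/coredns-bak/coredns-dp_bak.yaml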

[root@k8s-master01 kubernetes]# ./deploy.sh -s > /root/coredns.yaml
[root@k8s-master01 kubernetes]# cd

[root@k8s-master01 ~]# grep "image:" coredns.yaml 
        image: coredns/coredns:1.8.6

[root@k8s-master01 ~]# cat download_coredns_images.sh
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-11
#FileName:      download_coredns_images.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \\033[01;31m"
END='\033[0m'

images=$(awk -F "/"  '/image:/{print $NF}' coredns.yaml)
HARBOR_DOMAIN=harbor.raymonds.cc

images_download(){
    ${COLOR}"Starting to download the CoreDNS images"${END}
    for i in ${images};do 
        docker pull registry.aliyuncs.com/google_containers/$i
        docker tag registry.aliyuncs.com/google_containers/$i ${HARBOR_DOMAIN}/google_containers/$i
        docker rmi registry.aliyuncs.com/google_containers/$i
        docker push ${HARBOR_DOMAIN}/google_containers/$i
    done
    ${COLOR}"CoreDNS images downloaded"${END}
}

images_download

[root@k8s-master01 ~]# bash download_coredns_images.sh

[root@k8s-master01 ~]# sed -ri 's@(.*image:) coredns(/.*)@\1 harbor.raymonds.cc/google_containers\2@g' coredns.yaml

[root@k8s-master01 ~]# grep "image:" coredns.yaml 
        image: harbor.raymonds.cc/google_containers/coredns:1.8.6

[root@k8s-master01 ~]# kubectl apply -f coredns.yaml 
Warning: resource serviceaccounts/coredns is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
serviceaccount/coredns configured
Warning: resource clusterroles/system:coredns is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
clusterrole.rbac.authorization.k8s.io/system:coredns configured
Warning: resource clusterrolebindings/system:coredns is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
clusterrolebinding.rbac.authorization.k8s.io/system:coredns configured
Warning: resource configmaps/coredns is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
configmap/coredns configured
Warning: resource deployments/coredns is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
deployment.apps/coredns configured
Warning: resource services/kube-dns is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
service/kube-dns configured

[root@k8s-master01 ~]# kubectl get pod -A |grep coredns
kube-system            coredns-76d969b997-h66b9                     1/1     Running   0          25s

[root@k8s-master01 ~]# kubectl logs -f coredns-76d969b997-h66b9 -n kube-system
.:53
[INFO] plugin/reload: Running configuration MD5 = b0741fcbd8bd79287446297caa87f7a1
CoreDNS-1.8.6
linux/amd64, go1.17.1, 13a9191

[root@k8s-master01 ~]# kubectl get svc -n kube-system
NAME             TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                  AGE
kube-dns         ClusterIP   10.96.0.10     <none>        53/UDP,53/TCP,9153/TCP   107m
metrics-server   ClusterIP   10.102.84.52   <none>        443/TCP                  105m

[root@k8s-master01 ~]# curl 10.96.0.10:53
curl: (52) Empty reply from server

[root@k8s-master01 ~]# telnet 10.96.0.10 53
Trying 10.96.0.10...
Connected to 10.96.0.10.
Escape character is '^]'.
Connection closed by foreign host.
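
A more meaningful check than curl/telnet is an actual DNS lookup from inside the cluster, for example with a throwaway busybox pod (assuming the busybox:1.28 image is reachable from your nodes):

kubectl run dns-test --rm -it --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default.svc.cluster.local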

On Ubuntu the following problem can appear:

root@k8s-master01:~# kubectl get pod -A -o wide|grep coredns
NAMESPACE     NAME                                      READY   STATUS             RESTARTS   AGE   IP              NODE                         NOMINATED NODE   READINESS GATES
kube-system   coredns-847c895554-9jqq5                  0/1     CrashLoopBackOff   1          8s    192.171.30.65   k8s-master02.example.local   <none>           <none>

#Ubuntu runs a local DNS cache, so /etc/resolv.conf points back at the node itself and CoreDNS cannot resolve properly (it detects a forwarding loop)
#For details see the official docs: https://coredns.io/plugins/loop/#troubleshooting

root@k8s-master01:~# kubectl edit -n kube-system cm coredns
...
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
          max_concurrent 1000
        }
        cache 30
        loop #delete this loop plugin line to avoid the internal forwarding loop
        reload
        loadbalance
    }

root@k8s-master01:~# kubectl get pod -A -o wide |grep coredns
NAMESPACE     NAME                                      READY   STATUS             RESTARTS   AGE    IP               NODE                         NOMINATED NODE   READINESS GATES
kube-system   coredns-847c895554-r9tsd                  0/1     CrashLoopBackOff   4          3m4s   192.170.21.195   k8s-node03.example.local     <none>           <none>

root@k8s-master01:~# kubectl delete pod coredns-847c895554-r9tsd -n kube-system 
pod "coredns-847c895554-r9tsd" deleted

root@k8s-master01:~# kubectl get pod -A -o wide |grep coredns
NAMESPACE     NAME                                      READY   STATUS    RESTARTS   AGE   IP                NODE                         NOMINATED NODE   READINESS GATES
kube-system   coredns-847c895554-cqwl5                  1/1     Running   0          13s   192.167.195.130   k8s-node02.example.local     <none>           <none>
#Now it is running normally
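
Deleting the loop plugin only hides the symptom. The loop plugin's troubleshooting page instead suggests pointing kubelet (and therefore the resolv.conf that CoreDNS forwards to) at the real upstream resolver on systemd-resolved hosts. A sketch of that alternative follows; the kubelet config path is an assumption, adjust it to whatever file your kubelet's --config flag references:

# On each Ubuntu node:
KUBELET_CONFIG=/etc/kubernetes/kubelet-conf.yml   # assumed path, check kubelet --config
# point kubelet at the real (non-loopback) resolv.conf managed by systemd-resolved
# (add the resolvConf field manually if it is not present in the file)
sed -ri 's@^resolvConf:.*@resolvConf: /run/systemd/resolve/resolv.conf@' ${KUBELET_CONFIG}
systemctl restart kubelet
# recreate the CoreDNS pods so they pick up the new resolv.conf
kubectl -n kube-system delete pod -l k8s-app=kube-dns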

2.6 Upgrade metrics-server

github.com/kubernetes-…

[root@k8s-master01 ~]# wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml

#Edit the file to match the following
[root@k8s-master01 ~]# vim components.yaml
...
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
#Add the following two lines
        - --kubelet-insecure-tls
        - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
...
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
#Add the following two lines
        - name: ca-ssl
          mountPath: /etc/kubernetes/pki
...
      volumes:
      - emptyDir: {}
        name: tmp-dir
#Add the following three lines
      - name: ca-ssl
        hostPath:
          path: /etc/kubernetes/pki
...

[root@k8s-master01 ~]# grep "image:" components.yaml 
        image: k8s.gcr.io/metrics-server/metrics-server:v0.5.2

[root@k8s-master01 ~]# cat download_metrics_images.sh
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-11
#FileName:      download_metrics_images.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \\033[01;31m"
END='\033[0m'

images=$(awk -F "/"  '/image:/{print $NF}' components.yaml)
HARBOR_DOMAIN=harbor.raymonds.cc

images_download(){
    ${COLOR}"Starting to download the Metrics images"${END}
    for i in ${images};do 
        docker pull registry.aliyuncs.com/google_containers/$i
        docker tag registry.aliyuncs.com/google_containers/$i ${HARBOR_DOMAIN}/google_containers/$i
        docker rmi registry.aliyuncs.com/google_containers/$i
        docker push ${HARBOR_DOMAIN}/google_containers/$i
    done
    ${COLOR}"Metrics images downloaded"${END}
}

images_download

[root@k8s-master01 ~]# bash download_metrics_images.sh

[root@k8s-master01 ~]# docker images |grep metrics
harbor.raymonds.cc/google_containers/metrics-server            v0.5.2              f73640fb5061        8 weeks ago         64.3MB

[root@k8s-master01 ~]# sed -ri 's@(.*image:) k8s.gcr.io/metrics-server(/.*)@\1 harbor.raymonds.cc/google_containers\2@g' components.yaml 

[root@k8s-master01 ~]# grep "image:" components.yaml 
        image: harbor.raymonds.cc/google_containers/metrics-server:v0.5.2

[root@k8s-master01 ~]# kubectl apply -f components.yaml
serviceaccount/metrics-server unchanged
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader unchanged
clusterrole.rbac.authorization.k8s.io/system:metrics-server unchanged
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader unchanged
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator unchanged
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server unchanged
service/metrics-server unchanged
deployment.apps/metrics-server configured
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io unchanged

Check the status:

[root@k8s-master01 ~]# kubectl get pod -n kube-system |grep metrics
metrics-server-5f76d7ffb9-xbtdb            1/1     Running   0             32s

[root@k8s-master01 ~]# kubectl top node
NAME                         CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
k8s-master01.example.local   162m         8%     1081Mi          28%       
k8s-master02.example.local   123m         6%     932Mi           24%       
k8s-master03.example.local   155m         7%     974Mi           25%       
k8s-node01.example.local     51m          2%     435Mi           11%       
k8s-node02.example.local     72m          3%     414Mi           10%       
k8s-node03.example.local     90m          4%     463Mi           12% 
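
Besides kubectl top, you can also confirm that the aggregated metrics API itself is registered and serving:

kubectl get apiservice v1beta1.metrics.k8s.io
kubectl get --raw /apis/metrics.k8s.io/v1beta1/nodes | head -c 300; echo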

2.7 Upgrade dashboard

Official GitHub address: github.com/kubernetes/…

The latest dashboard release can be found on the official dashboard GitHub page.

root@k8s-master01:~# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.4.0/aio/deploy/recommended.yaml

[root@k8s-master01 ~]# vim recommended.yaml
...
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort #add this line
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30005 #add this line
  selector:
    k8s-app: kubernetes-dashboard
...

[root@k8s-master01 ~]# grep "image:" recommended.yaml 
          image: kubernetesui/dashboard:v2.4.0
          image: kubernetesui/metrics-scraper:v1.0.7

[root@k8s-master01 ~]# cat download_dashboard_images.sh
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-11
#FileName:      download_dashboard_images.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \\033[01;31m"
END='\033[0m'

images=$(awk -F "/"  '/image:/{print $NF}' recommended.yaml)
HARBOR_DOMAIN=harbor.raymonds.cc

images_download(){
    ${COLOR}"Starting to download the Dashboard images"${END}
    for i in ${images};do 
        docker pull registry.aliyuncs.com/google_containers/$i
        docker tag registry.aliyuncs.com/google_containers/$i ${HARBOR_DOMAIN}/google_containers/$i
        docker rmi registry.aliyuncs.com/google_containers/$i
        docker push ${HARBOR_DOMAIN}/google_containers/$i
    done
    ${COLOR}"Dashboard images downloaded"${END}
}

images_download

[root@k8s-master01 ~]# bash download_dashboard_images.sh

[root@k8s-master01 ~]# docker images |grep -E "(dashboard|metrics-scraper)"
harbor.raymonds.cc/google_containers/dashboard                 v2.4.0              72f07539ffb5        2 months ago        221MB
harbor.raymonds.cc/google_containers/metrics-scraper           v1.0.7              7801cfc6d5c0        7 months ago        34.4MB

[root@k8s-master01 ~]# sed -ri 's@(.*image:) kubernetesui(/.*)@\1 harbor.raymonds.cc/google_containers\2@g' recommended.yaml 

[root@k8s-master01 ~]# grep "image:" recommended.yaml 
          image: harbor.raymonds.cc/google_containers/dashboard:v2.4.0
          image: harbor.raymonds.cc/google_containers/metrics-scraper:v1.0.7

[root@k8s-master01 ~]# kubectl delete -f bak/recommended.yaml 
namespace "kubernetes-dashboard" deleted
serviceaccount "kubernetes-dashboard" deleted
service "kubernetes-dashboard" deleted
secret "kubernetes-dashboard-certs" deleted
secret "kubernetes-dashboard-csrf" deleted
secret "kubernetes-dashboard-key-holder" deleted
configmap "kubernetes-dashboard-settings" deleted
role.rbac.authorization.k8s.io "kubernetes-dashboard" deleted
clusterrole.rbac.authorization.k8s.io "kubernetes-dashboard" deleted
rolebinding.rbac.authorization.k8s.io "kubernetes-dashboard" deleted
clusterrolebinding.rbac.authorization.k8s.io "kubernetes-dashboard" deleted
deployment.apps "kubernetes-dashboard" deleted
service "dashboard-metrics-scraper" deleted
deployment.apps "dashboard-metrics-scraper" deleted

[root@k8s-master01 ~]# kubectl apply -f recommended.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
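
As an alternative to editing recommended.yaml before applying it, the same NodePort change could be made afterwards with kubectl patch (a sketch; 30005 is just the port chosen above):

kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard \
  -p '{"spec":{"type":"NodePort","ports":[{"port":443,"targetPort":8443,"nodePort":30005}]}}'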

Create the admin user with admin.yaml

[root@k8s-master01 ~]# vim admin.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding 
metadata: 
  name: admin-user
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system

[root@k8s-master01 ~]# kubectl apply -f admin.yaml 
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created

Browse to https://172.31.3.101:30005 and log in with the token below.

[root@k8s-master01 ~]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Name:         admin-user-token-6766b
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: cd066a7c-0a8b-45f2-90ea-93e22c7f0292

Type:  kubernetes.io/service-account-token

Data
====
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6Ik11OTlpUi1XOWs4dk9KdTZaX0ZvYlBpclhCTHRheEwzR2pEeXF0VWdFOE0ifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTY3NjZiIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJjZDA2NmE3Yy0wYThiLTQ1ZjItOTBlYS05M2UyMmM3ZjAyOTIiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.JIU7sy0p5ijhWi-xUwxuyhOp_TJHobBIp9ZFB92R2VebdJDOq21DrQOaG6ShhSfLPUqOzvZ1u6ORWz9mStuE8WetSrnl3U0ZoIoGMhzmBAvgeWj9b497_uAuNxvTkJ7ITWaHPD8WJBzlKTKj3UJR-y5UPMnouf0sKsvR6KaVScanbpX_Xp7vEj-BQrYibycm869EKw1v1792TKoJ_NrFOzcwUsjz-uSKem9VJ0fAoxbPk2cBoRa60a6mujtqlRPmvR10UDrToL8_HA11EjZMMzM-49gOVjld9Zv6dKEhBQgzXODbOqp2b3OHqon37R1jwP2oxAHWJ5w-lDHFFdJrRA
ca.crt:     1411 bytes
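
The same token can also be extracted non-interactively, which is handy for scripting the dashboard login (this relies on the ServiceAccount token Secret that is still auto-created in v1.22):

kubectl -n kube-system get secret \
  $(kubectl -n kube-system get sa admin-user -o jsonpath='{.secrets[0].name}') \
  -o jsonpath='{.data.token}' | base64 -d; echo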
