04.1 K8s Node Management (Graceful Operations)


Current configuration

centos6  k8s-master1  4g/2c → 6c/4g
#centos2 k8s-node1    2g/2c → 4c/2g
centos3  k8s-node2    2g/2c → 4c/2g
centos4  k8s-node3    4g/2c → 4c/2g

# centos5 k8s-node4   4g/2c
    Plan: drain and remove node1 and node4, then add CPU/memory to the remaining nodes.
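
The whole exercise follows the standard graceful pattern: cordon (stop scheduling) → drain (evict workloads) → delete (remove the Node object), with uncordon as the rollback at any point before deletion. A minimal sketch of the sequence applied to k8s-node4 below (kubectl v1.22; --delete-emptydir-data is the non-deprecated spelling of the --delete-local-data flag seen in the transcripts):

# Stop new pods from being scheduled onto the node.
kubectl cordon k8s-node4
# Evict everything evictable; DaemonSet pods are skipped, emptyDir data is lost.
kubectl drain k8s-node4 --ignore-daemonsets --delete-emptydir-data --force
# Only for permanent removal: drop the Node object from the API server.
kubectl delete node k8s-node4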
    

Current cluster

[root@k8s-master1 ~]# kubectl get pods -o wide --all-namespaces
NAMESPACE              NAME                                         READY   STATUS               RESTARTS       AGE     IP               NODE          NOMINATED NODE   READINESS GATES
kube-system            coredns-8554ccb6dd-j88z6                     1/1     Running              6              14d     172.30.0.19      k8s-master1   <none>           <none>
kube-system            coredns-8554ccb6dd-qb7tk                     1/1     Running              6              14d     172.30.0.20      k8s-master1   <none>           <none>
kube-system            etcd-k8s-master1                             1/1     Running              6 (29m ago)    14d     192.168.50.27    k8s-master1   <none>           <none>
kube-system            kube-apiserver-k8s-master1                   1/1     Running              27 (19m ago)   14d     192.168.50.27    k8s-master1   <none>           <none>
kube-system            kube-controller-manager-k8s-master1          1/1     Running              70             14d     192.168.50.27    k8s-master1   <none>           <none>
kube-system            kube-flannel-ds-42sn7                        1/1     Running              7 (18m ago)    14d     192.168.50.213   k8s-node4     <none>           <none>
kube-system            kube-flannel-ds-5mn77                        0/1     Completed            9              14d     192.168.50.135   k8s-node2     <none>           <none>
kube-system            kube-flannel-ds-fnwvn                        1/1     Running              9 (12m ago)    14d     192.168.50.159   k8s-node3     <none>           <none>
kube-system            kube-flannel-ds-vb6td                        1/1     Running              7              14d     192.168.50.27    k8s-master1   <none>           <none>
kube-system            kube-flannel-ds-z8nlk                        1/1     Running              10             14d     192.168.50.217   k8s-node1     <none>           <none>
kube-system            kube-proxy-f5kph                             0/1     ExitCode:0           6              14d     192.168.50.135   k8s-node2     <none>           <none>
kube-system            kube-proxy-hjmxk                             1/1     Running              6 (29m ago)    14d     192.168.50.27    k8s-master1   <none>           <none>
kube-system            kube-proxy-pxl88                             1/1     Running              6 (18m ago)    14d     192.168.50.213   k8s-node4     <none>           <none>
kube-system            kube-proxy-vx8lh                             1/1     Running              6 (20m ago)    14d     192.168.50.217   k8s-node1     <none>           <none>
kube-system            kube-proxy-xxdr9                             1/1     Running              6 (18m ago)    14d     192.168.50.159   k8s-node3     <none>           <none>
kube-system            kube-scheduler-k8s-master1                   1/1     Running              60 (19m ago)   14d     192.168.50.27    k8s-master1   <none>           <none>
kube-system            metrics-server-5cd4d878d-sg89j               1/1     Running              5 (18m ago)    87m     172.30.4.37      k8s-node4     <none>           <none>
kubernetes-dashboard   dashboard-metrics-scraper-c45b7869d-6fpc4    1/1     Running              1 (29m ago)    87m     172.30.0.18      k8s-master1   <none>           <none>
kubernetes-dashboard   kubernetes-dashboard-576cb95f94-g5xjs        1/1     Running              7 (29m ago)    8d      172.30.0.17      k8s-master1   <none>           <none>
monitoring             alertmanager-main-0                          2/2     Running              6 (18m ago)    4h36m   172.30.3.37      k8s-node3     <none>           <none>
monitoring             alertmanager-main-1                          2/2     Running              6 (10m ago)    5h7m    172.30.4.35      k8s-node4     <none>           <none>
monitoring             alertmanager-main-2                          0/2     ContainerCreating    0              24s     <none>           k8s-node2     <none>           <none>
monitoring             blackbox-exporter-6798fb5bb4-m4glf           3/3     Running              6 (10m ago)    5h1m    172.30.4.41      k8s-node4     <none>           <none>
monitoring             grafana-7476b4c65b-49m58                     1/1     Running              2 (10m ago)    4h40m   172.30.4.36      k8s-node4     <none>           <none>
monitoring             kube-state-metrics-74964b6cd4-nsz8k          3/3     Running              6 (12m ago)    87m     172.30.3.33      k8s-node3     <none>           <none>
monitoring             node-exporter-96tst                          2/2     Running              6 (12m ago)    12d     192.168.50.159   k8s-node3     <none>           <none>
monitoring             node-exporter-hr2hs                          2/2     Running              6 (20m ago)    12d     192.168.50.217   k8s-node1     <none>           <none>
monitoring             node-exporter-q8nxm                          0/2     Completed            5              12d     192.168.50.135   k8s-node2     <none>           <none>
monitoring             node-exporter-qdv5w                          2/2     Running              4 (29m ago)    12d     192.168.50.27    k8s-master1   <none>           <none>
monitoring             node-exporter-qhsrs                          2/2     Running              6 (10m ago)    12d     192.168.50.213   k8s-node4     <none>           <none>
monitoring             prometheus-adapter-5b8db7955f-4l2vr          1/1     Running              2 (18m ago)    87m     172.30.3.34      k8s-node3     <none>           <none>
monitoring             prometheus-adapter-5b8db7955f-kh9wr          1/1     Running              2              87m     172.30.3.36      k8s-node3     <none>           <none>
monitoring             prometheus-k8s-0                             2/2     Running              4 (12m ago)    4h36m   172.30.3.35      k8s-node3     <none>           <none>
monitoring             prometheus-k8s-1                             2/2     Terminating          0              18m     172.30.1.19      k8s-node1     <none>           <none>
monitoring             prometheus-operator-75d9b475d9-pnzmm         2/2     Running              4 (10m ago)    5h1m    172.30.4.38      k8s-node4     <none>           <none>
weave                  weave-scope-agent-9ztgn                      1/1     Running              4 (18m ago)    12d     192.168.50.213   k8s-node4     <none>           <none>
weave                  weave-scope-agent-d6d5m                      1/1     Running              4              12d     192.168.50.159   k8s-node3     <none>           <none>
weave                  weave-scope-agent-ldb8v                      1/1     Running              2              12d     192.168.50.27    k8s-master1   <none>           <none>
weave                  weave-scope-agent-pskbz                      0/1     ContainerCannotRun   3 (19m ago)    12d     192.168.50.135   k8s-node2     <none>           <none>
weave                  weave-scope-agent-z759n                      1/1     Running              3 (20m ago)    12d     192.168.50.217   k8s-node1     <none>           <none>
weave                  weave-scope-app-588b789d4-jwls8              1/1     Running              2 (18m ago)    5h1m    172.30.4.39      k8s-node4     <none>           <none>
weave                  weave-scope-cluster-agent-5d7745c9c9-w8cg6   1/1     Running              2 (18m ago)    87m     172.30.4.40      k8s-node4     <none>           <none>
    
[root@k8s-master1 ~]# kubectl top nodes
NAME          CPU(cores)   CPU%        MEMORY(bytes)   MEMORY%
k8s-master1   1309m        65%         1757Mi          48%
k8s-node3     746m         18%         1332Mi          77%
k8s-node4     453m         22%         1111Mi          64%
k8s-node1     <unknown>    <unknown>   <unknown>       <unknown>
k8s-node2     <unknown>    <unknown>   <unknown>       <unknown>
    
[root@k8s-master1 ~]# kubectl get nodes
NAME          STATUS     ROLES                  AGE   VERSION
k8s-master1   Ready      control-plane,master   14d   v1.22.2
k8s-node1     NotReady   <none>                 14d   v1.22.2
k8s-node2     Ready      <none>                 14d   v1.22.2
k8s-node3     Ready      <none>                 14d   v1.22.2
k8s-node4     Ready      <none>                 14d   v1.22.2
    
    

Drain node4

[root@k8s-master1 ~]# kubectl cordon k8s-node4
node/k8s-node4 cordoned
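
cordon only flips the node to SchedulingDisabled; pods already running on it are untouched. A quick check, and the undo if you change your mind:

kubectl get node k8s-node4      # STATUS becomes Ready,SchedulingDisabled
kubectl uncordon k8s-node4      # revert: make the node schedulable again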
    
[root@k8s-master1 ~]# kubectl drain k8s-node4 --delete-local-data --force --ignore-daemonsets
Flag --delete-local-data has been deprecated, This option is deprecated and will be deleted. Use --delete-emptydir-data.
node/k8s-node4 already cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/kube-flannel-ds-42sn7, kube-system/kube-proxy-pxl88, monitoring/node-exporter-qhsrs, weave/weave-scope-agent-9ztgn
evicting pod weave/weave-scope-cluster-agent-5d7745c9c9-w8cg6
evicting pod kube-system/metrics-server-5cd4d878d-sg89j
evicting pod monitoring/alertmanager-main-1
evicting pod monitoring/blackbox-exporter-6798fb5bb4-m4glf
evicting pod monitoring/grafana-7476b4c65b-49m58
evicting pod monitoring/prometheus-operator-75d9b475d9-pnzmm
evicting pod weave/weave-scope-app-588b789d4-jwls8
error when evicting pods/"alertmanager-main-1" -n "monitoring" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
evicting pod monitoring/alertmanager-main-1
error when evicting pods/"alertmanager-main-1" -n "monitoring" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
evicting pod monitoring/alertmanager-main-1
error when evicting pods/"alertmanager-main-1" -n "monitoring" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
evicting pod monitoring/alertmanager-main-1
error when evicting pods/"alertmanager-main-1" -n "monitoring" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
evicting pod monitoring/alertmanager-main-1
error when evicting pods/"alertmanager-main-1" -n "monitoring" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
evicting pod monitoring/alertmanager-main-1
error when evicting pods/"alertmanager-main-1" -n "monitoring" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
evicting pod monitoring/alertmanager-main-1
error when evicting pods/"alertmanager-main-1" -n "monitoring" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
pod/metrics-server-5cd4d878d-sg89j evicted
evicting pod monitoring/alertmanager-main-1
pod/prometheus-operator-75d9b475d9-pnzmm evicted
error when evicting pods/"alertmanager-main-1" -n "monitoring" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
pod/grafana-7476b4c65b-49m58 evicted
evicting pod monitoring/alertmanager-main-1
error when evicting pods/"alertmanager-main-1" -n "monitoring" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
pod/weave-scope-app-588b789d4-jwls8 evicted
evicting pod monitoring/alertmanager-main-1
error when evicting pods/"alertmanager-main-1" -n "monitoring" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
pod/blackbox-exporter-6798fb5bb4-m4glf evicted
evicting pod monitoring/alertmanager-main-1
error when evicting pods/"alertmanager-main-1" -n "monitoring" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
pod/weave-scope-cluster-agent-5d7745c9c9-w8cg6 evicted
evicting pod monitoring/alertmanager-main-1
error when evicting pods/"alertmanager-main-1" -n "monitoring" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
evicting pod monitoring/alertmanager-main-1
error when evicting pods/"alertmanager-main-1" -n "monitoring" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
evicting pod monitoring/alertmanager-main-1
error when evicting pods/"alertmanager-main-1" -n "monitoring" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
evicting pod monitoring/alertmanager-main-1
error when evicting pods/"alertmanager-main-1" -n "monitoring" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
evicting pod monitoring/alertmanager-main-1
error when evicting pods/"alertmanager-main-1" -n "monitoring" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
evicting pod monitoring/alertmanager-main-1
error when evicting pods/"alertmanager-main-1" -n "monitoring" (will retry after 5s): Cannot evict pod as it would violate the pod's disruption budget.
evicting pod monitoring/alertmanager-main-1
pod/alertmanager-main-1 evicted
node/k8s-node4 drained
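
The repeated "would violate the pod's disruption budget" errors are expected: kube-prometheus ships a PodDisruptionBudget for Alertmanager, so the eviction API refuses to take alertmanager-main-1 down while its replacement (alertmanager-main-2, still ContainerCreating on k8s-node2 in the listing above) is not yet Ready; drain simply retries every 5s until the budget is satisfied. To see which budget is blocking (the object name here is assumed from the kube-prometheus defaults, so check the get output first):

kubectl get pdb -n monitoring
kubectl describe pdb alertmanager-main -n monitoring

Re-running drain afterwards is idempotent and simply confirms that only DaemonSet-managed pods remain: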
    
[root@k8s-master1 ~]# kubectl drain k8s-node4 --delete-local-data --force --ignore-daemonsets
Flag --delete-local-data has been deprecated, This option is deprecated and will be deleted. Use --delete-emptydir-data.
node/k8s-node4 already cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/kube-flannel-ds-42sn7, kube-system/kube-proxy-pxl88, monitoring/node-exporter-qhsrs, weave/weave-scope-agent-9ztgn
node/k8s-node4 drained
    
[root@k8s-master1 ~]# kubectl delete node k8s-node4
node "k8s-node4" deleted
[root@k8s-master1 ~]#


[root@k8s-master1 ~]# kubectl get pods -o wide --all-namespaces  | grep node4
kube-system            kube-flannel-ds-42sn7                        1/1     Running             7 (21m ago)    14d     192.168.50.213   k8s-node4     <none>           <none>
kube-system            kube-proxy-pxl88                             1/1     Running             6 (21m ago)    14d     192.168.50.213   k8s-node4     <none>           <none>
monitoring             node-exporter-qhsrs                          2/2     Running             6 (14m ago)    12d     192.168.50.213   k8s-node4     <none>           <none>
weave                  weave-scope-agent-9ztgn                      1/1     Running             4 (21m ago)    12d     192.168.50.213   k8s-node4     <none>           <none>
[root@k8s-master1 ~]#
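
The DaemonSet pods can linger in listings for a short while after the delete, until the pod garbage collector notices that their Node object is gone. Deleting the Node also does nothing on the host itself: the kubelet, container runtime, flannel CNI config, and kube-proxy iptables rules all survive. A cleanup sketch, assuming a kubeadm-built cluster, run on k8s-node4 itself:

kubeadm reset -f                                    # tear down kubelet state, certs, and static manifests
rm -rf /etc/cni/net.d                               # drop the CNI config left behind by flannel
iptables -F && iptables -t nat -F && iptables -X    # flush rules written by kube-proxy/flannel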
    

Drain node1 (NotReady)

[root@k8s-master1 ~]# kubectl get nodes
NAME          STATUS     ROLES                  AGE   VERSION
k8s-master1   Ready      control-plane,master   14d   v1.22.2
k8s-node1     NotReady   <none>                 14d   v1.22.2
k8s-node2     Ready      <none>                 14d   v1.22.2
k8s-node3     Ready      <none>                 14d   v1.22.2
    
[root@k8s-master1 ~]# kubectl cordon k8s-node1
node/k8s-node1 cordoned
    
    # The node was already NotReady, so the drain hung on the eviction; after letting it run for a while, we interrupted it (Ctrl-C) and went straight to the next step of deleting the node.
[root@k8s-master1 ~]# kubectl drain k8s-node1 --delete-local-data --force --ignore-daemonsets
Flag --delete-local-data has been deprecated, This option is deprecated and will be deleted. Use --delete-emptydir-data.
node/k8s-node1 already cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/kube-flannel-ds-z8nlk, kube-system/kube-proxy-vx8lh, monitoring/node-exporter-hr2hs, weave/weave-scope-agent-z759n
evicting pod monitoring/prometheus-k8s-1

^C
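
On a NotReady node the drain can never finish: the eviction API waits for the kubelet to confirm that prometheus-k8s-1 has terminated, but the kubelet is unreachable, so the pod sits in Terminating indefinitely (visible in the pod listing above). Deleting the node, as done next, releases it anyway via the pod garbage collector; if you only need to free a single stuck pod, a force delete also works (it skips graceful shutdown, so use it only when the node is truly dead):

kubectl delete pod prometheus-k8s-1 -n monitoring --grace-period=0 --force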

[root@k8s-master1 ~]# kubectl delete node k8s-node1
node "k8s-node1" deleted