7. Deploying a Highly Available Cluster with Kubernetes v1.25 (Binary) and Docker (Part 2)


6. Runtime Installation

6.1 Installing Docker

Install docker-ce on all master and node hosts:

[root@k8s-master01 ~]# cat install_docker_binary.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2021-12-07
#FileName:      install_docker_binary.sh
#URL:           raymond.blog.csdn.net
#Description:   install_docker_binary for centos 7/8 & ubuntu 18.04/20.04 & Rocky 8
#Copyright (C): 2021 All rights reserved
#*********************************************************************************************
SRC_DIR=/usr/local/src
COLOR="echo -e \033[01;31m"
END='\033[0m'
URL='https://mirrors.cloud.tencent.com/docker-ce/linux/static/stable/x86_64/'
DOCKER_FILE=docker-20.10.18.tgz
HARBOR_DOMAIN=harbor.raymonds.cc
​
os(){
    OS_ID=`sed -rn '/^NAME=/s@.*="([[:alpha:]]+).*"$@\1@p' /etc/os-release`
}
​
check_file (){
    cd ${SRC_DIR}
    rpm -q wget &> /dev/null || yum -y install wget &> /dev/null
    if [ ! -e ${DOCKER_FILE} ];then
        ${COLOR}"缺少${DOCKER_FILE}文件,如果是离线包,请把文件放到${SRC_DIR}目录下"${END}
        ${COLOR}'开始下载DOCKER二进制安装包'${END}
        wget ${URL}${DOCKER_FILE} || { ${COLOR}"DOCKER二进制安装包下载失败"${END}; exit; } 
    else
        ${COLOR}"相关文件已准备好"${END}
    fi
}
​
install(){ 
    [ -f /usr/bin/docker ] && { ${COLOR}"DOCKER已存在,安装失败"${END};exit; }
    ${COLOR}"开始安装DOCKER..."${END}
    tar xf ${DOCKER_FILE} 
    mv docker/* /usr/bin/
    if ! getent group docker > /dev/null; then
        groupadd --system docker
    fi
    cat > /usr/lib/systemd/system/docker.socket <<-EOF
[Unit]
Description=Docker Socket for the API
​
[Socket]
ListenStream=/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
​
[Install]
WantedBy=sockets.target
EOF
​
    cat > /usr/lib/systemd/system/containerd.service <<-EOF
# Copyright The containerd Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
​
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/bin/containerd
​
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
# Comment TasksMax if your systemd version does not supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
EOF
​
    cat > /usr/lib/systemd/system/docker.service <<-EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target docker.socket firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service
​
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
​
# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3

# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s
​
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
​
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
​
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes

# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500

[Install]
WantedBy=multi-user.target
EOF
​
    mkdir -p /etc/docker
    cat > /etc/docker/daemon.json <<-EOF
{
    "registry-mirrors": [
        "https://registry.docker-cn.com",
        "http://hub-mirror.c.163.com",
        "https://docker.mirrors.ustc.edu.cn"
    ],
    "insecure-registries": ["${HARBOR_DOMAIN}"],
    "exec-opts": ["native.cgroupdriver=systemd"],
    "max-concurrent-downloads": 10,
    "max-concurrent-uploads": 5,
    "log-opts": {
        "max-size": "300m",
        "max-file": "2"  
    },
    "live-restore": true
}
EOF
    echo 'alias rmi="docker images -qa|xargs docker rmi -f"' >> ~/.bashrc
    echo 'alias rmc="docker ps -qa|xargs docker rm -f"' >> ~/.bashrc
    systemctl daemon-reload
    systemctl enable --now docker &> /dev/null
    systemctl is-active docker &> /dev/null && ${COLOR}"Docker 服务启动成功"${END} || { ${COLOR}"Docker 启动失败"${END};exit; }
    docker version && ${COLOR}"Docker 安装成功"${END} || ${COLOR}"Docker 安装失败"${END}
}
​
set_swap_limit(){
    if [ ${OS_ID} == "Ubuntu" ];then
        ${COLOR}'设置Docker的"WARNING: No swap limit support"警告'${END}
        sed -ri '/^GRUB_CMDLINE_LINUX=/s@"$@ swapaccount=1"@' /etc/default/grub
        update-grub &> /dev/null
        ${COLOR}"10秒后,机器会自动重启"${END}
        sleep 10
        reboot
    fi
}
​
main(){
    os
    check_file
    install
    set_swap_limit
}
​
main
​
[root@k8s-master01 ~]# bash install_docker_binary.sh
[root@k8s-master02 ~]# bash install_docker_binary.sh
[root@k8s-master03 ~]# bash install_docker_binary.sh
​
[root@k8s-node01 ~]# bash install_docker_binary.sh
[root@k8s-node02 ~]# bash install_docker_binary.sh
[root@k8s-node03 ~]# bash install_docker_binary.sh
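
As an optional sanity check after the script finishes, confirm that Docker is running and picked up the systemd cgroup driver configured in daemon.json (any node, same commands):

[root@k8s-master01 ~]# systemctl is-active docker
[root@k8s-master01 ~]# docker info | grep -i "cgroup driver"

The second command should report "Cgroup Driver: systemd"; if it shows cgroupfs, the daemon.json above was not applied.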

6.2 Installing cri-dockerd

Kubernetes removed dockershim support in v1.24, and Docker Engine does not natively implement the CRI specification, so the two can no longer be integrated directly. To bridge this gap, Mirantis and Docker jointly created the cri-dockerd project, which provides a shim that gives Docker Engine a CRI-compliant interface, allowing Kubernetes to control Docker through the CRI.

Project page: github.com/Mirantis/cr…

The cri-dockerd project publishes prebuilt binary packages; download the package matching your OS and platform to install it. The example below uses a 64-bit Ubuntu 20.04 environment and the latest cri-dockerd release at the time of writing, v0.2.5.

[root@k8s-master01 ~]# wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.2.5/cri-dockerd-0.2.5.amd64.tgz
[root@k8s-master01 ~]# tar xf cri-dockerd-0.2.5.amd64.tgz
[root@k8s-master01 ~]# mv cri-dockerd/* /usr/bin/
[root@k8s-master01 ~]# ll /usr/bin/cri-dockerd 
-rwxr-xr-x 1 root root 52351080 Sep  3 07:00 /usr/bin/cri-dockerd*
​
[root@k8s-master01 ~]# cat > /usr/lib/systemd/system/cri-docker.service <<EOF
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
​
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd://
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
​
# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3

# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s
​
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
​
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
Delegate=yes
KillMode=process
​
[Install]
WantedBy=multi-user.target
EOF
​
[root@k8s-master01 ~]# cat > /usr/lib/systemd/system/cri-docker.socket <<EOF
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service
​
[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
​
[Install]
WantedBy=sockets.target
EOF
​
[root@k8s-master01 ~]# sed -ri '/ExecStart.*/s@(ExecStart.*)@\1 --pod-infra-container-image harbor.raymonds.cc/google_containers/pause:3.8@g' /lib/systemd/system/cri-docker.service
# Note: if you do not have a Harbor registry, run the following command instead
[root@k8s-master01 ~]# sed -ri '/ExecStart.*/s@(ExecStart.*)@\1 --pod-infra-container-image registry.aliyuncs.com/google_containers/pause:3.8@g' /lib/systemd/system/cri-docker.service
[root@k8s-master01 ~]# systemctl daemon-reload && systemctl enable --now cri-docker
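
Optionally confirm that cri-dockerd came up and is listening on the socket defined in cri-docker.socket above (the path resolves to /run/cri-dockerd.sock):

[root@k8s-master01 ~]# systemctl is-active cri-docker
[root@k8s-master01 ~]# ls -l /run/cri-dockerd.sock
[root@k8s-master01 ~]# cri-dockerd --version

The first command should print "active" and the last should print v0.2.5.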

Install on master02, master03, and the node hosts:

[root@k8s-master01 ~]# cat install_cri_dockerd_binary.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-09-03
#FileName:      install_cri_dockerd_binary.sh
#URL:           raymond.blog.csdn.net
#Description:   install_cri_dockerd_binary for centos 7/8 & ubuntu 18.04/20.04 & Rocky 8
#Copyright (C): 2021 All rights reserved
#*********************************************************************************************
SRC_DIR=/usr/local/src
COLOR="echo -e \033[01;31m"
END='\033[0m'
#cri-dockerd下载地址:https://github.com/Mirantis/cri-dockerd/releases/download/v0.2.5/cri-dockerd-0.2.5.amd64.tgz
CRI_DOCKER_FILE=cri-dockerd-0.2.5.amd64.tgz
HARBOR_DOMAIN=harbor.raymonds.cc
​
check_file (){
    cd ${SRC_DIR}
    if [ ! -e ${CRI_DOCKER_FILE} ];then
        ${COLOR}"缺少${CRI_DOCKER_FILE}文件,如果是离线包,请把文件放到${SRC_DIR}目录下"${END}
        exit
    else
        ${COLOR}"相关文件已准备好"${END}
    fi
}
​
install(){ 
    [ -f /usr/bin/cri-dockerd ] && { ${COLOR}"cri-dockerd已存在,安装失败"${END};exit; }
    ${COLOR}"开始安装cri-dockerd..."${END}
    tar xf ${CRI_DOCKER_FILE} 
    mv cri-dockerd/* /usr/bin/
    cat > /usr/lib/systemd/system/cri-docker.service <<-EOF
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
​
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image ${HARBOR_DOMAIN}/google_containers/pause:3.8
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
​
# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3

# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s
​
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
​
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
Delegate=yes
KillMode=process
​
[Install]
WantedBy=multi-user.target
EOF
    cat > /usr/lib/systemd/system/cri-docker.socket <<-EOF
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service
​
[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
​
[Install]
WantedBy=sockets.target
EOF
    systemctl daemon-reload
    systemctl enable --now cri-docker &> /dev/null
    systemctl is-active cri-docker &> /dev/null && ${COLOR}"cri-docker 服务启动成功"${END} || { ${COLOR}"cri-docker 启动失败"${END};exit; }
    cri-dockerd --version && ${COLOR}"cri-dockerd 安装成功"${END} || ${COLOR}"cri-dockerd 安装失败"${END}
}
​
main(){
    check_file
    install
}
​
main
​
[root@k8s-master02 ~]# bash install_cri_dockerd_binary.sh
[root@k8s-master03 ~]# bash install_cri_dockerd_binary.sh
[root@k8s-node01 ~]# bash install_cri_dockerd_binary.sh
[root@k8s-node02 ~]# bash install_cri_dockerd_binary.sh
[root@k8s-node03 ~]# bash install_cri_dockerd_binary.sh

7. Deploying the Master Nodes

7.1 Creating the etcd Directories and Copying the etcd Certificates

Create the etcd certificate directory on the master nodes:

[root@k8s-master01 ~]# mkdir /etc/etcd/ssl -p
[root@k8s-master02 ~]# mkdir /etc/etcd/ssl -p
[root@k8s-master03 ~]# mkdir /etc/etcd/ssl -p

Copy the etcd certificates to the master nodes:

[root@k8s-etcd01 pki]# for NODE in k8s-master01 k8s-master02 k8s-master03; do
     ssh -o StrictHostKeyChecking=no $NODE "mkdir -p /etc/etcd/ssl"
     for FILE in etcd-ca-key.pem  etcd-ca.pem  etcd-key.pem  etcd.pem; do
       scp -o StrictHostKeyChecking=no /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}
     done
 done

Create the etcd certificate directory under the Kubernetes PKI path on all master nodes and link the certificates:

[root@k8s-master01 ~]# mkdir /etc/kubernetes/pki/etcd -p
[root@k8s-master02 ~]# mkdir /etc/kubernetes/pki/etcd -p
[root@k8s-master03 ~]# mkdir /etc/kubernetes/pki/etcd -p
​
[root@k8s-master01 ~]# ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
[root@k8s-master02 ~]# ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
[root@k8s-master03 ~]# ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
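
Optionally verify that the symlinks resolve to the copied etcd certificates (a dangling link here will break the apiserver's etcd flags later):

[root@k8s-master01 ~]# ls -lL /etc/kubernetes/pki/etcd/

All four files should be listed with a non-zero size.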

7.2 Installing the Kubernetes Components

Download the latest 1.25.x release:

github.com/kubernetes/…

Open the release page and download the Kubernetes server package:

[root@k8s-master01 ~]# wget https://dl.k8s.io/v1.25.2/kubernetes-server-linux-amd64.tar.gz

Extract the Kubernetes binaries:

[root@k8s-master01 ~]# tar -xf kubernetes-server-linux-amd64.tar.gz  --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}

Check the version:

[root@k8s-master01 ~]# kubelet --version
Kubernetes v1.25.2

Copy the components to the other master nodes:

[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do echo $NODE; scp -o StrictHostKeyChecking=no /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} $NODE:/usr/local/bin/; done
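
A quick optional check that the binaries arrived intact on the other masters:

[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do echo $NODE; ssh $NODE "kubelet --version"; done

Each node should print Kubernetes v1.25.2.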

Create the /opt/cni/bin directory on the master nodes:

[root@k8s-master01 ~]# mkdir -p /opt/cni/bin
[root@k8s-master02 ~]# mkdir -p /opt/cni/bin
[root@k8s-master03 ~]# mkdir -p /opt/cni/bin

7.3 Generating the Kubernetes Component Certificates

This is the most critical part of a binary installation; a single mistake here breaks everything, so make sure every step is done correctly.

Create the Kubernetes PKI directory on the master nodes:

[root@k8s-master01 ~]# mkdir -p /etc/kubernetes/pki
[root@k8s-master02 ~]# mkdir -p /etc/kubernetes/pki
[root@k8s-master03 ~]# mkdir -p /etc/kubernetes/pki

Download the certificate generation tools on master01:

[root@k8s-master01 ~]# wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64
​
[root@k8s-master01 ~]# wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64
​
[root@k8s-master01 ~]# mv cfssl_1.6.1_linux_amd64 /usr/local/bin/cfssl
[root@k8s-master01 ~]# mv cfssljson_1.6.1_linux_amd64 /usr/local/bin/cfssljson
​
[root@k8s-master01 ~]# chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson

7.3.1 Generating the CA Certificate

[root@k8s-master01 ~]# mkdir pki
[root@k8s-master01 ~]# cd pki/
​
[root@k8s-master01 pki]# cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF
​
[root@k8s-master01 pki]# cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca
# Output
2022/09/23 19:02:38 [INFO] generating a new CA key and certificate from CSR
2022/09/23 19:02:38 [INFO] generate received request
2022/09/23 19:02:38 [INFO] received CSR
2022/09/23 19:02:38 [INFO] generating key: rsa-2048
2022/09/23 19:02:38 [INFO] encoded CSR
2022/09/23 19:02:38 [INFO] signed certificate with serial number 300246035273547949949054133479321620737070405660
​
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/ca*
-rw-r--r-- 1 root root 1070 Sep 23 19:02 /etc/kubernetes/pki/ca.csr
-rw------- 1 root root 1679 Sep 23 19:02 /etc/kubernetes/pki/ca-key.pem
-rw-r--r-- 1 root root 1363 Sep 23 19:02 /etc/kubernetes/pki/ca.pem

7.3.2 Generating the apiserver Certificate

10.96.0.1 is the first address of the Kubernetes Service CIDR (10.96.0.0/12); if you change the Service CIDR, change 10.96.0.1 accordingly.

If this is not a highly available cluster, 172.31.3.188 should be Master01's IP.

[root@k8s-master01 pki]# cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
EOF
​
[root@k8s-master01 pki]# cat > apiserver-csr.json <<EOF
{
  "CN": "kube-apiserver",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF
​
[root@k8s-master01 pki]# cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=ca-config.json -hostname=10.96.0.1,172.31.3.188,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,172.31.3.101,172.31.3.102,172.31.3.103 -profile=kubernetes apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver
# Output
2022/09/23 19:07:22 [INFO] generate received request
2022/09/23 19:07:22 [INFO] received CSR
2022/09/23 19:07:22 [INFO] generating key: rsa-2048
2022/09/23 19:07:22 [INFO] encoded CSR
2022/09/23 19:07:22 [INFO] signed certificate with serial number 97639055323963738097663089184954029439938106495
​
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/apiserver*
-rw-r--r-- 1 root root 1297 Sep 23 19:07 /etc/kubernetes/pki/apiserver.csr
-rw------- 1 root root 1675 Sep 23 19:07 /etc/kubernetes/pki/apiserver-key.pem
-rw-r--r-- 1 root root 1692 Sep 23 19:07 /etc/kubernetes/pki/apiserver.pem
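
Because a missing SAN here is one of the most common causes of TLS failures later on, it is worth confirming that every IP and hostname passed to -hostname ended up in the certificate. A minimal check with openssl:

[root@k8s-master01 pki]# openssl x509 -in /etc/kubernetes/pki/apiserver.pem -noout -text | grep -A1 "Subject Alternative Name"

The output should list 10.96.0.1, 172.31.3.188, 127.0.0.1, the kubernetes.default.* names and the three master IPs.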

7.3.3 Generating the apiserver Aggregation Certificates

Generate the aggregation (front proxy) certificates for the apiserver. These back the --requestheader-client-ca-file and --requestheader-allowed-names=aggregator settings used later.

[root@k8s-master01 pki]# cat > front-proxy-ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
     "algo": "rsa",
     "size": 2048
  }
}
EOF

[root@k8s-master01 pki]# cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca
# Output
2022/09/23 19:08:04 [INFO] generating a new CA key and certificate from CSR
2022/09/23 19:08:04 [INFO] generate received request
2022/09/23 19:08:04 [INFO] received CSR
2022/09/23 19:08:04 [INFO] generating key: rsa-2048
2022/09/23 19:08:04 [INFO] encoded CSR
2022/09/23 19:08:04 [INFO] signed certificate with serial number 645044159777554404075615178297005647861235360519

[root@k8s-master01 pki]# ll /etc/kubernetes/pki/front-proxy-ca*
-rw-r--r-- 1 root root  891 Sep 23 19:08 /etc/kubernetes/pki/front-proxy-ca.csr
-rw------- 1 root root 1679 Sep 23 19:08 /etc/kubernetes/pki/front-proxy-ca-key.pem
-rw-r--r-- 1 root root 1094 Sep 23 19:08 /etc/kubernetes/pki/front-proxy-ca.pem

[root@k8s-master01 pki]# cat > front-proxy-client-csr.json <<EOF
{
  "CN": "front-proxy-client",
  "key": {
     "algo": "rsa",
     "size": 2048
  }
}
EOF

[root@k8s-master01 pki]# cfssl gencert -ca=/etc/kubernetes/pki/front-proxy-ca.pem -ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem -config=ca-config.json -profile=kubernetes front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client
# Output (the warning can be ignored)
2022/09/23 19:08:48 [INFO] generate received request
2022/09/23 19:08:48 [INFO] received CSR
2022/09/23 19:08:48 [INFO] generating key: rsa-2048
2022/09/23 19:08:48 [INFO] encoded CSR
2022/09/23 19:08:48 [INFO] signed certificate with serial number 542110043523992850338882659118864861631516149446
2022/09/23 19:08:48 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

[root@k8s-master01 pki]# ll /etc/kubernetes/pki/front-proxy-client*
-rw-r--r-- 1 root root  903 Sep 23 19:08 /etc/kubernetes/pki/front-proxy-client.csr
-rw------- 1 root root 1675 Sep 23 19:08 /etc/kubernetes/pki/front-proxy-client-key.pem
-rw-r--r-- 1 root root 1188 Sep 23 19:08 /etc/kubernetes/pki/front-proxy-client.pem

7.3.4 Generating the controller-manager Certificate and kubeconfig

[root@k8s-master01 pki]# cat > manager-csr.json <<EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [    {      "C": "CN",      "ST": "Beijing",      "L": "Beijing",      "O": "system:kube-controller-manager",      "OU": "Kubernetes-manual"    }  ]
}
EOF

[root@k8s-master01 pki]# cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem  -config=ca-config.json -profile=kubernetes manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager
# Output
2022/09/23 19:09:32 [INFO] generate received request
2022/09/23 19:09:32 [INFO] received CSR
2022/09/23 19:09:32 [INFO] generating key: rsa-2048
2022/09/23 19:09:32 [INFO] encoded CSR
2022/09/23 19:09:32 [INFO] signed certificate with serial number 458236955769281375346096830700239559896822453072
2022/09/23 19:09:32 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
​
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/controller-manager*
-rw-r--r-- 1 root root 1082 Sep 23 19:09 /etc/kubernetes/pki/controller-manager.csr
-rw------- 1 root root 1679 Sep 23 19:09 /etc/kubernetes/pki/controller-manager-key.pem
-rw-r--r-- 1 root root 1501 Sep 23 19:09 /etc/kubernetes/pki/controller-manager.pem
​
# Note: if this is not a highly available cluster, replace 172.31.3.188:6443 with master01's address
# set-cluster: define a cluster entry
[root@k8s-master01 pki]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://172.31.3.188:6443 --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# Output
Cluster "kubernetes" set.
​
# set-credentials: define a user entry
[root@k8s-master01 pki]# kubectl config set-credentials system:kube-controller-manager --client-certificate=/etc/kubernetes/pki/controller-manager.pem --client-key=/etc/kubernetes/pki/controller-manager-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# Output
User "system:kube-controller-manager" set.
​
# Define a context entry
[root@k8s-master01 pki]# kubectl config set-context system:kube-controller-manager@kubernetes --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# Output
Context "system:kube-controller-manager@kubernetes" created.
​
# Use this context as the default
[root@k8s-master01 pki]# kubectl config use-context system:kube-controller-manager@kubernetes --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# Output
Switched to context "system:kube-controller-manager@kubernetes".
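
To confirm the kubeconfig was assembled correctly (cluster, user and current-context all set), you can optionally inspect it without exposing the embedded certificates:

[root@k8s-master01 pki]# kubectl config view --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

The certificate fields are shown as DATA+OMITTED/REDACTED; check that the server URL and current-context look right.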

7.3.5 Generating the scheduler Certificate and kubeconfig

[root@k8s-master01 pki]# cat > scheduler-csr.json <<EOF
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [    {      "C": "CN",      "ST": "Beijing",      "L": "Beijing",      "O": "system:kube-scheduler",      "OU": "Kubernetes-manual"    }  ]
}
EOF

[root@k8s-master01 pki]# cfssl gencert \
   -ca=/etc/kubernetes/pki/ca.pem \
   -ca-key=/etc/kubernetes/pki/ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler
# Output
2022/09/23 19:10:51 [INFO] generate received request
2022/09/23 19:10:51 [INFO] received CSR
2022/09/23 19:10:51 [INFO] generating key: rsa-2048
2022/09/23 19:10:51 [INFO] encoded CSR
2022/09/23 19:10:51 [INFO] signed certificate with serial number 186470152054476932937744145753250739942742891584
2022/09/23 19:10:51 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
​
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/scheduler*
-rw-r--r-- 1 root root 1058 Sep 23 19:10 /etc/kubernetes/pki/scheduler.csr
-rw------- 1 root root 1675 Sep 23 19:10 /etc/kubernetes/pki/scheduler-key.pem
-rw-r--r-- 1 root root 1476 Sep 23 19:10 /etc/kubernetes/pki/scheduler.pem
​
# Note: if this is not a highly available cluster, replace 172.31.3.188:6443 with master01's address
[root@k8s-master01 pki]# kubectl config set-cluster kubernetes \
     --certificate-authority=/etc/kubernetes/pki/ca.pem \
     --embed-certs=true \
     --server=https://172.31.3.188:6443 \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
# Output
Cluster "kubernetes" set.
​
[root@k8s-master01 pki]# kubectl config set-credentials system:kube-scheduler \
     --client-certificate=/etc/kubernetes/pki/scheduler.pem \
     --client-key=/etc/kubernetes/pki/scheduler-key.pem \
     --embed-certs=true \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
# Output
User "system:kube-scheduler" set.
​
[root@k8s-master01 pki]# kubectl config set-context system:kube-scheduler@kubernetes \
     --cluster=kubernetes \
     --user=system:kube-scheduler \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
# Output
Context "system:kube-scheduler@kubernetes" created.
​
[root@k8s-master01 pki]# kubectl config use-context system:kube-scheduler@kubernetes \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
# Output
Switched to context "system:kube-scheduler@kubernetes".

7.3.6 Generating the admin Certificate and kubeconfig

[root@k8s-master01 pki]# cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [    {      "C": "CN",      "ST": "Beijing",      "L": "Beijing",      "O": "system:masters",      "OU": "Kubernetes-manual"    }  ]
}
EOF

[root@k8s-master01 pki]# cfssl gencert \
   -ca=/etc/kubernetes/pki/ca.pem \
   -ca-key=/etc/kubernetes/pki/ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin
# Output
2022/09/23 19:12:09 [INFO] generate received request
2022/09/23 19:12:09 [INFO] received CSR
2022/09/23 19:12:09 [INFO] generating key: rsa-2048
2022/09/23 19:12:09 [INFO] encoded CSR
2022/09/23 19:12:09 [INFO] signed certificate with serial number 317231386349206655263931288739385974109450858184
2022/09/23 19:12:09 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
​
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/admin*
-rw-r--r-- 1 root root 1025 Sep 23 19:12 /etc/kubernetes/pki/admin.csr
-rw------- 1 root root 1679 Sep 23 19:12 /etc/kubernetes/pki/admin-key.pem
-rw-r--r-- 1 root root 1444 Sep 23 19:12 /etc/kubernetes/pki/admin.pem
​
# Note: if this is not a highly available cluster, replace 172.31.3.188:6443 with master01's address
[root@k8s-master01 pki]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://172.31.3.188:6443 --kubeconfig=/etc/kubernetes/admin.kubeconfig
# Output
Cluster "kubernetes" set.
​
[root@k8s-master01 pki]# kubectl config set-credentials kubernetes-admin --client-certificate=/etc/kubernetes/pki/admin.pem --client-key=/etc/kubernetes/pki/admin-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/admin.kubeconfig
# Output
User "kubernetes-admin" set.
​
[root@k8s-master01 pki]# kubectl config set-context kubernetes-admin@kubernetes --cluster=kubernetes     --user=kubernetes-admin --kubeconfig=/etc/kubernetes/admin.kubeconfig
# Output
Context "kubernetes-admin@kubernetes" created.
​
[root@k8s-master01 pki]# kubectl config use-context kubernetes-admin@kubernetes --kubeconfig=/etc/kubernetes/admin.kubeconfig
# Output
Switched to context "kubernetes-admin@kubernetes".

7.3.7 Creating the ServiceAccount Key

Create the ServiceAccount key pair, which is used to sign and verify ServiceAccount token secrets:

[root@k8s-master01 pki]# openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
# Output
Generating RSA private key, 2048 bit long modulus
..............................+++
....................................+++
e is 65537 (0x10001)
​
[root@k8s-master01 pki]# openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
# Output
writing RSA key
​
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/sa*
-rw------- 1 root root 1679 Sep 23 19:14 /etc/kubernetes/pki/sa.key
-rw-r--r-- 1 root root  451 Sep 23 19:14 /etc/kubernetes/pki/sa.pub
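
Optionally confirm that sa.pub really is the public half of sa.key by comparing the RSA moduli; the two checksums must be identical:

[root@k8s-master01 pki]# openssl rsa -in /etc/kubernetes/pki/sa.key -noout -modulus | md5sum
[root@k8s-master01 pki]# openssl rsa -pubin -in /etc/kubernetes/pki/sa.pub -noout -modulus | md5sum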

Send the certificates to the other master nodes:

[root@k8s-master01 pki]# for NODE in k8s-master02 k8s-master03; do 
    for FILE in $(ls /etc/kubernetes/pki | grep -v etcd); do 
        scp /etc/kubernetes/pki/${FILE} $NODE:/etc/kubernetes/pki/${FILE};
    done; 
    for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig; do 
        scp /etc/kubernetes/${FILE} $NODE:/etc/kubernetes/${FILE};
    done;
done

Review the certificate files:

[root@k8s-master01 pki]# ll -R /etc/kubernetes/pki/
/etc/kubernetes/pki/:
total 92
-rw-r--r-- 1 root root 1025 Sep 23 19:12 admin.csr
-rw------- 1 root root 1679 Sep 23 19:12 admin-key.pem
-rw-r--r-- 1 root root 1444 Sep 23 19:12 admin.pem
-rw-r--r-- 1 root root 1297 Sep 23 19:07 apiserver.csr
-rw------- 1 root root 1675 Sep 23 19:07 apiserver-key.pem
-rw-r--r-- 1 root root 1692 Sep 23 19:07 apiserver.pem
-rw-r--r-- 1 root root 1070 Sep 23 19:02 ca.csr
-rw------- 1 root root 1679 Sep 23 19:02 ca-key.pem
-rw-r--r-- 1 root root 1363 Sep 23 19:02 ca.pem
-rw-r--r-- 1 root root 1082 Sep 23 19:09 controller-manager.csr
-rw------- 1 root root 1679 Sep 23 19:09 controller-manager-key.pem
-rw-r--r-- 1 root root 1501 Sep 23 19:09 controller-manager.pem
drwxr-xr-x 2 root root   84 Sep 23 18:57 etcd
-rw-r--r-- 1 root root  891 Sep 23 19:08 front-proxy-ca.csr
-rw------- 1 root root 1679 Sep 23 19:08 front-proxy-ca-key.pem
-rw-r--r-- 1 root root 1094 Sep 23 19:08 front-proxy-ca.pem
-rw-r--r-- 1 root root  903 Sep 23 19:08 front-proxy-client.csr
-rw------- 1 root root 1675 Sep 23 19:08 front-proxy-client-key.pem
-rw-r--r-- 1 root root 1188 Sep 23 19:08 front-proxy-client.pem
-rw------- 1 root root 1679 Sep 23 19:14 sa.key
-rw-r--r-- 1 root root  451 Sep 23 19:14 sa.pub
-rw-r--r-- 1 root root 1058 Sep 23 19:10 scheduler.csr
-rw------- 1 root root 1675 Sep 23 19:10 scheduler-key.pem
-rw-r--r-- 1 root root 1476 Sep 23 19:10 scheduler.pem

/etc/kubernetes/pki/etcd:
total 0
lrwxrwxrwx 1 root root 29 Sep 23 18:57 etcd-ca-key.pem -> /etc/etcd/ssl/etcd-ca-key.pem
lrwxrwxrwx 1 root root 25 Sep 23 18:57 etcd-ca.pem -> /etc/etcd/ssl/etcd-ca.pem
lrwxrwxrwx 1 root root 26 Sep 23 18:57 etcd-key.pem -> /etc/etcd/ssl/etcd-key.pem
lrwxrwxrwx 1 root root 22 Sep 23 18:57 etcd.pem -> /etc/etcd/ssl/etcd.pem
​
[root@k8s-master01 pki]# ls /etc/kubernetes/pki | grep -v etcd |wc -l
23
# 23 files in total is correct

7.4 Kubernetes Component Configuration

Create the required directories on the master nodes:

[root@k8s-master01 pki]# cd
[root@k8s-master01 ~]# mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
​
[root@k8s-master02 ~]# mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
​
[root@k8s-master03 ~]# mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes

7.4.1 Apiserver

Create the kube-apiserver service on all master nodes. Note: if this is not a highly available cluster, replace 172.31.3.188 with master01's address.

7.4.1.1 Master01 Configuration

Note: this document uses 10.96.0.0/12 as the Kubernetes Service CIDR; it must not overlap with the host network or the Pod CIDR. Adjust as needed.

[root@k8s-master01 ~]# cat > /lib/systemd/system/kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
​
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
      --v=2  \
      --logtostderr=true  \
      --allow-privileged=true  \
      --bind-address=0.0.0.0  \
      --secure-port=6443  \
      --advertise-address=172.31.3.101 \
      --service-cluster-ip-range=10.96.0.0/12  \
      --service-node-port-range=30000-32767  \
      --etcd-servers=https://172.31.3.108:2379,https://172.31.3.109:2379,https://172.31.3.110:2379 \
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \
      --etcd-certfile=/etc/etcd/ssl/etcd.pem  \
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \
      --client-ca-file=/etc/kubernetes/pki/ca.pem  \
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \
      --service-account-key-file=/etc/kubernetes/pki/sa.pub  \
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \
      --authorization-mode=Node,RBAC  \
      --enable-bootstrap-token-auth=true  \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \
      --requestheader-allowed-names=aggregator  \
      --requestheader-group-headers=X-Remote-Group  \
      --requestheader-extra-headers-prefix=X-Remote-Extra-  \
      --requestheader-username-headers=X-Remote-User
      # --token-auth-file=/etc/kubernetes/token.csv

Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
EOF

7.4.1.2 Master02 Configuration

Note: this document uses 10.96.0.0/12 as the Kubernetes Service CIDR; it must not overlap with the host network or the Pod CIDR. Adjust as needed.

[root@k8s-master02 ~]# cat > /lib/systemd/system/kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
​
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
      --v=2  \
      --logtostderr=true  \
      --allow-privileged=true  \
      --bind-address=0.0.0.0  \
      --secure-port=6443  \
      --advertise-address=172.31.3.102 \
      --service-cluster-ip-range=10.96.0.0/12  \
      --service-node-port-range=30000-32767  \
      --etcd-servers=https://172.31.3.108:2379,https://172.31.3.109:2379,https://172.31.3.110:2379 \
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \
      --etcd-certfile=/etc/etcd/ssl/etcd.pem  \
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \
      --client-ca-file=/etc/kubernetes/pki/ca.pem  \
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \
      --service-account-key-file=/etc/kubernetes/pki/sa.pub  \
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \
      --authorization-mode=Node,RBAC  \
      --enable-bootstrap-token-auth=true  \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \
      --requestheader-allowed-names=aggregator  \
      --requestheader-group-headers=X-Remote-Group  \
      --requestheader-extra-headers-prefix=X-Remote-Extra-  \
      --requestheader-username-headers=X-Remote-User
      # --token-auth-file=/etc/kubernetes/token.csv

Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
EOF

7.4.1.3 Master03 Configuration

Note: this document uses 10.96.0.0/12 as the Kubernetes Service CIDR; it must not overlap with the host network or the Pod CIDR. Adjust as needed.

[root@k8s-master03 ~]# cat > /lib/systemd/system/kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
​
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
      --v=2  \
      --logtostderr=true  \
      --allow-privileged=true  \
      --bind-address=0.0.0.0  \
      --secure-port=6443  \
      --advertise-address=172.31.3.103 \
      --service-cluster-ip-range=10.96.0.0/12  \
      --service-node-port-range=30000-32767  \
      --etcd-servers=https://172.31.3.108:2379,https://172.31.3.109:2379,https://172.31.3.110:2379 \
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \
      --etcd-certfile=/etc/etcd/ssl/etcd.pem  \
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \
      --client-ca-file=/etc/kubernetes/pki/ca.pem  \
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \
      --service-account-key-file=/etc/kubernetes/pki/sa.pub  \
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \
      --authorization-mode=Node,RBAC  \
      --enable-bootstrap-token-auth=true  \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \
      --requestheader-allowed-names=aggregator  \
      --requestheader-group-headers=X-Remote-Group  \
      --requestheader-extra-headers-prefix=X-Remote-Extra-  \
      --requestheader-username-headers=X-Remote-User
      # --token-auth-file=/etc/kubernetes/token.csv

Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
EOF

7.4.1.4 Starting the apiserver

Enable and start kube-apiserver on all master nodes:

[root@k8s-master01 ~]# systemctl daemon-reload && systemctl enable --now kube-apiserver
​
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl enable --now kube-apiserver
​
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl enable --now kube-apiserver

Check the kube-apiserver status:

[root@k8s-master01 ~]# systemctl status kube-apiserver
● kube-apiserver.service - Kubernetes API Server
   Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; enabled; vendor preset: disabled)
   Active: active (running) since Fri 2022-09-23 19:17:08 CST; 6s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 5775 (kube-apiserver)
    Tasks: 14 (limit: 23474)
   Memory: 228.7M
   CGroup: /system.slice/kube-apiserver.service
           └─5775 /usr/local/bin/kube-apiserver --v=2 --logtostderr=true --allow-privileged=true --bind-address=0.0.0.0 --secure-port=6443 --a>
​
Sep 23 19:17:14 k8s-master01.example.local kube-apiserver[5775]: I0923 19:17:14.225207    5775 healthz.go:257] poststarthook/rbac/bootstrap-ro>
Sep 23 19:17:14 k8s-master01.example.local kube-apiserver[5775]: [-]poststarthook/rbac/bootstrap-roles failed: not finished
Sep 23 19:17:14 k8s-master01.example.local kube-apiserver[5775]: I0923 19:17:14.324880    5775 healthz.go:257] poststarthook/rbac/bootstrap-ro>
Sep 23 19:17:14 k8s-master01.example.local kube-apiserver[5775]: [-]poststarthook/rbac/bootstrap-roles failed: not finished
Sep 23 19:17:14 k8s-master01.example.local kube-apiserver[5775]: I0923 19:17:14.428060    5775 healthz.go:257] poststarthook/rbac/bootstrap-ro>
Sep 23 19:17:14 k8s-master01.example.local kube-apiserver[5775]: [-]poststarthook/rbac/bootstrap-roles failed: not finished
Sep 23 19:17:14 k8s-master01.example.local kube-apiserver[5775]: I0923 19:17:14.524001    5775 healthz.go:257] poststarthook/rbac/bootstrap-ro>
Sep 23 19:17:14 k8s-master01.example.local kube-apiserver[5775]: [-]poststarthook/rbac/bootstrap-roles failed: not finished
Sep 23 19:17:14 k8s-master01.example.local kube-apiserver[5775]: I0923 19:17:14.624035    5775 healthz.go:257] poststarthook/rbac/bootstrap-ro>
Sep 23 19:17:14 k8s-master01.example.local kube-apiserver[5775]: [-]poststarthook/rbac/bootstrap-roles failed: not finished
​
[root@k8s-master02 ~]# systemctl status kube-apiserver
[root@k8s-master03 ~]# systemctl status kube-apiserver
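
Beyond systemctl status, one optional end-to-end check is to query each apiserver's /healthz endpoint with the admin client certificate generated earlier; every master should answer "ok":

[root@k8s-master01 ~]# for IP in 172.31.3.101 172.31.3.102 172.31.3.103; do echo -n "$IP: "; curl -s --cacert /etc/kubernetes/pki/ca.pem --cert /etc/kubernetes/pki/admin.pem --key /etc/kubernetes/pki/admin-key.pem https://$IP:6443/healthz; echo; done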

7.4.2 ControllerManager

Configure the kube-controller-manager service on all master nodes.

Note: this document uses 192.168.0.0/12 as the Pod CIDR; it must not overlap with the host network or the Kubernetes Service CIDR. Adjust as needed.

[root@k8s-master01 ~]# cat > /lib/systemd/system/kube-controller-manager.service <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
​
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
      --v=2 \
      --logtostderr=true \
      --bind-address=127.0.0.1 \
      --root-ca-file=/etc/kubernetes/pki/ca.pem \
      --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
      --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
      --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
      --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
      --leader-elect=true \
      --use-service-account-credentials=true \
      --node-monitor-grace-period=40s \
      --node-monitor-period=5s \
      --pod-eviction-timeout=2m0s \
      --controllers=*,bootstrapsigner,tokencleaner \
      --allocate-node-cidrs=true \
      --cluster-cidr=192.168.0.0/12 \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
      --node-cidr-mask-size=24
      
Restart=always
RestartSec=10s
​
[Install]
WantedBy=multi-user.target
EOF
​
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do scp /lib/systemd/system/kube-controller-manager.service $NODE:/lib/systemd/system/; done

Start kube-controller-manager on all master nodes:

[root@k8s-master01 ~]# systemctl daemon-reload && systemctl enable --now kube-controller-manager
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl enable --now kube-controller-manager
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl enable --now kube-controller-manager

Check the status:

[root@k8s-master01 ~]# systemctl status kube-controller-manager
● kube-controller-manager.service - Kubernetes Controller Manager
   Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)
   Active: active (running) since Fri 2022-09-23 19:18:17 CST; 13s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 5834 (kube-controller)
    Tasks: 6 (limit: 23474)
   Memory: 24.5M
   CGroup: /system.slice/kube-controller-manager.service
           └─5834 /usr/local/bin/kube-controller-manager --v=2 --logtostderr=true --bind-address=127.0.0.1 --root-ca-file=/etc/kubernetes/pki/>
​
Sep 23 19:18:30 k8s-master01.example.local kube-controller-manager[5834]: I0923 19:18:30.022163    5834 controllermanager.go:573] Starting "no>
Sep 23 19:18:30 k8s-master01.example.local kube-controller-manager[5834]: I0923 19:18:30.022201    5834 tokencleaner.go:118] Starting token cl>
Sep 23 19:18:30 k8s-master01.example.local kube-controller-manager[5834]: I0923 19:18:30.022207    5834 shared_informer.go:255] Waiting for ca>
Sep 23 19:18:30 k8s-master01.example.local kube-controller-manager[5834]: I0923 19:18:30.022220    5834 shared_informer.go:262] Caches are syn>
Sep 23 19:18:30 k8s-master01.example.local kube-controller-manager[5834]: I0923 19:18:30.070984    5834 node_lifecycle_controller.go:497] Cont>
Sep 23 19:18:30 k8s-master01.example.local kube-controller-manager[5834]: I0923 19:18:30.071026    5834 controllermanager.go:602] Started "nod>
Sep 23 19:18:30 k8s-master01.example.local kube-controller-manager[5834]: I0923 19:18:30.071035    5834 controllermanager.go:573] Starting "ga>
Sep 23 19:18:30 k8s-master01.example.local kube-controller-manager[5834]: I0923 19:18:30.071076    5834 node_lifecycle_controller.go:532] Send>
Sep 23 19:18:30 k8s-master01.example.local kube-controller-manager[5834]: I0923 19:18:30.071168    5834 node_lifecycle_controller.go:543] Star>
Sep 23 19:18:30 k8s-master01.example.local kube-controller-manager[5834]: I0923 19:18:30.071177    5834 shared_informer.go:255] Waiting for ca>
​
[root@k8s-master02 ~]# systemctl status kube-controller-manager
[root@k8s-master03 ~]# systemctl status kube-controller-manager

7.4.3 Scheduler

Configure the kube-scheduler service on all master nodes:

[root@k8s-master01 ~]# cat > /lib/systemd/system/kube-scheduler.service <<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
​
[Service]
ExecStart=/usr/local/bin/kube-scheduler \
      --v=2 \
      --logtostderr=true \
      --bind-address=127.0.0.1 \
      --leader-elect=true \
      --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
​
Restart=always
RestartSec=10s
​
[Install]
WantedBy=multi-user.target
EOF
​
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do scp /lib/systemd/system/kube-scheduler.service $NODE:/lib/systemd/system/; done

[root@k8s-master01 ~]# systemctl daemon-reload && systemctl enable --now kube-scheduler
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl enable --now kube-scheduler
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl enable --now kube-scheduler

[root@k8s-master01 ~]# systemctl status kube-scheduler
● kube-scheduler.service - Kubernetes Scheduler
   Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; enabled; vendor preset: disabled)
   Active: active (running) since Fri 2022-09-23 19:19:23 CST; 12s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 5888 (kube-scheduler)
    Tasks: 8 (limit: 23474)
   Memory: 16.5M
   CGroup: /system.slice/kube-scheduler.service
           └─5888 /usr/local/bin/kube-scheduler --v=2 --logtostderr=true --bind-address=127.0.0.1 --leader-elect=true --kubeconfig=/etc/kubern>
​
Sep 23 19:19:23 k8s-master01.example.local kube-scheduler[5888]:           schedulerName: default-scheduler
Sep 23 19:19:23 k8s-master01.example.local kube-scheduler[5888]:  >
Sep 23 19:19:23 k8s-master01.example.local kube-scheduler[5888]: I0923 19:19:23.950424    5888 server.go:148] "Starting Kubernetes Scheduler" >
Sep 23 19:19:23 k8s-master01.example.local kube-scheduler[5888]: I0923 19:19:23.950436    5888 server.go:150] "Golang settings" GOGC="" GOMAXP>
Sep 23 19:19:23 k8s-master01.example.local kube-scheduler[5888]: I0923 19:19:23.951913    5888 tlsconfig.go:200] "Loaded serving cert" certNam>
Sep 23 19:19:23 k8s-master01.example.local kube-scheduler[5888]: I0923 19:19:23.952068    5888 named_certificates.go:53] "Loaded SNI cert" ind>
Sep 23 19:19:23 k8s-master01.example.local kube-scheduler[5888]: I0923 19:19:23.952090    5888 secure_serving.go:210] Serving securely on 127.>
Sep 23 19:19:23 k8s-master01.example.local kube-scheduler[5888]: I0923 19:19:23.952118    5888 tlsconfig.go:240] "Starting DynamicServingCerti>
Sep 23 19:19:24 k8s-master01.example.local kube-scheduler[5888]: I0923 19:19:24.052775    5888 leaderelection.go:248] attempting to acquire le>
Sep 23 19:19:24 k8s-master01.example.local kube-scheduler[5888]: I0923 19:19:24.072073    5888 leaderelection.go:258] successfully acquired le>
​
[root@k8s-master02 ~]# systemctl status kube-scheduler
[root@k8s-master03 ~]# systemctl status kube-scheduler

7.4.4 TLS Bootstrapping Configuration

Create the bootstrap resources on Master01:

[root@k8s-master01 ~]# cat > bootstrap.secret.yaml <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-c8ad9c
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: c8ad9c
  token-secret: 2e4d610cf3e7426e
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups:  system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
 
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kube-apiserver
EOF

# Note: if this is not a highly available cluster, replace 172.31.3.188:6443 with master01's address
[root@k8s-master01 ~]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://172.31.3.188:6443 --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
# Output
Cluster "kubernetes" set.

[root@k8s-master01 ~]# kubectl config set-credentials tls-bootstrap-token-user --token=c8ad9c.2e4d610cf3e7426e --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
# Output
User "tls-bootstrap-token-user" set.

[root@k8s-master01 ~]# kubectl config set-context tls-bootstrap-token-user@kubernetes --cluster=kubernetes --user=tls-bootstrap-token-user --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
# Output
Context "tls-bootstrap-token-user@kubernetes" modified.

[root@k8s-master01 ~]# kubectl config use-context tls-bootstrap-token-user@kubernetes --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
# Output
Switched to context "tls-bootstrap-token-user@kubernetes".

Note: if you change the token-id and token-secret in bootstrap.secret.yaml, the two values must keep the same format and lengths (6 characters for token-id, 16 for token-secret), and the token c8ad9c.2e4d610cf3e7426e used in the set-credentials command above must be changed to match them.

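If you do want a fresh token, a minimal sketch for generating a conforming token-id (6 characters) and token-secret (16 characters) from /dev/urandom:

[root@k8s-master01 ~]# TOKEN_ID=$(tr -dc 'a-z0-9' < /dev/urandom | head -c 6)
[root@k8s-master01 ~]# TOKEN_SECRET=$(tr -dc 'a-z0-9' < /dev/urandom | head -c 16)
[root@k8s-master01 ~]# echo ${TOKEN_ID}.${TOKEN_SECRET}

Put the two halves into bootstrap.secret.yaml and pass the combined value to --token.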

[root@k8s-master01 ~]# mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config

# Check the cluster status; if everything is healthy, continue with the following steps
[root@k8s-master01 ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE                         ERROR
scheduler            Healthy   ok                              
controller-manager   Healthy   ok                              
etcd-1               Healthy   {"health":"true","reason":""}   
etcd-2               Healthy   {"health":"true","reason":""}   
etcd-0               Healthy   {"health":"true","reason":""}   
​
[root@k8s-master01 ~]# kubectl create -f bootstrap.secret.yaml
​
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do
     for FILE in bootstrap-kubelet.kubeconfig; do
       scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}
     done
 done
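
Optionally confirm that the bootstrap objects exist before moving on to the kubelet; the secret and the ClusterRoleBindings defined in bootstrap.secret.yaml should all be present:

[root@k8s-master01 ~]# kubectl get secret bootstrap-token-c8ad9c -n kube-system
[root@k8s-master01 ~]# kubectl get clusterrolebinding kubelet-bootstrap node-autoapprove-bootstrap node-autoapprove-certificate-rotation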

7.4.5 Kubelet Configuration

Configure kubelet.service on the master nodes:

[root@k8s-master01 ~]# cat > /lib/systemd/system/kubelet.service <<EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service
​
[Service]
ExecStart=/usr/local/bin/kubelet
​
Restart=always
StartLimitInterval=0
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF
​
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do scp /lib/systemd/system/kubelet.service $NODE:/lib/systemd/system/ ;done

Configure the 10-kubelet.conf drop-in file on the master nodes:

[root@k8s-master01 ~]# cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf <<EOF
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--hostname-override=172.31.3.101"
Environment="KUBELET_RINTIME=--container-runtime=remote --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node=''"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS $KUBELET_RINTIME
EOF
​
[root@k8s-master02 ~]# cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf <<EOF
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--hostname-override=172.31.3.102"
Environment="KUBELET_RINTIME=--container-runtime=remote --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node=''"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS $KUBELET_RINTIME
EOF
​
[root@k8s-master03 ~]# cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf <<EOF
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--hostname-override=172.31.3.103"
Environment="KUBELET_RINTIME=--container-runtime=remote --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node=''"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS $KUBELET_RINTIME
EOF

Download the pause image and push it to Harbor:

[root@k8s-master01 ~]# docker login harbor.raymonds.cc
Username: admin
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
​
Login Succeeded
​
[root@k8s-master01 ~]# cat download_pause_images.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-11
#FileName:      download_pause_images.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \033[01;31m"
END='\033[0m'
​
PAUSE_VERSION=3.8
HARBOR_DOMAIN=harbor.raymonds.cc
​
images_download(){
    ${COLOR}"开始下载Pause镜像"${END}
        docker pull registry.aliyuncs.com/google_containers/pause:${PAUSE_VERSION}
        docker tag registry.aliyuncs.com/google_containers/pause:${PAUSE_VERSION} ${HARBOR_DOMAIN}/google_containers/pause:${PAUSE_VERSION}
        docker rmi registry.aliyuncs.com/google_containers/pause:${PAUSE_VERSION}
        docker push ${HARBOR_DOMAIN}/google_containers/pause:${PAUSE_VERSION}
    ${COLOR}"Pause镜像下载完成"${END}
}
​
images_download
​
[root@k8s-master01 ~]# bash download_pause_images.sh  
​
[root@k8s-master01 ~]# docker images
REPOSITORY                                   TAG       IMAGE ID       CREATED        SIZE
harbor.raymonds.cc/google_containers/pause   3.8       4873874c08ef   3 months ago   711kB
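Optionally (not part of the original steps), any Docker host that trusts the Harbor registry can pull the image back to confirm the push succeeded:

# Verify the pushed image is retrievable from Harbor.
docker pull harbor.raymonds.cc/google_containers/pause:3.8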

Create the kubelet configuration file on the master nodes

Note: if the Kubernetes Service CIDR was changed, update the clusterDNS setting in kubelet-conf.yml to the tenth address of the Service network, for example 10.96.0.10 (see the example after the file below).

[root@k8s-master01 ~]# cat > /etc/kubernetes/kubelet-conf.yml <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF
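If the Service CIDR was changed as mentioned in the note above, the clusterDNS entry can be patched after the file is written. A minimal sketch, assuming a hypothetical Service network of 10.100.0.0/16 whose tenth address is 10.100.0.10:

# Hypothetical example: point clusterDNS at 10.100.0.10 instead of 10.96.0.10.
sed -ri 's@^- 10\.96\.0\.10$@- 10.100.0.10@' /etc/kubernetes/kubelet-conf.yml
# Confirm the change before copying the file to the other masters.
grep -A1 '^clusterDNS:' /etc/kubernetes/kubelet-conf.yml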

[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do scp /etc/kubernetes/kubelet-conf.yml $NODE:/etc/kubernetes/ ;done

Start kubelet on the master nodes

[root@k8s-master01 ~]# systemctl daemon-reload && systemctl enable --now kubelet
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl enable --now kubelet
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl enable --now kubelet
​
[root@k8s-master01 ~]# systemctl status kubelet
● kubelet.service - Kubernetes Kubelet
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
  Drop-In: /etc/systemd/system/kubelet.service.d
           └─10-kubelet.conf
   Active: active (running) since Fri 2022-09-23 19:37:59 CST; 58s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 6366 (kubelet)
    Tasks: 14 (limit: 23474)
   Memory: 39.7M
   CGroup: /system.slice/kubelet.service
           └─6366 /usr/local/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kube>
​
Sep 23 19:38:20 k8s-master01.example.local kubelet[6366]: I0923 19:38:20.019352    6366 apiserver.go:52] "Watching apiserver"
Sep 23 19:38:20 k8s-master01.example.local kubelet[6366]: I0923 19:38:20.118172    6366 reconciler.go:169] "Reconciler: start to sync state"
Sep 23 19:38:20 k8s-master01.example.local kubelet[6366]: E0923 19:38:20.464305    6366 kubelet.go:2373] "Container runtime network not ready">
Sep 23 19:38:25 k8s-master01.example.local kubelet[6366]: E0923 19:38:25.474757    6366 kubelet.go:2373] "Container runtime network not ready">
Sep 23 19:38:30 k8s-master01.example.local kubelet[6366]: E0923 19:38:30.483816    6366 kubelet.go:2373] "Container runtime network not ready">
Sep 23 19:38:35 k8s-master01.example.local kubelet[6366]: E0923 19:38:35.494800    6366 kubelet.go:2373] "Container runtime network not ready">
Sep 23 19:38:40 k8s-master01.example.local kubelet[6366]: E0923 19:38:40.504300    6366 kubelet.go:2373] "Container runtime network not ready">
Sep 23 19:38:45 k8s-master01.example.local kubelet[6366]: E0923 19:38:45.512913    6366 kubelet.go:2373] "Container runtime network not ready">
Sep 23 19:38:50 k8s-master01.example.local kubelet[6366]: E0923 19:38:50.521932    6366 kubelet.go:2373] "Container runtime network not ready">
Sep 23 19:38:55 k8s-master01.example.local kubelet[6366]: E0923 19:38:55.534585    6366 kubelet.go:2373] "Container runtime network not ready">
​
[root@k8s-master02 ~]# systemctl status kubelet
[root@k8s-master03 ~]# systemctl status kubelet

System log at this point

# It is normal if only the following messages appear
[root@k8s-master01 ~]# tail -f /var/log/messages # on Ubuntu use "tail -f /var/log/syslog"
...
Sep 23 15:11:22 localhost kubelet[16173]: E0923 15:11:22.503340   16173 kubelet.go:2373] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized"

Check the cluster status

[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS     ROLES    AGE   VERSION
172.31.3.101   NotReady   <none>   20m   v1.25.2
172.31.3.102   NotReady   <none>   19m   v1.25.2
172.31.3.103   NotReady   <none>   19m   v1.25.2

7.4.6 Configure kube-proxy

Note: if this is not a highly available cluster, replace 172.31.3.188:6443 with the address of master01.

[root@k8s-master01 ~]# kubectl -n kube-system create serviceaccount kube-proxy
# Output
serviceaccount/kube-proxy created
​
[root@k8s-master01 ~]# kubectl create clusterrolebinding system:kube-proxy --clusterrole system:node-proxier --serviceaccount kube-system:kube-proxy
# Output
clusterrolebinding.rbac.authorization.k8s.io/system:kube-proxy created
​
[root@k8s-master01 ~]# cat > kube-proxy-secret.yml <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: kube-proxy
  namespace: kube-system
  annotations:
    kubernetes.io/service-account.name: "kube-proxy"
type: kubernetes.io/service-account-token
EOF
​
[root@k8s-master01 ~]# kubectl apply -f kube-proxy-secret.yml
# Output
secret/kube-proxy created
​
[root@k8s-master01 ~]# JWT_TOKEN=$(kubectl -n kube-system get secret/kube-proxy \
--output=jsonpath='{.data.token}' | base64 -d)
​
[root@k8s-master01 ~]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://172.31.3.188:6443  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
# Output
Cluster "kubernetes" set.
​
[root@k8s-master01 ~]# kubectl config set-credentials kubernetes --token=${JWT_TOKEN} --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
# Output
User "kubernetes" set.
​
[root@k8s-master01 ~]# kubectl config set-context kubernetes --cluster=kubernetes --user=kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
# Output
Context "kubernetes" created.
​
[root@k8s-master01 ~]# kubectl config use-context kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
# Output
Switched to context "kubernetes".
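As an optional sanity check (not in the original steps), the generated kubeconfig can be inspected to confirm the API server address and that the token credential was embedded:

# Shows the cluster, user and context; sensitive fields are masked unless --raw is given.
kubectl config view --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig --minify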

Create the kube-proxy configuration and systemd service files on master01, then send them to the other master nodes

If the cluster Pod CIDR was changed, update the clusterCIDR: 192.168.0.0/12 parameter in kube-proxy.conf to match the Pod network (see the sketch after the file below).

[root@k8s-master01 ~]# cat > /etc/kubernetes/kube-proxy.conf <<EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 192.168.0.0/12
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF
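As noted before the file, if the Pod network is not 192.168.0.0/12, the value can be patched after the file is written; a minimal sketch assuming a hypothetical Pod network of 172.16.0.0/12:

# Hypothetical example: replace the default clusterCIDR with the real Pod network.
sed -ri 's@^clusterCIDR: .*@clusterCIDR: 172.16.0.0/12@' /etc/kubernetes/kube-proxy.conf
grep '^clusterCIDR' /etc/kubernetes/kube-proxy.conf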
​
[root@k8s-master01 ~]# cat > /lib/systemd/system/kube-proxy.service <<EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
​
[Service]
ExecStart=/usr/local/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy.conf \
  --v=2
Restart=always
RestartSec=10s
​
[Install]
WantedBy=multi-user.target
EOF
​
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do
     scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig
     scp /etc/kubernetes/kube-proxy.conf $NODE:/etc/kubernetes/kube-proxy.conf
     scp /lib/systemd/system/kube-proxy.service $NODE:/lib/systemd/system/kube-proxy.service
 done

Start kube-proxy on the master nodes

[root@k8s-master01 ~]# systemctl daemon-reload && systemctl enable --now kube-proxy
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl enable --now kube-proxy
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl enable --now kube-proxy
​
[root@k8s-master01 ~]# systemctl status kube-proxy
● kube-proxy.service - Kubernetes Kube Proxy
   Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Fri 2022-09-23 19:51:23 CST; 14s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 8614 (kube-proxy)
    Tasks: 5 (limit: 23474)
   Memory: 10.7M
   CGroup: /system.slice/kube-proxy.service
           └─8614 /usr/local/bin/kube-proxy --config=/etc/kubernetes/kube-proxy.conf --v=2
​
Sep 23 19:51:23 k8s-master01.example.local kube-proxy[8614]: I0923 19:51:23.505482    8614 flags.go:64] FLAG: --vmodule=""
Sep 23 19:51:23 k8s-master01.example.local kube-proxy[8614]: I0923 19:51:23.505484    8614 flags.go:64] FLAG: --write-config-to=""
Sep 23 19:51:23 k8s-master01.example.local kube-proxy[8614]: I0923 19:51:23.506970    8614 server.go:442] "Using lenient decoding as strict de>
Sep 23 19:51:23 k8s-master01.example.local kube-proxy[8614]: I0923 19:51:23.507434    8614 feature_gate.go:245] feature gates: &{map[]}
Sep 23 19:51:23 k8s-master01.example.local kube-proxy[8614]: I0923 19:51:23.507544    8614 feature_gate.go:245] feature gates: &{map[]}
Sep 23 19:51:23 k8s-master01.example.local kube-proxy[8614]: I0923 19:51:23.520612    8614 proxier.go:666] "Failed to load kernel module with >
Sep 23 19:51:23 k8s-master01.example.local kube-proxy[8614]: E0923 19:51:23.550408    8614 node.go:152] Failed to retrieve node info: nodes "k>
Sep 23 19:51:24 k8s-master01.example.local kube-proxy[8614]: E0923 19:51:24.715443    8614 node.go:152] Failed to retrieve node info: nodes "k>
Sep 23 19:51:26 k8s-master01.example.local kube-proxy[8614]: E0923 19:51:26.915067    8614 node.go:152] Failed to retrieve node info: nodes "k>
Sep 23 19:51:31 k8s-master01.example.local kube-proxy[8614]: E0923 19:51:31.496512    8614 node.go:152] Failed to retrieve node info: nodes "k>
​
[root@k8s-master02 ~]# systemctl status kube-proxy
[root@k8s-master03 ~]# systemctl status kube-proxy
​
[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS     ROLES    AGE   VERSION
172.31.3.101   NotReady   <none>   20m   v1.25.2
172.31.3.102   NotReady   <none>   19m   v1.25.2
172.31.3.103   NotReady   <none>   19m   v1.25.2

8. Deploy the worker nodes

8.1 Install the worker node components

Send the binaries to the worker nodes

[root@k8s-master01 ~]# for NODE in k8s-node01 k8s-node02 k8s-node03; do echo $NODE; scp -o StrictHostKeyChecking=no /usr/local/bin/kube{let,-proxy} $NODE:/usr/local/bin/ ; done
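As an optional check (assuming passwordless SSH, as used above), confirm the binaries arrived and report the expected version on each worker node:

# Should print the same version on every node, e.g. v1.25.x.
for NODE in k8s-node01 k8s-node02 k8s-node03; do echo $NODE; ssh $NODE "kubelet --version; kube-proxy --version"; done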

Create the /opt/cni/bin directory on the worker nodes

[root@k8s-node01 ~]# mkdir -p /opt/cni/bin
[root@k8s-node02 ~]# mkdir -p /opt/cni/bin
[root@k8s-node03 ~]# mkdir -p /opt/cni/bin

8.2 Copy the etcd certificates

Create the etcd certificate directory on the worker nodes

[root@k8s-node01 ~]# mkdir /etc/etcd/ssl -p
[root@k8s-node02 ~]# mkdir /etc/etcd/ssl -p
[root@k8s-node03 ~]# mkdir /etc/etcd/ssl -p

Copy the etcd certificates to the worker nodes

[root@k8s-etcd01 pki]# for NODE in k8s-node01 k8s-node02 k8s-node03; do
     ssh -o StrictHostKeyChecking=no $NODE "mkdir -p /etc/etcd/ssl"
     for FILE in etcd-ca-key.pem  etcd-ca.pem  etcd-key.pem  etcd.pem; do
       scp -o StrictHostKeyChecking=no /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}
     done
 done

Create the etcd certificate directory under /etc/kubernetes/pki on all worker nodes and link the etcd certificates into it

[root@k8s-node01 ~]# mkdir /etc/kubernetes/pki/etcd -p
[root@k8s-node02 ~]# mkdir /etc/kubernetes/pki/etcd -p
[root@k8s-node03 ~]# mkdir /etc/kubernetes/pki/etcd -p
​
[root@k8s-node01 ~]# ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
[root@k8s-node02 ~]# ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
[root@k8s-node03 ~]# ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/

8.3 Copy the Kubernetes certificates and configuration files

Create the Kubernetes directories on the worker nodes

[root@k8s-node01 ~]# mkdir -p /etc/kubernetes/pki
[root@k8s-node02 ~]# mkdir -p /etc/kubernetes/pki
[root@k8s-node03 ~]# mkdir -p /etc/kubernetes/pki

Copy the certificates from master01 to the worker nodes

[root@k8s-master01 ~]# for NODE in k8s-node01 k8s-node02 k8s-node03; do
     for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig; do
       scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}
     done
 done
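A quick optional check that every worker node received the CA material and the bootstrap kubeconfig (assuming passwordless SSH from master01):

# Each node should list ca.pem, ca-key.pem, front-proxy-ca.pem and the bootstrap kubeconfig.
for NODE in k8s-node01 k8s-node02 k8s-node03; do
    echo "== ${NODE} =="
    ssh ${NODE} "ls /etc/kubernetes/pki/ /etc/kubernetes/bootstrap-kubelet.kubeconfig"
done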

8.4 Configure kubelet

Create the required directories on the worker nodes

[root@k8s-node01 ~]# mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
​
[root@k8s-node02 ~]# mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
​
[root@k8s-node03 ~]# mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes

Copy the kubelet service file from master01 to the worker nodes

[root@k8s-master01 ~]# for NODE in k8s-node01 k8s-node02 k8s-node03; do scp /lib/systemd/system/kubelet.service $NODE:/lib/systemd/system/ ;done

Configure the 10-kubelet.conf drop-in file on the worker nodes

[root@k8s-node01 ~]# cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf <<'EOF'
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--hostname-override=172.31.3.111"
Environment="KUBELET_RUNTIME=--container-runtime=remote --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node=''"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS $KUBELET_RUNTIME
EOF
​
[root@k8s-node02 ~]# cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf <<'EOF'
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--hostname-override=172.31.3.112"
Environment="KUBELET_RUNTIME=--container-runtime=remote --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node=''"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS $KUBELET_RUNTIME
EOF
​
[root@k8s-node03 ~]# cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf <<'EOF'
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--hostname-override=172.31.3.113"
Environment="KUBELET_RUNTIME=--container-runtime=remote --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node=''"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS $KUBELET_RUNTIME
EOF

Copy the kubelet configuration file from master01 to the worker nodes

[root@k8s-master01 ~]# for NODE in k8s-node01 k8s-node02 k8s-node03; do scp /etc/kubernetes/kubelet-conf.yml $NODE:/etc/kubernetes/ ;done

Start kubelet on the worker nodes

[root@k8s-node01 ~]# systemctl daemon-reload && systemctl enable --now kubelet
[root@k8s-node02 ~]# systemctl daemon-reload && systemctl enable --now kubelet
[root@k8s-node03 ~]# systemctl daemon-reload && systemctl enable --now kubelet
​
[root@k8s-node01 ~]# systemctl status kubelet
● kubelet.service - Kubernetes Kubelet
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
  Drop-In: /etc/systemd/system/kubelet.service.d
           └─10-kubelet.conf
   Active: active (running) since Fri 2022-09-23 20:04:37 CST; 44s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 5734 (kubelet)
    Tasks: 14 (limit: 23474)
   Memory: 37.4M
   CGroup: /system.slice/kubelet.service
           └─5734 /usr/local/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kube>
​
Sep 23 20:04:57 k8s-node01.example.local kubelet[5734]: I0923 20:04:57.235951    5734 kuberuntime_manager.go:1050] "Updating runtime config th>
Sep 23 20:04:57 k8s-node01.example.local kubelet[5734]: I0923 20:04:57.236602    5734 kubelet_network.go:60] "Updating Pod CIDR" originalPodCI>
Sep 23 20:04:57 k8s-node01.example.local kubelet[5734]: E0923 20:04:57.244742    5734 kubelet.go:2373] "Container runtime network not ready" n>
Sep 23 20:04:57 k8s-node01.example.local kubelet[5734]: I0923 20:04:57.835358    5734 apiserver.go:52] "Watching apiserver"
Sep 23 20:04:57 k8s-node01.example.local kubelet[5734]: I0923 20:04:57.941157    5734 reconciler.go:169] "Reconciler: start to sync state"
Sep 23 20:04:58 k8s-node01.example.local kubelet[5734]: E0923 20:04:58.192522    5734 kubelet.go:2373] "Container runtime network not ready" n>
Sep 23 20:05:03 k8s-node01.example.local kubelet[5734]: E0923 20:05:03.211501    5734 kubelet.go:2373] "Container runtime network not ready" n>
Sep 23 20:05:08 k8s-node01.example.local kubelet[5734]: E0923 20:05:08.223142    5734 kubelet.go:2373] "Container runtime network not ready" n>
Sep 23 20:05:13 k8s-node01.example.local kubelet[5734]: E0923 20:05:13.232771    5734 kubelet.go:2373] "Container runtime network not ready" n>
Sep 23 20:05:18 k8s-node01.example.local kubelet[5734]: E0923 20:05:18.248971    5734 kubelet.go:2373] "Container runtime network not ready" n>
​
[root@k8s-node02 ~]# systemctl status kubelet
[root@k8s-node03 ~]# systemctl status kubelet

Check the cluster status

[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS     ROLES    AGE   VERSION
172.31.3.101   NotReady   <none>   27m   v1.25.2
172.31.3.102   NotReady   <none>   27m   v1.25.2
172.31.3.103   NotReady   <none>   26m   v1.25.2
172.31.3.111   NotReady   <none>   59s   v1.25.2
172.31.3.112   NotReady   <none>   56s   v1.25.2
172.31.3.113   NotReady   <none>   54s   v1.25.2

8.5 Configure kube-proxy

Copy the kube-proxy files from master01 to the worker nodes

[root@k8s-master01 ~]# for NODE in k8s-node01 k8s-node02 k8s-node03; do
     scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig
     scp /etc/kubernetes/kube-proxy.conf $NODE:/etc/kubernetes/kube-proxy.conf
     scp /lib/systemd/system/kube-proxy.service $NODE:/lib/systemd/system/kube-proxy.service
 done

Start kube-proxy on the worker nodes

[root@k8s-node01 ~]# systemctl daemon-reload && systemctl enable --now kube-proxy
[root@k8s-node02 ~]# systemctl daemon-reload && systemctl enable --now kube-proxy
[root@k8s-node03 ~]# systemctl daemon-reload && systemctl enable --now kube-proxy
​
[root@k8s-node01 ~]# systemctl status kube-proxy
● kube-proxy.service - Kubernetes Kube Proxy
   Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Fri 2022-09-23 20:07:24 CST; 31s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 6445 (kube-proxy)
    Tasks: 6 (limit: 23474)
   Memory: 10.6M
   CGroup: /system.slice/kube-proxy.service
           └─6445 /usr/local/bin/kube-proxy --config=/etc/kubernetes/kube-proxy.conf --v=2
​
Sep 23 20:07:24 k8s-node01.example.local kube-proxy[6445]: I0923 20:07:24.367809    6445 flags.go:64] FLAG: --write-config-to=""
Sep 23 20:07:24 k8s-node01.example.local kube-proxy[6445]: I0923 20:07:24.368891    6445 server.go:442] "Using lenient decoding as strict deco>
Sep 23 20:07:24 k8s-node01.example.local kube-proxy[6445]: I0923 20:07:24.369002    6445 feature_gate.go:245] feature gates: &{map[]}
Sep 23 20:07:24 k8s-node01.example.local kube-proxy[6445]: I0923 20:07:24.369079    6445 feature_gate.go:245] feature gates: &{map[]}
Sep 23 20:07:24 k8s-node01.example.local kube-proxy[6445]: I0923 20:07:24.384458    6445 proxier.go:666] "Failed to load kernel module with mo>
Sep 23 20:07:24 k8s-node01.example.local kube-proxy[6445]: E0923 20:07:24.421818    6445 node.go:152] Failed to retrieve node info: nodes "k8s>
Sep 23 20:07:25 k8s-node01.example.local kube-proxy[6445]: E0923 20:07:25.605692    6445 node.go:152] Failed to retrieve node info: nodes "k8s>
Sep 23 20:07:27 k8s-node01.example.local kube-proxy[6445]: E0923 20:07:27.621258    6445 node.go:152] Failed to retrieve node info: nodes "k8s>
Sep 23 20:07:32 k8s-node01.example.local kube-proxy[6445]: E0923 20:07:32.018317    6445 node.go:152] Failed to retrieve node info: nodes "k8s>
Sep 23 20:07:41 k8s-node01.example.local kube-proxy[6445]: E0923 20:07:41.243207    6445 node.go:152] Failed to retrieve node info: nodes "k8s>
​
[root@k8s-node02 ~]# systemctl status kube-proxy
[root@k8s-node03 ~]# systemctl status kube-proxy

Check the HAProxy status

kubeapi.raymonds.cc:9999/haproxy-sta…

[Screenshot 020.jpg: HAProxy status page]
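Besides the web page, a quick command-line probe from any master can confirm that the HAProxy stats port answers (a sketch only; host and port are the ones shown in the link above, and the page may require the credentials configured for HAProxy earlier):

# Expect an HTTP status code such as 200 (or 401 if authentication is required).
curl -s -o /dev/null -w '%{http_code}\n' http://kubeapi.raymonds.cc:9999/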