10. Deploying a Highly Available Cluster Based on Kubernetes v1.25 (Binary) and Containerd (Part 2)


6. Installing Containerd

6.1 Kernel Parameter Tuning

Installing Docker configures the following kernel parameters automatically, with no manual work needed.

With Containerd, however, they must be configured by hand.

To allow iptables to inspect bridged traffic, the br_netfilter module is required; load it explicitly with modprobe br_netfilter.

For iptables on the Linux nodes to see bridged traffic correctly, also confirm that net.bridge.bridge-nf-call-iptables is set to 1.

Configure the kernel modules required by Containerd:

[root@k8s-master01 ~]# cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

Load the modules:

[root@k8s-master01 ~]# modprobe -- overlay
[root@k8s-master01 ~]# modprobe -- br_netfilter

Configure the kernel parameters required by Containerd:

[root@k8s-master01 ~]# cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

Apply the kernel parameters:

[root@k8s-master01 ~]# sysctl --system
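
A quick way to confirm the modules are loaded and the sysctls took effect (a verification sketch, not part of the original procedure):

[root@k8s-master01 ~]# lsmod | grep -E 'overlay|br_netfilter'   #both modules should be listed
[root@k8s-master01 ~]# sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1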

6.2 Installing Containerd from Binaries

Official download link:

https://github.com/containerd/containerd

Containerd ships three flavors of binary packages:

  • containerd-xxx: does not include runc, which must be installed separately

    [root@k8s-master01 ~]# tar tf containerd-1.6.8-linux-amd64.tar.gz 
    bin/
    bin/containerd-stress
    bin/containerd-shim-runc-v2
    bin/containerd-shim
    bin/ctr
    bin/containerd
    bin/containerd-shim-runc-v1
    
  • cri-containerd-xxx: includes runc, ctr, crictl, the systemd unit file, and related files, but no CNI plugins. Kubernetes does not need containerd's CNI plugins, so this is the package used here

    [root@k8s-master01 ~]# tar tf cri-containerd-1.6.8-linux-amd64.tar.gz 
    etc/crictl.yaml
    etc/systemd/
    etc/systemd/system/
    etc/systemd/system/containerd.service
    usr/
    usr/local/
    usr/local/sbin/
    usr/local/sbin/runc
    usr/local/bin/
    usr/local/bin/containerd-stress
    usr/local/bin/containerd-shim-runc-v2
    usr/local/bin/containerd-shim
    usr/local/bin/ctr
    usr/local/bin/containerd
    usr/local/bin/critest
    usr/local/bin/ctd-decoder
    usr/local/bin/crictl
    usr/local/bin/containerd-shim-runc-v1
    opt/containerd/
    opt/containerd/cluster/
    opt/containerd/cluster/gce/
    opt/containerd/cluster/gce/env
    opt/containerd/cluster/gce/cloud-init/
    opt/containerd/cluster/gce/cloud-init/master.yaml
    opt/containerd/cluster/gce/cloud-init/node.yaml
    opt/containerd/cluster/gce/cni.template
    opt/containerd/cluster/gce/configure.sh
    opt/containerd/cluster/version
    
  • cri-containerd-cni-xxx: includes runc, ctr, crictl, the CNI plugins, the systemd unit file, and related files

    [root@k8s-master01 ~]# tar tf cri-containerd-cni-1.6.8-linux-amd64.tar.gz 
    etc/
    etc/crictl.yaml
    etc/systemd/
    etc/systemd/system/
    etc/systemd/system/containerd.service
    etc/cni/
    etc/cni/net.d/
    etc/cni/net.d/10-containerd-net.conflist
    usr/
    usr/local/
    usr/local/sbin/
    usr/local/sbin/runc
    usr/local/bin/
    usr/local/bin/containerd-stress
    usr/local/bin/containerd-shim-runc-v2
    usr/local/bin/containerd-shim
    usr/local/bin/ctr
    usr/local/bin/containerd
    usr/local/bin/critest
    usr/local/bin/ctd-decoder
    usr/local/bin/crictl
    usr/local/bin/containerd-shim-runc-v1
    opt/
    opt/containerd/
    opt/containerd/cluster/
    opt/containerd/cluster/gce/
    opt/containerd/cluster/gce/env
    opt/containerd/cluster/gce/cloud-init/
    opt/containerd/cluster/gce/cloud-init/master.yaml
    opt/containerd/cluster/gce/cloud-init/node.yaml
    opt/containerd/cluster/gce/cni.template
    opt/containerd/cluster/gce/configure.sh
    opt/containerd/cluster/version
    opt/cni/
    opt/cni/bin/
    opt/cni/bin/bandwidth
    opt/cni/bin/loopback
    opt/cni/bin/ipvlan
    opt/cni/bin/host-local
    opt/cni/bin/static
    opt/cni/bin/vlan
    opt/cni/bin/tuning
    opt/cni/bin/host-device
    opt/cni/bin/firewall
    opt/cni/bin/portmap
    opt/cni/bin/sbr
    opt/cni/bin/macvlan
    opt/cni/bin/bridge
    opt/cni/bin/dhcp
    opt/cni/bin/ptp
    opt/cni/bin/vrf
    

Install Containerd:

[root@k8s-master01 ~]# wget https://github.com/containerd/containerd/releases/download/v1.6.8/cri-containerd-1.6.8-linux-amd64.tar.gz

#The cri-containerd-1.6.8-linux-amd64.tar.gz tarball is already laid out in the directory structure recommended for an official binary deployment. It contains the systemd unit file and binaries such as containerd, ctr, and crictl. Extract it to the system root /:
[root@k8s-master01 ~]# tar xf cri-containerd-1.6.8-linux-amd64.tar.gz -C /

Generate Containerd's default configuration file:

[root@k8s-master01 ~]# mkdir -p /etc/containerd
[root@k8s-master01 ~]# containerd config default | tee /etc/containerd/config.toml

Switch Containerd's cgroup driver to systemd, and change sandbox_image to your private registry (or the Aliyun google_containers mirror):

[root@k8s-master01 ~]# vim /etc/containerd/config.toml
...
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
...
            SystemdCgroup = true #change SystemdCgroup to true
...
    sandbox_image = "harbor.raymonds.cc/google_containers/pause:3.8" #change sandbox_image to the private registry "harbor.raymonds.cc/google_containers/pause:3.8"; if you have no private registry, use the Aliyun mirror "registry.aliyuncs.com/google_containers/pause:3.8"

#Or apply the changes with the following command
sed -ri -e 's/(.*SystemdCgroup = ).*/\1true/' -e 's@(.*sandbox_image = ).*@\1"harbor.raymonds.cc/google_containers/pause:3.8"@' /etc/containerd/config.toml
​
#If you do not have Harbor, run this command instead
sed -ri -e 's/(.*SystemdCgroup = ).*/\1true/' -e 's@(.*sandbox_image = ).*@\1"registry.aliyuncs.com/google_containers/pause:3.8"@' /etc/containerd/config.toml
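
To confirm both edits landed, grep the file (a quick check, not in the original procedure; the exact indentation may differ slightly):

[root@k8s-master01 ~]# grep -E 'sandbox_image|SystemdCgroup' /etc/containerd/config.toml
    sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.8"
            SystemdCgroup = true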

Configure registry mirrors and the private registry:

Reference: github.com/containerd/…

[root@k8s-master01 ~]# vim /etc/containerd/config.toml
...
    [plugins."io.containerd.grpc.v1.cri".registry]
...
#The following lines configure credentials for the private registry; skip them if you have no private registry
      [plugins."io.containerd.grpc.v1.cri".registry.configs]
        [plugins."io.containerd.grpc.v1.cri".registry.configs."harbor.raymonds.cc".tls]
          insecure_skip_verify = true
        [plugins."io.containerd.grpc.v1.cri".registry.configs."harbor.raymonds.cc".auth]
          username = "admin"
          password = "123456"
...
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
#The next two lines configure registry mirrors
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
          endpoint = ["https://registry.docker-cn.com" ,"http://hub-mirror.c.163.com" ,"https://docker.mirrors.ustc.edu.cn"]
#The next two lines configure the private registry; skip them if you have no private registry
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."harbor.raymonds.cc"]
          endpoint = ["http://harbor.raymonds.cc"]
...
​
#Or apply the changes with the following command
sed -i -e '/.*registry.mirrors.*/a\        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]\n          endpoint = ["https://registry.docker-cn.com" ,"http://hub-mirror.c.163.com" ,"https://docker.mirrors.ustc.edu.cn"]\n        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."harbor.raymonds.cc"]\n          endpoint = ["http://harbor.raymonds.cc"]' -e '/.*registry.configs.*/a\        [plugins."io.containerd.grpc.v1.cri".registry.configs."harbor.raymonds.cc".tls]\n          insecure_skip_verify = true\n        [plugins."io.containerd.grpc.v1.cri".registry.configs."harbor.raymonds.cc".auth]\n          username = "admin"\n          password = "123456"' /etc/containerd/config.toml
​
#If you have no Harbor, skip the private registry settings and only configure the mirrors with this command
sed -i '/.*registry.mirrors.*/a\        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]\n          endpoint = ["https://registry.docker-cn.com" ,"http://hub-mirror.c.163.com" ,"https://docker.mirrors.ustc.edu.cn"]' /etc/containerd/config.toml

Configure the runtime endpoint for the crictl client:

[root@k8s-master01 ~]# cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF

Start Containerd and enable it at boot:

[root@k8s-master01 ~]# systemctl daemon-reload && systemctl enable --now containerd

Check the versions:

[root@k8s-master01 ~]# ctr version
Client:
  Version:  v1.6.8
  Revision: 9cd3357b7fd7218e4aec3eae239db1f68a5a6ec6
  Go version: go1.17.13

Server:
  Version:  v1.6.8
  Revision: 9cd3357b7fd7218e4aec3eae239db1f68a5a6ec6
  UUID: 18d9c9c1-27cc-4883-be10-baf17a186aad

[root@k8s-master01 ~]# crictl version
Version:  0.1.0
RuntimeName:  containerd
RuntimeVersion:  v1.6.8
RuntimeApiVersion:  v1

[root@k8s-master01 ~]# crictl info
...
  },
...
  "lastCNILoadStatus": "cni config load failed: no network config found in /etc/cni/net.d: cni plugin not initialized: failed to load cni config",
  "lastCNILoadStatus.default": "cni config load failed: no network config found in /etc/cni/net.d: cni plugin not initialized: failed to load cni config"
}
#The CNI errors here can be ignored: containerd's own CNI plugins are not installed. Kubernetes does not need them (they would even conflict); the flannel or calico CNI plugin is installed later

6.3 nerdctl, the containerd Client Tool

nerdctl is recommended; its command syntax matches docker's. GitHub download link:

github.com/containerd/…

  • Minimal (nerdctl--linux-amd64.tar.gz): contains only nerdctl
  • Full (nerdctl-full--linux-amd64.tar.gz): bundles dependencies such as containerd, runc, and CNI

nerdctl's goal is not simply to replicate docker's functionality: it also implements features docker lacks, such as lazy image pulling (lazy-pulling) and image encryption (imgcrypt). See the nerdctl project for details.


Lazy image pulling is covered in this article: Containerd Uses Stargz Snapshotter to Lazily Pull Images

icloudnative.io/posts/start…

1) Install nerdctl (minimal):

[root@k8s-master01 ~]# wget https://github.com/containerd/nerdctl/releases/download/v0.23.0/nerdctl-0.23.0-linux-amd64.tar.gz
[root@k8s-master01 ~]# tar xf nerdctl-0.23.0-linux-amd64.tar.gz -C /usr/local/bin/

#Configure nerdctl
[root@k8s-master01 ~]# mkdir -p /etc/nerdctl/
[root@k8s-master01 ~]# cat > /etc/nerdctl/nerdctl.toml <<EOF
namespace      = "k8s.io" #default namespace for nerdctl
insecure_registry = true #skip TLS verification for insecure registries
EOF
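
With that in place, nerdctl behaves like the docker CLI. A couple of illustrative commands (the image name is just an example; commands that create containers would additionally need CNI plugins or --net host):

[root@k8s-master01 ~]# nerdctl pull registry.aliyuncs.com/google_containers/pause:3.8
[root@k8s-master01 ~]# nerdctl images   #lists images in the k8s.io namespace set above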

2) Install buildkit to support image builds:

buildkit GitHub:

github.com/moby/buildk…

The minimal nerdctl cannot build images through containerd on its own; it must be combined with buildkit (alternatively, install the full nerdctl above). buildkit is a build toolkit open-sourced by Docker that builds OCI-standard images. It consists of the following parts:

  • the server, buildkitd, which currently supports runc and containerd as workers (runc by default);
  • the client, buildctl, which parses the Dockerfile and sends build requests to buildkitd.

buildkit is a classic client/server design; the client and server need not run on the same host. For building images, nerdctl can also act as a client of buildkitd.

[root@k8s-master01 ~]# wget https://github.com/moby/buildkit/releases/download/v0.10.4/buildkit-v0.10.4.linux-amd64.tar.gz
​
[root@k8s-master01 ~]# tar xf buildkit-v0.10.4.linux-amd64.tar.gz -C /usr/local/

The buildkit unit files can be downloaded from:

github.com/moby/buildk…

buildkit needs two configuration files:

  • /usr/lib/systemd/system/buildkit.socket
[root@k8s-master01 ~]# cat > /usr/lib/systemd/system/buildkit.socket <<EOF
[Unit]
Description=BuildKit
Documentation=https://github.com/moby/buildkit
​
[Socket]
ListenStream=%t/buildkit/buildkitd.sock
SocketMode=0660

[Install]
WantedBy=sockets.target
EOF
  • /usr/lib/systemd/system/buildkit.service
[root@k8s-master01 ~]# cat > /usr/lib/systemd/system/buildkit.service << EOF
[Unit]
Description=BuildKit
Requires=buildkit.socket
After=buildkit.socket
Documentation=https://github.com/moby/buildkit
​
[Service]
Type=notify
ExecStart=/usr/local/bin/buildkitd --addr fd://
​
[Install]
WantedBy=multi-user.target
EOF

Start buildkit:

[root@k8s-master01 ~]# systemctl daemon-reload && systemctl enable --now buildkit 

root@k8s-master01:~# systemctl status buildkit
 buildkit.service - BuildKit
     Loaded: loaded (/lib/systemd/system/buildkit.service; enabled; vendor preset: enabled)
     Active: active (running) since Tue 2022-09-13 16:47:14 CST; 21s ago
TriggeredBy:  buildkit.socket
       Docs: https://github.com/moby/buildkit
   Main PID: 3303 (buildkitd)
      Tasks: 7 (limit: 4575)
     Memory: 14.5M
     CGroup: /system.slice/buildkit.service
             └─3303 /usr/local/bin/buildkitd --addr fd://

Sep 13 16:47:14 k8s-master01.example.local systemd[1]: Started BuildKit.
Sep 13 16:47:14 k8s-master01.example.local buildkitd[3303]: time="2022-09-13T16:47:14+08:00" level=info msg="auto snapshotter: using overlayfs"
Sep 13 16:47:14 k8s-master01.example.local buildkitd[3303]: time="2022-09-13T16:47:14+08:00" level=warning msg="using host network as the defa>
Sep 13 16:47:14 k8s-master01.example.local buildkitd[3303]: time="2022-09-13T16:47:14+08:00" level=info msg="found worker "sgqr1t2c81tj7ec7w3>
Sep 13 16:47:14 k8s-master01.example.local buildkitd[3303]: time="2022-09-13T16:47:14+08:00" level=warning msg="using host network as the defa>
Sep 13 16:47:14 k8s-master01.example.local buildkitd[3303]: time="2022-09-13T16:47:14+08:00" level=info msg="found worker "w4fzprdjtuqtj3f3wd>
Sep 13 16:47:14 k8s-master01.example.local buildkitd[3303]: time="2022-09-13T16:47:14+08:00" level=info msg="found 2 workers, default="sgqr1t>
Sep 13 16:47:14 k8s-master01.example.local buildkitd[3303]: time="2022-09-13T16:47:14+08:00" level=warning msg="currently, only the default wo>
Sep 13 16:47:14 k8s-master01.example.local buildkitd[3303]: time="2022-09-13T16:47:14+08:00" level=info msg="running server on /run/buildkit/b>

[root@k8s-master01 ~]# nerdctl version
Client:
 Version:   v0.23.0
 OS/Arch:   linux/amd64
 Git commit:    660680b7ddfde1d38a66ec1c7f08f8d89ab92c68
 buildctl:
  Version:  v0.10.4
  GitCommit:    a2ba6869363812a210fcc3ded6926757ab780b5f

Server:
 containerd:
  Version:  v1.6.8
  GitCommit:    9cd3357b7fd7218e4aec3eae239db1f68a5a6ec6
 runc:
  Version:  1.1.3
  GitCommit:    v1.1.3-0-g6724737f

[root@k8s-master01 ~]# buildctl --version
buildctl github.com/moby/buildkit v0.10.4 a2ba6869363812a210fcc3ded6926757ab780b5f

[root@k8s-master01 ~]# nerdctl info
Client:
 Namespace: default
 Debug Mode:    false

Server:
 Server Version: v1.6.8
 Storage Driver: overlayfs
 Logging Driver: json-file
 Cgroup Driver: cgroupfs
 Cgroup Version: 1
 Plugins:
  Log: fluentd journald json-file
  Storage: aufs native overlayfs
 Security Options:
  apparmor
  seccomp
   Profile: default
 Kernel Version: 5.4.0-107-generic
 Operating System: Ubuntu 20.04.4 LTS
 OSType: linux
 Architecture: x86_64
 CPUs: 2
 Total Memory: 3.81GiB
 Name: k8s-master01.example.local
 ID: ab901e55-fa37-496e-9920-ee6eff687687

WARNING: No swap limit support #system warning (swap resource limiting is not enabled)

Fix the swap warning above:

#The swap warning only appears on Ubuntu; CentOS does not show it, so no change is needed there
root@k8s-master01:~# sed -ri '/^GRUB_CMDLINE_LINUX=/s@"$@ swapaccount=1"@' /etc/default/grub
​
root@k8s-master01:~# update-grub
root@k8s-master01:~# reboot
​
root@k8s-master01:~# nerdctl info
Client:
 Namespace: default
 Debug Mode:    false
​
Server:
 Server Version: v1.6.8
 Storage Driver: overlayfs
 Logging Driver: json-file
 Cgroup Driver: cgroupfs
 Cgroup Version: 1
 Plugins:
  Log: fluentd journald json-file
  Storage: aufs native overlayfs
 Security Options:
  apparmor
  seccomp
   Profile: default
 Kernel Version: 5.4.0-125-generic
 Operating System: Ubuntu 20.04.4 LTS
 OSType: linux
 Architecture: x86_64
 CPUs: 2
 Total Memory: 3.81GiB
 Name: k8s-master01.example.local
 ID: ab901e55-fa37-496e-9920-ee6eff687687
#The swap warning is gone now

nerdctl command completion:

#CentOS
[root@k8s-master01 ~]# yum -y install bash-completion

#Ubuntu
[root@k8s-master01 ~]# apt -y install bash-completion
​
[root@k8s-master01 ~]# echo "source <(nerdctl completion bash)" >> ~/.bashrc
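
With buildkitd running, nerdctl can now build images. A minimal, hypothetical example (the Dockerfile and tag are illustrative only):

[root@k8s-master01 ~]# mkdir build-test && cd build-test
[root@k8s-master01 build-test]# cat > Dockerfile <<EOF
FROM registry.aliyuncs.com/google_containers/pause:3.8
LABEL test=nerdctl-build
EOF
[root@k8s-master01 build-test]# nerdctl build -t build-test:v1 .
[root@k8s-master01 build-test]# nerdctl images | grep build-test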

Install containerd on master02, master03, and the node machines:

[root@k8s-master02 ~]# cat install_containerd_binary.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-09-13
#FileName:      install_containerd_binary.sh
#URL:           raymond.blog.csdn.net
#Description:   install_containerd_binary for centos 7/8 & ubuntu 18.04/20.04 & Rocky 8
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
SRC_DIR=/usr/local/src
COLOR="echo -e \033[01;31m"
END='\033[0m'

#Containerd download URL: https://github.com/containerd/containerd/releases/download/v1.6.8/cri-containerd-1.6.8-linux-amd64.tar.gz
CONTAINERD_FILE=cri-containerd-1.6.8-linux-amd64.tar.gz
PAUSE_VERSION=3.8
HARBOR_DOMAIN=harbor.raymonds.cc
USERNAME=admin
PASSWORD=123456
​
#nerdctl download URL: https://github.com/containerd/nerdctl/releases/download/v0.23.0/nerdctl-0.23.0-linux-amd64.tar.gz
NETDCTL_FILE=nerdctl-0.23.0-linux-amd64.tar.gz
#Buildkit download URL: https://github.com/moby/buildkit/releases/download/v0.10.4/buildkit-v0.10.4.linux-amd64.tar.gz
BUILDKIT_FILE=buildkit-v0.10.4.linux-amd64.tar.gz
​
os(){
    OS_ID=`sed -rn '/^NAME=/s@.*="([[:alpha:]]+).*"$@\1@p' /etc/os-release`
}
​
check_file (){
    cd ${SRC_DIR}
    if [ ! -e ${CONTAINERD_FILE} ];then
        ${COLOR}"Missing ${CONTAINERD_FILE}; place the file in ${SRC_DIR}"${END}
        exit
    elif [ ! -e ${NETDCTL_FILE} ];then
        ${COLOR}"Missing ${NETDCTL_FILE}; place the file in ${SRC_DIR}"${END}
        exit
    elif [ ! -e ${BUILDKIT_FILE} ];then
        ${COLOR}"Missing ${BUILDKIT_FILE}; place the file in ${SRC_DIR}"${END}
        exit
    else
        ${COLOR}"All required files are present"${END}
    fi
}
​
install_containerd(){ 
    [ -f /usr/local/bin/containerd ] && { ${COLOR}"Containerd already exists, aborting install"${END};exit; }
    cat > /etc/modules-load.d/containerd.conf <<-EOF
overlay
br_netfilter
EOF
    modprobe -- overlay
    modprobe -- br_netfilter
​
    cat > /etc/sysctl.d/99-kubernetes-cri.conf <<-EOF
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
    sysctl --system &> /dev/null
​
    ${COLOR}"开始安装Containerd..."${END}
    tar xf ${CONTAINERD_FILE} -C /
​
    mkdir -p /etc/containerd
    containerd config default | tee /etc/containerd/config.toml &> /dev/null 
    sed -ri -e 's/(.*SystemdCgroup = ).*/\1true/' -e 's@(.*sandbox_image = ).*@\1"'''${HARBOR_DOMAIN}'''/google_containers/pause:'''${PAUSE_VERSION}'''"@' -e '/.*registry.mirrors.*/a\        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]\n          endpoint = ["https://registry.docker-cn.com" ,"http://hub-mirror.c.163.com" ,"https://docker.mirrors.ustc.edu.cn"]\n        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."'''${HARBOR_DOMAIN}'''"]\n          endpoint = ["http://'''${HARBOR_DOMAIN}'''"]' -e '/.*registry.configs.*/a\        [plugins."io.containerd.grpc.v1.cri".registry.configs."'''${HARBOR_DOMAIN}'''".tls]\n          insecure_skip_verify = true\n        [plugins."io.containerd.grpc.v1.cri".registry.configs."'''${HARBOR_DOMAIN}'''".auth]\n          username = "'''${USERNAME}'''"\n          password = "'''${PASSWORD}'''"' /etc/containerd/config.toml
    cat > /etc/crictl.yaml <<-EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
    systemctl daemon-reload && systemctl enable --now containerd &> /dev/null
    systemctl is-active containerd &> /dev/null && ${COLOR}"Containerd service started successfully"${END} || { ${COLOR}"Containerd failed to start"${END};exit; }
    ctr version &&  ${COLOR}"Containerd installed successfully"${END} || ${COLOR}"Containerd install failed"${END}
}
​
set_alias(){
    echo 'alias rmi="nerdctl images -qa|xargs nerdctl rmi -f"' >> ~/.bashrc
    echo 'alias rmc="nerdctl ps -qa|xargs nerdctl rm -f"' >> ~/.bashrc
}
​
install_netdctl_buildkit(){
    ${COLOR}"开始安装Netdctl..."${END}
    tar xf ${NETDCTL_FILE} -C /usr/local/bin/
    mkdir -p /etc/nerdctl/
    cat > /etc/nerdctl/nerdctl.toml <<-EOF
namespace      = "k8s.io"
insecure_registry = true
EOF
​
    ${COLOR}"开始安装Buildkit..."${END}
    tar xf ${BUILDKIT_FILE} -C /usr/local/
    cat > /usr/lib/systemd/system/buildkit.socket <<-EOF
[Unit]
Description=BuildKit
Documentation=https://github.com/moby/buildkit
​
[Socket]
ListenStream=%t/buildkit/buildkitd.sock
SocketMode=0660
​
[Install]
WantedBy=sockets.target
EOF
    cat > /usr/lib/systemd/system/buildkit.service <<-EOF
[Unit]
Description=BuildKit
Requires=buildkit.socket
After=buildkit.socket
Documentation=https://github.com/moby/buildkit
​
[Service]
Type=notify
ExecStart=/usr/local/bin/buildkitd --addr fd://
​
[Install]
WantedBy=multi-user.target
EOF
    systemctl daemon-reload && systemctl enable --now buildkit &> /dev/null
    systemctl is-active buildkit &> /dev/null && ${COLOR}"Buildkit service started successfully"${END} || { ${COLOR}"Buildkit failed to start"${END};exit; }
    buildctl --version &&  ${COLOR}"Buildkit installed successfully"${END} || ${COLOR}"Buildkit install failed"${END}
}
​
nerdctl_command_completion(){
    if [ ${OS_ID} == "CentOS" -o ${OS_ID} == "Rocky" ];then
        yum -y install bash-completion
    else
        apt -y install bash-completion
    fi
    echo "source <(nerdctl completion bash)" >> ~/.bashrc
    . ~/.bashrc
}
​
set_swap_limit(){
    if [ ${OS_ID} == "Ubuntu" ];then
        ${COLOR}'Fixing the "WARNING: No swap limit support" warning'${END}
        sed -ri '/^GRUB_CMDLINE_LINUX=/s@"$@ swapaccount=1"@' /etc/default/grub
        update-grub &> /dev/null
        ${COLOR}"10秒后,机器会自动重启"${END}
        sleep 10
        reboot
    fi
}
​
main(){
    os
    check_file
    install_containerd
    set_alias
    install_netdctl_buildkit
    nerdctl_command_completion
    set_swap_limit
}
​
main
​
[root@k8s-master02 ~]# bash install_containerd_binary.sh
[root@k8s-master03 ~]# bash install_containerd_binary.sh
​
[root@k8s-node01 ~]# bash install_containerd_binary.sh
[root@k8s-node02 ~]# bash install_containerd_binary.sh
[root@k8s-node03 ~]# bash install_containerd_binary.sh
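
A quick loop to confirm the script succeeded on every node (a verification sketch; host names follow the inventory used throughout):

[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02 k8s-node03; do
     echo "=== $NODE ==="
     ssh -o StrictHostKeyChecking=no $NODE "systemctl is-active containerd buildkit && nerdctl --version"
 done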

7. Deploying the Master Nodes

7.1 Creating the etcd Directories and Copying the etcd Certificates

Create the etcd certificate directory on the master nodes

[root@k8s-master01 ~]# mkdir /etc/etcd/ssl -p
[root@k8s-master02 ~]# mkdir /etc/etcd/ssl -p
[root@k8s-master03 ~]# mkdir /etc/etcd/ssl -p

Copy the etcd certificates to the master nodes

[root@k8s-etcd01 pki]# for NODE in k8s-master01 k8s-master02 k8s-master03; do
     ssh -o StrictHostKeyChecking=no $NODE "mkdir -p /etc/etcd/ssl"
     for FILE in etcd-ca-key.pem  etcd-ca.pem  etcd-key.pem  etcd.pem; do
       scp -o StrictHostKeyChecking=no /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}
     done
 done

Create the Kubernetes etcd certificate directory on all master nodes and link the certificates into it

[root@k8s-master01 ~]# mkdir /etc/kubernetes/pki/etcd -p
[root@k8s-master02 ~]# mkdir /etc/kubernetes/pki/etcd -p
[root@k8s-master03 ~]# mkdir /etc/kubernetes/pki/etcd -p
​
[root@k8s-master01 ~]# ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
[root@k8s-master02 ~]# ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
[root@k8s-master03 ~]# ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/

7.2 Installing the Kubernetes Components

Download the latest 1.25.x release:

github.com/kubernetes/…

Open the page and download the Kubernetes server package:

[root@k8s-master01 ~]# wget https://dl.k8s.io/v1.25.2/kubernetes-server-linux-amd64.tar.gz

Extract the Kubernetes binaries

[root@k8s-master01 ~]# tar -xf kubernetes-server-linux-amd64.tar.gz  --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}

Check the version

[root@k8s-master01 ~]# kubelet --version
Kubernetes v1.25.2

Send the binaries to the other master nodes

[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do echo $NODE; scp -o StrictHostKeyChecking=no /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} $NODE:/usr/local/bin/; done

Create /opt/cni/bin on the master nodes

[root@k8s-master01 ~]# mkdir -p /opt/cni/bin
[root@k8s-master02 ~]# mkdir -p /opt/cni/bin
[root@k8s-master03 ~]# mkdir -p /opt/cni/bin

7.3 Generating the Kubernetes Component Certificates

This is the most critical part of a binary installation: one wrong step ruins everything, so make sure every step is done correctly.

Create the Kubernetes directories on the master nodes

[root@k8s-master01 ~]# mkdir -p /etc/kubernetes/pki
[root@k8s-master02 ~]# mkdir -p /etc/kubernetes/pki
[root@k8s-master03 ~]# mkdir -p /etc/kubernetes/pki

Download the certificate generation tools on master01

[root@k8s-master01 ~]# wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64
​
[root@k8s-master01 ~]# wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64
​
[root@k8s-master01 ~]# mv cfssl_1.6.1_linux_amd64 /usr/local/bin/cfssl
[root@k8s-master01 ~]# mv cfssljson_1.6.1_linux_amd64 /usr/local/bin/cfssljson
​
[root@k8s-master01 ~]# chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson
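
A quick sanity check that the tools are installed (optional):

[root@k8s-master01 ~]# cfssl version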

7.3.1 Generating the CA Certificate

[root@k8s-master01 ~]# mkdir pki
[root@k8s-master01 ~]# cd pki/
​
[root@k8s-master01 pki]# cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF
​
[root@k8s-master01 pki]# cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca
#output
2022/09/25 16:39:54 [INFO] generating a new CA key and certificate from CSR
2022/09/25 16:39:54 [INFO] generate received request
2022/09/25 16:39:54 [INFO] received CSR
2022/09/25 16:39:54 [INFO] generating key: rsa-2048
2022/09/25 16:39:54 [INFO] encoded CSR
2022/09/25 16:39:54 [INFO] signed certificate with serial number 144467388419643602118985040994832054816132660567
​
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/ca*
-rw-r--r-- 1 root root 1070 Sep 25 16:39 /etc/kubernetes/pki/ca.csr
-rw------- 1 root root 1679 Sep 25 16:39 /etc/kubernetes/pki/ca-key.pem
-rw-r--r-- 1 root root 1363 Sep 25 16:39 /etc/kubernetes/pki/ca.pem
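
The generated CA can be inspected with openssl; the subject should match ca-csr.json, and notAfter should be roughly 100 years out, matching the 876000h expiry requested above (an optional check):

[root@k8s-master01 pki]# openssl x509 -in /etc/kubernetes/pki/ca.pem -noout -subject -dates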

7.3.2 Generating the apiserver Certificate

10.96.0.0/12 is the Kubernetes service CIDR, and 10.96.0.1 (the first IP in that range) is the ClusterIP of the kubernetes service, so it must appear in the certificate's SANs; if you change the service CIDR, change 10.96.0.1 accordingly.

If this is not a highly available cluster, replace 172.31.3.188 (the VIP) with Master01's IP.

[root@k8s-master01 pki]# cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
EOF
​
[root@k8s-master01 pki]# cat > apiserver-csr.json <<EOF
{
  "CN": "kube-apiserver",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF
​
[root@k8s-master01 pki]# cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=ca-config.json -hostname=10.96.0.1,172.31.3.188,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,172.31.3.101,172.31.3.102,172.31.3.103 -profile=kubernetes apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver
#output
2022/09/25 16:40:41 [INFO] generate received request
2022/09/25 16:40:41 [INFO] received CSR
2022/09/25 16:40:41 [INFO] generating key: rsa-2048
2022/09/25 16:40:41 [INFO] encoded CSR
2022/09/25 16:40:41 [INFO] signed certificate with serial number 430981208956747841835012330139928202432283184832
​
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/apiserver*
-rw-r--r-- 1 root root 1297 Sep 25 16:40 /etc/kubernetes/pki/apiserver.csr
-rw------- 1 root root 1675 Sep 25 16:40 /etc/kubernetes/pki/apiserver-key.pem
-rw-r--r-- 1 root root 1692 Sep 25 16:40 /etc/kubernetes/pki/apiserver.pem
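
Optionally confirm the SANs cover the service IP, the VIP, and all master IPs:

[root@k8s-master01 pki]# openssl x509 -in /etc/kubernetes/pki/apiserver.pem -noout -text | grep -A1 'Subject Alternative Name'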

7.3.3 Generating the apiserver Aggregation Certificate

Generate the apiserver aggregation (front proxy) certificate, used by the requestheader-client / requestheader-allowed settings together with the aggregator identity.

[root@k8s-master01 pki]# cat > front-proxy-ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
     "algo": "rsa",
     "size": 2048
  }
}
EOF

[root@k8s-master01 pki]# cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca
#output
2022/09/25 16:41:21 [INFO] generating a new CA key and certificate from CSR
2022/09/25 16:41:21 [INFO] generate received request
2022/09/25 16:41:21 [INFO] received CSR
2022/09/25 16:41:21 [INFO] generating key: rsa-2048
2022/09/25 16:41:22 [INFO] encoded CSR
2022/09/25 16:41:22 [INFO] signed certificate with serial number 673709205875970548123940386290897802924816968201

[root@k8s-master01 pki]# ll /etc/kubernetes/pki/front-proxy-ca*
-rw-r--r-- 1 root root  891 Sep 25 16:41 /etc/kubernetes/pki/front-proxy-ca.csr
-rw------- 1 root root 1679 Sep 25 16:41 /etc/kubernetes/pki/front-proxy-ca-key.pem
-rw-r--r-- 1 root root 1094 Sep 25 16:41 /etc/kubernetes/pki/front-proxy-ca.pem

[root@k8s-master01 pki]# cat > front-proxy-client-csr.json <<EOF
{
  "CN": "front-proxy-client",
  "key": {
     "algo": "rsa",
     "size": 2048
  }
}
EOF

[root@k8s-master01 pki]# cfssl gencert -ca=/etc/kubernetes/pki/front-proxy-ca.pem -ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem -config=ca-config.json -profile=kubernetes front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client
#output (ignore the warning)
2022/09/25 16:42:01 [INFO] generate received request
2022/09/25 16:42:01 [INFO] received CSR
2022/09/25 16:42:01 [INFO] generating key: rsa-2048
2022/09/25 16:42:01 [INFO] encoded CSR
2022/09/25 16:42:01 [INFO] signed certificate with serial number 85560094871910737940366819468847340063056850525
2022/09/25 16:42:01 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

[root@k8s-master01 pki]# ll /etc/kubernetes/pki/front-proxy-client*
-rw-r--r-- 1 root root  903 Sep 25 16:42 /etc/kubernetes/pki/front-proxy-client.csr
-rw------- 1 root root 1675 Sep 25 16:42 /etc/kubernetes/pki/front-proxy-client-key.pem
-rw-r--r-- 1 root root 1188 Sep 25 16:42 /etc/kubernetes/pki/front-proxy-client.pem

7.3.4 Generating the controller-manager Certificate and Kubeconfig

[root@k8s-master01 pki]# cat > manager-csr.json <<EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [    {      "C": "CN",      "ST": "Beijing",      "L": "Beijing",      "O": "system:kube-controller-manager",      "OU": "Kubernetes-manual"    }  ]
}
EOF

[root@k8s-master01 pki]# cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem  -config=ca-config.json -profile=kubernetes manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager
#output
2022/09/25 16:42:37 [INFO] generate received request
2022/09/25 16:42:37 [INFO] received CSR
2022/09/25 16:42:37 [INFO] generating key: rsa-2048
2022/09/25 16:42:37 [INFO] encoded CSR
2022/09/25 16:42:37 [INFO] signed certificate with serial number 730142395596434023542887289901373515374508177240
2022/09/25 16:42:37 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
​
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/controller-manager*
-rw-r--r-- 1 root root 1082 Sep 25 16:42 /etc/kubernetes/pki/controller-manager.csr
-rw------- 1 root root 1675 Sep 25 16:42 /etc/kubernetes/pki/controller-manager-key.pem
-rw-r--r-- 1 root root 1501 Sep 25 16:42 /etc/kubernetes/pki/controller-manager.pem
​
# Note: if this is not a highly available cluster, change 172.31.3.188:6443 to master01's address
# set-cluster: define a cluster entry
[root@k8s-master01 pki]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://172.31.3.188:6443 --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
#output
Cluster "kubernetes" set.
​
# set-credentials: define a user entry
[root@k8s-master01 pki]# kubectl config set-credentials system:kube-controller-manager --client-certificate=/etc/kubernetes/pki/controller-manager.pem --client-key=/etc/kubernetes/pki/controller-manager-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
#output
User "system:kube-controller-manager" set.
​
# define a context entry
[root@k8s-master01 pki]# kubectl config set-context system:kube-controller-manager@kubernetes --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
#output
Context "system:kube-controller-manager@kubernetes" created.
​
# make this context the default
[root@k8s-master01 pki]# kubectl config use-context system:kube-controller-manager@kubernetes --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
#output
Switched to context "system:kube-controller-manager@kubernetes".
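
The resulting kubeconfig can be inspected with kubectl itself; embedded certificates are shown redacted (an optional check):

[root@k8s-master01 pki]# kubectl config view --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig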

7.3.5 Generating the scheduler Certificate and Kubeconfig

[root@k8s-master01 pki]# cat > scheduler-csr.json <<EOF
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [    {      "C": "CN",      "ST": "Beijing",      "L": "Beijing",      "O": "system:kube-scheduler",      "OU": "Kubernetes-manual"    }  ]
}
EOF

[root@k8s-master01 pki]# cfssl gencert \
   -ca=/etc/kubernetes/pki/ca.pem \
   -ca-key=/etc/kubernetes/pki/ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler
#output
2022/09/25 16:48:53 [INFO] generate received request
2022/09/25 16:48:53 [INFO] received CSR
2022/09/25 16:48:53 [INFO] generating key: rsa-2048
2022/09/25 16:48:53 [INFO] encoded CSR
2022/09/25 16:48:53 [INFO] signed certificate with serial number 228881838573494475296244558577950441626719203074
2022/09/25 16:48:53 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
​
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/scheduler*
-rw-r--r-- 1 root root 1058 Sep 25 16:48 /etc/kubernetes/pki/scheduler.csr
-rw------- 1 root root 1679 Sep 25 16:48 /etc/kubernetes/pki/scheduler-key.pem
-rw-r--r-- 1 root root 1476 Sep 25 16:48 /etc/kubernetes/pki/scheduler.pem
​
# Note: if this is not a highly available cluster, change 172.31.3.188:6443 to master01's address
[root@k8s-master01 pki]# kubectl config set-cluster kubernetes \
     --certificate-authority=/etc/kubernetes/pki/ca.pem \
     --embed-certs=true \
     --server=https://172.31.3.188:6443 \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
#output
Cluster "kubernetes" set.
​
[root@k8s-master01 pki]# kubectl config set-credentials system:kube-scheduler \
     --client-certificate=/etc/kubernetes/pki/scheduler.pem \
     --client-key=/etc/kubernetes/pki/scheduler-key.pem \
     --embed-certs=true \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
#output
User "system:kube-scheduler" set.
​
[root@k8s-master01 pki]# kubectl config set-context system:kube-scheduler@kubernetes \
     --cluster=kubernetes \
     --user=system:kube-scheduler \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
#output
Context "system:kube-scheduler@kubernetes" created.
​
[root@k8s-master01 pki]# kubectl config use-context system:kube-scheduler@kubernetes \
     --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
#output
Switched to context "system:kube-scheduler@kubernetes".

7.3.6 Generating the admin Certificate and Kubeconfig

[root@k8s-master01 pki]# cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [    {      "C": "CN",      "ST": "Beijing",      "L": "Beijing",      "O": "system:masters",      "OU": "Kubernetes-manual"    }  ]
}
EOF

[root@k8s-master01 pki]# cfssl gencert \
   -ca=/etc/kubernetes/pki/ca.pem \
   -ca-key=/etc/kubernetes/pki/ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin
#output
2022/09/25 16:50:04 [INFO] generate received request
2022/09/25 16:50:04 [INFO] received CSR
2022/09/25 16:50:04 [INFO] generating key: rsa-2048
2022/09/25 16:50:04 [INFO] encoded CSR
2022/09/25 16:50:04 [INFO] signed certificate with serial number 42182691318354263130529298881094841114488241757
2022/09/25 16:50:04 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
​
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/admin*
-rw-r--r-- 1 root root 1025 Sep 25 16:50 /etc/kubernetes/pki/admin.csr
-rw------- 1 root root 1675 Sep 25 16:50 /etc/kubernetes/pki/admin-key.pem
-rw-r--r-- 1 root root 1444 Sep 25 16:50 /etc/kubernetes/pki/admin.pem
​
# Note: if this is not a highly available cluster, change 172.31.3.188:6443 to master01's address
[root@k8s-master01 pki]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://172.31.3.188:6443 --kubeconfig=/etc/kubernetes/admin.kubeconfig
#output
Cluster "kubernetes" set.
​
[root@k8s-master01 pki]# kubectl config set-credentials kubernetes-admin --client-certificate=/etc/kubernetes/pki/admin.pem --client-key=/etc/kubernetes/pki/admin-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/admin.kubeconfig
#output
User "kubernetes-admin" set.
​
[root@k8s-master01 pki]# kubectl config set-context kubernetes-admin@kubernetes --cluster=kubernetes     --user=kubernetes-admin --kubeconfig=/etc/kubernetes/admin.kubeconfig
#output
Context "kubernetes-admin@kubernetes" created.
​
[root@k8s-master01 pki]# kubectl config use-context kubernetes-admin@kubernetes --kubeconfig=/etc/kubernetes/admin.kubeconfig
#output
Switched to context "kubernetes-admin@kubernetes".

7.3.7 Creating the ServiceAccount Key

Create the ServiceAccount key → secret

[root@k8s-master01 pki]# openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
#output
Generating RSA private key, 2048 bit long modulus
..............................+++
....................................+++
e is 65537 (0x10001)
​
[root@k8s-master01 pki]# openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
#output
writing RSA key
​
[root@k8s-master01 pki]# ll /etc/kubernetes/pki/sa*
-rw------- 1 root root 1679 Sep 25 16:51 /etc/kubernetes/pki/sa.key
-rw-r--r-- 1 root root  451 Sep 25 16:51 /etc/kubernetes/pki/sa.pub
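
To confirm sa.pub really is the public half of sa.key, compare the key moduli; identical hashes mean the pair matches (an optional check):

[root@k8s-master01 pki]# openssl rsa -in /etc/kubernetes/pki/sa.key -noout -modulus | md5sum
[root@k8s-master01 pki]# openssl rsa -pubin -in /etc/kubernetes/pki/sa.pub -noout -modulus | md5sum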

Send the certificates to the other master nodes

[root@k8s-master01 pki]# for NODE in k8s-master02 k8s-master03; do 
    for FILE in $(ls /etc/kubernetes/pki | grep -v etcd); do 
        scp /etc/kubernetes/pki/${FILE} $NODE:/etc/kubernetes/pki/${FILE};
    done; 
    for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig; do 
        scp /etc/kubernetes/${FILE} $NODE:/etc/kubernetes/${FILE};
    done;
done

Check the certificate files

[root@k8s-master01 pki]# ll -R /etc/kubernetes/pki/
/etc/kubernetes/pki/:
total 92
-rw-r--r-- 1 root root 1025 Sep 25 16:50 admin.csr
-rw------- 1 root root 1675 Sep 25 16:50 admin-key.pem
-rw-r--r-- 1 root root 1444 Sep 25 16:50 admin.pem
-rw-r--r-- 1 root root 1297 Sep 25 16:40 apiserver.csr
-rw------- 1 root root 1675 Sep 25 16:40 apiserver-key.pem
-rw-r--r-- 1 root root 1692 Sep 25 16:40 apiserver.pem
-rw-r--r-- 1 root root 1070 Sep 25 16:39 ca.csr
-rw------- 1 root root 1679 Sep 25 16:39 ca-key.pem
-rw-r--r-- 1 root root 1363 Sep 25 16:39 ca.pem
-rw-r--r-- 1 root root 1082 Sep 25 16:42 controller-manager.csr
-rw------- 1 root root 1675 Sep 25 16:42 controller-manager-key.pem
-rw-r--r-- 1 root root 1501 Sep 25 16:42 controller-manager.pem
drwxr-xr-x 2 root root   84 Sep 25 16:37 etcd
-rw-r--r-- 1 root root  891 Sep 25 16:41 front-proxy-ca.csr
-rw------- 1 root root 1679 Sep 25 16:41 front-proxy-ca-key.pem
-rw-r--r-- 1 root root 1094 Sep 25 16:41 front-proxy-ca.pem
-rw-r--r-- 1 root root  903 Sep 25 16:42 front-proxy-client.csr
-rw------- 1 root root 1675 Sep 25 16:42 front-proxy-client-key.pem
-rw-r--r-- 1 root root 1188 Sep 25 16:42 front-proxy-client.pem
-rw------- 1 root root 1679 Sep 25 16:51 sa.key
-rw-r--r-- 1 root root  451 Sep 25 16:51 sa.pub
-rw-r--r-- 1 root root 1058 Sep 25 16:48 scheduler.csr
-rw------- 1 root root 1679 Sep 25 16:48 scheduler-key.pem
-rw-r--r-- 1 root root 1476 Sep 25 16:48 scheduler.pem

/etc/kubernetes/pki/etcd:
total 0
lrwxrwxrwx 1 root root 29 Sep 25 16:37 etcd-ca-key.pem -> /etc/etcd/ssl/etcd-ca-key.pem
lrwxrwxrwx 1 root root 25 Sep 25 16:37 etcd-ca.pem -> /etc/etcd/ssl/etcd-ca.pem
lrwxrwxrwx 1 root root 26 Sep 25 16:37 etcd-key.pem -> /etc/etcd/ssl/etcd-key.pem
lrwxrwxrwx 1 root root 22 Sep 25 16:37 etcd.pem -> /etc/etcd/ssl/etcd.pem
​
[root@k8s-master01 pki]# ls /etc/kubernetes/pki | grep -v etcd |wc -l
23
# 23 files is correct

7.4 Configuring the Kubernetes Components

Create the required directories on the master nodes

[root@k8s-master01 pki]# cd
[root@k8s-master01 ~]# mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
​
[root@k8s-master02 ~]# mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
​
[root@k8s-master03 ~]# mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes

7.4.1 Apiserver

Create the kube-apiserver service on all master nodes. Note: if this is not a highly available cluster, change 172.31.3.188 to master01's address.

7.4.1.1 Master01 Configuration

Note: this document uses 10.96.0.0/12 as the Kubernetes service CIDR; it must not overlap the host network or the Pod CIDR. Adjust as needed.

[root@k8s-master01 ~]# cat > /lib/systemd/system/kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
​
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
      --v=2  \
      --logtostderr=true  \
      --allow-privileged=true  \
      --bind-address=0.0.0.0  \
      --secure-port=6443  \
      --advertise-address=172.31.3.101 \
      --service-cluster-ip-range=10.96.0.0/12  \
      --service-node-port-range=30000-32767  \
      --etcd-servers=https://172.31.3.108:2379,https://172.31.3.109:2379,https://172.31.3.110:2379 \
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \
      --etcd-certfile=/etc/etcd/ssl/etcd.pem  \
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \
      --client-ca-file=/etc/kubernetes/pki/ca.pem  \
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \
      --service-account-key-file=/etc/kubernetes/pki/sa.pub  \
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \
      --authorization-mode=Node,RBAC  \
      --enable-bootstrap-token-auth=true  \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \
      --requestheader-allowed-names=aggregator  \
      --requestheader-group-headers=X-Remote-Group  \
      --requestheader-extra-headers-prefix=X-Remote-Extra-  \
      --requestheader-username-headers=X-Remote-User
      # --token-auth-file=/etc/kubernetes/token.csv

Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
EOF

7.4.1.2 Master02 Configuration

Note: this document uses 10.96.0.0/12 as the Kubernetes service CIDR; it must not overlap the host network or the Pod CIDR. Adjust as needed.

[root@k8s-master02 ~]# cat > /lib/systemd/system/kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
​
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
      --v=2  \
      --logtostderr=true  \
      --allow-privileged=true  \
      --bind-address=0.0.0.0  \
      --secure-port=6443  \
      --advertise-address=172.31.3.102 \
      --service-cluster-ip-range=10.96.0.0/12  \
      --service-node-port-range=30000-32767  \
      --etcd-servers=https://172.31.3.108:2379,https://172.31.3.109:2379,https://172.31.3.110:2379 \
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \
      --etcd-certfile=/etc/etcd/ssl/etcd.pem  \
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \
      --client-ca-file=/etc/kubernetes/pki/ca.pem  \
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \
      --service-account-key-file=/etc/kubernetes/pki/sa.pub  \
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \
      --authorization-mode=Node,RBAC  \
      --enable-bootstrap-token-auth=true  \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \
      --requestheader-allowed-names=aggregator  \
      --requestheader-group-headers=X-Remote-Group  \
      --requestheader-extra-headers-prefix=X-Remote-Extra-  \
      --requestheader-username-headers=X-Remote-User
      # --token-auth-file=/etc/kubernetes/token.csv

Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
EOF

7.4.1.3 Master03 Configuration

Note: this document uses 10.96.0.0/12 as the Kubernetes service CIDR; it must not overlap the host network or the Pod CIDR. Adjust as needed.

[root@k8s-master03 ~]# cat > /lib/systemd/system/kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
​
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
      --v=2  \
      --logtostderr=true  \
      --allow-privileged=true  \
      --bind-address=0.0.0.0  \
      --secure-port=6443  \
      --advertise-address=172.31.3.103 \
      --service-cluster-ip-range=10.96.0.0/12  \
      --service-node-port-range=30000-32767  \
      --etcd-servers=https://172.31.3.108:2379,https://172.31.3.109:2379,https://172.31.3.110:2379 \
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \
      --etcd-certfile=/etc/etcd/ssl/etcd.pem  \
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \
      --client-ca-file=/etc/kubernetes/pki/ca.pem  \
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \
      --service-account-key-file=/etc/kubernetes/pki/sa.pub  \
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \
      --authorization-mode=Node,RBAC  \
      --enable-bootstrap-token-auth=true  \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \
      --requestheader-allowed-names=aggregator  \
      --requestheader-group-headers=X-Remote-Group  \
      --requestheader-extra-headers-prefix=X-Remote-Extra-  \
      --requestheader-username-headers=X-Remote-User
      # --token-auth-file=/etc/kubernetes/token.csv

Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
EOF

7.4.1.4 Starting the apiserver

Start kube-apiserver on all master nodes

[root@k8s-master01 ~]# systemctl daemon-reload && systemctl enable --now kube-apiserver
​
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl enable --now kube-apiserver
​
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl enable --now kube-apiserver

Check the kube-apiserver status

[root@k8s-master01 ~]# systemctl status kube-apiserver
 kube-apiserver.service - Kubernetes API Server
   Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2022-09-25 16:53:11 CST; 12s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 6154 (kube-apiserver)
    Tasks: 14 (limit: 23474)
   Memory: 259.5M
   CGroup: /system.slice/kube-apiserver.service
           └─6154 /usr/local/bin/kube-apiserver --v=2 --logtostderr=true --allow-privileged=true --bind-address=0.0.0.0 --secure-port=6443 --a>

Sep 25 16:53:19 k8s-master01.example.local kube-apiserver[6154]: I0925 16:53:19.657786    6154 storage_rbac.go:321] created rolebinding.rbac.a>
Sep 25 16:53:19 k8s-master01.example.local kube-apiserver[6154]: I0925 16:53:19.672982    6154 storage_rbac.go:321] created rolebinding.rbac.a>
Sep 25 16:53:19 k8s-master01.example.local kube-apiserver[6154]: I0925 16:53:19.684131    6154 healthz.go:257] poststarthook/rbac/bootstrap-ro>
Sep 25 16:53:19 k8s-master01.example.local kube-apiserver[6154]: [-]poststarthook/rbac/bootstrap-roles failed: not finished
Sep 25 16:53:19 k8s-master01.example.local kube-apiserver[6154]: I0925 16:53:19.689793    6154 storage_rbac.go:321] created rolebinding.rbac.a>
Sep 25 16:53:19 k8s-master01.example.local kube-apiserver[6154]: I0925 16:53:19.705317    6154 storage_rbac.go:321] created rolebinding.rbac.a>
Sep 25 16:53:19 k8s-master01.example.local kube-apiserver[6154]: I0925 16:53:19.817048    6154 alloc.go:327] "allocated clusterIPs" service="d>
Sep 25 16:53:19 k8s-master01.example.local kube-apiserver[6154]: W0925 16:53:19.839303    6154 lease.go:250] Resetting endpoints for master se>
Sep 25 16:53:19 k8s-master01.example.local kube-apiserver[6154]: I0925 16:53:19.840909    6154 controller.go:616] quota admission added evalua>
Sep 25 16:53:19 k8s-master01.example.local kube-apiserver[6154]: I0925 16:53:19.878881    6154 controller.go:616] quota admission added evalua>

[root@k8s-master02 ~]# systemctl status kube-apiserver
[root@k8s-master03 ~]# systemctl status kube-apiserver

7.4.2 ControllerManager

Configure the kube-controller-manager service on all master nodes

Note: this document uses 192.168.0.0/12 as the Pod CIDR; it must not overlap the host network or the Kubernetes service CIDR. Adjust as needed.

[root@k8s-master01 ~]# cat > /lib/systemd/system/kube-controller-manager.service <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
​
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
      --v=2 \
      --logtostderr=true \
      --bind-address=127.0.0.1 \
      --root-ca-file=/etc/kubernetes/pki/ca.pem \
      --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
      --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
      --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
      --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
      --leader-elect=true \
      --use-service-account-credentials=true \
      --node-monitor-grace-period=40s \
      --node-monitor-period=5s \
      --pod-eviction-timeout=2m0s \
      --controllers=*,bootstrapsigner,tokencleaner \
      --allocate-node-cidrs=true \
      --cluster-cidr=192.168.0.0/12 \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
      --node-cidr-mask-size=24
      
Restart=always
RestartSec=10s
​
[Install]
WantedBy=multi-user.target
EOF
​
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do scp /lib/systemd/system/kube-controller-manager.service $NODE:/lib/systemd/system/; done

Start kube-controller-manager on all master nodes

[root@k8s-master01 ~]# systemctl daemon-reload && systemctl enable --now kube-controller-manager
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl enable --now kube-controller-manager
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl enable --now kube-controller-manager

Check the status

[root@k8s-master01 ~]# systemctl status kube-controller-manager
 kube-controller-manager.service - Kubernetes Controller Manager
   Loaded: loaded (/usr/lib/systemd/system/kube-controller-manager.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2022-09-25 16:54:21 CST; 16s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 6212 (kube-controller)
    Tasks: 11 (limit: 23474)
   Memory: 35.3M
   CGroup: /system.slice/kube-controller-manager.service
           └─6212 /usr/local/bin/kube-controller-manager --v=2 --logtostderr=true --bind-address=127.0.0.1 --root-ca-file=/etc/kubernetes/pki/>

Sep 25 16:54:35 k8s-master01.example.local kube-controller-manager[6212]: I0925 16:54:35.274767    6212 shared_informer.go:262] Caches are syn>
Sep 25 16:54:35 k8s-master01.example.local kube-controller-manager[6212]: I0925 16:54:35.274835    6212 taint_manager.go:204] "Starting NoExec>
Sep 25 16:54:35 k8s-master01.example.local kube-controller-manager[6212]: I0925 16:54:35.274896    6212 taint_manager.go:209] "Sending events >
Sep 25 16:54:35 k8s-master01.example.local kube-controller-manager[6212]: I0925 16:54:35.302454    6212 shared_informer.go:262] Caches are syn>
Sep 25 16:54:35 k8s-master01.example.local kube-controller-manager[6212]: I0925 16:54:35.303795    6212 shared_informer.go:262] Caches are syn>
Sep 25 16:54:35 k8s-master01.example.local kube-controller-manager[6212]: I0925 16:54:35.303859    6212 resource_quota_controller.go:462] sync>
Sep 25 16:54:35 k8s-master01.example.local kube-controller-manager[6212]: I0925 16:54:35.643168    6212 shared_informer.go:262] Caches are syn>
Sep 25 16:54:35 k8s-master01.example.local kube-controller-manager[6212]: I0925 16:54:35.643226    6212 garbagecollector.go:263] synced garbag>
Sep 25 16:54:35 k8s-master01.example.local kube-controller-manager[6212]: I0925 16:54:35.680675    6212 shared_informer.go:262] Caches are syn>
Sep 25 16:54:35 k8s-master01.example.local kube-controller-manager[6212]: I0925 16:54:35.680732    6212 garbagecollector.go:163] Garbage colle>

[root@k8s-master02 ~]# systemctl status kube-controller-manager
[root@k8s-master03 ~]# systemctl status kube-controller-manager

7.4.3 Scheduler

Configure the kube-scheduler service on all master nodes

[root@k8s-master01 ~]# cat > /lib/systemd/system/kube-scheduler.service <<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
​
[Service]
ExecStart=/usr/local/bin/kube-scheduler \
      --v=2 \
      --logtostderr=true \
      --bind-address=127.0.0.1 \
      --leader-elect=true \
      --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
​
Restart=always
RestartSec=10s
​
[Install]
WantedBy=multi-user.target
EOF
​
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do scp /lib/systemd/system/kube-scheduler.service $NODE:/lib/systemd/system/; done

Start kube-scheduler on all master nodes:

[root@k8s-master01 ~]# systemctl daemon-reload && systemctl enable --now kube-scheduler
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl enable --now kube-scheduler
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl enable --now kube-scheduler

Check the status:

[root@k8s-master01 ~]# systemctl status kube-scheduler
● kube-scheduler.service - Kubernetes Scheduler
   Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2022-09-25 16:55:26 CST; 14s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 6268 (kube-scheduler)
    Tasks: 7 (limit: 23474)
   Memory: 13.9M
   CGroup: /system.slice/kube-scheduler.service
           └─6268 /usr/local/bin/kube-scheduler --v=2 --logtostderr=true --bind-address=127.0.0.1 --leader-elect=true --kubeconfig=/etc/kubern>
​
Sep 25 16:55:27 k8s-master01.example.local kube-scheduler[6268]:           schedulerName: default-scheduler
Sep 25 16:55:27 k8s-master01.example.local kube-scheduler[6268]:  >
Sep 25 16:55:27 k8s-master01.example.local kube-scheduler[6268]: I0925 16:55:27.648331    6268 server.go:148] "Starting Kubernetes Scheduler" >
Sep 25 16:55:27 k8s-master01.example.local kube-scheduler[6268]: I0925 16:55:27.648349    6268 server.go:150] "Golang settings" GOGC="" GOMAXP>
Sep 25 16:55:27 k8s-master01.example.local kube-scheduler[6268]: I0925 16:55:27.650630    6268 tlsconfig.go:200] "Loaded serving cert" certNam>
Sep 25 16:55:27 k8s-master01.example.local kube-scheduler[6268]: I0925 16:55:27.650785    6268 named_certificates.go:53] "Loaded SNI cert" ind>
Sep 25 16:55:27 k8s-master01.example.local kube-scheduler[6268]: I0925 16:55:27.650811    6268 secure_serving.go:210] Serving securely on 127.>
Sep 25 16:55:27 k8s-master01.example.local kube-scheduler[6268]: I0925 16:55:27.651921    6268 tlsconfig.go:240] "Starting DynamicServingCerti>
Sep 25 16:55:27 k8s-master01.example.local kube-scheduler[6268]: I0925 16:55:27.752899    6268 leaderelection.go:248] attempting to acquire le>
Sep 25 16:55:27 k8s-master01.example.local kube-scheduler[6268]: I0925 16:55:27.771438    6268 leaderelection.go:258] successfully acquired le>
​
[root@k8s-master02 ~]# systemctl status kube-scheduler
[root@k8s-master03 ~]# systemctl status kube-scheduler

7.4.4 TLS Bootstrapping Configuration

Create the bootstrap resources on Master01

[root@k8s-master01 ~]# cat > bootstrap.secret.yaml <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-c8ad9c
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: c8ad9c
  token-secret: 2e4d610cf3e7426e
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups:  system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
 
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kube-apiserver
EOF

# Note: if this is not a highly available cluster, change 172.31.3.188:6443 to Master01's address
[root@k8s-master01 ~]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://172.31.3.188:6443 --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
# Output
Cluster "kubernetes" set.

[root@k8s-master01 ~]# kubectl config set-credentials tls-bootstrap-token-user --token=c8ad9c.2e4d610cf3e7426e --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
# Output
User "tls-bootstrap-token-user" set.

[root@k8s-master01 ~]# kubectl config set-context tls-bootstrap-token-user@kubernetes --cluster=kubernetes --user=tls-bootstrap-token-user --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
# Output
Context "tls-bootstrap-token-user@kubernetes" modified.

[root@k8s-master01 ~]# kubectl config use-context tls-bootstrap-token-user@kubernetes --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
# Output
Switched to context "tls-bootstrap-token-user@kubernetes".

Note: if you modify the token-id and token-secret in bootstrap.secret.yaml, the strings circled in red in the image below must stay consistent with each other and keep the same lengths. The token highlighted in yellow in the command above, c8ad9c.2e4d610cf3e7426e, must also match your modified values (the format is <token-id>.<token-secret>).

020.jpg
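If you do regenerate the token, a minimal sketch for producing a valid pair and patching the manifest (a bootstrap token needs a 6-character token-id and a 16-character token-secret, both lowercase alphanumeric; TOKEN_ID and TOKEN_SECRET are illustrative variable names):

[root@k8s-master01 ~]# TOKEN_ID=$(head -c 16 /dev/urandom | md5sum | head -c 6)
[root@k8s-master01 ~]# TOKEN_SECRET=$(head -c 16 /dev/urandom | md5sum | head -c 16)
# Replace every occurrence, including the bootstrap-token-<token-id> Secret name
[root@k8s-master01 ~]# sed -i -e "s/c8ad9c/${TOKEN_ID}/g" -e "s/2e4d610cf3e7426e/${TOKEN_SECRET}/g" bootstrap.secret.yaml
# This value is what goes after --token= in the set-credentials command above
[root@k8s-master01 ~]# echo "${TOKEN_ID}.${TOKEN_SECRET}"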

[root@k8s-master01 ~]# mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config

# Check the cluster status; if everything looks healthy, continue with the following steps
[root@k8s-master01 ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE                         ERROR
scheduler            Healthy   ok                              
controller-manager   Healthy   ok                              
etcd-1               Healthy   {"health":"true","reason":""}   
etcd-2               Healthy   {"health":"true","reason":""}   
etcd-0               Healthy   {"health":"true","reason":""}   
​
[root@k8s-master01 ~]# kubectl create -f bootstrap.secret.yaml
​
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do
     for FILE in bootstrap-kubelet.kubeconfig; do
       scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}
     done
 done
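A quick check that the bootstrap resources landed; note the Secret name must keep the form bootstrap-token-<token-id>:

[root@k8s-master01 ~]# kubectl -n kube-system get secret bootstrap-token-c8ad9c
[root@k8s-master01 ~]# kubectl get clusterrolebinding kubelet-bootstrap node-autoapprove-bootstrap node-autoapprove-certificate-rotation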

7.4.5 Kubelet Configuration

Configure kubelet.service on the master nodes

[root@k8s-master01 ~]# cat > /lib/systemd/system/kubelet.service <<EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service
​
[Service]
ExecStart=/usr/local/bin/kubelet
​
Restart=always
StartLimitInterval=0
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF
​
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do scp /lib/systemd/system/kubelet.service $NODE:/lib/systemd/system/ ;done

Configure the 10-kubelet.conf drop-in file on the master nodes

[root@k8s-master01 ~]# cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf <<EOF
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--hostname-override=172.31.3.101"
Environment="KUBELET_RINTIME=--container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node=''"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS $KUBELET_RINTIME
EOF
​
[root@k8s-master02 ~]# cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf <<EOF
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--hostname-override=172.31.3.102"
Environment="KUBELET_RINTIME=--container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node=''"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS $KUBELET_RINTIME
EOF
​
[root@k8s-master03 ~]# cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf <<EOF
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--hostname-override=172.31.3.103"
Environment="KUBELET_RINTIME=--container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node=''"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS $KUBELET_RINTIME
EOF
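The three drop-ins above differ only in the --hostname-override value, so rather than editing each master by hand you could derive them from Master01's copy in one loop (a sketch, assuming SSH trust between the masters; adjust the hostname:IP pairs to your environment):

[root@k8s-master01 ~]# for PAIR in k8s-master02:172.31.3.102 k8s-master03:172.31.3.103; do
     NODE=${PAIR%%:*}; IP=${PAIR##*:}
     # rewrite only the hostname-override IP, keep everything else identical
     sed "s/--hostname-override=172.31.3.101/--hostname-override=${IP}/" \
         /etc/systemd/system/kubelet.service.d/10-kubelet.conf > /tmp/10-kubelet.conf.${NODE}
     scp /tmp/10-kubelet.conf.${NODE} ${NODE}:/etc/systemd/system/kubelet.service.d/10-kubelet.conf
 done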

Download the pause image and push it to harbor:

[root@k8s-master01 ~]# nerdctl login harbor.raymonds.cc
Enter Username: admin
Enter Password: 
WARN[0003] skipping verifying HTTPS certs for "harbor.raymonds.cc" 
WARNING: Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
​
Login Succeeded
​
[root@k8s-master01 ~]# cat download_pause_images.sh 
#!/bin/bash
#
#**********************************************************************************************
#Author:        Raymond
#QQ:            88563128
#Date:          2022-01-11
#FileName:      download_pause_images.sh
#URL:           raymond.blog.csdn.net
#Description:   The test script
#Copyright (C): 2022 All rights reserved
#*********************************************************************************************
COLOR="echo -e \033[01;31m"
END='\033[0m'
​
PAUSE_VERSION=3.8
HARBOR_DOMAIN=harbor.raymonds.cc
​
images_download(){
    ${COLOR}"开始下载Pause镜像"${END}
        nerdctl pull registry.aliyuncs.com/google_containers/pause:${PAUSE_VERSION}
        nerdctl tag registry.aliyuncs.com/google_containers/pause:${PAUSE_VERSION} ${HARBOR_DOMAIN}/google_containers/pause:${PAUSE_VERSION}
        nerdctl rmi registry.aliyuncs.com/google_containers/pause:${PAUSE_VERSION}
        nerdctl push ${HARBOR_DOMAIN}/google_containers/pause:${PAUSE_VERSION}
    ${COLOR}"Pause镜像下载完成"${END}
}
​
images_download
​
[root@k8s-master01 ~]# bash download_pause_images.sh  
​
[root@k8s-master01 ~]# nerdctl images
REPOSITORY                                    TAG       IMAGE ID        CREATED           PLATFORM       SIZE         BLOB SIZE
harbor.raymonds.cc/google_containers/pause    3.8       900118502363    22 seconds ago    linux/amd64    696.0 KiB    304.0 KiB
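The pause image is pulled by containerd rather than by kubelet: the CRI plugin's sandbox_image setting decides which image backs every Pod sandbox. Assuming containerd was configured at the default /etc/containerd/config.toml path earlier, you can confirm it references the harbor copy:

[root@k8s-master01 ~]# grep sandbox_image /etc/containerd/config.toml

It should print sandbox_image = "harbor.raymonds.cc/google_containers/pause:3.8"; if it does not, update the setting and restart containerd.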

Create the kubelet configuration file on the master nodes

注意:如果更改了k8s的service网段,需要更改kubelet-conf.yml 的clusterDNS:配置,改成k8s Service网段的第十个地址,比如10.96.0.10

[root@k8s-master01 ~]# cat > /etc/kubernetes/kubelet-conf.yml <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF
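If your Service CIDR differs, a small sketch for deriving the tenth address and patching the file before copying it out (assumes the network base address ends in .0, as 10.96.0.0/12 does, and that clusterDNS holds a single entry; the variables are illustrative):

[root@k8s-master01 ~]# SERVICE_CIDR=10.96.0.0/12
[root@k8s-master01 ~]# BASE=${SERVICE_CIDR%/*}
[root@k8s-master01 ~]# CLUSTER_DNS="${BASE%.*}.$(( ${BASE##*.} + 10 ))"
[root@k8s-master01 ~]# echo ${CLUSTER_DNS}
10.96.0.10
# Rewrite the list entry on the line right after "clusterDNS:"
[root@k8s-master01 ~]# sed -i "/^clusterDNS:/{n;s/.*/- ${CLUSTER_DNS}/}" /etc/kubernetes/kubelet-conf.yml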

[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do scp /etc/kubernetes/kubelet-conf.yml $NODE:/etc/kubernetes/ ;done

Start kubelet on the master nodes

[root@k8s-master01 ~]# systemctl daemon-reload && systemctl enable --now kubelet
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl enable --now kubelet
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl enable --now kubelet
​
[root@k8s-master01 ~]# systemctl status kubelet
● kubelet.service - Kubernetes Kubelet
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
  Drop-In: /etc/systemd/system/kubelet.service.d
           └─10-kubelet.conf
   Active: active (running) since Sun 2022-09-25 17:18:36 CST; 30s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 6441 (kubelet)
    Tasks: 14 (limit: 23474)
   Memory: 34.6M
   CGroup: /system.slice/kubelet.service
           └─6441 /usr/local/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kube>
​
Sep 25 17:18:50 k8s-master01.example.local kubelet[6441]: E0925 17:18:50.215908    6441 kubelet.go:2448] "Error getting node" err="node "172.>
Sep 25 17:18:50 k8s-master01.example.local kubelet[6441]: I0925 17:18:50.317300    6441 kuberuntime_manager.go:1050] "Updating runtime config >
Sep 25 17:18:50 k8s-master01.example.local kubelet[6441]: I0925 17:18:50.318585    6441 kubelet_network.go:60] "Updating Pod CIDR" originalPod>
Sep 25 17:18:50 k8s-master01.example.local kubelet[6441]: E0925 17:18:50.318962    6441 kubelet.go:2373] "Container runtime network not ready">
Sep 25 17:18:50 k8s-master01.example.local kubelet[6441]: I0925 17:18:50.577724    6441 apiserver.go:52] "Watching apiserver"
Sep 25 17:18:50 k8s-master01.example.local kubelet[6441]: I0925 17:18:50.593173    6441 reconciler.go:169] "Reconciler: start to sync state"
Sep 25 17:18:51 k8s-master01.example.local kubelet[6441]: E0925 17:18:51.875100    6441 kubelet.go:2373] "Container runtime network not ready">
Sep 25 17:18:56 k8s-master01.example.local kubelet[6441]: E0925 17:18:56.879725    6441 kubelet.go:2373] "Container runtime network not ready">
Sep 25 17:19:01 k8s-master01.example.local kubelet[6441]: E0925 17:19:01.881566    6441 kubelet.go:2373] "Container runtime network not ready">
Sep 25 17:19:06 k8s-master01.example.local kubelet[6441]: E0925 17:19:06.882411    6441 kubelet.go:2373] "Container runtime network not ready">
​
[root@k8s-master02 ~]# systemctl status kubelet
[root@k8s-master03 ~]# systemctl status kubelet

System logs at this point

# If only messages like the following appear, everything is normal
[root@k8s-master01 ~]# tail -f /var/log/messages #On Ubuntu the command is "tail -f /var/log/syslog"
...
Sep 23 15:11:22 localhost kubelet[16173]: E0923 15:11:22.503340   16173 kubelet.go:2373] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized"

Check the cluster status

[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS     ROLES    AGE   VERSION
172.31.3.101   NotReady   <none>   38s   v1.25.2
172.31.3.102   NotReady   <none>   35s   v1.25.2
172.31.3.103   NotReady   <none>   33s   v1.25.2

7.4.6 kube-proxy Configuration

Note: if this is not a highly available cluster, change 172.31.3.188:6443 to Master01's address

[root@k8s-master01 ~]# kubectl -n kube-system create serviceaccount kube-proxy
#执行结果
serviceaccount/kube-proxy created
​
[root@k8s-master01 ~]# kubectl create clusterrolebinding system:kube-proxy --clusterrole system:node-proxier --serviceaccount kube-system:kube-proxy
#执行结果
clusterrolebinding.rbac.authorization.k8s.io/system:kube-proxy created
​
[root@k8s-master01 ~]# cat > kube-proxy-secret.yml <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: kube-proxy
  namespace: kube-system
  annotations:
    kubernetes.io/service-account.name: "kube-proxy"
type: kubernetes.io/service-account-token
EOF
​
[root@k8s-master01 ~]# kubectl apply -f kube-proxy-secret.yml
# Output
secret/kube-proxy created
​
[root@k8s-master01 ~]# JWT_TOKEN=$(kubectl -n kube-system get secret/kube-proxy \
--output=jsonpath='{.data.token}' | base64 -d)
​
[root@k8s-master01 ~]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://172.31.3.188:6443  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
# Output
Cluster "kubernetes" set.
​
[root@k8s-master01 ~]# kubectl config set-credentials kubernetes --token=${JWT_TOKEN} --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
# Output
User "kubernetes" set.
​
[root@k8s-master01 ~]# kubectl config set-context kubernetes --cluster=kubernetes --user=kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
# Output
Context "kubernetes" created.
​
[root@k8s-master01 ~]# kubectl config use-context kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
# Output
Switched to context "kubernetes".
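Before distributing the kubeconfig, a quick sanity check; the second command should print yes once the system:kube-proxy clusterrolebinding above has taken effect:

[root@k8s-master01 ~]# kubectl config view --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
[root@k8s-master01 ~]# kubectl --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig auth can-i watch endpoints
yes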

On Master01, send kube-proxy's systemd service file to the other nodes

If you changed the cluster's Pod CIDR, update the clusterCIDR: 192.168.0.0/12 parameter in kube-proxy.conf to your Pod network (see the sketch after the file below).

[root@k8s-master01 ~]# cat > /etc/kubernetes/kube-proxy.conf <<EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 192.168.0.0/12
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF
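As noted above, clusterCIDR must match your Pod network. A hypothetical example patching it for a different Pod CIDR before the file is distributed (172.16.0.0/12 is an illustrative value):

[root@k8s-master01 ~]# POD_CIDR=172.16.0.0/12
[root@k8s-master01 ~]# sed -i "s|^clusterCIDR: .*|clusterCIDR: ${POD_CIDR}|" /etc/kubernetes/kube-proxy.conf
[root@k8s-master01 ~]# grep clusterCIDR /etc/kubernetes/kube-proxy.conf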
​
[root@k8s-master01 ~]# cat > /lib/systemd/system/kube-proxy.service <<EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
​
[Service]
ExecStart=/usr/local/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy.conf \
  --v=2

Restart=always
RestartSec=10s
​
[Install]
WantedBy=multi-user.target
EOF
​
[root@k8s-master01 ~]# for NODE in k8s-master02 k8s-master03; do
     scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig
     scp /etc/kubernetes/kube-proxy.conf $NODE:/etc/kubernetes/kube-proxy.conf
     scp /lib/systemd/system/kube-proxy.service $NODE:/lib/systemd/system/kube-proxy.service
 done

Start kube-proxy on the master nodes

[root@k8s-master01 ~]# systemctl daemon-reload && systemctl enable --now kube-proxy
[root@k8s-master02 ~]# systemctl daemon-reload && systemctl enable --now kube-proxy
[root@k8s-master03 ~]# systemctl daemon-reload && systemctl enable --now kube-proxy

[root@k8s-master01 ~]# systemctl status kube-proxy
● kube-proxy.service - Kubernetes Kube Proxy
   Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2022-09-25 17:21:25 CST; 31s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 6633 (kube-proxy)
    Tasks: 6 (limit: 23474)
   Memory: 10.5M
   CGroup: /system.slice/kube-proxy.service
           └─6633 /usr/local/bin/kube-proxy --config=/etc/kubernetes/kube-proxy.conf --v=2
​
Sep 25 17:21:25 k8s-master01.example.local kube-proxy[6633]: I0925 17:21:25.255233    6633 flags.go:64] FLAG: --write-config-to=""
Sep 25 17:21:25 k8s-master01.example.local kube-proxy[6633]: I0925 17:21:25.256063    6633 server.go:442] "Using lenient decoding as strict de>
Sep 25 17:21:25 k8s-master01.example.local kube-proxy[6633]: I0925 17:21:25.256192    6633 feature_gate.go:245] feature gates: &{map[]}
Sep 25 17:21:25 k8s-master01.example.local kube-proxy[6633]: I0925 17:21:25.256265    6633 feature_gate.go:245] feature gates: &{map[]}
Sep 25 17:21:25 k8s-master01.example.local kube-proxy[6633]: I0925 17:21:25.275422    6633 proxier.go:666] "Failed to load kernel module with >
Sep 25 17:21:25 k8s-master01.example.local kube-proxy[6633]: E0925 17:21:25.295761    6633 node.go:152] Failed to retrieve node info: nodes "k>
Sep 25 17:21:26 k8s-master01.example.local kube-proxy[6633]: E0925 17:21:26.318886    6633 node.go:152] Failed to retrieve node info: nodes "k>
Sep 25 17:21:28 k8s-master01.example.local kube-proxy[6633]: E0925 17:21:28.497970    6633 node.go:152] Failed to retrieve node info: nodes "k>
Sep 25 17:21:32 k8s-master01.example.local kube-proxy[6633]: E0925 17:21:32.991308    6633 node.go:152] Failed to retrieve node info: nodes "k>
Sep 25 17:21:41 k8s-master01.example.local kube-proxy[6633]: E0925 17:21:41.810292    6633 node.go:152] Failed to retrieve node info: nodes "k>
​
[root@k8s-master02 ~]# systemctl status kube-proxy
[root@k8s-master03 ~]# systemctl status kube-proxy

[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS     ROLES    AGE     VERSION
172.31.3.101   NotReady   <none>   3m29s   v1.25.2
172.31.3.102   NotReady   <none>   3m26s   v1.25.2
172.31.3.103   NotReady   <none>   3m24s   v1.25.2

8. Deploy the Worker Nodes

8.1 Install the Node Components

Send the components to the worker nodes

[root@k8s-master01 ~]# for NODE in k8s-node01 k8s-node02 k8s-node03; do echo $NODE; scp -o StrictHostKeyChecking=no /usr/local/bin/kube{let,-proxy} $NODE:/usr/local/bin/ ; done

Create the /opt/cni/bin directory on the worker nodes

[root@k8s-node01 ~]# mkdir -p /opt/cni/bin
[root@k8s-node02 ~]# mkdir -p /opt/cni/bin
[root@k8s-node03 ~]# mkdir -p /opt/cni/bin

8.2 Copy the etcd Certificates

Create the etcd certificate directory on the worker nodes

[root@k8s-node01 ~]# mkdir /etc/etcd/ssl -p
[root@k8s-node02 ~]# mkdir /etc/etcd/ssl -p
[root@k8s-node03 ~]# mkdir /etc/etcd/ssl -p

Copy the etcd certificates to the worker nodes

[root@k8s-etcd01 pki]# for NODE in k8s-node01 k8s-node02 k8s-node03; do
     ssh -o StrictHostKeyChecking=no $NODE "mkdir -p /etc/etcd/ssl"
     for FILE in etcd-ca-key.pem  etcd-ca.pem  etcd-key.pem  etcd.pem; do
       scp -o StrictHostKeyChecking=no /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}
     done
 done

Create the etcd certificate directory under /etc/kubernetes/pki on all worker nodes and link the certificates into it

[root@k8s-node01 ~]# mkdir /etc/kubernetes/pki/etcd -p
[root@k8s-node02 ~]# mkdir /etc/kubernetes/pki/etcd -p
[root@k8s-node03 ~]# mkdir /etc/kubernetes/pki/etcd -p
​
[root@k8s-node01 ~]# ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
[root@k8s-node02 ~]# ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
[root@k8s-node03 ~]# ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
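A quick check that the links resolve (ls -lL follows symlinks, so a broken link surfaces as an error):

[root@k8s-node01 ~]# ls -lL /etc/kubernetes/pki/etcd/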

8.3 Copy the Kubernetes Certificates and Configuration Files

Create the Kubernetes-related directories on the worker nodes

[root@k8s-node01 ~]# mkdir -p /etc/kubernetes/pki
[root@k8s-node02 ~]# mkdir -p /etc/kubernetes/pki
[root@k8s-node03 ~]# mkdir -p /etc/kubernetes/pki

Copy the certificates from Master01 to the worker nodes

[root@k8s-master01 ~]# for NODE in k8s-node01 k8s-node02 k8s-node03; do
     for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig; do
       scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}
     done
 done

8.4 Configure kubelet

Create the required directories on the worker nodes

[root@k8s-node01 ~]# mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
​
[root@k8s-node02 ~]# mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
​
[root@k8s-node03 ~]# mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes

Copy the kubelet service file from Master01 to the worker nodes

[root@k8s-master01 ~]# for NODE in k8s-node01 k8s-node02 k8s-node03; do scp /lib/systemd/system/kubelet.service $NODE:/lib/systemd/system/ ;done

Configure the 10-kubelet.conf drop-in file on the worker nodes

[root@k8s-node01 ~]# cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf <<EOF
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--hostname-override=172.31.3.111"
Environment="KUBELET_RINTIME=--container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node=''"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS $KUBELET_RINTIME
EOF
​
[root@k8s-node02 ~]# cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf <<EOF
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--hostname-override=172.31.3.112"
Environment="KUBELET_RINTIME=--container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node=''"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS $KUBELET_RINTIME
EOF
​
[root@k8s-node03 ~]# cat > /etc/systemd/system/kubelet.service.d/10-kubelet.conf <<EOF
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--hostname-override=172.31.3.113"
Environment="KUBELET_RINTIME=--container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node=''"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS $KUBELET_RINTIME
EOF

Copy the kubelet configuration file from Master01 to the worker nodes

[root@k8s-master01 ~]# for NODE in k8s-node01 k8s-node02 k8s-node03; do scp /etc/kubernetes/kubelet-conf.yml $NODE:/etc/kubernetes/ ;done

Start kubelet on the worker nodes

[root@k8s-node01 ~]# systemctl daemon-reload && systemctl enable --now kubelet
[root@k8s-node02 ~]# systemctl daemon-reload && systemctl enable --now kubelet
[root@k8s-node03 ~]# systemctl daemon-reload && systemctl enable --now kubelet

[root@k8s-node01 ~]# systemctl status kubelet
● kubelet.service - Kubernetes Kubelet
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
  Drop-In: /etc/systemd/system/kubelet.service.d
           └─10-kubelet.conf
   Active: active (running) since Sun 2022-09-25 17:26:52 CST; 13s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 5957 (kubelet)
    Tasks: 14 (limit: 23474)
   Memory: 34.9M
   CGroup: /system.slice/kubelet.service
           └─5957 /usr/local/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kube>
​
Sep 25 17:26:53 k8s-node01.example.local kubelet[5957]: I0925 17:26:53.683391    5957 kubelet_network_linux.go:63] "Initialized iptables rules>
Sep 25 17:26:53 k8s-node01.example.local kubelet[5957]: I0925 17:26:53.766025    5957 kubelet_network_linux.go:63] "Initialized iptables rules>
Sep 25 17:26:53 k8s-node01.example.local kubelet[5957]: I0925 17:26:53.766047    5957 status_manager.go:161] "Starting to sync pod status with>
Sep 25 17:26:53 k8s-node01.example.local kubelet[5957]: I0925 17:26:53.766064    5957 kubelet.go:2010] "Starting kubelet main sync loop"
Sep 25 17:26:53 k8s-node01.example.local kubelet[5957]: E0925 17:26:53.766104    5957 kubelet.go:2034] "Skipping pod synchronization" err="PLE>
Sep 25 17:26:54 k8s-node01.example.local kubelet[5957]: I0925 17:26:54.279327    5957 apiserver.go:52] "Watching apiserver"
Sep 25 17:26:54 k8s-node01.example.local kubelet[5957]: I0925 17:26:54.305974    5957 reconciler.go:169] "Reconciler: start to sync state"
Sep 25 17:26:58 k8s-node01.example.local kubelet[5957]: E0925 17:26:58.602184    5957 kubelet.go:2373] "Container runtime network not ready" n>
Sep 25 17:27:03 k8s-node01.example.local kubelet[5957]: I0925 17:27:03.232179    5957 transport.go:135] "Certificate rotation detected, shutti>
Sep 25 17:27:03 k8s-node01.example.local kubelet[5957]: E0925 17:27:03.603901    5957 kubelet.go:2373] "Container runtime network not ready" n>
​
[root@k8s-node02 ~]# systemctl status kubelet
[root@k8s-node03 ~]# systemctl status kubelet

Check the cluster status

[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS     ROLES    AGE    VERSION
172.31.3.101   NotReady   <none>   9m8s   v1.25.2
172.31.3.102   NotReady   <none>   9m5s   v1.25.2
172.31.3.103   NotReady   <none>   9m3s   v1.25.2
172.31.3.111   NotReady   <none>   64s    v1.25.2
172.31.3.112   NotReady   <none>   48s    v1.25.2
172.31.3.113   NotReady   <none>   46s    v1.25.2
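All six nodes report NotReady only because no CNI plugin has been deployed yet. You can also confirm that TLS bootstrapping worked end to end: every kubelet's certificate request should show Approved,Issued in the CONDITION column, courtesy of the node-autoapprove-bootstrap binding created earlier.

[root@k8s-master01 ~]# kubectl get csr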

8.5 Configure kube-proxy

Copy the kube-proxy files from Master01 to the worker nodes

[root@k8s-master01 ~]# for NODE in k8s-node01 k8s-node02 k8s-node03; do
     scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig
     scp /etc/kubernetes/kube-proxy.conf $NODE:/etc/kubernetes/kube-proxy.conf
     scp /lib/systemd/system/kube-proxy.service $NODE:/lib/systemd/system/kube-proxy.service
 done

Start kube-proxy on the worker nodes

[root@k8s-node01 ~]# systemctl daemon-reload && systemctl enable --now kube-proxy
[root@k8s-node02 ~]# systemctl daemon-reload && systemctl enable --now kube-proxy
[root@k8s-node03 ~]# systemctl daemon-reload && systemctl enable --now kube-proxy

[root@k8s-node01 ~]# systemctl status kube-proxy
● kube-proxy.service - Kubernetes Kube Proxy
   Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2022-09-25 17:28:29 CST; 26s ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 6162 (kube-proxy)
    Tasks: 5 (limit: 23474)
   Memory: 10.8M
   CGroup: /system.slice/kube-proxy.service
           └─6162 /usr/local/bin/kube-proxy --config=/etc/kubernetes/kube-proxy.conf --v=2
​
Sep 25 17:28:29 k8s-node01.example.local kube-proxy[6162]: I0925 17:28:29.391982    6162 flags.go:64] FLAG: --write-config-to=""
Sep 25 17:28:29 k8s-node01.example.local kube-proxy[6162]: I0925 17:28:29.393147    6162 server.go:442] "Using lenient decoding as strict deco>
Sep 25 17:28:29 k8s-node01.example.local kube-proxy[6162]: I0925 17:28:29.393298    6162 feature_gate.go:245] feature gates: &{map[]}
Sep 25 17:28:29 k8s-node01.example.local kube-proxy[6162]: I0925 17:28:29.393365    6162 feature_gate.go:245] feature gates: &{map[]}
Sep 25 17:28:29 k8s-node01.example.local kube-proxy[6162]: I0925 17:28:29.407927    6162 proxier.go:666] "Failed to load kernel module with mo>
Sep 25 17:28:29 k8s-node01.example.local kube-proxy[6162]: E0925 17:28:29.427017    6162 node.go:152] Failed to retrieve node info: nodes "k8s>
Sep 25 17:28:30 k8s-node01.example.local kube-proxy[6162]: E0925 17:28:30.464338    6162 node.go:152] Failed to retrieve node info: nodes "k8s>
Sep 25 17:28:32 k8s-node01.example.local kube-proxy[6162]: E0925 17:28:32.508065    6162 node.go:152] Failed to retrieve node info: nodes "k8s>
Sep 25 17:28:36 k8s-node01.example.local kube-proxy[6162]: E0925 17:28:36.760898    6162 node.go:152] Failed to retrieve node info: nodes "k8s>
Sep 25 17:28:45 k8s-node01.example.local kube-proxy[6162]: E0925 17:28:45.067601    6162 node.go:152] Failed to retrieve node info: nodes "k8s>
​
[root@k8s-node02 ~]# systemctl status kube-proxy
[root@k8s-node03 ~]# systemctl status kube-proxy

Check the haproxy status

kubeapi.raymonds.cc:9999/haproxy-sta…

021.jpg