基于 kubeadm 在 ubuntu 上部署 kubernetes 控制平面

48 阅读 · 约 5 分钟

禁用交换分区

详见官方文档:https://kubernetes.io/zh-cn/docs/setup/production-environment/tools/kubeadm/install-kubeadm/

# 立即关闭当前交换分区(仅本次生效,重启后失效)
sudo swapoff -a
# 删除 /etc/fstab 中的 swap 条目,使禁用永久生效
# 注意:该命令会删除所有包含 "swap" 字样的行,执行前建议先备份 /etc/fstab
sudo sed -i '/swap/d' /etc/fstab
# 或者手动编辑 /etc/fstab,在 /swap.img 那一行前面加 # 注释掉

禁用前显示

root@server-01:~# free -h
               total        used        free      shared  buff/cache   available
Mem:           3.8Gi       484Mi       3.3Gi       1.5Mi       221Mi       3.3Gi
Swap:          3.8Gi          0B       3.8Gi

禁用后显示

root@server-01:~# free -h
               total        used        free      shared  buff/cache   available
Mem:           3.8Gi       478Mi       3.3Gi       1.5Mi       221Mi       3.3Gi
Swap:             0B          0B          0B

重启虚拟机后再次执行 free -h,输出与禁用后相同,说明禁用已永久生效

安装容器运行时

启用 IPv4 数据包转发

详见官方文档:https://kubernetes.io/zh-cn/docs/setup/production-environment/container-runtimes/#prerequisite-ipv4-forwarding-optional

root@server-01:~# sysctl net.ipv4.ip_forward
net.ipv4.ip_forward = 0
root@server-01:~# cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
EOF
net.ipv4.ip_forward = 1
root@server-01:~# sudo sysctl --system
* Applying /usr/lib/sysctl.d/10-apparmor.conf ...
* Applying /etc/sysctl.d/10-bufferbloat.conf ...
* Applying /etc/sysctl.d/10-console-messages.conf ...
* Applying /etc/sysctl.d/10-ipv6-privacy.conf ...
* Applying /etc/sysctl.d/10-kernel-hardening.conf ...
* Applying /etc/sysctl.d/10-magic-sysrq.conf ...
* Applying /etc/sysctl.d/10-map-count.conf ...
* Applying /etc/sysctl.d/10-network-security.conf ...
* Applying /etc/sysctl.d/10-ptrace.conf ...
* Applying /etc/sysctl.d/10-zeropage.conf ...
* Applying /usr/lib/sysctl.d/50-pid-max.conf ...
* Applying /usr/lib/sysctl.d/99-protect-links.conf ...
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
* Applying /etc/sysctl.conf ...
kernel.apparmor_restrict_unprivileged_userns = 1
net.core.default_qdisc = fq_codel
kernel.printk = 4 4 1 7
net.ipv6.conf.all.use_tempaddr = 2
net.ipv6.conf.default.use_tempaddr = 2
kernel.kptr_restrict = 1
kernel.sysrq = 176
vm.max_map_count = 1048576
net.ipv4.conf.default.rp_filter = 2
net.ipv4.conf.all.rp_filter = 2
kernel.yama.ptrace_scope = 1
vm.mmap_min_addr = 65536
kernel.pid_max = 4194304
fs.protected_fifos = 1
fs.protected_hardlinks = 1
fs.protected_regular = 2
fs.protected_symlinks = 1
net.ipv4.ip_forward = 1
root@server-01:~# sysctl net.ipv4.ip_forward
net.ipv4.ip_forward = 1

cgroup 驱动

详见官方文档:https://kubernetes.io/zh-cn/docs/setup/production-environment/container-runtimes/#cgroup-drivers
了解即可,此处暂不需要操作:自 1.22 版本起,kubeadm 默认将 kubelet 的 cgroup 驱动设置为 systemd(containerd 侧对应的 SystemdCgroup 配置在后文的 "containerd 配置" 一节完成)

containerd 安装

资源链接

详见官方文档:https://kubernetes.io/zh-cn/docs/setup/production-environment/container-runtimes/#containerd
安装步骤:https://github.com/containerd/containerd/blob/main/docs/getting-started.md
containerd 下载地址:https://github.com/containerd/containerd/releases
containerd.service 文件下载地址:https://raw.githubusercontent.com/containerd/containerd/main/containerd.service

下载 containerd

root@server-01:~# wget https://github.com/containerd/containerd/releases/download/v2.1.4/containerd-2.1.4-linux-amd64.tar.gz
root@server-01:~# tar Cxzvf /usr/local containerd-2.1.4-linux-amd64.tar.gz
bin/
bin/containerd-stress
bin/ctr
bin/containerd
bin/containerd-shim-runc-v2

下载 containerd.service systemd 服务配置文件

root@server-01:~# wget https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
root@server-01:~# cp containerd.service /etc/systemd/system/

启动 containerd

root@server-01:/etc/systemd/system# systemctl daemon-reload
root@server-01:/etc/systemd/system# systemctl enable --now containerd
Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /etc/systemd/system/containerd.service.

查看状态,active 正常运行

root@server-01:/etc/systemd/system# systemctl status containerd
● containerd.service - containerd container runtime
     Loaded: loaded (/etc/systemd/system/containerd.service; enabled; preset: enabled)
     Active: active (running) since Wed 2025-10-29 08:50:12 UTC; 45s ago
       Docs: https://containerd.io
    Process: 4659 ExecStartPre=/sbin/modprobe overlay (code=exited, status=0/SUCCESS)
   Main PID: 4661 (containerd)
      Tasks: 7
     Memory: 12.6M (peak: 14.1M)
        CPU: 96ms
     CGroup: /system.slice/containerd.service
             └─4661 /usr/local/bin/containerd

Oct 29 08:50:12 server-01 containerd[4661]: time="2025-10-29T08:50:12.285627330Z" level=info msg="Start recovering state"
Oct 29 08:50:12 server-01 containerd[4661]: time="2025-10-29T08:50:12.285694689Z" level=info msg="Start event monitor"
Oct 29 08:50:12 server-01 containerd[4661]: time="2025-10-29T08:50:12.285704058Z" level=info msg="Start cni network conf syncer for default"
Oct 29 08:50:12 server-01 containerd[4661]: time="2025-10-29T08:50:12.285708425Z" level=info msg="Start streaming server"
Oct 29 08:50:12 server-01 containerd[4661]: time="2025-10-29T08:50:12.285714716Z" level=info msg="Registered namespace \"k8s.io\" with NRI"
Oct 29 08:50:12 server-01 containerd[4661]: time="2025-10-29T08:50:12.285719850Z" level=info msg="runtime interface starting up..."
Oct 29 08:50:12 server-01 containerd[4661]: time="2025-10-29T08:50:12.285723839Z" level=info msg="starting plugins..."
Oct 29 08:50:12 server-01 containerd[4661]: time="2025-10-29T08:50:12.285733173Z" level=info msg="Synchronizing NRI (plugin) with current runtime s>
Oct 29 08:50:12 server-01 systemd[1]: Started containerd.service - containerd container runtime.
Oct 29 08:50:12 server-01 containerd[4661]: time="2025-10-29T08:50:12.288474506Z" level=info msg="containerd successfully booted in 0.021746s"

安装 runc

下载地址

https://github.com/opencontainers/runc/releases

安装

下载文件

root@server-01:~# wget https://github.com/opencontainers/runc/releases/download/v1.3.2/runc.amd64

安装

install -m 755 runc.amd64 /usr/local/sbin/runc

看下版本验证正确安装

root@server-01:~# runc --version
runc version 1.3.2
commit: v1.3.2-0-gaeabe4e7
spec: 1.2.1
go: go1.23.12
libseccomp: 2.5.6

containerd 配置

创建默认配置,详见官方文档:https://kubernetes.io/zh-cn/docs/setup/production-environment/container-runtimes/#containerd-systemd

root@server-01:/etc# mkdir -p /etc/containerd
root@server-01:/etc# containerd config default > /etc/containerd/config.toml

编辑 /etc/containerd/config.toml 文件,将 runc 的 options 中的 SystemdCgroup 设置为 true(containerd 2.x 生成的默认配置中该项为 false,修改即可;若不存在则新增)

  [plugins.'io.containerd.cri.v1.runtime'.containerd.runtimes.runc.options]
    BinaryName = ''
    CriuImagePath = ''
    CriuWorkPath = ''
    IoGid = 0
    IoUid = 0
    NoNewKeyring = false
    Root = ''
    ShimCgroup = ''
    SystemdCgroup = true # 由默认的 false 改为 true

重启 containerd

root@server-01:/etc/containerd# sudo systemctl restart containerd

containerd 镜像加速

/etc/containerd/config.toml 新增镜像目录配置 config_path = '/etc/containerd/certs.d'

[plugins.'io.containerd.cri.v1.images'.registry]
  config_path = '/etc/containerd/certs.d'

配置 k8s 容器镜像源

root@server-01:/etc/containerd# mkdir -p /etc/containerd/certs.d/registry.k8s.io
root@server-01:/etc/containerd# cat > /etc/containerd/certs.d/registry.k8s.io/hosts.toml << EOF
server = "https://registry.k8s.io"
[host."https://k8s.m.daocloud.io"]
  capabilities = ["pull", "resolve"]
EOF

重启 containerd

root@server-01:/etc/containerd# sudo systemctl restart containerd

安装 kubeadm、kubelet、kubectl

官方文档

https://kubernetes.io/zh-cn/docs/setup/production-environment/tools/kubeadm/install-kubeadm/

安装步骤

更新软件包索引列表

sudo apt-get update

安装一些前置需要的包

root@server-01:~# sudo apt-get install -y apt-transport-https ca-certificates curl gpg

下载 Kubernetes 的 GPG 公钥,转换为二进制格式,并保存到系统密钥目录(若 /etc/apt/keyrings 目录不存在,需先执行 sudo mkdir -p -m 755 /etc/apt/keyrings 创建)

root@server-01:~# curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.34/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg

添加 Kubernetes apt 仓库

root@server-01:~# echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.34/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list

更新软件包索引列表

sudo apt-get update

安装 kubelet、kubeadm、kubectl

root@server-01:~# sudo apt-get install -y kubelet kubeadm kubectl

锁定版本

root@server-01:~# sudo apt-mark hold kubelet kubeadm kubectl
kubelet set on hold.
kubeadm set on hold.
kubectl set on hold.

启用 kubelet 服务

root@server-01:~# sudo systemctl enable --now kubelet

提前拉好镜像

kubeadm config images pull

初始化控制平面

root@server-01:/etc/containerd/certs.d/registry.k8s.io# kubeadm init --pod-network-cidr=10.244.0.0/16

安装成功打印

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.174.128:6443 --token gdoze2.zmlqnn1bsdinfrnm \
        --discovery-token-ca-cert-hash sha256:35fd160abe3f7e36db31657d1bc1916b6bf322fb133b1749c7bad4394cdc133b