k8s 1.26 集群部署:容器运行时为 containerd

阅读约 3 分钟

k8s1.26集群部署容器运行时为containerd

参考"kubernetes 1.24版本集群部署"

使用CentOS7u9操作系统,然后准备如下配置的三个节点

| ip | CPU | 内存 | 硬盘 | 角色 | 主机名 |
| --- | --- | --- | --- | --- | --- |
| 192.168.91.220 | 2C | 2G | 40GB | master | master01 |
| 192.168.91.221 | 2C | 2G | 40GB | worker(node) | worker01 |
| 192.168.91.222 | 2C | 2G | 40GB | worker(node) | worker02 |

在上面准备的所有节点中操作

# Configure /etc/hosts name resolution for all three cluster nodes.
# NOTE(review): appending is not idempotent -- re-running this duplicates the entries.
cat >> /etc/hosts << EOF
192.168.91.220  master01
192.168.91.221  worker01
192.168.91.222  worker02
EOF

# Time synchronization: a minimal CentOS install needs the ntpdate package.
yum -y install ntpdate
# Hourly sync against the Aliyun NTP server. Guard with grep so re-running
# this setup does not append duplicate cron entries.
grep -q 'ntpdate time1.aliyun.com' /var/spool/cron/root 2>/dev/null \
  || echo "0 */1 * * * ntpdate time1.aliyun.com" >> /var/spool/cron/root

# Stop and disable firewalld so it cannot block node-to-node / pod traffic.
systemctl disable firewalld && systemctl stop firewalld

# Disable SELinux: permanently in the config file, and immediately at runtime.
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# setenforce returns non-zero when SELinux is already disabled; don't let
# that abort a strict (set -e) or &&-chained run.
setenforce 0 || true

# Upgrade the OS kernel: CentOS 7 ships a 3.10 kernel; elrepo's kernel-ml
# provides a recent mainline kernel better suited to containers.
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum -y install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
yum --enablerepo="elrepo-kernel" -y install kernel-ml.x86_64
# Boot the newly installed kernel (it becomes GRUB menu entry 0).
grub2-set-default 0
grub2-mkconfig -o /boot/grub2/grub.cfg

# Reboot so the new kernel takes effect.
reboot

# Enable IP forwarding and bridged-traffic filtering required by Kubernetes.
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF
# br_netfilter must be loaded before the bridge sysctls can be applied.
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf

# Install ipset and ipvsadm, which kube-proxy needs for IPVS mode.
yum install -y ipset ipvsadm

# Configure the IPVS kernel modules to load via a boot-time script.
cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
# Make the script executable and load the modules immediately.
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules

# Disable the SWAP partition (kubelet refuses to start with swap enabled):
# comment out the swap entry in fstab, then turn swap off immediately.
sed -i 's&/dev/mapper/centos-swap&#/dev/mapper/centos-swap&' /etc/fstab
swapoff -a

Containerd准备

# Ensure the kernel modules containerd relies on load at every boot.
cat > /etc/modules-load.d/containerd.conf << EOF
overlay
br_netfilter
EOF
systemctl enable --now systemd-modules-load.service

# Download containerd: the cri-containerd-cni bundle ships containerd,
# runc and the CNI plugins, unpacked directly onto the root filesystem.
wget https://github.com/containerd/containerd/releases/download/v1.7.7/cri-containerd-cni-1.7.7-linux-amd64.tar.gz
tar xf cri-containerd-cni-1.7.7-linux-amd64.tar.gz -C /

# Generate the default config, then:
#  - switch the sandbox (pause) image to the Aliyun mirror
#  - enable the systemd cgroup driver (must match the kubelet setting)
mkdir -p /etc/containerd   # -p: don't fail if the directory already exists
containerd config default > /etc/containerd/config.toml
sed -i -e 's#registry.k8s.io/pause:3.8#registry.aliyuncs.com/google_containers/pause:3.9#' -e 's#SystemdCgroup = false#SystemdCgroup = true#' /etc/containerd/config.toml
systemctl enable --now containerd
containerd --version

runc准备

# Build and install libseccomp (runc v1.1.8 links against libseccomp;
# the source tarball is published alongside the runc release).
wget https://github.com/opencontainers/runc/releases/download/v1.1.8/libseccomp-2.5.4.tar.gz
tar xf libseccomp-2.5.4.tar.gz
cd libseccomp-2.5.4/
yum -y install gperf make gcc-c++
./configure
make && make install
# Confirm where the library landed (typically /usr/local/lib).
find / -name "libseccomp.so"
cd ..

# Install runc itself.
wget https://github.com/opencontainers/runc/releases/download/v1.1.8/runc.amd64
chmod +x runc.amd64
# Replace the runc that was installed as part of the containerd bundle.
mv runc.amd64 /usr/local/sbin/runc
# If running runc prints "error while loading shared libraries:
# libseccomp.so.2: cannot open shared object file: No such file or
# directory", runc cannot find libseccomp -- re-check the install above.
runc

K8S集群软件安装

# Kubernetes YUM repository (Aliyun mirror).
cat > /etc/yum.repos.d/kubernetes.repo << EOF 
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Install the pinned 1.26.9 versions (--setopt=obsoletes=0 keeps yum from
# pulling in newer packages that obsolete these).
yum -y install --setopt=obsoletes=0 kubeadm-1.26.9-0  kubelet-1.26.9-0 kubectl-1.26.9-0

# Make kubelet use the systemd cgroup driver, matching containerd's config.
sed -ri 's/KUBELET_EXTRA_ARGS=/KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"/' /etc/sysconfig/kubelet
systemctl enable kubelet

# Cluster image preparation: pre-pull the control-plane images from the
# Aliyun mirror, then re-tag them with the upstream registry.k8s.io names
# that kubeadm expects. The heredoc delimiter is quoted ("EOF") so the
# $(...) substitutions stay literal inside the generated script.
cat > image_download.sh << "EOF"
#!/bin/bash
images_list=$(kubeadm config images list --kubernetes-version=v1.26.9)
for image in $images_list
do
    # Registry prefix of the upstream image name (e.g. registry.k8s.io).
    replace_uri=$(echo "$image" | awk -F'/' '{print $1}')
    # Map to the Aliyun mirror; coredns loses its extra path component there.
    image_aliyun=$(echo "$image" | sed -e "s#$replace_uri#registry.aliyuncs.com/google_containers#" -e 's#coredns/##')
    ctr image pull "$image_aliyun"
    ctr image tag "$image_aliyun" "$image"
    ctr image delete "$image_aliyun"
done
EOF
# The script declares #!/bin/bash, so run it with bash (not sh).
bash image_download.sh
# On master01 only:
# Initialize the control plane. The pod CIDR (10.244.0.0/16) must match
# the calico custom-resources configuration applied later.
kubeadm init --kubernetes-version=v1.26.9 --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.91.220 --image-repository=registry.aliyuncs.com/google_containers --cri-socket unix:///var/run/containerd/containerd.sock
# Sample output on success (commented so it cannot execute; the token and
# cert hash are cluster-specific):
# ...
# Your Kubernetes control-plane has initialized successfully!
# ...
# kubeadm join 192.168.91.220:6443 --token nyfkna.784vfu3s9yqm3v9e \
#         --discovery-token-ca-cert-hash sha256:30d570aae419a3ec0d2dea2081ff1c06e426448d594af89f56e9ad224b9a2522

# Set up kubectl credentials for the current user.
mkdir -p "$HOME/.kube"
cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
chown "$(id -u):$(id -g)" "$HOME/.kube/config"
export KUBECONFIG=/etc/kubernetes/admin.conf

# Install the calico network plugin (operator-based installation).
wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.3/manifests/tigera-operator.yaml
kubectl create -f tigera-operator.yaml
# Wait until the operator pod reaches Running.
watch kubectl get pods -n tigera-operator
wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.3/manifests/custom-resources.yaml
# Rewrite calico's default pod CIDR (192.168.x) to match the
# --pod-network-cidr passed to kubeadm init (10.244.0.0/16).
sed -i 's/192.168/10.244/' custom-resources.yaml
kubectl create -f custom-resources.yaml
# Wait until all calico-system pods reach Running.
watch kubectl get pods -n calico-system

# On worker01 and worker02:
# Join the worker nodes to the cluster using the join command printed by
# kubeadm init (the token and cert hash below are cluster-specific).
kubeadm join 192.168.91.220:6443 --token nyfkna.784vfu3s9yqm3v9e \
        --discovery-token-ca-cert-hash sha256:30d570aae419a3ec0d2dea2081ff1c06e426448d594af89f56e9ad224b9a2522 --cri-socket unix:///var/run/containerd/containerd.sock

# On master01:
# Verify the cluster is usable.
kubectl get nodes
# Expected output once both workers have joined and calico is up
# (commented so the sample output cannot execute as commands):
# NAME       STATUS   ROLES           AGE   VERSION
# master01   Ready    control-plane   14h   v1.26.9
# worker01   Ready    <none>          14h   v1.26.9
# worker02   Ready    <none>          14h   v1.26.9

kubectl get cs
# Expected output (ComponentStatus is deprecated but still informative):
# Warning: v1 ComponentStatus is deprecated in v1.19+
# NAME                 STATUS    MESSAGE   ERROR
# etcd-0               Healthy
# scheduler            Healthy   ok
# controller-manager   Healthy   ok

# All pods should be in the Running state.
kubectl get pods -A -o wide