Rapid deployment of a Kubernetes 1.27 cluster with kubeadm
See also: "Kubernetes 1.24 cluster deployment".
Use the CentOS 7u9 operating system and prepare three nodes with the following configuration:
IP | CPU | Memory | Disk | Role | Hostname
---|---|---|---|---|---
192.168.91.220 | 2C | 2G | 40GB | master | master01
192.168.91.221 | 2C | 2G | 40GB | worker (node) | worker01
192.168.91.222 | 2C | 2G | 40GB | worker (node) | worker02
Run the following on all of the nodes prepared above.
# Configure /etc/hosts
cat >> /etc/hosts << EOF
192.168.91.220 master01
192.168.91.221 worker01
192.168.91.222 worker02
EOF
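This guide assumes each node's hostname has already been set to match the table (e.g. with hostnamectl); optionally, a quick check that every name resolves:
# Optional: set the hostname first if not done yet, e.g. on the master
# hostnamectl set-hostname master01
for h in master01 worker01 worker02; do ping -c 1 $h; done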
# Time synchronization; a minimal OS install does not ship ntpdate, so install it first
yum -y install ntpdate
echo "0 */1 * * * ntpdate time1.aliyun.com" >> /var/spool/cron/root
# Disable the firewall
systemctl disable firewalld && systemctl stop firewalld
# Disable SELinux
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0
# Upgrade the OS kernel
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum -y install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
yum --enablerepo="elrepo-kernel" -y install kernel-ml.x86_64
grub2-set-default 0
grub2-mkconfig -o /boot/grub2/grub.cfg
# Reboot to load the new kernel
reboot
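After the reboot, confirm the new kernel is active (the exact kernel-ml version depends on what elrepo ships at install time):
uname -r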
# Enable IP forwarding and bridge netfilter
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf
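modprobe alone does not survive a reboot; a minimal persistence sketch using systemd's modules-load mechanism (the file name is an arbitrary choice):
# Load br_netfilter automatically on every boot
cat > /etc/modules-load.d/br_netfilter.conf << EOF
br_netfilter
EOF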
# Install ipset and ipvsadm
yum -y install ipset ipvsadm
# Configure module loading for IPVS; add the modules that need to be loaded
cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
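A quick check that the IPVS modules are actually loaded:
lsmod | grep -e ip_vs -e nf_conntrack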
# Disable the swap partition
sed -i 's|/dev/mapper/centos-swap|#/dev/mapper/centos-swap|' /etc/fstab
swapoff -a
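Verify that swap is fully off (the Swap line should read all zeros):
free -m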
# Install Docker
wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum -y install --setopt=obsoletes=0 docker-ce-24.0.6-1.el7
cat << EOF > /etc/docker/daemon.json
{
  "registry-mirrors": ["https://zwyx2n3v.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl enable --now docker
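Confirm Docker is up and picked up the systemd cgroup driver configured above:
docker info | grep -i "cgroup driver"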
# Install cri-dockerd
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.7/cri-dockerd-0.3.7.20231027185657.170103f2-0.el7.x86_64.rpm
yum -y install cri-dockerd-0.3.7.20231027185657.170103f2-0.el7.x86_64.rpm
sed -i 's#ExecStart=/usr/bin/cri-dockerd#ExecStart=/usr/bin/cri-dockerd --pod-infra-container-image=registry.k8s.io/pause:3.9#' /usr/lib/systemd/system/cri-docker.service
systemctl enable --now cri-docker
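Check that the service is active and that the CRI socket exists, since kubeadm will talk to it later:
systemctl is-active cri-docker
ls -l /var/run/cri-dockerd.sock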
# Add the Aliyun YUM repository for Kubernetes
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Install the pinned versions
yum -y install --setopt=obsoletes=0 kubeadm-1.27.6-0 kubelet-1.27.6-0 kubectl-1.27.6-0
# Configure kubelet to use the systemd cgroup driver
sed -ri 's/KUBELET_EXTRA_ARGS=/KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"/' /etc/sysconfig/kubelet
systemctl enable kubelet
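Sanity-check the installed versions before initializing:
kubeadm version -o short
kubelet --version
kubectl version --client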
# Pre-pull the cluster images: pull from the Aliyun mirror, then retag to the upstream names
cat > image_download.sh << "EOF"
#!/bin/bash
# List the images kubeadm needs for this Kubernetes version
images_list=$(kubeadm config images list --kubernetes-version=v1.27.6)
for image in $images_list
do
    # Swap the upstream registry prefix for the Aliyun mirror
    # (coredns lives at the top level there, so drop its extra path segment)
    replace_uri=$(echo $image | awk -F'/' '{print $1}')
    image_aliyun=$(echo $image | sed -e "s#$replace_uri#registry.aliyuncs.com/google_containers#" -e 's#coredns/##')
    docker pull $image_aliyun
    # Retag to the name kubeadm expects, then drop the mirror tag
    docker tag $image_aliyun $image
    docker rmi $image_aliyun
done
EOF
sh image_download.sh
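The retagged images should now carry their upstream registry.k8s.io names:
docker images | grep registry.k8s.io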
# On master01
# Initialize the cluster
kubeadm init --kubernetes-version=v1.27.6 --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.91.220 --cri-socket unix:///var/run/cri-dockerd.sock
...
Your Kubernetes control-plane has initialized successfully!
...
kubeadm join 192.168.91.220:6443 --token rfuudv.yacu4vc9tivodk0p \
--discovery-token-ca-cert-hash sha256:98b1401678f05975214f2c8174c95c2c24b80e95de313e28a9339b27beca703f
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
# Alternatively, as root (not persistent across shells):
export KUBECONFIG=/etc/kubernetes/admin.conf
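kubectl works at this point, but the control-plane node will report NotReady until a network plugin is installed:
kubectl get nodes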
# Install the Calico network plugin
wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.3/manifests/tigera-operator.yaml
kubectl create -f tigera-operator.yaml
watch kubectl get pods -n tigera-operator
wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.3/manifests/custom-resources.yaml
# Align Calico's default pod CIDR (192.168.0.0/16) with the --pod-network-cidr used above
sed -i 's/192.168/10.244/' custom-resources.yaml
kubectl create -f custom-resources.yaml
watch kubectl get pods -n calico-system
# On worker01 and worker02
# Join the worker nodes to the cluster
kubeadm join 192.168.91.220:6443 --token rfuudv.yacu4vc9tivodk0p \
--discovery-token-ca-cert-hash sha256:98b1401678f05975214f2c8174c95c2c24b80e95de313e28a9339b27beca703f --cri-socket unix:///var/run/cri-dockerd.sock
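The bootstrap token above expires after 24 hours by default; if it has expired, generate a fresh join command on master01 (remember to append the --cri-socket flag to its output):
kubeadm token create --print-join-command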
# On master01
# Verify cluster availability
kubectl get nodes
NAME STATUS ROLES AGE VERSION
master01 Ready control-plane 58m v1.27.6
worker01 Ready <none> 26m v1.27.6
worker02 Ready <none> 26m v1.27.6
kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy
# All pods should be in the Running state
kubectl get pods -A -o wide
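As a final smoke test, a throwaway deployment can confirm scheduling and networking end to end (nginx-test and the image tag are arbitrary choices for this sketch):
# Deploy a test workload and expose it via NodePort
kubectl create deployment nginx-test --image=nginx:1.25
kubectl expose deployment nginx-test --port=80 --type=NodePort
kubectl get pods,svc -l app=nginx-test -o wide
# curl any node IP on the NodePort shown above, then clean up:
# curl http://192.168.91.221:<NodePort>
kubectl delete svc,deployment nginx-test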