Setting up k8s with IPVS (deploying a k8s cluster on Rocky 9)


Three Rocky Linux 9 machines:

node1   172.16.21.135   master
node2   172.16.21.136   worker
node3   172.16.21.137   worker
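The workers register with the cluster under their hostnames (the master's name comes from the kubeadm config later), so make sure each machine's hostname matches the table above. A minimal sketch, run once per machine:

# Set the hostname to match the table (run the matching line on each node)
hostnamectl set-hostname node1   # on 172.16.21.135
hostnamectl set-hostname node2   # on 172.16.21.136
hostnamectl set-hostname node3   # on 172.16.21.137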

Install containerd (all three nodes)

sudo yum install -y yum-utils
sudo yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo
sudo yum install containerd.io -y
sudo systemctl enable --now containerd
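An optional sanity check that containerd is enabled and answering requests:

# Confirm the service is active and the daemon responds
systemctl is-active containerd
ctr version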

Host initialization (apply on all three hosts)

# cluster hostname resolution
echo "172.16.21.135 node1" >> /etc/hosts 
echo "172.16.21.136 node2" >> /etc/hosts 
echo "172.16.21.137 node3" >> /etc/hosts
# disable swap
sed -i 's/^\(.*swap.*\)$/#\1/g' /etc/fstab 
swapoff -a
# install common tools
yum update -y && yum -y install wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git network-scripts tar curl
# disable the firewall
systemctl disable --now firewalld
# disable SELinux
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
# kernel modules needed for container networking
cat << EOF > /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
# packet forwarding and bridge netfilter settings
tee /etc/sysctl.d/kubernetes.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
# install IPVS tooling and load the required modules at boot
yum install ipvsadm ipset sysstat conntrack libseccomp -y
cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
systemctl restart systemd-modules-load.service

Verify the modules are loaded:

[root@node1 ~]# lsmod | grep -e ip_vs -e nf_conntrack
ip_vs_sh               16384  0
ip_vs_wrr              16384  0
ip_vs_rr               16384  0
ip_vs                 188416  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          176128  3 nf_nat,nft_ct,ip_vs
nf_defrag_ipv6         24576  2 nf_conntrack,ip_vs
nf_defrag_ipv4         16384  1 nf_conntrack
libcrc32c              16384  5 nf_conntrack,nf_nat,nf_tables,xfs,ip_vs
# kernel parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384

net.ipv6.conf.all.disable_ipv6 = 0
net.ipv6.conf.default.disable_ipv6 = 0
net.ipv6.conf.lo.disable_ipv6 = 0
net.ipv6.conf.all.forwarding = 1
EOF
sysctl --system
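A quick spot check that the values kube-proxy and the CNI depend on took effect:

# Each key should print the value set above
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables net.netfilter.nf_conntrack_max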

Generate containerd's default config file (all three hosts)

mkdir -p /etc/containerd
containerd config default | tee /etc/containerd/config.toml
sed -i 's#registry.k8s.io/pause:3.6#registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9#g' /etc/containerd/config.toml
sed -i 's#SystemdCgroup = false#SystemdCgroup = true#g'  /etc/containerd/config.toml
# restart containerd so it picks up the new config
systemctl restart containerd
cat << EOF > /etc/crictl.yaml 
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
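With the endpoint set, crictl should be able to talk to containerd; you can also confirm the SystemdCgroup edit landed:

# crictl should return runtime status instead of a connection error
crictl info | head
# The merged containerd config should now show SystemdCgroup = true
containerd config dump | grep SystemdCgroup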

Install kubelet, kubeadm, and kubectl (all three nodes)

# For servers outside mainland China (official upstream repo)
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF

# Set SELinux to permissive mode (effectively disabling it)
sudo setenforce 0
sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

sudo systemctl enable --now kubelet
# For servers in mainland China (Aliyun mirror)
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
setenforce 0
yum install -y kubelet kubeadm kubectl
systemctl enable kubelet && systemctl start kubelet
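Whichever repo you used, confirm all three nodes report the same versions before continuing:

# Versions should match across node1, node2, and node3
kubeadm version -o short
kubelet --version
kubectl version --client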

Generate the kubeadm config file (master)

[root@node1 ~]# kubeadm config print init-defaults > kube-config.yaml

Then edit kube-config.yaml. Compared with the printed defaults, the file below sets advertiseAddress to the master's IP, switches imageRepository to the Aliyun mirror, adds podSubnet, and appends a KubeProxyConfiguration section with mode: ipvs, which is what actually enables IPVS:
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.16.21.135
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: node # the master registers under this name (it shows up as "node" in kubectl get nodes)
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.27.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16 # Pod network CIDR; must match the CNI (flannel's default is 10.244.0.0/16)
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
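Optionally pre-pull the control-plane images from the Aliyun mirror before running init; it makes the init step faster and surfaces registry problems early:

# Optional: pull all images referenced by the config (on the master)
kubeadm config images pull --config kube-config.yaml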

Initialize the cluster with kubeadm (master)

kubeadm init --config kube-config.yaml

This produces output like the following:

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.16.21.135:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:aee8e43a0007d317bcf68583050418dd7744bbcdf12fd60ddcfc19894cdb3328 

Run the commands from the output above (on the master):

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Run the join command on both worker nodes:

kubeadm join 172.16.21.135:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:aee8e43a0007d317bcf68583050418dd7744bbcdf12fd60ddcfc19894cdb3328
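The token in this command expires after 24 hours (the ttl set in the config). If you join a node later, regenerate the full join command on the master:

# Prints a fresh "kubeadm join ..." command with a new token
kubeadm token create --print-join-command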

Check the node status; all nodes are NotReady because no CNI network plugin has been installed yet:

[root@node1 ~]# kubectl get nodes
NAME    STATUS     ROLES           AGE     VERSION
node    NotReady   control-plane   6m6s    v1.27.1
node2   NotReady   <none>          2m54s   v1.27.1
node3   NotReady   <none>          119s    v1.27.1

Install the Flannel network add-on

kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
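Give the Flannel pods a moment to come up before re-checking the nodes. Recent flannel manifests deploy into a kube-flannel namespace (the namespace name may differ in older releases):

# All pods should reach Running before the nodes turn Ready
kubectl -n kube-flannel get pods -o wide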

Check the cluster status again:

[root@node1 ~]# kubectl get nodes
NAME    STATUS   ROLES           AGE     VERSION
node    Ready    control-plane   8m25s   v1.27.1
node2   Ready    <none>          5m13s   v1.27.1
node3   Ready    <none>          4m18s   v1.27.1
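You can also confirm kube-proxy actually picked up IPVS mode from its ConfigMap and logs:

# The mode field in the kube-proxy ConfigMap should read "ipvs"
kubectl -n kube-system get configmap kube-proxy -o yaml | grep mode
# The kube-proxy startup logs should mention the ipvs proxier
kubectl -n kube-system logs -l k8s-app=kube-proxy | grep -i ipvs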

Check that IPVS rules are in place:

[root@node1 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.96.0.1:443 rr
  -> 172.16.21.135:6443           Masq    1      1          0         
TCP  10.96.0.10:53 rr
  -> 10.244.1.2:53                Masq    1      0          0         
  -> 10.244.1.3:53                Masq    1      0          0         
TCP  10.96.0.10:9153 rr
  -> 10.244.1.2:9153              Masq    1      0          0         
  -> 10.244.1.3:9153              Masq    1      0          0         
UDP  10.96.0.10:53 rr
  -> 10.244.1.2:53                Masq    1      0          0         
  -> 10.244.1.3:53                Masq    1      0          0
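As a last end-to-end check, creating a Service should immediately show up as a new IPVS virtual server. A minimal sketch; the nginx deployment here is just a throwaway example:

# Create a test deployment and service, then find its ClusterIP in the IPVS table
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80
ipvsadm -Ln | grep -A 1 "$(kubectl get svc nginx -o jsonpath='{.spec.clusterIP}')"
# Clean up the test objects
kubectl delete svc,deployment nginx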