1. Deploying a Kubernetes Cluster with kubeadm


1. Machine configuration

1. Each server needs at least 2 CPU cores and 2 GB of RAM. If a machine falls short of this, you can append --ignore-preflight-errors=NumCPU to the kubeadm init command during cluster initialization.

2. Machine list

 IP              Hostname
 192.168.31.201  k8s-1
 192.168.31.202  k8s-2
 192.168.31.203  k8s-3

2. System preparation (run on all three machines)

1. Initial system setup

 # Disable SELinux
 [root@k8s-1 ~]# setenforce 0
 [root@k8s-1 ~]# getenforce
 Permissive
 
 [root@k8s-1 ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
 
 # Disable the firewall
 systemctl disable --now firewalld
 
 # Disable swap
 swapoff -a
 sed -ri 's/.*swap.*/#&/' /etc/fstab                                        # comment out the swap entry in /etc/fstab
 echo 'KUBELET_EXTRA_ARGS="--fail-swap-on=false"' > /etc/sysconfig/kubelet  # let kubelet tolerate swap
 
 # Add the cluster entries to /etc/hosts
 [root@k8s-1 ~]# cat /etc/hosts
 192.168.31.201 k8s-1
 192.168.31.202 k8s-2
 192.168.31.203 k8s-3
 
 # Passwordless SSH login
 [root@k8s-1 ~]# ssh-keygen -t rsa
 [root@k8s-1 .ssh]# for i in k8s-1 k8s-2 k8s-3 ;do  ssh-copy-id -i ~/.ssh/id_rsa.pub root@$i; done
 
 # Sync the cluster time
 ntpdate time1.aliyun.com
 
 # Configure the yum mirror
 curl -o /etc/yum.repos.d/CentOS-Base.repo https://repo.huaweicloud.com/repository/conf/CentOS-7-reg.repo
 yum clean all
 yum makecache
 
 # Update the system (keep the current kernel)
 yum update -y --exclude=kernel*
 
 # Install commonly used base packages
 yum install wget expect vim net-tools ntp bash-completion ipvsadm ipset jq iptables conntrack sysstat libseccomp -y
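
Since passwordless SSH from k8s-1 to every node was set up above, you do not have to retype these steps on each machine. A minimal sketch, assuming the commands above have been saved locally in a script named init.sh (a hypothetical file name):

 # Hypothetical helper: save the setup commands above as init.sh, then run it on every node
 for host in k8s-1 k8s-2 k8s-3; do
     scp init.sh root@${host}:/root/
     ssh root@${host} "bash /root/init.sh"
 done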

2. Update the system kernel

 # Docker needs a fairly recent kernel; 4.4+ is recommended
 # Enable the ELRepo repository on CentOS 7
 
 rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
 rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
 
 # List the available kernel packages
 yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
 
 # Install the latest mainline stable kernel
 yum --enablerepo=elrepo-kernel install kernel-ml -y
 
 # Boot the new kernel by default
 sed -i 's/GRUB_DEFAULT=saved/GRUB_DEFAULT=0/' /etc/default/grub
 # Regenerate the GRUB configuration
 [root@k8s-1 ~]# grub2-mkconfig -o /boot/grub2/grub.cfg
 Generating grub configuration file ...
 Found linux image: /boot/vmlinuz-5.13.9-1.el7.elrepo.x86_64
 Found initrd image: /boot/initramfs-5.13.9-1.el7.elrepo.x86_64.img
 Found linux image: /boot/vmlinuz-3.10.0-1160.el7.x86_64
 Found initrd image: /boot/initramfs-3.10.0-1160.el7.x86_64.img
 Found linux image: /boot/vmlinuz-0-rescue-2ca1aaaa44ad406d975cabc854d4c822
 Found initrd image: /boot/initramfs-0-rescue-2ca1aaaa44ad406d975cabc854d4c822.img
 done
 [root@k8s-1 ~]# reboot
 [root@k8s-1 ~]# uname -ra
 Linux k8s-1 5.13.9-1.el7.elrepo.x86_64 #1 SMP Sat Aug 7 08:55:16 EDT 2021 x86_64 x86_64 x86_64 GNU/Linux

3. Install IPVS

 yum install -y conntrack-tools ipvsadm ipset conntrack libseccomp
 
 # Load the IPVS kernel modules
 cat > /etc/sysconfig/modules/ipvs.modules <<'EOF'
 #!/bin/bash
 ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
 for kernel_module in ${ipvs_modules}; do
     /sbin/modinfo -F filename ${kernel_module} > /dev/null 2>&1
     if [ $? -eq 0 ]; then
         /sbin/modprobe ${kernel_module}
     fi
 done
 EOF
 
 chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
 
 # Load br_netfilter so the bridge-nf sysctls below are available
 modprobe br_netfilter
 
 # Adjust the kernel parameters
 cat > /etc/sysctl.d/k8s.conf << EOF
 net.ipv4.ip_forward = 1
 net.bridge.bridge-nf-call-iptables = 1
 net.bridge.bridge-nf-call-ip6tables = 1
 fs.may_detach_mounts = 1
 vm.overcommit_memory=1
 vm.panic_on_oom=0
 fs.inotify.max_user_watches=89100
 fs.file-max=52706963
 fs.nr_open=52706963
 net.ipv4.tcp_keepalive_time = 600
 net.ipv4.tcp_keepalive_probes = 3
 net.ipv4.tcp_keepalive_intvl = 15
 net.ipv4.tcp_max_tw_buckets = 36000
 net.ipv4.tcp_tw_reuse = 1
 net.ipv4.tcp_max_orphans = 327680
 net.ipv4.tcp_orphan_retries = 3
 net.ipv4.tcp_syncookies = 1
 net.ipv4.tcp_max_syn_backlog = 16384
 net.netfilter.nf_conntrack_max = 65536
 net.ipv4.tcp_timestamps = 0
 net.core.somaxconn = 16384
 EOF
 
 # Apply immediately
 sysctl --system
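
To confirm that the modules and kernel parameters actually took effect, a quick check can look like this:

 # The ip_vs / nf_conntrack modules should be listed, and both sysctls should read 1
 lsmod | egrep 'ip_vs|nf_conntrack'
 sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward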

3. Install Docker (on all three machines)

 # Remove any previously installed Docker packages
 yum remove docker docker-common docker-selinux docker-engine -y
 
 # Install the dependencies Docker needs
 yum install -y yum-utils device-mapper-persistent-data lvm2
 
 # Add the Docker CE yum repository
 wget -O /etc/yum.repos.d/docker-ce.repo https://repo.huaweicloud.com/docker-ce/linux/centos/docker-ce.repo
 
 # Install Docker
 yum install docker-ce -y
 
 # Enable and start Docker
 systemctl enable --now docker.service
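
kubeadm works best when Docker and the kubelet use the same cgroup driver, and for Kubernetes 1.21 the systemd driver is the recommended one. A minimal /etc/docker/daemon.json sketch (the registry-mirrors entry is only a placeholder; substitute your own accelerator address or drop it):

 cat > /etc/docker/daemon.json <<EOF
 {
   "exec-opts": ["native.cgroupdriver=systemd"],
   "registry-mirrors": ["https://<your-mirror>.mirror.aliyuncs.com"]
 }
 EOF
 systemctl restart docker
 docker info | grep -i 'cgroup driver'    # should now report: systemd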

4. Install kubelet (on all three machines)

 # Add the Kubernetes yum repository
 cat <<EOF > /etc/yum.repos.d/kubernetes.repo
 [kubernetes]
 name=Kubernetes
 baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
 enabled=1
 gpgcheck=1
 repo_gpgcheck=1
 gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
 EOF
 
 # Install kubelet, kubeadm, and kubectl
 yum install -y kubelet-1.21.2-0.x86_64 kubeadm-1.21.2-0.x86_64 kubectl-1.21.2-0.x86_64
 systemctl enable --now kubelet
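
Before moving on, it is worth confirming that all three machines ended up with the same versions; a simple check might be:

 # Every node should report v1.21.2
 kubeadm version -o short
 kubelet --version
 kubectl version --client --short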

5. Download the images

The Kubernetes component images are hosted on registries that cannot be pulled directly from mainland China, so we first build them in a domestic registry and then pull them onto our own servers. Here we use Alibaba Cloud Container Registry (ACR).

The overall workflow:

Log in to your Alibaba Cloud account ---> open the console ---> search for Container Registry ---> create a personal instance ---> create a namespace ---> create an image repository ---> bind a code source ---> add a build rule ---> build the image ---> follow the usage guide on the repository's basic information page to log in and pull the image ---> use docker tag to rename the image to its official name

Since my Alibaba Cloud account already has a personal instance, I will only briefly show the remaining steps.

1. Source code repository

GitHub repo: https://github.com/Fong007/images. You can fork it to your own GitHub account and edit the Dockerfile to choose which image versions to build.

2. Create the image repository


3. Add the code source


4. Build rules


5. How to build the images

6. Pull the images


Once all of the images have been pulled onto the local servers and renamed with docker tag, you can move on to initialization; a sketch of this pull-and-retag step is shown below.
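
As a rough sketch (registry.cn-hangzhou.aliyuncs.com/<your-namespace> is only a placeholder for your own ACR repository address; kubeadm config images list prints the exact image names and tags your kubeadm version expects):

 # Print the images kubeadm expects for this version
 kubeadm config images list --kubernetes-version v1.21.2
 
 # Pull from your own ACR repository and retag back to the official k8s.gcr.io names
 MYREPO="registry.cn-hangzhou.aliyuncs.com/<your-namespace>"    # placeholder, use your own repository
 for img in kube-apiserver:v1.21.2 kube-controller-manager:v1.21.2 kube-scheduler:v1.21.2 \
            kube-proxy:v1.21.2 pause:3.4.1 etcd:3.4.13-0; do
     docker pull ${MYREPO}/${img}
     docker tag  ${MYREPO}/${img} k8s.gcr.io/${img}
 done
 # CoreDNS sits under a sub-path in 1.21
 docker pull ${MYREPO}/coredns:v1.8.0
 docker tag  ${MYREPO}/coredns:v1.8.0 k8s.gcr.io/coredns/coredns:v1.8.0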

6. Initialize the master node (master node only)

 kubeadm init \
 --kubernetes-version=v1.21.2 \
 --service-cidr=10.96.0.0/12 \
 --pod-network-cidr=10.244.0.0/16
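
If you would rather skip the manual pull-and-retag step, kubeadm can also be pointed straight at a public domestic mirror. This is an optional variant, not what was done above:

 # Optional variant: let kubeadm pull its images from a public mirror directly
 kubeadm init \
 --kubernetes-version=v1.21.2 \
 --image-repository=registry.aliyuncs.com/google_containers \
 --service-cidr=10.96.0.0/12 \
 --pod-network-cidr=10.244.0.0/16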

7. Post-initialization setup (on the master node unless noted otherwise)

1. Set up kubeconfig for the cluster admin user

 [root@k8s-1 ~]# mkdir -p $HOME/.kube
 [root@k8s-1 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
 [root@k8s-1 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
 ​
 # If you are the root user, you can instead just run: export KUBECONFIG=/etc/kubernetes/admin.conf
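
A quick sanity check at this point: the master node will report NotReady and the coredns pods will stay Pending until a network plugin (flannel, installed in the next step) is running.

 [root@k8s-1 ~]# kubectl get nodes                      # k8s-1 shows NotReady for now
 [root@k8s-1 ~]# kubectl get pods -n kube-system        # coredns pods remain Pending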

2. Install the cluster network plugin

The flannel manifest:

 ---
 apiVersion: policy/v1beta1
 kind: PodSecurityPolicy
 metadata:
   name: psp.flannel.unprivileged
   annotations:
     seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
     seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
     apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
     apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
 spec:
   privileged: false
   volumes:
   - configMap
   - secret
   - emptyDir
   - hostPath
   allowedHostPaths:
   - pathPrefix: "/etc/cni/net.d"
   - pathPrefix: "/etc/kube-flannel"
   - pathPrefix: "/run/flannel"
   readOnlyRootFilesystem: false
   # Users and groups
   runAsUser:
     rule: RunAsAny
   supplementalGroups:
     rule: RunAsAny
   fsGroup:
     rule: RunAsAny
   # Privilege Escalation
   allowPrivilegeEscalation: false
   defaultAllowPrivilegeEscalation: false
   # Capabilities
   allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
   defaultAddCapabilities: []
   requiredDropCapabilities: []
   # Host namespaces
   hostPID: false
   hostIPC: false
   hostNetwork: true
   hostPorts:
   - min: 0
     max: 65535
   # SELinux
   seLinux:
     # SELinux is unused in CaaSP
     rule: 'RunAsAny'
 ---
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: flannel
 rules:
 - apiGroups: ['extensions']
   resources: ['podsecuritypolicies']
   verbs: ['use']
   resourceNames: ['psp.flannel.unprivileged']
 - apiGroups:
   - ""
   resources:
   - pods
   verbs:
   - get
 - apiGroups:
   - ""
   resources:
   - nodes
   verbs:
   - list
   - watch
 - apiGroups:
   - ""
   resources:
   - nodes/status
   verbs:
   - patch
 ---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: flannel
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
   name: flannel
 subjects:
 - kind: ServiceAccount
   name: flannel
   namespace: kube-system
 ---
 apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: flannel
   namespace: kube-system
 ---
 kind: ConfigMap
 apiVersion: v1
 metadata:
   name: kube-flannel-cfg
   namespace: kube-system
   labels:
     tier: node
     app: flannel
 data:
   cni-conf.json: |
     {
       "name": "cbr0",
       "cniVersion": "0.3.1",
       "plugins": [
         {
           "type": "flannel",
           "delegate": {
             "hairpinMode": true,
             "isDefaultGateway": true
           }
         },
         {
           "type": "portmap",
           "capabilities": {
             "portMappings": true
           }
         }
       ]
     }
   net-conf.json: |
     {
       "Network": "10.244.0.0/16",
       "Backend": {
         "Type": "vxlan"
       }
     }
 ---
 apiVersion: apps/v1
 kind: DaemonSet
 metadata:
   name: kube-flannel-ds
   namespace: kube-system
   labels:
     tier: node
     app: flannel
 spec:
   selector:
     matchLabels:
       app: flannel
   template:
     metadata:
       labels:
         tier: node
         app: flannel
     spec:
       affinity:
         nodeAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
             nodeSelectorTerms:
             - matchExpressions:
               - key: kubernetes.io/os
                 operator: In
                 values:
                 - linux
       hostNetwork: true
       priorityClassName: system-node-critical
       tolerations:
       - operator: Exists
         effect: NoSchedule
       serviceAccountName: flannel
       initContainers:
       - name: install-cni
         image: flannel:v0.14.0
         command:
         - cp
         args:
         - -f
         - /etc/kube-flannel/cni-conf.json
         - /etc/cni/net.d/10-flannel.conflist
         volumeMounts:
         - name: cni
           mountPath: /etc/cni/net.d
         - name: flannel-cfg
           mountPath: /etc/kube-flannel/
       containers:
       - name: kube-flannel
         image: flannel:v0.14.0
         command:
         - /opt/bin/flanneld
         args:
         - --ip-masq
         - --kube-subnet-mgr
         - --iface=eth0
         resources:
           requests:
             cpu: "100m"
             memory: "50Mi"
           limits:
             cpu: "100m"
             memory: "50Mi"
         securityContext:
           privileged: false
           capabilities:
             add: ["NET_ADMIN", "NET_RAW"]
         env:
         - name: POD_NAME
           valueFrom:
             fieldRef:
               fieldPath: metadata.name
         - name: POD_NAMESPACE
           valueFrom:
             fieldRef:
               fieldPath: metadata.namespace
         volumeMounts:
         - name: run
           mountPath: /run/flannel
         - name: flannel-cfg
           mountPath: /etc/kube-flannel/
       volumes:
       - name: run
         hostPath:
           path: /run/flannel
       - name: cni
         hostPath:
           path: /etc/cni/net.d
       - name: flannel-cfg
         configMap:
           name: kube-flannel-cfg

Points in the manifest that need to be adjusted:

1. Change the image fields to your own Alibaba Cloud registry address.

2. Check the NIC name on your machines (eth0 vs eth1) and adjust the --iface argument accordingly.

Run flannel:

 kubectl apply -f flannel.yaml
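
Once applied, the DaemonSet should schedule one flannel pod per node (the resource name and label below come straight from the manifest above):

 kubectl -n kube-system get daemonset kube-flannel-ds
 kubectl -n kube-system get pods -l app=flannel -o wide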

3. Join the worker nodes to the cluster

 [root@k8s-1 ~]# kubeadm token create    --print-join-command
 kubeadm join 192.168.31.201:6443 --token n34jwn.yzpt2nmavue5js9a --discovery-token-ca-cert-hash sha256:2384d5035a6e1c57a907f247b455fec5699c0e942d0a2e66f5092c27da7f7cb9 
 ## Note: copy the join command generated above and run it on each worker node.
 ​
 [root@k8s-1 ~]# kubectl get nodes
 NAME    STATUS   ROLES                  AGE   VERSION
 k8s-1   Ready    control-plane,master   13m   v1.21.2
 k8s-2   Ready    <none>                 35s   v1.21.2
 k8s-3   Ready    <none>                 39s   v1.21.2
 ​
 # Check the cluster status
 ## Method 1
 [root@k8s-1 ~]# kubectl get nodes
 
 ## Method 2
 [root@k8s-1 ~]# kubectl get pods -n kube-system
 NAME                               READY   STATUS    RESTARTS   AGE
 coredns-f68b4c98f-5t7wm            1/1     Running   0          5m54s
 coredns-f68b4c98f-5xqjs            1/1     Running   0          5m54s
 etcd-k8s-1                         1/1     Running   0          6m3s
 kube-apiserver-k8s-1               1/1     Running   0          6m3s
 kube-controller-manager-k8s-1      1/1     Running   0          6m3s
 kube-flannel-ds-njn5h              1/1     Running   4          5m24s
 kube-flannel-ds-sjtv7              1/1     Running   4          5m24s
 kube-flannel-ds-vz8pn              1/1     Running   4          5m24s
 kube-proxy-hsdxs                   1/1     Running   5          5m54s
 kube-proxy-tqhfg                   1/1     Running   5          5m54s
 kube-proxy-z8kqx                   1/1     Running   5          5m54s
 kube-scheduler-k8s-1               1/1     Running   0          6m3s
 ​
 # Method 3: verify the cluster DNS directly
 [root@k8s-1 ~]# kubectl run test-pod -it --rm --image=busybox:1.28.3
 If you don't see a command prompt, try pressing enter.
 / # nslookup kubernetes
 Server:    10.96.0.10
 Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
 ​
 Name:      kubernetes
 Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
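
Since the IPVS modules were loaded during system preparation, you can optionally switch kube-proxy from its default iptables mode to IPVS. A minimal sketch of the usual procedure (not required for the cluster to work):

 # Optional: switch kube-proxy to IPVS mode
 kubectl -n kube-system edit configmap kube-proxy            # set mode: "ipvs" in config.conf
 kubectl -n kube-system delete pods -l k8s-app=kube-proxy    # recreate the pods to pick up the change
 ipvsadm -Ln                                                 # IPVS rules should now be listed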

8. kubectl command completion

 yum install -y bash-completion
 ​
 source /usr/share/bash-completion/bash_completion
 source <(kubectl completion bash)
 ​
 echo "source <(kubectl completion bash)" >> ~/.bashrc