Quickly Building a 3-Master, 3-Worker K8s Cluster on openEuler with kubeadm [v1.19]

System configuration

Host configuration

Edit the /etc/hosts file on every Master and Worker node and append the IP-to-hostname mappings of all nodes.

Add the hosts entries on all nodes:
cat >>/etc/hosts <<EOF
192.168.1.81  k8s-master01
192.168.1.82  k8s-master02
192.168.1.83  k8s-master03
192.168.1.84  k8s-node01
192.168.1.85  k8s-node02
192.168.1.86  k8s-node03
EOF

Set the hostname (run the matching command on each node)

hostnamectl set-hostname k8s-master01
hostnamectl set-hostname k8s-master02
hostnamectl set-hostname k8s-master03
hostnamectl set-hostname k8s-node01
hostnamectl set-hostname k8s-node02
hostnamectl set-hostname k8s-node03

Disable the firewall and SELinux

The nftables backend has compatibility issues that can produce duplicate firewall rules, so the firewall is disabled; SELinux is disabled so that containers can access the host file system.

Run the following commands on every Master and Worker node to disable the firewall and SELinux.

systemctl stop firewalld
systemctl disable firewalld
setenforce 0
sed -i '/^SELINUX=/s/enforcing/disabled/' /etc/selinux/config

Enable kernel parameters

Enable kernel IP forwarding.
Enable bridge mode so that bridged traffic is passed to the iptables chains.
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF

sysctl -p /etc/sysctl.d/k8s.conf
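The net.bridge.* settings above only take effect once the br_netfilter module is loaded. A minimal sketch to load it now and on every boot (the k8s.conf file name under modules-load.d is my own choice):

# load the bridge netfilter module immediately (assumption: not yet loaded)
modprobe br_netfilter
# load it automatically on boot
cat > /etc/modules-load.d/k8s.conf <<EOF
br_netfilter
EOF
# re-apply the sysctl settings now that the module is present
sysctl -p /etc/sysctl.d/k8s.conf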

Load the built-in IPVS modules

The IPVS tools must be installed and the IPVS kernel modules loaded on every node.

# Run the following commands on all nodes
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules


yum install ipvsadm ipset -y


Check that the IPVS modules are loaded (on openEuler's 4.19+ kernels, nf_conntrack_ipv4 has been merged into nf_conntrack):
lsmod | grep -e ip_vs -e nf_conntrack

Disable swap

When installing a K8s cluster, Linux swap must be disabled; otherwise memory swapping degrades system performance and stability.

Run the following commands on every Master and Worker node to disable the swap partition:

swapoff -a
cp -p /etc/fstab /etc/fstab.bak$(date '+%Y%m%d%H%M%S')
sed -i 's|^/dev/mapper/openeuler-swap|#/dev/mapper/openeuler-swap|' /etc/fstab

Software installation

Install Docker

yum install docker -y


cat > /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": [
        "https://docker.1panel.live",
        "https://docker.m.daocloud.io",
        "https://docker.moelty.fun",
        "https://dc-proxy.pmxu.xyz",
        "https://dockerpull.com",
        "https://docker.anyhub.us.kg",
        "https://dockerhub.jobcher.com",
        "https://dockerhub.icu",
        "https://docker.awsl9527.cn"
    ],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  }
}
EOF


systemctl restart docker
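After restarting Docker, it is worth enabling it on boot and confirming the systemd cgroup driver is active, since the kubelet expects the two to match:

systemctl enable docker
docker info 2>/dev/null | grep -i "cgroup driver"    # should print: Cgroup Driver: systemd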

Install the K8s components

Configure the Kubernetes yum repository

Run the following commands on every Master and Worker node to configure the Kubernetes yum repository.

  • aarch64 architecture:
cat <<EOF > /etc/yum.repos.d/kubernetes.repo

[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-aarch64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
  • x86_64 architecture:
cat <<EOF > /etc/yum.repos.d/kubernetes.repo

[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum clean all
yum makecache

List all available versions

yum list kubeadm kubelet kubectl --showduplicates | sort -r

Run the following commands on every Master and Worker node to install the K8s components.

yum install -y kubelet-1.19.16 kubeadm-1.19.16 kubectl-1.19.16

systemctl enable kubelet
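A quick sanity check that the pinned 1.19.16 packages landed on every node:

kubeadm version -o short          # expected: v1.19.16
kubelet --version                 # expected: Kubernetes v1.19.16
kubectl version --client --short  # expected: Client Version: v1.19.16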

Configure high availability on the 3 master nodes

Perform the following steps on all 3 master nodes.

Install nginx on the 3 masters
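The stream block below is not part of a default nginx.conf, so install nginx first and then add the block at the top level of /etc/nginx/nginx.conf, next to the existing http block. A minimal sketch, assuming the stream module ships as a separate nginx-mod-stream package (on builds with stream support compiled in, the extra package is unnecessary):

yum install nginx nginx-mod-stream -y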

stream {
    log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log  /var/log/nginx/k8s-access.log  main;

    upstream k8s-apiserver {
        server 192.168.1.81:6443;   # Master1 APISERVER IP:PORT
        server 192.168.1.82:6443;   # Master2 APISERVER IP:PORT
        server 192.168.1.83:6443;   # Master3 APISERVER IP:PORT
    }

    server {
        listen 16443;
        proxy_pass k8s-apiserver;
    }
}


http {
    ...            # keep the existing http block as-is
}
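Before moving on to keepalived, validate the configuration and start nginx on all three masters:

nginx -t                      # syntax check of /etc/nginx/nginx.conf
systemctl enable --now nginx
ss -antp | grep 16443         # the stream proxy should now be listening on 16443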
yum install keepalived -y
cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf_bak

Edit /etc/keepalived/keepalived.conf:
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id NGINX_MASTER
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}
vrrp_script check_nginx {
    script "/etc/nginx/check_nginx.sh"
}


vrrp_instance VI_1 {
    state MASTER
    interface eth18
    mcast_src_ip 192.168.1.82
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.1.80
    }
   track_script {
        check_nginx
    }
}
  • mcast_src_ip: the multicast source address; set it to the IP address of the current host.
  • priority: keepalived uses this value to elect the MASTER node. Here master1 serves Kubernetes while the other two nodes stand by, so set master1 to 100, master2 to 99, and master3 to 98.
  • state: set the state field to MASTER on master1 and to BACKUP on the other two nodes.
  • virtual_router_id: must be identical on all three nodes; router_id is a unique identifier for each machine.
Create the nginx health-check script:

vi /etc/nginx/check_nginx.sh

#!/bin/bash

count=$(ss -antp | grep 16443 | egrep -cv "grep|$$")

if [ "$count" -eq 0 ];then
    exit 1
else
    exit 0
fi

chmod +x /etc/nginx/check_nginx.sh
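Finally, start keepalived on all three masters and confirm that the VIP 192.168.1.80 appears on the node currently holding the MASTER state (eth18 is the interface named in the config above):

systemctl enable --now keepalived
ip addr show eth18 | grep 192.168.1.80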

Deploy the Master nodes

Generate the init configuration file

Run the following command on the master node: kubeadm config print init-defaults > kubeadm-init.yaml

cat kubeadm-init.yaml 
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.81        # IP of this node
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01                     # hostname of this node
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  certSANs:
    - "kubernetes"
    - "kubernetes.default"
    - "kubernetes.default.svc"
    - "kubernetes.default.svc.cluster.local"
    - "192.168.1.81"             #填写所有master节点IP,VIP, 以及公网IP,域名
    - "192.168.1.82"
    - "192.168.1.83"
    - "192.168.1.80"
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
controlPlaneEndpoint: "192.168.1.80:16443"     # VIP address and port
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers      # use a domestic mirror registry
kind: ClusterConfiguration
kubernetesVersion: v1.19.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"

On all master nodes, pull the images locally

kubeadm config images pull --config kubeadm-init.yaml

On the master01 node, run the kubeadm initialization

kubeadm init --config kubeadm-init.yaml --upload-certs
Copy the kubeconfig file that kubectl uses to connect to the cluster into the default path:
mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
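At this point kubectl should reach the API server through the VIP; the node will stay NotReady until the CNI plugin (calico, below) is installed:

kubectl get nodes
kubectl get pods -n kube-system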

The tail of the kubeadm init output provides the join commands:

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.1.80:16443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:1e61a18dd0ac8ceec51a88a9507eeafa3058828fa28075d5fc63590d91bedaa9 \
    --control-plane --certificate-key 6b3608b5c69b209d372e06756bce9aad4545a849ddfd70fda786da66f016a0fe

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.1.80:16443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:1e61a18dd0ac8ceec51a88a9507eeafa3058828fa28075d5fc63590d91bedaa9 
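The --control-plane join command above is the one to run on k8s-master02 and k8s-master03; after each joins, the same kubeconfig copy makes kubectl usable on those nodes too:

# on k8s-master02 and k8s-master03, after kubeadm join completes
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config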

Join the worker nodes to the cluster

Run the worker join command from the output above (the kubeadm join line without --control-plane) on k8s-node01, k8s-node02 and k8s-node03.

Install calico

wget https://projectcalico.docs.tigera.io/archive/v3.20/manifests/calico.yaml

After downloading, edit the pod network definition (CALICO_IPV4POOL_CIDR) so that it matches the podSubnet passed to kubeadm init above:

            - name: CALICO_IPV4POOL_CIDR
              value: "10.244.0.0/16"
            - name: IP_AUTODETECTION_METHOD
              value: "interface=ens.*"

kubectl apply -f calico.yaml
kubectl get pods -n kube-system
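Once the calico pods are Running, all nodes should become Ready; a wait such as the following can confirm it (the k8s-app=calico-node label matches the stock calico.yaml, and the timeout value is arbitrary):

kubectl -n kube-system wait --for=condition=Ready pods -l k8s-app=calico-node --timeout=300s
kubectl get nodes -o wide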

Use NFS as a StorageClass for dynamic storage

Create the StorageClass

cat nfs-client-storageclass.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-client-storageclass
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: nfs-storage
#reclaimPolicy: Delete  # the default is Delete
parameters:
  archiveOnDelete: "false"

The provisioner value must match the PROVISIONER_NAME environment variable in the provisioner deployment below.

Create the NFS provisioner

cat nfs-client-provisioner.yaml 
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: nfs-storage
            - name: NFS_SERVER
              value: 192.168.1.22
            - name: NFS_PATH
              value: /share/data
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.1.22
            path: /share/data
vi nfs-rbac.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default        # set to the namespace where the provisioner is deployed; same below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
    # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
On all worker nodes:
yum -y install nfs-utils

On any master node:
kubectl apply -f nfs-rbac.yaml
kubectl apply -f nfs-client-storageclass.yaml
kubectl apply -f nfs-client-provisioner.yaml
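To verify dynamic provisioning end to end, a throwaway PVC (the name test-claim is arbitrary) can be created against the new StorageClass; it should become Bound within seconds and a matching directory should appear under /share/data on the NFS server:

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: nfs-client-storageclass
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
EOF

kubectl get pvc test-claim     # STATUS should become Bound
kubectl delete pvc test-claim  # clean up the test claim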

Deploy the Ingress Controller

Deploy as a DaemonSet with HostNetwork (+ nodeSelector)

wget raw.githubusercontent.com/kubernetes/…    # download the manifest template

# Change the kind
...
kind: DaemonSet
...
# Add the following under DaemonSet.spec.template.spec
...

    spec:
      hostNetwork: true      # use the host network
      nodeSelector:
        ingress: "true"      # run only on nodes carrying the label ingress=true

Replace the images:
registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller:v1.0.0
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen:v1.0

Label the nodes that should run nginx-ingress-controller

# add the label
kubectl label node node03 ingress=true
# remove the label
kubectl label node node03 ingress-

kubectl apply -f deploy.yaml
kubectl get pods -n ingress-nginx -o wide
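Because the controller runs with hostNetwork, it binds ports 80/443 directly on the labelled node, so a quick smoke test is to curl that node's IP (assuming node03 here is k8s-node03, 192.168.1.86; a 404 from the default backend means the controller is up):

curl -I http://192.168.1.86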

kube-prometheus

Download kube-prometheus

Download from: https://github.com/prometheus-operator/kube-prometheus

Organize the yaml files

All the manifests live in the project's manifests directory, and applying them is easier when they are grouped by component, so they are sorted into sub-directories first.

Enter the manifests directory of the source tree:

cd manifests/

mkdir -p node-exporter alertmanager grafana kube-state-metrics prometheus serviceMonitor adapter

# move the yaml files into their per-component directories
mv *-serviceMonitor* serviceMonitor/
mv grafana-* grafana/
mv kube-state-metrics-* kube-state-metrics/
mv alertmanager-* alertmanager/
mv node-exporter-* node-exporter/
mv prometheus-adapter* adapter/
mv prometheus-* prometheus/

Change the Service type to NodePort

[1] Modify the prometheus Service

vim prometheus/prometheus-service.yaml


apiVersion: v1
kind: Service
metadata:
  labels:
    prometheus: k8s
  name: prometheus-k8s
  namespace: monitoring
spec:
  type: NodePort     # added
  ports:
  - name: web
    port: 9090
    targetPort: web
    nodePort: 30100  # added
  selector:
    app: prometheus
    prometheus: k8s
  sessionAffinity: ClientIP

[2] Modify the grafana Service


vi grafana/grafana-service.yaml

apiVersion: v1
kind: Service
metadata:
  labels:
    app: grafana
  name: grafana
  namespace: monitoring
spec:
  type: NodePort     # added
  ports:
  - name: http
    port: 3000
    targetPort: http
    nodePort: 30101  # added
  selector:
    app: grafana

[3] Modify the alertmanager Service

apiVersion: v1
kind: Service
metadata:
  labels:
    alertmanager: main
  name: alertmanager-main
  namespace: monitoring
spec:
  type: NodePort
  ports:
  - name: web
    port: 9093
    targetPort: web
    nodePort: 30093
  selector:
    alertmanager: main
    app: alertmanager
  sessionAffinity: ClientIP


Prometheus data persistence

Append the following to the end of the prometheus-prometheus.yaml file to configure persistent storage for Prometheus; the storageClassName is specified here:


...
  serviceMonitorSelector: {}
  version: v2.11.0
  retention: 30d
  storage: # added: persistence configuration
    volumeClaimTemplate:
      spec:
        storageClassName: nfs-client-storageclass    # must match the name of the StorageClass created earlier
        resources:
          requests:
            storage: 50Gi

Grafana data persistence

Grafana likewise needs a Storage Class specified for its storage:

vim grafana/grafana-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: grafana-pvc
  namespace: monitoring
  labels:
    app: grafana-pvc
spec:
  accessModes: # access mode
  - ReadWriteOnce
  volumeMode: Filesystem # volume mode
  resources:
    requests:
      storage: 10Gi
  storageClassName: nfs-client-storageclass # name of the StorageClass created earlier

Because Grafana is deployed as a Deployment, the PVC created from the Storage Class also has to be referenced manually in the Deployment for the data to persist:

vi grafana/grafana-deployment.yaml

..........
..........
      volumes:
      #- emptyDir: {}
      #  name: grafana-storage
      - name: grafana-storage           # mount the grafana-pvc PVC as the data volume
        persistentVolumeClaim:
          claimName: grafana-pvc

Install the Operator

The manifests under setup/ define the Prometheus Operator CRDs used by the monitoring stack: Alertmanager configurations, Alertmanagers, PodMonitors, Probes, Prometheuses, PrometheusRules, ServiceMonitors and ThanosRulers. They also create the "monitoring" namespace.

kubectl apply --server-side -f setup/

Check that the CRD resources are ready

 kubectl get -f manifests/setup/

Install the other components

# list the images referenced by the manifests, then point them at mirror registries
egrep -r image .
 
sed -i 's/quay.io/quay.mirrors.ustc.edu.cn/g' setup/prometheus-operator-deployment.yaml
sed -i 's/quay.io/quay.mirrors.ustc.edu.cn/g' prometheus-prometheus.yaml 
sed -i 's/quay.io/quay.mirrors.ustc.edu.cn/g' alertmanager-alertmanager.yaml
sed -i 's/quay.io/quay.mirrors.ustc.edu.cn/g' kube-state-metrics-deployment.yaml
sed -i 's/quay.io/quay.mirrors.ustc.edu.cn/g' node-exporter-daemonset.yaml
sed -i 's/quay.io/quay.mirrors.ustc.edu.cn/g' prometheus-adapter-deployment.yaml
sed -i 's/quay.io/quay.mirrors.ustc.edu.cn/g' blackbox-exporter-deployment.yaml
sed -i 's#k8s.gcr.io/kube-state-metrics/kube-state-metrics#bitnami/kube-state-metrics#g' kube-state-metrics-deployment.yaml
 
kubectl apply -f adapter/
kubectl apply -f alertmanager/
kubectl apply -f node-exporter/
kubectl apply -f kube-state-metrics/
kubectl apply -f grafana/
kubectl apply -f prometheus/
kubectl apply -f serviceMonitor/
kubectl get pods -n monitoring
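With all pods Running, the UIs are reachable through the NodePorts configured above on any node IP, for example:

curl -sI http://192.168.1.81:30100 | head -1   # Prometheus
curl -sI http://192.168.1.81:30101 | head -1   # Grafana (first login: admin/admin)
curl -sI http://192.168.1.81:30093 | head -1   # Alertmanager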