kubeadm HA

455 阅读1分钟

修改主机名

hostnamectl set-hostname <新主机名>

内核参数调整

net.bridge.bridge-nf-call-ip6tables=1
net.ipv6.conf.all.disable_ipv6=1
net.ipv4.ip_forward = 1
vm.swappiness = 0
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_nonlocal_bind = 1
# 若 sysctl 报错 "/proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory"，
# 说明 br_netfilter 模块未加载，先执行：
modprobe br_netfilter

升级内核

yum install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum --enablerepo=elrepo-kernel install -y kernel-lt
grub2-set-default "CentOS Linux (5.8.5-1.el7.elrepo.x86_64) 7 (Core)"
grub2-editenv list
vim /etc/fstab

加载ipvs模块

lsmod |grep -E 'nf_conntrack_ipv4|ip_vs' 

modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4  # 注意：内核 >= 4.19（含上文升级到的 5.x）中该模块已并入 nf_conntrack，应改为 modprobe nf_conntrack
yum install ipset ipvsadm -y

清除防火墙规则

iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
ipvsadm -C
systemctl stop firewalld
systemctl disable firewalld

apt-get install kubeadm=1.19.11-00 kubectl=1.19.11-00 kubelet=1.19.11-00
yum install kubeadm-1.19.11-0 kubectl-1.19.11-0 kubelet-1.19.11-0

删除网卡

ip link delete kube-ipvs0 

ip link delete dummy0 

初始化集群

kubeadm config print init-defaults --component-configs KubeProxyConfiguration,KubeletConfiguration > kubeadm-init.yaml
cat > kubeadm-config.yaml <<-'EOF'
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.19.11
controlPlaneEndpoint: 192.168.130.200:8443
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
apiServer:
  timeoutForControlPlane: 4m0s
clusterName: kubernetes
networking:
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.95.0.0/16
controllerManager:
  extraArgs:
    deployment-controller-sync-period: "50s"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
evictionHard:
  memory.available: "5%"
  nodefs.available: "5%"
systemReserved:
  cpu: "2"
  memory: "10Gi"
systemReservedCgroup: /system.slice
kubeReservedCgroup: /kubelet.service
EOF

kubeadm init --config kubeadm-config.yaml --upload-certs #为多个master做准备
systemctl enable docker.service
systemctl enable kubelet.service

检测脚本

stream {
log_format proxy '$remote_addr [$time_local] '
                 '$protocol $status $bytes_sent $bytes_received '
                 '$session_time "$upstream_addr" '
                 '"$upstream_bytes_sent" "$upstream_bytes_received" "$upstream_connect_time"';
    upstream master {
        hash $remote_addr consistent;
        server 192.168.223.95:6443 max_fails=3 fail_timeout=30s; 
        server 192.168.223.96:6443 max_fails=3 fail_timeout=30s;
        server 192.168.223.97:6443 max_fails=3 fail_timeout=30s;
    }

    server {
        listen 8443;
        proxy_connect_timeout 1s;
        proxy_pass master;
        access_log /var/log/nginx/k8s-access.log proxy;
        error_log /var/log/nginx/k8s-error.log warn;
    }
}

global
    log /dev/log local0
    log /dev/log local1 notice
    daemon
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 1
    timeout http-request    10s
    timeout queue           20s
    timeout connect         5s
    timeout client          20s
    timeout server          20s
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
frontend  kubernetes-apiserver
    mode tcp
    bind *:6444
    option tcplog
    # bind *:443 ssl # To be completed ....
    default_backend             kubernetes-apiserver
backend kubernetes-apiserver
    # NOTE: 'option httpchk' 与 'option ssl-hello-chk' 互斥（后者覆盖前者）；
    # 若要用 HTTP /healthz 检查 TLS 的 apiserver，需去掉 ssl-hello-chk 并在
    # 各 server 行追加 'check-ssl verify none'。此处保留 ssl-hello-chk：
    # option httpchk GET /healthz
    # http-check expect status 200
    mode        tcp  # 模式tcp
    option ssl-hello-chk
    balance     roundrobin  # 采用轮询的负载算法
 server k8s-m-128-215 192.168.128.215:6443 check
 server k8s-m-128-216 192.168.128.216:6443 check
 server k8s-m-128-217 192.168.128.217:6443 check


#!/bin/bash
# keepalived health-check script for haproxy.
# Polls for a running haproxy process (3 attempts, 2 s apart); if none is
# found, stops keepalived so the VIP fails over to another node.

# Returns 0 if a haproxy process exists (retrying up to 3 times), 1 otherwise.
check_haproxy() {
  local attempt
  for ((attempt = 0; attempt < 3; attempt++)); do
    # pgrep -x matches the exact process name — safer than ps | wc -l,
    # which also counts unrelated commands containing "haproxy".
    if pgrep -x haproxy >/dev/null 2>&1; then
      return 0
    fi
    sleep 2
  done
  return 1
}

# Use the function's return status instead of leaking a global variable.
if check_haproxy; then
  exit 0
else
  /usr/bin/systemctl stop keepalived
  exit 1
fi

#!/bin/bash
# keepalived health-check script for the nginx stream proxy on port 8443.
# If nothing is listening on 8443, try to start nginx once; if the port is
# still down afterwards, kill keepalived so the VIP moves to another node.

# Count listening sockets bound to port 8443.
# grep -w ':8443' anchors on the port field, so e.g. 18443 or a PID that
# happens to contain "8443" are not miscounted (the original grep matched
# the bare substring anywhere in the netstat output).
listen_count() {
  netstat -lntp 2>/dev/null | grep -c -w ':8443'
}

if [ "$(listen_count)" -eq 0 ]; then
  sleep 2
  /usr/sbin/nginx
  if [ "$(listen_count)" -eq 0 ]; then
    pkill keepalived
  fi
fi

! /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id k8s-master-01   ! 替换为本机主机名：keepalived 不会展开 shell 变量 ${hostname}
}
vrrp_script check_apiserver {
  script "/etc/keepalived/check_apiserver.sh"  
  interval 3
  weight -2
  fall 10
  rise 2
}
vrrp_instance calico_ha_apiserver_10_31_90_0 {
    state MASTER
    interface eth0
    virtual_router_id 90
    priority 100
    authentication {
        auth_type PASS
        auth_pass pass@77
    }
    virtual_ipaddress {
        10.31.90.0
    }
    track_script {
        check_apiserver
    }
}

#!/bin/sh
# keepalived vrrp_script: verify the local load-balanced apiserver endpoint
# is reachable; additionally, when the VIP is bound to this node, verify the
# VIP endpoint too. Any failure exits non-zero so keepalived lowers priority.

APISERVER_VIP="10.31.90.0"
APISERVER_DEST_PORT="8443"

# Print an error to stderr and exit 1.
errorExit() {
    echo "*** $*" 1>&2
    exit 1
}

# The local proxy port must always answer.
curl --silent --max-time 2 --insecure "https://localhost:${APISERVER_DEST_PORT}/" -o /dev/null \
    || errorExit "Error GET https://localhost:${APISERVER_DEST_PORT}/"

# grep -q -F matches the VIP literally; unquoted/unescaped, the dots would be
# regex wildcards and could match unrelated addresses (e.g. 10x31.90.10).
if ip addr | grep -q -F "${APISERVER_VIP}"; then
    curl --silent --max-time 2 --insecure "https://${APISERVER_VIP}:${APISERVER_DEST_PORT}/" -o /dev/null \
        || errorExit "Error GET https://${APISERVER_VIP}:${APISERVER_DEST_PORT}/"
fi

查询emptyDir类型的存储路径

/var/lib/kubelet/pods/$(kubectl get pod <pod-name> -o jsonpath='{.metadata.uid}')/volumes/kubernetes.io~empty-dir/<volume-name>