kubeadm+keepalived+haproxy搭建高可用k8s集群
-
准备5台虚拟机(3台master节点,2台node节点)
-
master01 192.168.168.201 keepalived haproxy
-
master02 192.168.168.202 keepalived haproxy
-
master03 192.168.168.203 keepalived haproxy
-
node01 192.168.168.211
-
node02 192.168.168.212
-
VIP 192.168.168.200
-
-
虚拟机配置(全部节点)
- 安装环境包
yum update -y && yum install -y nfs-utils rpcbind wget psmisc curl gcc openssl-devel libnl3-devel net-snmp-devel vim net-tools telnet
- 关闭swap
vim /etc/fstab #注释swap行- 关闭selinux
sed -i '/SELINUX=/d' /etc/selinux/config && echo "SELINUX=disabled" >> /etc/selinux/config # getenforce 查看selinux状态- 关闭防火墙
systemctl stop firewalld && systemctl disable firewalld- 同步时间
timedatectl set-timezone Asia/Shanghai && chronyc -a makestep
- 配置静态IP
vim /etc/sysconfig/network-scripts/ifcfg-eth0BOOTPROTO="static" IPADDR="192.168.168.201" NETMASK="255.255.255.0" GATEWAY="192.168.168.1" DNS1="192.168.168.1" ONBOOT="yes"- 配置各个节点hosts文件
cat >> /etc/hosts <<EOF 192.168.168.200 k8s.master 192.168.168.201 master01 192.168.168.202 master02 192.168.168.203 master03 192.168.168.211 node01 192.168.168.212 node02 EOF- 重启服务器
reboot- 安装docker
source <(curl -sL https://get.docker.com)- 设置docker cgroupdriver
vim /etc/docker/daemon.json
{ "exec-opts": ["native.cgroupdriver=systemd"] }
systemctl daemon-reload
- 启动docker
systemctl start docker && systemctl enable docker- 安装haproxy(只安装三台master节点)
yum install -y haproxy- 配置haproxy
mv /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak vim /etc/haproxy/haproxy.cfg#--------------------------------------------------------------------- # Global settings #--------------------------------------------------------------------- global # to have these messages end up in /var/log/haproxy.log you will # need to: # 1) configure syslog to accept network log events. This is done # by adding the '-r' option to the SYSLOGD_OPTIONS in # /etc/sysconfig/syslog # 2) configure local2 events to go to the /var/log/haproxy.log # file. A line like the following can be added to # /etc/sysconfig/syslog # # local2.* /var/log/haproxy.log # log 127.0.0.1 local2 chroot /var/lib/haproxy pidfile /var/run/haproxy.pid maxconn 4000 user haproxy group haproxy daemon # turn on stats unix socket stats socket /var/lib/haproxy/stats #--------------------------------------------------------------------- # common defaults that all the 'listen' and 'backend' sections will # use if not designated in their block #--------------------------------------------------------------------- defaults mode http log global option httplog option dontlognull option http-server-close option forwardfor except 127.0.0.0/8 option redispatch retries 3 timeout http-request 10s timeout queue 1m timeout connect 10s timeout client 1m timeout server 1m timeout http-keep-alive 10s timeout check 10s maxconn 3000 #--------------------------------------------------------------------- # kubernetes apiserver frontend which proxys to the backends #--------------------------------------------------------------------- frontend kubernetes-apiserver mode tcp bind *:16443 option tcplog default_backend kubernetes-apiserver #--------------------------------------------------------------------- # round robin balancing between the various backends #--------------------------------------------------------------------- backend kubernetes-apiserver mode tcp balance roundrobin server master01 192.168.168.201:6443 check
server master02 192.168.168.202:6443 check server master03 192.168.168.203:6443 check #--------------------------------------------------------------------- # collection haproxy statistics message #--------------------------------------------------------------------- listen stats bind *:1080 stats auth admin:qwe123 stats refresh 5s stats realm HAProxy\ Statistics stats uri /admin?stats- 启动haproxy
systemctl start haproxy && systemctl enable haproxy -
keepalived配置(只配置三台master节点)
- keepalived安装
yum install -y keepalived- keepalived启动
systemctl start keepalived && systemctl enable keepalived- master01
mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak vim /etc/keepalived/keepalived.confglobal_defs { router_id master01 } vrrp_script check_haproxy { script "killall -0 haproxy" interval 2 timeout 1 fall 2 } vrrp_instance VI_1 { state MASTER #设置为主服务器 interface eth0 #监测网络接口 virtual_router_id 200 #主、备必须一样 priority 150 #(主、备机取不同的优先级,主机值较大,备份机值较小,值越大优先级越高) advert_int 1 #VRRP Multicast广播周期秒数 authentication { auth_type PASS #VRRP认证方式,主备必须一致 auth_pass 1111 #(密码) } virtual_ipaddress { 192.168.168.200/24 #VRRP HA虚拟地址 } track_script { check_haproxy } }- master02
global_defs { router_id master02 } vrrp_script check_haproxy { script "killall -0 haproxy" interval 2 timeout 1 fall 2 } vrrp_instance VI_1 { state BACKUP #设置为备用服务器 interface eth0 #监测网络接口 virtual_router_id 200 #主、备必须一样 priority 100 #(主、备机取不同的优先级,主机值较大,备份机值较小,值越大优先级越高) advert_int 1 #VRRP Multicast广播周期秒数 authentication { auth_type PASS #VRRP认证方式,主备必须一致 auth_pass 1111 #(密码) } virtual_ipaddress { 192.168.168.200/24 #VRRP HA虚拟地址 } track_script { check_haproxy } }- master03
global_defs { router_id master03 } vrrp_script check_haproxy { script "killall -0 haproxy" interval 2 timeout 1 fall 2 } vrrp_instance VI_1 { state BACKUP #设置为备用服务器 interface eth0 #监测网络接口 virtual_router_id 200 #主、备必须一样 priority 50 #(主、备机取不同的优先级,主机值较大,备份机值较小,值越大优先级越高) advert_int 1 #VRRP Multicast广播周期秒数 authentication { auth_type PASS #VRRP认证方式,主备必须一致 auth_pass 1111 #(密码) } virtual_ipaddress { 192.168.168.200/24 #VRRP HA虚拟地址 } track_script { check_haproxy } }配置完成后分别重启keepalived
systemctl restart keepalived -
安装kubeadm,kubelet,kubectl
- Kubernetes 文档 | Kubernetes
- 允许 iptables 检查桥接流量
确保
br_netfilter模块被加载。这一操作可以通过运行lsmod | grep br_netfilter来完成。若要显式加载该模块,可执行sudo modprobe br_netfilter。为了让你的 Linux 节点上的 iptables 能够正确地查看桥接流量,你需要确保在你的
sysctl配置中将net.bridge.bridge-nf-call-iptables设置为 1。例如:cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf br_netfilter EOF cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 EOF sudo sysctl --system- 安装 kubeadm、kubelet 和 kubectl
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo [kubernetes] name=Kubernetes #baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/ enabled=1 gpgcheck=1 repo_gpgcheck=1 #gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg exclude=kubelet kubeadm kubectl EOF sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes sudo systemctl enable --now kubelet- 在master01上 kubeadm拉取镜像
#查看镜像列表 kubeadm config images list #从阿里云拉取镜像,拉取后需将镜像修改为 kubeadm config images list 中的名称 kubeadm config images pull --image-repository=registry.aliyuncs.com/google_containers初始化集群
#--service-cidr 默认为 10.96.0.0/12 #--pod-network-cidr 10.112.0.0/12 网段不能与service网段重叠 kubeadm init --control-plane-endpoint "192.168.168.200:16443" --upload-certs --pod-network-cidr 10.112.0.0/12- 在另外两台master上
kubeadm join 192.168.168.200:16443 --token h7yyrh.6g037ffqd1ba2wp9 \ --discovery-token-ca-cert-hash sha256:3e6ec71b1b418e970596feea2da9b9155eba0c9dadf83d0a42088ab6d51e1224 \ --control-plane --certificate-key 62e3231e821058de0086f5e256dad1b1bd171ed06ebeb47c2814ba82faad2a7e- 在node上
kubeadm join 192.168.168.200:16443 --token h7yyrh.6g037ffqd1ba2wp9 \ --discovery-token-ca-cert-hash sha256:3e6ec71b1b418e970596feea2da9b9155eba0c9dadf83d0a42088ab6d51e1224- 解决kubectl无法连接的问题
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /etc/profile source /etc/profile- 安装网络插件
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -
metallb搭建 LoadBalancer
- metallb部署
如果环境是 Kubernetes v1.14.2+ 使用 IPVS模式,必须启用ARP模式。
kubectl edit configmap -n kube-system kube-proxyapiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration mode: "ipvs" ipvs: strictARP: true- 部署
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml #首次安装需要设置 memberlist secret kubectl create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)" kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/metallb-frr.yaml- 配置
vim config.yamlapiVersion: v1 kind: ConfigMap metadata: namespace: metallb-system name: config data: config: | address-pools: - name: default protocol: layer2 addresses: - 192.168.168.230-192.168.168.250 #设置ip地址段kubectl apply -f config.yaml -
部署Dashboard
下载yaml文件
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.5.0/aio/deploy/recommended.yaml vim recommended.yaml修改Service Type为LoadBalancer
kind: Service apiVersion: v1 metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kubernetes-dashboard spec: type: LoadBalancer ports: - port: 443 targetPort: 8443 selector: k8s-app: kubernetes-dashboardvim admin-user.yamlapiVersion: v1 kind: ServiceAccount metadata: name: admin-user namespace: kubernetes-dashboard- 绑定角色
vim cluster-role-binding.yamlapiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: admin-user roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cluster-admin subjects: - kind: ServiceAccount name: admin-user namespace: kubernetes-dashboardkubectl apply -f admin-user.yaml kubectl apply -f cluster-role-binding.yaml- 获取Token
kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/admin-user -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"