1.主机分配及硬件配置
| 主机ip | 主机名称 | 主机配置 | 安装软件 |
|---|---|---|---|
| 192.168.10.11 | master01 | 4C8G/100GB | kubeadm,kube-api-server,kubelet,kube-proxy,kube-scheduler,haproxy,keepalived |
| 192.168.10.12 | master02 | 4C8G/100GB | kubeadm,kube-api-server,kubelet,kube-proxy,kube-scheduler,haproxy,keepalived |
| 192.168.10.13 | master03 | 4C8G/100GB | kubeadm,kube-api-server,kubelet,kube-proxy,kube-scheduler,haproxy,keepalived |
| 192.168.10.14 | worker01 | 4C8G/100GB | kubelet,kube-proxy |
| 192.168.10.15 | worker02 | 4C8G/100GB | kubelet,kube-proxy |
2.主机配置
这些操作每台主机都需要
2.1 主机名设置
[root@master01 ~] # hostnamectl set-hostname master01
[root@master02 ~] # hostnamectl set-hostname master02
[root@master03 ~] # hostnamectl set-hostname master03
[root@worker01 ~] # hostnamectl set-hostname worker01
[root@worker02 ~] # hostnamectl set-hostname worker02
2.2 配置域名解析
vim /etc/hosts
192.168.10.11 master01
192.168.10.12 master02
192.168.10.13 master03
192.168.10.14 worker01
192.168.10.15 worker02
2.3 配置主机静态ip
# vim /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="none"  # 将这里的dhcp修改为none
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens33"
DEVICE="ens33"
ONBOOT="yes"
IPADDR="192.168.10.11"  # 从这行往下开始添加主机ip、网段前缀、网关、dns地址
PREFIX="24"
GATEWAY="192.168.10.2"
DNS1="119.29.29.29"
2.4 配置时间同步
查看是否有时间同步配置
# crontab -l
编辑时间同步
# crontab -e
0 */1 * * * ntpdate time1.aliyun.com
这里使用的是阿里云时间服务器,如果没有ntpdate需要安装
# yum -y install ntpdate
2.5 关闭selinux
两种方式:一种直接找到selinux配置文件进行编辑,第二种交互式命令都可以
# vim /etc/selinux/config
将SELINUX值设置为disabled即可
# sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
2.6 关闭防火墙
# systemctl stop firewalld;systemctl disable firewalld
查看防火墙状态
# firewall-cmd --state
not running
2.7 升级内核
2.7.1 导入elrepo gpg key
# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
2.7.2 安装elrepo YUM源仓库
# yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
2.7.3 安装kernel-ml版本
# yum --enablerepo="elrepo-kernel" -y install kernel-ml.x86_64
2.7.4 设置grub2默认引导
# grub2-set-default 0
2.7.5 重新生成grub2引导文件
# grub2-mkconfig -o /boot/grub2/grub.cfg
2.7.6 重启主机,并查看内核是否更新
# reboot
# uname -r
2.8 配置内核转发及网桥过滤
2.8.1 添加网桥过滤及内核转发
# vim /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
net.ipv4.ip_forward=1
vm.swappiness=0
2.8.2 加载br_netfilter模块
加载br_netfilter模块
# modprobe br_netfilter
查看是否加载
# lsmod | grep br_netfilter
br_netfilter 22256 0
bridge 151336 1 br_netfilter
2.8.3 加载网桥过滤和内核转发的配置文件
# sysctl -p /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
2.9 安装ipset及ipvsadm
2.9.1 安装
# yum -y install ipset ipvsadm
2.9.2 配置ipvsadm加载模块
# vim /etc/sysconfig/modules/ipvs.modules
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
2.9.3 授权,运行,检查是否加载
# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
2.10 关闭swap分区
临时关闭
swapoff -a
永远关闭swap分区,需要重启操作系统
# cat /etc/fstab
......
# /dev/mapper/centos-swap swap swap defaults 0 0
在上一行中行首添加#
2.11 多机互信
在master1节点生成即可,复制到其他节点,复制完成后,可以相互测试登录
2.11.1 生成密钥对
# ssh-keygen
2.11.2 复制
# cd /root/.ssh
[root@master1 .ssh] # ls
id_rsa id_rsa.pub known_hosts
[root@master1 .ssh] # cp id_rsa.pub authorized_keys
2.11.3 分发到所有主机
# for i in 12 13 14 15;do scp -r /root/.ssh 192.168.10.$i:/root/ ;done
至此,所有的主机准备到此结束。
3.Docker准备
3.1 获取YUM源
wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
3.2 查看可安装版本
# yum list docker-ce.x86_64 --showduplicates | sort -r
3.3 安装指定版本
# yum -y install --setopt=obsoletes=0 docker-ce-20.10.17-3.el7
3.4 启动docker
systemctl enable docker; systemctl start docker
3.5 修改cgroup方式及镜像加速
# vim /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": ["https://09c57bec6f00f2130f22c00741378e40.mirror.swr.myhuaweicloud.com"]
}
这里也配置一下docker的镜像加速,配置的是华为云的,阿里云的好像不能用了
3.6 重启docker
重新加载文件
# systemctl daemon-reload
重启docker
# systemctl restart docker
4.HAProxy及Keepalived部署
为了部署高可用集群才需要这一步,如果是单master节点不需要第4节
现在我们是同时安装在master1和master2上,其实也可以增加在master3上安装,因为这里我只准备把master2添加为BACKUP,所以暂时不在master3上安装,也可以将HaProxy和keepalived单独安装到一台主机上来进行配置
4.1 安装HAProxy及keepalived
[root@master01 ~] # yum -y install haproxy keepalived
[root@master02 ~]# yum -y install haproxy keepalived
4.2 编辑HAProxy配置文件及启动
[root@master01 ~]# vim /etc/haproxy/haproxy.cfg
[root@master01 ~]# cat /etc/haproxy/haproxy.cfg
#---------------------------------------------------------------------
# Example configuration for a possible web application. See the
# full configuration options online.
#
#
#---------------------------------------------------------------------
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
maxconn 2000
ulimit-n 16384
log 127.0.0.1 local0 err
stats timeout 30s
defaults
log global
mode http
option httplog
timeout connect 5000
timeout client 50000
timeout server 50000
timeout http-request 15s
timeout http-keep-alive 15s
frontend monitor-in
bind *:33305  # 监听所有网卡的33305端口,用于对外暴露haproxy的监控页面
mode http
option httplog
monitor-uri /monitor
frontend k8s-master
bind 0.0.0.0:16443
bind 127.0.0.1:16443
mode tcp
option tcplog
tcp-request inspect-delay 5s
default_backend k8s-master
backend k8s-master
mode tcp
option tcplog
option tcp-check
balance roundrobin
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
server master01 192.168.10.11:6443 check  # 主要需要配置这里,写你自己的master主机ip
server master02 192.168.10.12:6443 check
server master03 192.168.10.13:6443 check
4.3 启动HAProxy并查看状态
4.3.1 启动
# systemctl enable haproxy; systemctl start haproxy
# systemctl status haproxy
4.3.2 访问监听端口
4.3.3 分发到master2上
[root@master01 ~]# scp /etc/haproxy/haproxy.cfg master02:/etc/haproxy/haproxy.cfg
然后在master2上启动,同时也可以像4.4那样去验证
4.4 Keepalived配置及启动
4.4.1 编辑keepalived配置文件
[root@master01 ~]# vim /etc/keepalived/keepalived.conf
[root@master01 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
script_user root
enable_script_security
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh" #此脚本需要单独定义,并在下方track_script中调用。
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state MASTER
interface ens33 # 修改为正在使用的网卡
mcast_src_ip 192.168.10.11 #为本master主机对应的IP地址
virtual_router_id 51
priority 101
advert_int 2
authentication {
auth_type PASS
auth_pass abc123
}
virtual_ipaddress {
192.168.10.100 #为VIP地址
}
track_script {
chk_apiserver # 执行上面检查apiserver脚本
}
}
4.4.2 编辑脚本文件
[root@master01 ~]# vim /etc/keepalived/check_apiserver.sh
[root@master01 ~]# cat /etc/keepalived/check_apiserver.sh
#!/bin/bash
# keepalived health-check script for the local haproxy instance.
#
# Probes for a running haproxy process up to 3 times, one second apart.
# If haproxy is never found, keepalived on this node is stopped so the
# VRRP VIP fails over to the BACKUP node; otherwise exit 0 so keepalived
# keeps the VIP here.
err=0
for _ in {1..3}; do
  # pgrep prints matching PIDs; empty output means haproxy is not running.
  if [[ -z "$(pgrep haproxy)" ]]; then
    err=$((err + 1))
    sleep 1
    continue
  else
    err=0
    break
  fi
done

if [[ "$err" != "0" ]]; then
  # haproxy is down: log the action, then stop keepalived to release the VIP.
  echo "systemctl stop keepalived"
  /usr/bin/systemctl stop keepalived
  exit 1
else
  exit 0
fi
4.4.3 添加执行权限
[root@master01 ~] # chmod +x /etc/keepalived/check_apiserver.sh
4.4.4 分发到master2
[root@master01 ~]# scp /etc/keepalived/keepalived.conf master02:/etc/keepalived/
[root@master01 ~]# scp /etc/keepalived/check_apiserver.sh master02:/etc/keepalived/
4.4.5 修改master2上的配置文件
[root@master02 ~]# vim /etc/keepalived/keepalived.conf
[root@master02 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
script_user root
enable_script_security
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh" #此脚本需要单独定义,并在下方track_script中调用。
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state BACKUP # 这里不是设置为master,而是backup
interface ens33 # 修改为正在使用的网卡
mcast_src_ip 192.168.10.12 #为本master主机对应的IP地址
virtual_router_id 51
priority 99 # 修改为99,即降低优先级,使本节点成为备用节点
advert_int 2
authentication {
auth_type PASS
auth_pass abc123
}
virtual_ipaddress {
192.168.10.100 #为VIP地址
}
track_script {
chk_apiserver # 执行上面检查apiserver脚本
}
}
4.4.6 启动keepalived
master1和master2都要启动
# systemctl enable keepalived;systemctl start keepalived
4.4.7 验证集群可用性
[root@master01 ~]# ip a s ens33
2: ens33: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:50:f9:5f brd ff:ff:ff:ff:ff:ff
inet 192.168.10.11/24 brd 192.168.10.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet 192.168.10.100/32 scope global ens33 这个虚拟IP只有在haproxy和keepalived的master上获取到,其他获取不到
valid_lft forever preferred_lft forever
inet6 fe80::adf4:a8bc:a1c:a9f7/64 scope link tentative noprefixroute dadfailed
valid_lft forever preferred_lft forever
inet6 fe80::2b33:40ed:9311:8812/64 scope link tentative noprefixroute dadfailed
valid_lft forever preferred_lft forever
inet6 fe80::8508:20d8:7240:32b2/64 scope link tentative noprefixroute dadfailed
valid_lft forever preferred_lft forever