1. 设置主机名以及host文件之间的相互解析
# Set the hostname — run on each node with that node's own name.
# FIX: the original set 'master1', but the /etc/hosts table and the
# ssh-copy-id / scp loops later in this guide all use the 'k8s-' prefixed
# names, so the hostname must be 'k8s-master1' for name resolution and
# certificates to line up.
hostnamectl set-hostname k8s-master1
# Append cluster name resolution to /etc/hosts (all nodes).
cat >> /etc/hosts <<'EOF'
192.168.56.101 k8s-master1
192.168.56.102 k8s-master2
192.168.56.103 k8s-master3
192.168.56.104 k8s-node1
192.168.56.105 k8s-node2
192.168.56.106 k8s-master-lb
EOF
2. 工具
# Install the base tooling used throughout this guide
# (wget/jq/git for downloads, yum-utils for repo management, lvm2 for storage).
yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git -y
3. 设置初始环境
# Disable the firewall (this guide manages traffic via kube-proxy/CNI rules).
systemctl stop firewalld
systemctl disable firewalld
systemctl status firewalld
# Disable SELinux: immediately (setenforce 0) and persistently.
# NOTE: /etc/sysconfig/selinux is a symlink to /etc/selinux/config on
# CentOS 7, so a single edit is enough; the original's three overlapping
# setenforce/sed lines were collapsed into this one, which also covers
# SELINUX=permissive (a plain enforcing->disabled substitution would not).
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
getenforce # verify it reports Permissive/Disabled
# Disable swap (required by kubelet): turn it off now and comment out the
# swap entry in /etc/fstab so it stays off after reboot.
swapoff -a && sysctl -w vm.swappiness=0
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
检查是否关闭成功
free -m
total used free shared buff/cache available
Mem: 985 78 809 6 96 785
Swap: 0 0 0
# Sync the system clock against Aliyun NTP and set the Shanghai timezone.
yum install ntpdate -y
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' > /etc/timezone
ntpdate time2.aliyun.com
# Re-sync every 5 minutes. FIX: install the cron entry non-interactively
# (the original instructed an interactive 'crontab -e', which cannot be
# scripted); existing crontab entries are preserved.
( crontab -l 2>/dev/null; echo '*/5 * * * * ntpdate time2.aliyun.com' ) | crontab -
4. 配置yum源
3.1、打开centos的yum文件夹
# Work inside the yum repo directory so the downloaded file lands here.
cd /etc/yum.repos.d/
3.2、用wget下载repo文件
# Fetch the Aliyun mirror repo definition for CentOS 7.
wget http://mirrors.aliyun.com/repo/Centos-7.repo
如果wget命令不生效,说明还没有安装wget工具
yum -y install wget
回车进行安装,当前目录是/etc/yum.repos.d/,刚刚下载的Centos-7.repo也在这个目录上。
3.3、备份系统原来的repo文件
# Keep the stock repo file as .bak so the change can be reverted.
mv CentOS-Base.repo CentOS-Base.repo.bak
3.4、替换系统原来的repo文件
# Put the Aliyun mirror file in place of the default base repo.
mv Centos-7.repo CentOS-Base.repo
3.5、执行yum源更新命令
# Rebuild the package metadata cache against the new mirror.
yum clean all
yum makecache
yum update
5. master1节点免密登其它节点
# Generate an RSA key pair on master1, then push the public key to every
# node so the later scp/ssh steps run without password prompts.
ssh-keygen -t rsa
for node in k8s-master1 k8s-master2 k8s-master3 k8s-node1 k8s-node2; do
  ssh-copy-id -i /root/.ssh/id_rsa.pub "$node"
done
6. master1节点下载安装文件
# Fetch the helper repo (CSR templates, configs) used by the cert steps below.
cd /root/;git clone https://github.com/dotbalo/k8s-ha-install.git
7. 升级系统不更新内核
# Update all packages EXCEPT the kernel (the kernel is upgraded separately
# in step 8). FIX: quote the exclude pattern — unquoted 'kernel*' could be
# glob-expanded by the shell against files in the current directory (kernel
# RPMs are downloaded into /root in the next step) instead of being passed
# to yum literally.
yum update -y --exclude='kernel*' && reboot
8. 内核升级
centos需要升级内核到4.18以上,本次升级的版本为4.19。在master1节点下载内核:
cd /root
# Download the kernel-ml 4.19.12 RPMs (main + devel).
# NOTE(review): 193.49.22.109 is a bare-IP third-party ELRepo mirror —
# verify it is still reachable/trusted, or use the official elrepo.org URL.
wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm
wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm
从master1传入其它节点
# Fan the RPMs out to every other node from master1.
for i in k8s-master2 k8s-master3 k8s-node1 k8s-node2; do scp kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm $i:/root/ ;done
所有节点安装内核
# Run on EVERY node.
cd /root && yum localinstall -y kernel-ml*
所有内核更改启动顺序
# Make the newly installed kernel (menu entry 0) the boot default on all
# nodes. BUG FIX: the original ran 'grub2-set-default o' with the letter
# 'o' instead of the digit '0' — grub2-set-default expects a numeric menu
# index (or an entry title), so 'o' would not select the new kernel.
grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
# Enable user namespaces on the default kernel.
grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"
# Verify the default kernel is 4.19 before rebooting:
grubby --default-kernel
# Reboot all nodes, then confirm the running kernel is 4.19:
uname -a
# All nodes: install IPVS userspace tooling for kube-proxy's ipvs mode.
yum install ipvsadm ipset sysstat conntrack libseccomp -y
所有节点配置ipvs模块,在内核4.19+版本nf_conntrack_ipv4已经改为nf_conntrack,4.18以下使用nf_conntrack_ipv4即可:
# Load the IPVS modules now (all nodes). On kernels 4.19+ the conntrack
# module is nf_conntrack; on 4.18 and older use nf_conntrack_ipv4 instead.
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
# Persist the module list so systemd-modules-load restores it on boot.
# FIX: replaced the interactive 'vim' edit with a heredoc; corrected
# 'ip_vs_ng' to 'ip_vs_nq' (the Never Queue scheduler — there is no
# ip_vs_ng module, so module loading would fail on that line); and removed
# the duplicated 'ip_vs_sh' entry.
cat > /etc/modules-load.d/ipvs.conf <<'EOF'
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
# Apply the list now and on every boot.
systemctl enable --now systemd-modules-load.service
# Verify the modules are loaded:
lsmod | grep -e ip_vs -e nf_conntrack
9. 配置 k8s 内核
# Kubernetes-required kernel parameters (apply on every node).
# FIX: removed the duplicated net.ipv4.tcp_max_syn_backlog line and the
# obsolete net.ipv4.ip_conntrack_max key (not a valid sysctl on 4.19
# kernels; the modern net.netfilter.nf_conntrack_max is already set below).
# NOTE(review): the bridge-nf-call-* keys require the br_netfilter module
# to be loaded — confirm it is loaded (Docker loads it) before applying.
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory = 1
vm.panic_on_oom = 0
fs.inotify.max_user_watches = 89100
fs.file-max = 52706963
fs.nr_open = 52706963
net.netfilter.nf_conntrack_max = 2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
sysctl --system
# Reboot every node, then verify the IPVS modules auto-load after boot:
reboot
lsmod | grep --color=auto -e ip_vs -e nf_conntrack
10. 基本组件安装
10.1 Docker安装
# Install Docker CE 19.03 from the Aliyun mirror (all nodes).
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install docker-ce-19.03.* -y
# Newer kubelet expects the systemd cgroup driver, so Docker must match.
# FIX: use 'mkdir -p' so this step is idempotent when /etc/docker already
# exists (plain mkdir would fail and abort a scripted run).
mkdir -p /etc/docker
cat > /etc/docker/daemon.json << EOF
{
"exec-opts":["native.cgroupdriver=systemd"]
}
EOF
# Enable and start Docker on boot (all nodes).
systemctl daemon-reload && systemctl enable --now docker
10.2 kubernetes-server 安装
https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
# Download the Kubernetes v1.24.0-alpha.1 server tarball.
wget https://dl.k8s.io/v1.24.0-alpha.1/kubernetes-server-linux-amd64.tar.gz
# Extract only the control-plane/node binaries straight into /usr/local/bin.
tar -xf kubernetes-server-linux-amd64.tar.gz --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
10.3 etcd安装
# Download etcd v3.4.13 and extract etcd + etcdctl into /usr/local/bin.
wget https://github.com/etcd-io/etcd/releases/download/v3.4.13/etcd-v3.4.13-linux-amd64.tar.gz
tar -xf etcd-v3.4.13-linux-amd64.tar.gz --strip-components=1 -C /usr/local/bin etcd-v3.4.13-linux-amd64/etcd{,ctl}
10.4 将组件发送到其他节点
# Push the freshly extracted binaries to the rest of the cluster:
# masters receive the full control-plane set plus etcd; workers only
# need kubelet and kube-proxy.
MasterNodes='k8s-master2 k8s-master3'
WorkNodes='k8s-node1 k8s-node2'
for NODE in $MasterNodes; do
  echo "$NODE"
  scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} "$NODE":/usr/local/bin/
  scp /usr/local/bin/etcd* "$NODE":/usr/local/bin/
done
for NODE in $WorkNodes; do
  scp /usr/local/bin/kube{let,-proxy} "$NODE":/usr/local/bin/
done
# Directory for CNI plugin binaries.
mkdir -p /opt/cni/bin
# Switch the helper repo to the branch matching the installed k8s version.
cd k8s-ha-install && git checkout manual-installation-v1.24.x
11. 证书
11.1 master1下载生成证书工具
# Download the cfssl certificate tooling on master1.
# NOTE(review): pkg.cfssl.org may no longer serve these binaries — if the
# download fails, fetch cfssl/cfssljson from the cloudflare/cfssl GitHub
# releases page instead.
wget "https://pkg.cfssl.org/R1.2/cfssl_linux-amd64" -O /usr/local/bin/cfssl
wget "https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64" -O /usr/local/bin/cfssljson
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson
给所有master节点创建etcd证书目录
# Run on all MASTER nodes.
mkdir /etc/etcd/ssl -p
所有节点创建kubernetes相关目录
# Run on ALL nodes.
mkdir -p /etc/kubernetes/pki
11.2 master1节点生成etcd证书
生成证书的CSR文件:证书签名请求文件,配置了一些域名、公司、单位
cd /root/k8s-ha-install/pki
# Generate the etcd CA certificate and its private key
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca
# Issue the etcd peer/client certificate, valid for every master's
# hostname and IP (plus 127.0.0.1 for local etcdctl access)
cfssl gencert \
-ca=/etc/etcd/ssl/etcd-ca.pem \
-ca-key=/etc/etcd/ssl/etcd-ca-key.pem \
-config=ca-config.json \
-hostname=127.0.0.1,k8s-master1,k8s-master2,k8s-master3,192.168.56.101,192.168.56.102,192.168.56.103 \
-profile=kubernetes \
etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd
# Inspect the generated etcd certificates
ls /etc/etcd/ssl
# Copy the CA and etcd certs/keys to the other master nodes
MasterNodes='k8s-master2 k8s-master3'
WorkNodes='k8s-node1 k8s-node2'
for NODE in $MasterNodes; do
ssh $NODE "mkdir -p /etc/etcd/ssl"
for FILE in etcd-ca-key.pem etcd-ca.pem etcd-key.pem etcd.pem; do
scp /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}
done
done
11.3 master1节点生成kubernetes证书
# Generate the Kubernetes cluster CA.
# FIX: the -bare prefix must be /etc/kubernetes/pki/ca so the outputs are
# ca.pem / ca-key.pem — the exact paths the apiserver cert step below
# reads. The original '-bare /etc/kubernetes/pki' would have written
# /etc/kubernetes/pki.pem and /etc/kubernetes/pki-key.pem instead.
cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca
# Issue the kube-apiserver serving certificate. 10.96.0.1 is the in-cluster
# service IP of the API server.
# FIX: corrected the SAN typo 'kubernetes.default.svc.cluster.loacl' ->
# 'kubernetes.default.svc.cluster.local'.
# NOTE(review): the 192.168.56.211/201/202/203 SANs do not match the
# master IPs (.101-.103) and LB (.106) declared in the hosts table at the
# top of this guide — confirm which addresses the apiserver is actually
# reached on before issuing.
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-hostname=10.96.0.1,192.168.56.211,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,192.168.56.201,192.168.56.202,192.168.56.203 \
-profile=kubernetes \
apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver