kubespray部署k8s1.26集群
Kubespray 是一个自由开源的工具,它提供了 Ansible 剧本(playbook) 来部署和管理 Kubernetes 集群。它旨在简化跨多个节点的 Kubernetes 集群的安装过程,允许用户快速轻松地部署和管理生产就绪的 Kubernetes 集群。
它支持一系列操作系统,包括 Ubuntu、CentOS、Rocky Linux 和 Red Hat Enterprise Linux(RHEL),它可以在各种平台上部署 Kubernetes,包括裸机、公共云和私有云。
部署环境准备
使用CentOS7u9操作系统,然后准备如下配置的六个节点
ip | CPU | 内存 | 硬盘 | 角色 | 主机名 |
---|---|---|---|---|---|
192.168.91.230 | 2C | 2G | 40GB | master | master01 |
192.168.91.231 | 2C | 2G | 40GB | master | master02 |
192.168.91.232 | 2C | 2G | 40GB | master | master03 |
192.168.91.233 | 2C | 2G | 40GB | worker(node) | worker01 |
192.168.91.234 | 2C | 2G | 40GB | worker(node) | worker02 |
192.168.91.235 | 1C | 1G | 40GB | ansible | kubespray |
# Configure /etc/hosts name resolution on ALL nodes (append cluster hostnames)
cat >> /etc/hosts << EOF
192.168.91.230 master01
192.168.91.231 master02
192.168.91.232 master03
192.168.91.233 worker01
192.168.91.234 worker02
192.168.91.235 kubespray
EOF
# On the kubespray (ansible) node
# Install OpenSSL 1.1 — CentOS 7's stock openssl is too old for the Python 3.10
# build below, so the parallel-installable openssl11 packages from EPEL are used
curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum install -y ncurses-devel gdbm-devel xz-devel sqlite-devel tk-devel uuid-devel readline-devel bzip2-devel libffi-devel
yum install -y openssl-devel openssl11 openssl11-devel
openssl11 version
# expected output:
OpenSSL 1.1.1k FIPS 25 Mar 2021
# Build and install Python 3.10.4 (used to run kubespray's ansible tooling)
yum -y install gperf make gcc-c++
mkdir -p /doc/temp && cd /doc/temp
wget https://www.python.org/ftp/python/3.10.4/Python-3.10.4.tgz
# The key point of the build: set the compile flags so Python links against the
# newer openssl11 library installed above instead of the system OpenSSL
export CFLAGS=$(pkg-config --cflags openssl11)
export LDFLAGS=$(pkg-config --libs openssl11)
echo $CFLAGS
echo $LDFLAGS
tar xf Python-3.10.4.tgz
cd Python-3.10.4/
# altinstall installs as python3.10/pip3.10 without clobbering the system python
./configure --enable-optimizations && make altinstall
python3.10 --version
# expected output:
Python 3.10.4
pip3.10 --version
# expected output:
pip 22.0.4 from /usr/local/lib/python3.10/site-packages/pip (python 3.10)
# Point python3/pip3 at the freshly built 3.10
ln -sf /usr/local/bin/python3.10 /usr/bin/python3
ln -sf /usr/local/bin/pip3.10 /usr/bin/pip3
pip3 install --upgrade pip -i https://pypi.tuna.tsinghua.edu.cn/simple
pip3 list
# expected output:
Package Version
---------- -------
pip 23.3.1
setuptools 58.1.0
# Prepare kubespray
cd ..
# GitHub serves the tag tarball under the name v2.22.1.tar.gz; save it with -O
# so the filename matches what the tar step below expects (the original
# `wget <url>` left a file named v2.22.1.tar.gz and the tar command failed)
wget -O kubespray-2.22.1.tar.gz https://github.com/kubernetes-sigs/kubespray/archive/refs/tags/v2.22.1.tar.gz
tar xf kubespray-2.22.1.tar.gz
cd kubespray-2.22.1
# Install ansible and the other python dependencies pinned by kubespray
pip3 install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
ansible --version
# expected output:
ansible [core 2.12.5]
config file = /doc/temp/kubespray-2.22.1/ansible.cfg
configured module search path = ['/doc/temp/kubespray-2.22.1/library']
ansible python module location = /usr/local/lib/python3.10/site-packages/ansible
ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible
python version = 3.10.4 (main, Nov 2 2023, 08:48:50) [GCC 4.8.5 20150623 (Red Hat 4.8.5-44)]
jinja version = 3.1.2
libyaml = True
k8s1.26集群部署
# On the kubespray node
# Generate the host inventory from the sample
cd /doc/temp/kubespray-2.22.1
cp -rfp ./inventory/sample ./inventory/mycluster
declare -a IPS=(192.168.91.230 192.168.91.231 192.168.91.232 192.168.91.233 192.168.91.234)
# Quote the array expansion so each IP is passed to the builder as its own argument
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py "${IPS[@]}"
# expected output:
DEBUG: Adding group all
DEBUG: Adding group kube_control_plane
DEBUG: Adding group kube_node
DEBUG: Adding group etcd
DEBUG: Adding group k8s_cluster
DEBUG: Adding group calico_rr
DEBUG: adding host node1 to group all
DEBUG: adding host node2 to group all
DEBUG: adding host node3 to group all
DEBUG: adding host node4 to group all
DEBUG: adding host node5 to group all
DEBUG: adding host node1 to group etcd
DEBUG: adding host node2 to group etcd
DEBUG: adding host node3 to group etcd
DEBUG: adding host node1 to group kube_control_plane
DEBUG: adding host node2 to group kube_control_plane
DEBUG: adding host node1 to group kube_node
DEBUG: adding host node2 to group kube_node
DEBUG: adding host node3 to group kube_node
DEBUG: adding host node4 to group kube_node
DEBUG: adding host node5 to group kube_node
# Edit the generated node inventory: promote node3 to the control plane and keep
# only node4/node5 as workers. NOTE: the host groups shown below live in
# inventory/mycluster/hosts.yaml (the file inventory_builder wrote above), not in
# group_vars/k8s_cluster/k8s-cluster.yml
vim inventory/mycluster/hosts.yaml
children:
  kube_control_plane:
    hosts:
      node1:
      node2:
      node3:
  kube_node:
    hosts:
      node4:
      node5:
# inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml is the cluster-wide
# configuration file; all defaults are kept here
# To enable add-ons such as the Kubernetes dashboard and ingress controller, turn
# on the corresponding flags in inventory/mycluster/group_vars/k8s_cluster/addons.yml;
# here only the dashboard is enabled
sed -i 's/# dashboard_enabled: false/dashboard_enabled: true/' inventory/mycluster/group_vars/k8s_cluster/addons.yml
# Show full task logs on failure; otherwise errors only print
# "the output has been hidden due to the fact that 'no_log: true' was specified for this result"
sed -i 's/unsafe_show_logs: false/unsafe_show_logs: true/' inventory/mycluster/group_vars/all/all.yml
# Point image/file downloads at mainland-China mirrors — the key to a successful
# install without direct access; mirror endpoints change over time, and this can
# be skipped entirely if you have unrestricted network access
cp inventory/mycluster/group_vars/all/offline.yml inventory/mycluster/group_vars/all/mirror.yml
# Uncomment every line in mirror.yml that references {{ files_repo }}
sed -i -E '/# .*\{\{ files_repo/s/^# //g' inventory/mycluster/group_vars/all/mirror.yml
tee -a inventory/mycluster/group_vars/all/mirror.yml <<EOF
gcr_image_repo: "gcr.m.daocloud.io"
kube_image_repo: "k8s.m.daocloud.io"
docker_image_repo: "docker.m.daocloud.io"
quay_image_repo: "quay.m.daocloud.io"
github_image_repo: "ghcr.m.daocloud.io"
files_repo: "https://files.m.daocloud.io"
EOF
# On ALL nodes: pin the mirror hostnames in /etc/hosts — the IPs change over
# time; skip this if you have unrestricted network access
cat >> /etc/hosts << EOF
47.103.132.72 gcr.m.daocloud.io
47.103.132.72 k8s.m.daocloud.io
47.103.132.72 docker.m.daocloud.io
47.103.132.72 quay.m.daocloud.io
47.103.132.72 ghcr.m.daocloud.io
47.103.132.72 m.daocloud.io
113.31.105.121 files.m.daocloud.io
122.225.83.97 dn-dao-github-mirror.daocloud.io
47.101.83.165 image-mirror.oss-cn-shanghai.aliyuncs.com
EOF
# Set up passwordless SSH from the kubespray node to all cluster nodes
ssh-keygen
for i in 0 1 2 3 4; do ssh-copy-id root@192.168.91.23$i; done
# Grant the sysops user passwordless sudo on every cluster node
ansible all -i inventory/mycluster/hosts.yaml -m shell -a "echo 'sysops ALL=(ALL) NOPASSWD:ALL' | tee /etc/sudoers.d/sysops"
# Security setup: disable the firewall on all cluster nodes.
# 'disable --now' stops and disables in one step, and unlike
# 'stop && disable' it does not skip the disable when the unit is already stopped
ansible all -i inventory/mycluster/hosts.yaml -m shell -a "systemctl disable --now firewalld"
# Enable IPv4 forwarding on all cluster nodes
ansible all -i inventory/mycluster/hosts.yaml -m shell -a "echo 'net.ipv4.ip_forward=1' | tee -a /etc/sysctl.conf"
# Disable swap on all cluster nodes (comment out swap entries in fstab, then swapoff)
ansible all -i inventory/mycluster/hosts.yaml -m shell -a "sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab && swapoff -a"
# Deploy the cluster; the playbook is idempotent and can safely be re-run on failure.
# With unrestricted access this usually succeeds directly; inside mainland China the
# mirror settings (and possibly the hosts entries) above are required
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
# Availability verification
# Run on master01
kubectl get nodes
# expected output: all five nodes Ready
NAME STATUS ROLES AGE VERSION
node1 Ready control-plane 5m43s v1.26.5
node2 Ready control-plane 5m10s v1.26.5
node3 Ready control-plane 4m55s v1.26.5
node4 Ready <none> 3m59s v1.26.5
node5 Ready <none> 3m59s v1.26.5
# All pods should be in Running state
kubectl get pods -A -o wide
# Smoke test: create an nginx deployment and verify it serves traffic
kubectl create deployment demo-nginx-kubespray --image=nginx --replicas=2
kubectl get pods
NAME READY STATUS RESTARTS AGE
demo-nginx-kubespray-b65cf84cd-4b6r8 1/1 Running 0 96s
demo-nginx-kubespray-b65cf84cd-lg28f 1/1 Running 0 96s
kubectl expose deployment demo-nginx-kubespray --type NodePort --port=80
kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
demo-nginx-kubespray NodePort 10.233.39.46 <none> 80:31784/TCP 19s
kubernetes ClusterIP 10.233.0.1 <none> 443/TCP 12m
kubectl get deployments.apps
NAME READY UP-TO-DATE AVAILABLE AGE
demo-nginx-kubespray 2/2 2 2 2m45s
kubectl get svc demo-nginx-kubespray
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
demo-nginx-kubespray NodePort 10.233.39.46 <none> 80:31784/TCP 86s
# The NodePort is reachable from every node
curl 192.168.91.230:31784
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
...
# Cluster node management
# Run on the kubespray node
cd /doc/temp/kubespray-2.22.1
# Remove a node — no need to edit hosts.yaml first; pass the node name via --extra-vars
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root remove-node.yml -v -b --extra-vars "node=node5"
# Add a node — describe the new node in hosts.yaml first, then scale
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root scale.yml -v -b
# Tear down the entire k8s cluster
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root reset.yml