Installing a Kubernetes Cluster Offline
Installing a k8s cluster offline with kubeasz requires downloading four sets of files:
- the kubeasz project code
- binaries (k8s, etcd, containerd, and other components)
- container images (calico, coredns, metrics-server, and other images)
- system packages (ipset, libseccomp2, etc.; only needed when no local yum/apt repository is available)
Preparing the Offline Files
On a server that can reach the internet, run the following:
- Download the ezdown tool script; this example uses kubeasz version 3.6.5.
export release=3.6.5
wget https://github.com/easzlab/kubeasz/releases/download/${release}/ezdown
chmod +x ./ezdown
- Use the tool script to download everything (run ./ezdown with no arguments to see the full list of options).
Download the kubeasz code, binaries, and the default container images:
# inside mainland China (uses domestic mirrors)
./ezdown -D
If the download fails at this step, fetch the file manually in a browser from mirrors.tuna.tsinghua.edu.cn/docker-ce/l… , upload it to the /etc/kubeasz/down directory, and then run ./ezdown -D again.
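To sanity-check what ./ezdown -D fetched, you can look at what landed on disk. This is only an illustrative check; the paths follow the directory layout described later in this section (images saved under /etc/kubeasz/down, binaries under /etc/kubeasz/bin):
# list the saved container images and the downloaded binaries
ls -lh /etc/kubeasz/down/
ls /etc/kubeasz/bin/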
[Optional] If you need additional components, download the extra container images (cilium, flannel, prometheus, etc.):
./ezdown -X flannel
./ezdown -X prometheus
...
Download the offline system packages (only needed when yum/apt repositories cannot be used):
# if the operating system is centos_7
./ezdown -P centos_7
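As with the images, a quick listing confirms that the system packages were fetched; the path assumes the down/packages layout described just below:
ls /etc/kubeasz/down/packages/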
After the script runs successfully, all files (kubeasz code, binaries, offline images) are laid out under /etc/kubeasz:
- /etc/kubeasz contains the kubeasz release code for version ${release}
- /etc/kubeasz/bin contains the binaries (k8s, etcd, docker, cni, etc.)
- /etc/kubeasz/down contains the offline container images needed for cluster installation
- /etc/kubeasz/down/packages contains the basic system packages needed for cluster installation
Offline Installation
Once the downloads above are complete, copy the entire /etc/kubeasz directory to the same path on the target offline server (one way of doing this is sketched below), then run the remaining steps from /etc/kubeasz on that server.
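A straightforward way to move the directory is to pack it on the internet-connected host and unpack it on the offline control node; the target IP below is the example control node and is only illustrative:
# on the internet-connected host
tar -czf kubeasz-offline.tar.gz -C /etc kubeasz
scp kubeasz-offline.tar.gz root@192.168.26.66:/root/
# on the offline control node
tar -xzf /root/kubeasz-offline.tar.gz -C /etc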
Install the required tools on the control node:
yum install ansible git -y
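The command above assumes a CentOS/RHEL control node with a usable (local or mounted) yum repository; on a Debian/Ubuntu control node the rough equivalent would be:
apt-get install -y ansible git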
Set up passwordless SSH login:
# generate a key pair on the control node
ssh-keygen
# $IP is the address of each node, including the control node itself; answer yes and enter the root password when prompted
ssh-copy-id $IP
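With the node addresses used in this example, the key distribution can be wrapped in a small loop; this is only a convenience sketch, so adjust the IP list to your environment:
# push the key to every node in the example cluster, including the control node itself
for IP in 192.168.26.66 192.168.26.67 192.168.26.68 192.168.26.69; do
  ssh-copy-id root@$IP
done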
Create the cluster configuration:
[root@vms66 ~]# cd /etc/kubeasz/
[root@vms66 kubeasz]# ./ezctl new test
2025-02-07 20:39:30 [ezctl:145] DEBUG generate custom cluster files in /etc/kubeasz/clusters/test
2025-02-07 20:39:30 [ezctl:151] DEBUG set versions
2025-02-07 20:39:30 [ezctl:193] DEBUG cluster test: files successfully created.
2025-02-07 20:39:30 [ezctl:194] INFO next steps 1: to config '/etc/kubeasz/clusters/test/hosts'
2025-02-07 20:39:30 [ezctl:195] INFO next steps 2: to config '/etc/kubeasz/clusters/test/config.yml'
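As the log above indicates, ezctl new generates the per-cluster files under /etc/kubeasz/clusters/test; a quick listing should show at least the two files edited in the next steps:
ls /etc/kubeasz/clusters/test/
# expect to see at least: config.yml  hosts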
Edit the /etc/kubeasz/clusters/test/hosts file: fill in the etcd, kube_master, and kube_node host groups and change ansible_python_interpreter to /usr/bin/python2.7. The resulting inventory is shown below.
[root@vms66 ~]# vim /etc/kubeasz/clusters/test/hosts
[root@vms66 ~]# cat /etc/kubeasz/clusters/test/hosts
# 'etcd' cluster should have odd member(s) (1,3,5,...)
[etcd]
192.168.26.66
192.168.26.67
192.168.26.68
# master node(s), set unique 'k8s_nodename' for each node
# CAUTION: 'k8s_nodename' must consist of lower case alphanumeric characters, '-' or '.',
# and must start and end with an alphanumeric character
[kube_master]
192.168.26.66 k8s_nodename='master-01'
192.168.26.67 k8s_nodename='master-02'
192.168.26.68 k8s_nodename='master-03'
# work node(s), set unique 'k8s_nodename' for each node
# CAUTION: 'k8s_nodename' must consist of lower case alphanumeric characters, '-' or '.',
# and must start and end with an alphanumeric character
[kube_node]
192.168.26.69 k8s_nodename='worker-01'
# [optional] harbor server, a private docker registry
# 'NEW_INSTALL': 'true' to install a harbor server; 'false' to integrate with existed one
[harbor]
#192.168.1.8 NEW_INSTALL=false
# [optional] loadbalance for accessing k8s from outside
[ex_lb]
#192.168.1.6 LB_ROLE=backup EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
#192.168.1.7 LB_ROLE=master EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
# [optional] ntp server for the cluster
[chrony]
#192.168.1.1
[all:vars]
# --------- Main Variables ---------------
# Secure port for apiservers
SECURE_PORT="6443"
# Cluster container-runtime supported: docker, containerd
# if k8s version >= 1.24, docker is not supported
CONTAINER_RUNTIME="containerd"
# Network plugins supported: calico, flannel, kube-router, cilium, kube-ovn
CLUSTER_NETWORK="calico"
# Service proxy mode of kube-proxy: 'iptables' or 'ipvs'
PROXY_MODE="ipvs"
# K8S Service CIDR, not overlap with node(host) networking
SERVICE_CIDR="10.68.0.0/16"
# Cluster CIDR (Pod CIDR), not overlap with node(host) networking
CLUSTER_CIDR="172.20.0.0/16"
# NodePort Range
NODE_PORT_RANGE="30000-32767"
# Cluster DNS Domain
CLUSTER_DNS_DOMAIN="cluster.local"
# -------- Additional Variables (don't change the default value right now) ---
# Binaries Directory
bin_dir="/opt/kube/bin"
# Deploy Directory (kubeasz workspace)
base_dir="/etc/kubeasz"
# Directory for a specific cluster
cluster_dir="{{ base_dir }}/clusters/test"
# CA and other components cert/key Directory
ca_dir="/etc/kubernetes/ssl"
# Default 'k8s_nodename' is empty
k8s_nodename=''
# Default python interpreter
ansible_python_interpreter=/usr/bin/python2.7
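Before running the playbooks, it can help to confirm that Ansible can reach every node with the interpreter configured above; a minimal check using the standard ansible ping module against this inventory:
cd /etc/kubeasz
ansible -i clusters/test/hosts all -m ping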
Edit /etc/kubeasz/clusters/test/config.yml and switch the base system packages to offline installation (only the relevant part of the file is shown here):
[root@vms66 kubeasz]# cat /etc/kubeasz/clusters/test/config.yml
############################
# prepare
############################
# optionally install system packages offline or online (offline|online)
INSTALL_SOURCE: "offline"
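If you prefer not to edit the file by hand, the same change can be made with a one-line sed; verify the result afterwards:
sed -i 's/^INSTALL_SOURCE:.*/INSTALL_SOURCE: "offline"/' /etc/kubeasz/clusters/test/config.yml
grep INSTALL_SOURCE /etc/kubeasz/clusters/test/config.yml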
Install the cluster:
[root@vms66 kubeasz]# ./ezctl setup test all
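Once the playbooks complete, a basic check from the control node should show all nodes Ready and the core add-on pods Running; kubectl is installed under /opt/kube/bin (the bin_dir set above), so if the command is not found, add that directory to your PATH or open a new login shell first:
kubectl get node -o wide
kubectl get pod -A
kubeasz also allows the installation to be run step by step instead of all at once; see the ezctl usage output for the individual stage names.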