I. Pre-installation Preparation
1. Initialize the system environment
Initialize the Ubuntu system environment: network, hostname, SSH login, kernel tuning, and so on.
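The individual commands are not listed here. A minimal sketch for the hostname and /etc/hosts part, assuming the host/IP plan used later in this guide (4m.lhey.com, 5n.lhey.com, 6n.lhey.com), could look like:
hostnamectl set-hostname 4m.lhey.com //on the master; use 5n.lhey.com / 6n.lhey.com on the workers
cat >> /etc/hosts <<EOF
192.168.100.4 4m.lhey.com
192.168.100.5 5n.lhey.com
192.168.100.6 6n.lhey.com
EOF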
2. Install the container runtime containerd
root@lhey:~# apt update //refresh the package index
root@lhey:~# apt install containerd -y //install the containerd package
root@lhey:~# mkdir /etc/containerd/ //directory for containerd's configuration file
root@lhey:~# containerd config default > /etc/containerd/config.toml //dump the default configuration; the file name is fixed
root@lhey:~# vim /etc/containerd/config.toml
//change the pause image address; an Aliyun registry address is used here
65 sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"
//on Ubuntu 22.04 this must be changed to true
137 SystemdCgroup = true
//configure the registry mirror: add the following two lines below line 168
169 [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
170 endpoint = ["https://ryici85m.mirror.aliyuncs.com"]
root@lhey:~# systemctl restart containerd.service
root@lhey:~# systemctl status containerd.service
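Optionally, confirm that the edited settings were picked up after the restart (an extra check, not part of the original steps):
containerd config dump | grep -E 'SystemdCgroup|sandbox_image' //should show SystemdCgroup = true and the Aliyun pause image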
3. Install the crictl tool
root@lhey:~# mkdir /root/apps/
root@lhey:~# apt install lrzsz -y //install lrzsz for uploading files
root@lhey:~/apps# cd /root/apps/ //upload the package here
root@lhey:~/apps# ls
crictl-v1.29.0-linux-amd64.tar.gz
root@lhey:~/apps# mkdir /usr/local/bin/crictl
root@lhey:~/apps# tar xvf crictl-v1.29.0-linux-amd64.tar.gz -C /usr/local/bin/crictl
crictl
root@lhey:~/apps# vim /etc/profile
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/usr/local/bin/crictl
//append this as the last line of the file
root@lhey:~/apps# source /etc/profile //reload the profile
root@lhey:~/apps# crictl -v //check the version
crictl version v1.29.0
root@lhey:~/apps# cat > /etc/crictl.yaml <<EOF
runtime-endpoint: "unix:///run/containerd/containerd.sock"
image-endpoint: "unix:///run/containerd/containerd.sock"
timeout: 10
debug: false
EOF
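As an optional sanity check that crictl can reach containerd over the socket configured above (not part of the original steps):
crictl info //prints the runtime status and config reported by containerd
crictl ps -a //lists CRI containers; empty on a fresh host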
4. Install the nerdctl tool
root@lhey:~/apps# cd /root/apps/ //upload the nerdctl package here
root@lhey:~/apps# ls
crictl-v1.29.0-linux-amd64.tar.gz nerdctl-1.7.6-linux-amd64.tar.gz
root@lhey:~/apps# tar xvf nerdctl-1.7.6-linux-amd64.tar.gz -C /usr/local/bin/
root@lhey:~/apps# nerdctl version
root@lhey:~/apps# mkdir /etc/nerdctl
root@lhey:~/apps# cat > /etc/nerdctl/nerdctl.toml <<EOF //configuration (fixed settings as published upstream)
namespace = "k8s.io"
debug = false
debug_full = false
insecure_registry = true
EOF
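Optionally, confirm that nerdctl talks to containerd and uses the k8s.io namespace set above (an extra check):
nerdctl info //shows the containerd server information
nerdctl --namespace k8s.io images //lists images in the k8s.io namespace (empty for now)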
5. Install the CNI plugins
root@lhey:~/apps# ls
crictl-v1.29.0-linux-amd64.tar.gz cni-plugins-linux-amd64-v1.5.1.tgz nerdctl-1.7.6-linux-amd64.tar.gz
root@lhey:~/apps# mkdir /opt/cni/bin/ -p
root@lhey:~/apps# tar xvf cni-plugins-linux-amd64-v1.5.1.tgz -C /opt/cni/bin/
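A quick way to confirm the plugins landed where containerd and Calico expect them (my addition; the exact file list depends on the plugin release):
ls /opt/cni/bin/ //should contain binaries such as bridge, host-local, loopback, portmap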
6. Pull an image and test
//upload the nginx image archive
root@lhey:~/apps# ls
nerdctl-1.7.6-linux-amd64.tar.gz cni-plugins-linux-amd64-v1.5.1.tgz nginx.tar.gz crictl-v1.29.0-linux-amd64.tar.gz
//load the image
root@lhey:~/apps# nerdctl load -i /root/apps/nginx.tar.gz
//run a container
root@lhey:~/apps# nerdctl run -it -p 8000:80 --rm --name=nginx_test harbor.hiuiu.com/basic_image/centos7_filebeat_nginx:2408.u
//verify that the image was loaded
root@lhey:~/apps# nerdctl images
REPOSITORY TAG IMAGE ID CREATED PLATFORM SIZE BLOB SIZE
harbor.hiuiu.com/basic_image/centos7_filebeat_nginx 2408.u 66bf78b3037a About a minute ago linux/amd64 844.6 MiB 289.8 MiB
<none> <none> 66bf78b3037a About a minute ago linux/amd64 844.6 MiB 289.8 MiB
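Since nerdctl images only lists loaded images, the running test container itself can be checked from a second terminal while it is up (an extra step; it assumes nginx inside this image listens on port 80):
nerdctl ps //the nginx_test container should show as running
curl http://127.0.0.1:8000/ //the port published with -p 8000:80 should answer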
7. Initialize the K8S environment
- Install basic software
apt install chrony ipvsadm tree ipset -y
#install additional common tools
apt install iproute2 ntpdate tcpdump telnet traceroute nfs-kernel-server nfs-common lrzsz tree openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev gcc openssh-server iotop unzip zip -y
- Disable the firewall (Ubuntu uses ufw; it does not ship SELinux, so there is nothing to disable there)
ufw disable
ufw status
or
systemctl stop ufw
systemctl status ufw
- Disable the swap partition
//temporary
swapoff -a
//permanent: comment out the swap entry in /etc/fstab
sed -i '/swap.img/s/^/#/' /etc/fstab
- Configure the time server
sed -i 's/pool ntp.ubuntu.com/pool ntp.ntsc.ac.cn/' /etc/chrony/chrony.conf
systemctl restart chronyd
systemctl status chronyd
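To verify synchronization against the new pool (an optional check; chronyc ships with the chrony package):
chronyc sources -v //the ntp.ntsc.ac.cn servers should be listed, one marked ^* once synchronized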
- Load kernel modules
modprobe br_netfilter && lsmod | grep br_netfilter
modprobe ip_conntrack && lsmod | grep conntrack
cat >/etc/modules-load.d/modules.conf<<EOF
ip_vs
ip_vs_lc
ip_vs_lblc
ip_vs_lblcr
ip_vs_rr
ip_vs_wrr
ip_vs_sh
ip_vs_dh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
ip_tables
ip_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
xt_set
br_netfilter
nf_conntrack
overlay
EOF
systemctl restart systemd-modules-load.service
lsmod | grep -e ip_vs -e nf_conntrack
- Tune kernel parameters
vim /etc/sysctl.conf //append the following settings
net.ipv4.ip_forward=1
vm.max_map_count=262144
kernel.pid_max=4194303
fs.file-max=1000000
net.ipv4.tcp_max_tw_buckets=6000
net.netfilter.nf_conntrack_max=2097152
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
sysctl -p
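A quick check of the settings kubeadm's preflight cares about (my addition; br_netfilter must already be loaded, which was done above):
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables //both should report 1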
- Regenerate the machine-id (needed on cloned machines)
cat /etc/machine-id //view the current machine-id
rm -f /etc/machine-id
systemd-machine-id-setup
II. Install K8S with kubeadm
1. Configuration overview
//Hostname       IP address      Spec   Role
4m.lhey.com 192.168.100.4 4c4G master
5n.lhey.com 192.168.100.5 4c4G node1
6n.lhey.com 192.168.100.6 4c4G node2
2. Install kubeadm, kubelet, and kubectl
apt update
apt install apt-transport-https ca-certificates curl gpg -y
mkdir -p -m 755 /etc/apt/keyrings
//default official source
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
//Aliyun mirror source
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/deb/ /" | tee /etc/apt/sources.list.d/kubernetes.list
//installation commands
apt-get update && apt-cache madison kubeadm
apt-get install -y kubelet=1.30.3-1.1 kubeadm=1.30.3-1.1 kubectl=1.30.3-1.1
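Optionally, verify the installed versions and pin them so a routine apt upgrade cannot move the cluster components:
kubeadm version //should report v1.30.3
kubectl version --client
kubelet --version
apt-mark hold kubelet kubeadm kubectl //prevent accidental upgrades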
3. Pull the images
//upload the images_download.sh script
bash images_download.sh //run the script
nerdctl images //list the images
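The contents of images_download.sh are not shown here. A rough equivalent, assuming the script simply pre-pulls the images kubeadm needs from the Aliyun repository into the k8s.io namespace, would be:
kubeadm config images list --kubernetes-version v1.30.3 \
  --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \
  | xargs -I{} nerdctl --namespace k8s.io pull {}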
4. Create the master on the primary node
root@lhey:~# systemctl restart kubelet.service
root@lhey:~# ufw disable
root@lhey:~# swapoff -a
kubeadm init --apiserver-advertise-address=192.168.100.4 --apiserver-bind-port=6443 --kubernetes-version=v1.30.3 --pod-network-cidr=10.200.0.0/16 --service-cidr=10.96.0.0/16 --service-dns-domain=cluster.local --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers --ignore-preflight-errors=swap
Follow the on-screen instructions and run:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf
source /etc/profile
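The control-plane pods can be checked at this point; note that coredns stays Pending until the Calico network plugin is installed in section III (an extra check, not part of the original steps):
kubectl get pod -n kube-system //coredns remains Pending until the CNI is deployed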
5. Join the worker nodes to the cluster
kubeadm join 192.168.100.4:6443 --token aaywt3.j9skdue8uvyp9ezc \
--discovery-token-ca-cert-hash sha256:7193178a9d38762b77af4e9efa5c4fe2ef91201790cf48814da8caa21df5ccd9
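Run the join command on every worker node (after the same swapoff/ufw preparation shown above). If the token has expired, a fresh join command can be generated on the master:
kubeadm token create --print-join-command //prints a new kubeadm join command with a valid token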
6. View the cluster nodes on the master
kubectl get nodes
III. Install the network component Calico
Calico is a networking and network security plugin for Kubernetes clusters. It provides network policy, IP address management (IPAM), and network connectivity, supports multiple network models and cross-host communication, and lets you define network security policies to control traffic between Pods.
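As a small illustration of the network policy capability (a hypothetical example, not part of this installation; the labels app=web and app=frontend are made up), a policy that only allows Pods labeled app=frontend to reach Pods labeled app=web could be applied like this:
kubectl apply -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-frontend-to-web
spec:
  podSelector:
    matchLabels:
      app: web
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app: frontend
EOF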
1. Install Calico on both the master and the worker nodes
//upload the calico-release-v3.28.0 archive
root@master:~/apps# tar zxvf calico-release-v3.28.0.tgz //extract
root@master:~/apps# cd release-v3.28.0/
root@master:~/apps/release-v3.28.0# ls
bin images manifests
root@master:~/apps/release-v3.28.0# cd manifests/
root@master:~/apps/release-v3.28.0/manifests# ls
alp calico-policy-only.yaml crds.yaml ocp
apiserver.yaml calico-typha.yaml csi-driver.yaml ocp-tigera-operator-no-resource-loading.yaml
calico-bpf.yaml calico-vxlan.yaml custom-resources.yaml operator-crds.yaml
calicoctl-etcd.yaml calico.yaml flannel-migration README.md
calicoctl.yaml canal-etcd.yaml generate.sh tigera-operator.yaml
calico-etcd.yaml canal.yaml grafana-dashboards.yaml
root@master:~/apps/release-v3.28.0/manifests# vim calico.yaml
//edit the calico.yaml configuration file
(1) Change the Pod network address
Search in vim: /CALICO_IPV4POOL_CIDR
4957 - name: CALICO_IPV4POOL_CIDR
4958 value: "10.200.0.0/16"
Remove the # in front of name and value, and change the IP to the Pod network configured during kubeadm init (10.200.0.0/16 here).
2. Import the images
//change into the images directory
root@master:~/apps/release-v3.28.0/manifests# cd ..
root@master:~/apps/release-v3.28.0# ls
bin images manifests
root@master:~/apps/release-v3.28.0# cd images/
root@master:~/apps/release-v3.28.0/images# ll
total 876536
drwxrwxr-x 2 1001 1001 4096 May 11 08:20 ./
drwxrwxr-x 5 1001 1001 4096 May 11 08:20 ../
-rw------- 1 1001 1001 208990208 May 11 08:20 calico-cni.tar
-rw------- 1 1001 1001 41927168 May 11 08:19 calico-dikastes.tar
-rw------- 1 1001 1001 128270336 May 11 08:19 calico-flannel-migration-controller.tar
-rw------- 1 1001 1001 79177216 May 11 08:19 calico-kube-controllers.tar
-rw------- 1 1001 1001 354560512 May 11 08:20 calico-node.tar
-rw------- 1 1001 1001 13446144 May 11 08:19 calico-pod2daemon.tar
-rw------- 1 1001 1001 71183360 May 11 08:20 calico-typha.tar
//load into the K8S environment (the images must be loaded on all three nodes)
root@master:~/apps/release-v3.28.0/images# nerdctl load -i calico-cni.tar
unpacking docker.io/calico/cni:v3.28.0 (sha256:2da41a4fcb31618b20817de9ec9fd13167344f5e2e034cee8baf73d89e212b4e)...
Loaded image: calico/cni:v3.28.0
root@master:~/apps/release-v3.28.0/images# nerdctl load -i calico-kube-controllers.tar
unpacking docker.io/calico/kube-controllers:v3.28.0 (sha256:83e080cba8dbb2bf2168af368006921fcb940085ba6326030a4867963d2be2b3)...
Loaded image: calico/kube-controllers:v3.28.0
root@master:~/apps/release-v3.28.0/images# nerdctl load -i calico-node.tar
unpacking docker.io/calico/node:v3.28.0 (sha256:5a4942472d32549581ed34d785c3724ecffd0d4a7c805e5f64ef1d89d5aaa947)...
Loaded image: calico/node:v3.28.0
3. Run the installation on the master node
//change into the manifests directory and apply calico.yaml
root@master:~/apps/release-v3.28.0/images# cd ..
root@master:~/apps/release-v3.28.0# ls
bin images manifests
root@master:~/apps/release-v3.28.0# cd manifests/
root@master:~/apps/release-v3.28.0/manifests# ls
alp calico-policy-only.yaml crds.yaml ocp
apiserver.yaml calico-typha.yaml csi-driver.yaml ocp-tigera-operator-no-resource-loading.yaml
calico-bpf.yaml calico-vxlan.yaml custom-resources.yaml operator-crds.yaml
calicoctl-etcd.yaml calico.yaml flannel-migration README.md
calicoctl.yaml canal-etcd.yaml generate.sh tigera-operator.yaml
calico-etcd.yaml canal.yaml grafana-dashboards.yaml
root@master:~/apps/release-v3.28.0/manifests# kubectl apply -f calico.yaml //apply the calico.yaml manifest
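It can take a few minutes for the Calico pods to become Ready; a quick way to watch progress before moving on (my addition):
kubectl get pod -n kube-system -o wide //calico-node and calico-kube-controllers should reach Running
kubectl get nodes //all nodes should turn Ready once the CNI is up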
4. Verification
root@master:~/apps/release-v3.28.0/manifests# kubectl run nginx11 --image=harbor.hiuiu.com/basic_image/centos7_filebeat_nginx:2408.u sleep 10000
pod/nginx11 created
root@master:~/apps/release-v3.28.0/manifests# kubectl run nginx22 --image=harbor.hiuiu.com/basic_image/centos7_filebeat_nginx:2408.u sleep 10000
pod/nginx22 created
root@master:~/apps/release-v3.28.0/manifests# kubectl get pod -o wide //check the pod status
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx11 1/1 Running 0 43s 10.4.0.3 node2 <none> <none>
nginx22 1/1 Running 0 26s 10.4.0.4 node2 <none> <none>
//enter the nginx11 container and ping nginx22's IP address
root@master:~/apps/release-v3.28.0/manifests# kubectl exec nginx11 -it bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
[root@nginx11 /]# ping 10.4.0.4
PING 10.4.0.4 (10.4.0.4) 56(84) bytes of data.
64 bytes from 10.4.0.4: icmp_seq=1 ttl=64 time=0.224 ms
64 bytes from 10.4.0.4: icmp_seq=2 ttl=64 time=0.178 ms
64 bytes from 10.4.0.4: icmp_seq=3 ttl=64 time=0.117 ms
64 bytes from 10.4.0.4: icmp_seq=4 ttl=64 time=0.104 ms
64 bytes from 10.4.0.4: icmp_seq=5 ttl=64 time=0.142 ms
^C
--- 10.4.0.4 ping statistics ---
5 packets transmitted, 5 received, 0% packet loss, time 4103ms
rtt min/avg/max/mdev = 0.104/0.153/0.224/0.043 ms
[root@nginx11 /]# exit
exit
//enter the nginx22 container and check its IP with ip a
root@master:~/apps/release-v3.28.0/manifests# kubectl exec nginx22 -it bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
[root@nginx22 /]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: tunl0@NONE: <NOARP> mtu 1480 qdisc noop state DOWN group default qlen 1000
link/ipip 0.0.0.0 brd 0.0.0.0
3: eth0@if7: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether ce:96:3d:55:3a:d4 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 10.4.0.4/24 brd 10.4.0.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::cc96:3dff:fe55:3ad4/64 scope link
valid_lft forever preferred_lft forever
[root@nginx22 /]# exit
exit
Note: if errors come up, use kubectl describe pod <pod-name> -n kube-system to troubleshoot.