08 Deploy a k8s Cluster with sealos and Manage the Cluster

Node overview

Use CentOS 7u9 as the operating system, then prepare four nodes with the following configuration:

IP              CPU  Memory  Disk  Role    Hostname
192.168.91.210  2C   2G      40GB  master  master01
192.168.91.211  2C   2G      40GB  master  master02
192.168.91.212  2C   2G      40GB  master  master03
192.168.91.213  2C   2G      40GB  worker  worker01
# Run on all nodes
# Configure hosts
cat >> /etc/hosts << EOF
192.168.91.210 master01
192.168.91.211 master02
192.168.91.212 master03
192.168.91.213 worker01
EOF
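# Optional sanity check (a sketch): confirm the names resolve on each node
getent hosts master01 master02 master03 worker01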
# Time synchronization
yum -y install ntpdate
echo "0 */1 * * * ntpdate time1.aliyun.com" >> /var/spool/cron/root
# Upgrade the kernel
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
yum --enablerepo="elrepo-kernel" -y install kernel-lt.x86_64
awk -F \' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg
0 : CentOS Linux (5.4.259-1.el7.elrepo.x86_64) 7 (Core)
1 : CentOS Linux (3.10.0-1160.71.1.el7.x86_64) 7 (Core)
2 : CentOS Linux (0-rescue-07b67373f5f64f3493970ca79289589a) 7 (Core)

grub2-set-default "CentOS Linux (5.4.259-1.el7.elrepo.x86_64) 7 (Core)"
reboot
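# After the reboot, an optional check (sketch): the running kernel should now
# be the 5.4 elrepo kernel selected above
uname -r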

Deploy k8s with sealos

# master01
# Prepare sealos
wget https://github.com/labring/sealos/releases/download/v4.3.6/sealos_4.3.6_linux_amd64.tar.gz
tar zxvf sealos_4.3.6_linux_amd64.tar.gz sealos && chmod +x sealos && mv sealos /usr/bin

sealos version
SealosVersion:
  buildDate: "2023-10-20T14:15:00Z"
  compiler: gc
  gitCommit: a2719848
  gitVersion: 4.3.6
  goVersion: go1.20.10
  platform: linux/amd64

# Get help
sealos -h
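
If you would rather not pass the SSH password on the command line, passwordless SSH from master01 to every node can be configured first. A minimal sketch, assuming root login is allowed on all nodes:

# Optional: key-based SSH so --passwd can be dropped from the run below
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
for host in master01 master02 master03 worker01; do
  ssh-copy-id root@$host
done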

# Deploy k8s; --passwd zhangjiabao is the SSH password, and it can be omitted if passwordless SSH has been configured (see the sketch above)
sealos run labring/kubernetes:v1.24.0 labring/calico:v3.22.1 --masters 192.168.91.210,192.168.91.211,192.168.91.212 --nodes 192.168.91.213 --passwd zhangjiabao

kubectl get nodes
NAME       STATUS   ROLES           AGE     VERSION
master01   Ready    control-plane   2m37s   v1.24.0
master02   Ready    control-plane   116s    v1.24.0
master03   Ready    control-plane   81s     v1.24.0
worker01   Ready    <none>          56s     v1.24.0

kubectl get pods -A
NAMESPACE         NAME                                       READY   STATUS    RESTARTS        AGE
calico-system     calico-kube-controllers-6b44b54755-jsx6q   1/1     Running   0               93s
calico-system     calico-node-66cns                          1/1     Running   0               93s
calico-system     calico-node-d86pq                          1/1     Running   0               93s
calico-system     calico-node-qs8wj                          1/1     Running   0               93s
calico-system     calico-node-r45t6                          1/1     Running   0               94s
calico-system     calico-typha-744d647f99-2nbtc              1/1     Running   0               94s
calico-system     calico-typha-744d647f99-pxnl6              1/1     Running   0               84s
kube-system       coredns-6d4b75cb6d-bj9qn                   1/1     Running   0               3m20s
kube-system       coredns-6d4b75cb6d-c9l2g                   1/1     Running   0               3m20s
kube-system       etcd-master01                              1/1     Running   0               3m35s
kube-system       etcd-master02                              1/1     Running   0               2m46s
kube-system       etcd-master03                              1/1     Running   0               2m11s
kube-system       kube-apiserver-master01                    1/1     Running   0               3m36s
kube-system       kube-apiserver-master02                    1/1     Running   1 (2m48s ago)   2m46s
kube-system       kube-apiserver-master03                    1/1     Running   0               2m12s
kube-system       kube-controller-manager-master01           1/1     Running   0               3m34s
kube-system       kube-controller-manager-master02           1/1     Running   0               104s
kube-system       kube-controller-manager-master03           1/1     Running   0               53s
kube-system       kube-proxy-24qw7                           1/1     Running   0               2m21s
kube-system       kube-proxy-gc6pn                           1/1     Running   0               2m56s
kube-system       kube-proxy-rdnzx                           1/1     Running   0               3m20s
kube-system       kube-proxy-vmmvn                           1/1     Running   0               116s
kube-system       kube-scheduler-master01                    1/1     Running   0               3m36s
kube-system       kube-scheduler-master02                    1/1     Running   0               105s
kube-system       kube-scheduler-master03                    1/1     Running   0               71s
kube-system       kube-sealos-lvscare-worker01               1/1     Running   0               109s
tigera-operator   tigera-operator-d7957f5cc-4hqmc            1/1     Running   0               115s
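
sealos can also manage the cluster lifecycle after deployment; a sketch of the common operations (192.168.91.215 is a hypothetical new node):

# Add or remove nodes later
sealos add --nodes 192.168.91.215
sealos delete --nodes 192.168.91.215
# Tear the whole cluster down when it is no longer needed
sealos reset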

Manage the k8s cluster with kuboard

Prepare one more node to host kuboard:

IP              CPU  Memory  Disk
192.168.91.214  2C   1G      40GB
# Install Docker
wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum -y install docker-ce
cat << EOF > /etc/docker/daemon.json
{
  "registry-mirrors": ["https://zwyx2n3v.mirror.aliyuncs.com"]
}
EOF
systemctl enable --now docker

docker run -d --restart=unless-stopped --name=kuboard -p 80:80/tcp -p 10081:10081/tcp -e KUBOARD_ENDPOINT="http://192.168.91.214:80" -e KUBOARD_AGENT_SERVER_TCP_PORT="10081" -v /root/kuboard-data:/data eipwork/kuboard:v3
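
Before opening the web UI, an optional check (a sketch) that the container came up:

# Confirm the kuboard container is running and the UI answers on port 80
docker ps --filter name=kuboard
curl -I http://192.168.91.214:80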

The username and password are admin and Kuboard123, respectively.

(Screenshots: log in to the kuboard web UI with the credentials above, then add a cluster; kuboard generates the agent-install command with a one-time token, which is run on master01 below.)

# master01
curl -k 'http://192.168.91.214:80/kuboard-api/cluster/kubedemo/kind/KubernetesCluster/kubedemo/resource/installAgentToKubernetes?token=Kls7YOPyPzUigXeJzRvMDIMhGfX1zLID' > kuboard-agent.yaml
kubectl apply -f ./kuboard-agent.yaml

kubectl get pods -n kuboard
NAME                                      READY   STATUS    RESTARTS   AGE
kuboard-agent-hn49a6-2-6fff88b6bf-k2msx   1/1     Running   0          2m17s
kuboard-agent-hn49a6-5757c889f5-hd68l     1/1     Running   0          2m17s
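
If the cluster later needs to be detached from kuboard, the agent can be removed with the same manifest (a sketch):

# Optional: detach the cluster by deleting the agent resources
kubectl delete -f ./kuboard-agent.yaml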

(Screenshots: the imported cluster and its workloads as shown in the kuboard web UI.)