Installing kubectl on CentOS 8


Getting familiar with basic commands

systemctl

Command | Purpose | Example
systemctl start | Start a service | systemctl start sshd
systemctl stop | Stop a service | systemctl stop sshd
systemctl restart | Restart a service | systemctl restart sshd
systemctl reload | Reload a service's configuration | systemctl reload sshd
systemctl status | Show service status | systemctl status sshd
systemctl enable | Enable a service at boot | systemctl enable sshd
systemctl disable | Disable a service at boot | systemctl disable sshd
systemctl is-enabled | Check whether a service is enabled at boot | systemctl is-enabled sshd
systemctl is-active | Check whether a service is running | systemctl is-active sshd
systemctl list-units | List all loaded units | systemctl list-units
systemctl list-units --type=service | List all services | systemctl list-units --type=service
systemctl daemon-reload | Reload the systemd unit files | systemctl daemon-reload
systemctl set-default | Set the default target | systemctl set-default multi-user.target
systemctl get-default | Show the default target | systemctl get-default
systemctl show | Show a unit's properties | systemctl show sshd
systemctl kill | Send a signal to a service's processes | systemctl kill --signal=SIGTERM sshd
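A handy combination not listed above: enable a unit at boot and start it immediately in one step (sshd here is just an example service).

# Enable at boot and start right away
systemctl enable --now sshd
# Confirm the result
systemctl is-active sshd
systemctl is-enabled sshd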

yum

Command | Purpose | Example
yum install <package_name> | Install a package | yum install httpd
yum update <package_name> | Update a package | yum update httpd
yum update | Update all packages | yum update
yum remove <package_name> | Remove a package | yum remove httpd
yum list | List all packages | yum list
yum list available | List packages available for installation | yum list available
yum list installed | List installed packages | yum list installed
yum search <keyword> | Search for packages | yum search httpd
yum info <package_name> | Show package information | yum info httpd
yum check-update | List packages with available updates | yum check-update
yum clean all | Clean all caches | yum clean all
yum history | Show the yum transaction history | yum history
yum grouplist | List all package groups | yum grouplist
yum groupinstall <group_name> | Install a package group | yum groupinstall "Development Tools"
yum groupremove <group_name> | Remove a package group | yum groupremove "Development Tools"

journalctl

Option | Description | Example
-f, --follow | Follow the log output in real time | journalctl -f
-n <lines>, --lines=<lines> | Show the given number of log lines | journalctl -n 100
-u <unit>, --unit=<unit> | Show logs for a specific unit | journalctl -u sshd
-k, --dmesg | Show kernel messages | journalctl -k
-b, --boot | Show logs from a boot; an offset selects which boot | journalctl -b -1
-p <priority>, --priority=<priority> | Show only entries at the given priority | journalctl -p err
-e, --pager-end | Jump to the end of the log in the pager | journalctl -e
--since "time" | Show entries after the given time | journalctl --since "2023-01-01 00:00:00"
--until "time" | Show entries before the given time | journalctl --until "2023-01-01 00:00:00"
--disk-usage | Show disk space used by the journal | journalctl --disk-usage
--no-pager | Print output without a pager | journalctl --no-pager
-r, --reverse | Show logs in reverse chronological order | journalctl -r
-a, --all | Show all fields in full | journalctl -a
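These options can be combined; for example, to follow only error-level messages from sshd for the current boot (sshd again is just an example unit):

journalctl -u sshd -b -p err -f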

1. Preparation

Reference notes for further study:

k8s详细教程(一) (znunwm.top)

k8s详细教程(二) (znunwm.top)

1.1 Update the yum repositories

# Check the OS version
cat /etc/centos-release
cat /etc/os-release

# Back up the repo files under /etc/yum.repos.d, then remove them
cp -r /etc/yum.repos.d/ /etc/yum.repos.d.backup
rm -rf /etc/yum.repos.d/*.repo

# Find the repo file for your release on the mirror site
# e.g. for CentOS 8.5.2111: http://mirrors.aliyun.com/repo/Centos-vault-8.5.2111.repo
# Replace the repo file
curl -o /etc/yum.repos.d/CentOS-Linux-BaseOS.repo http://mirrors.aliyun.com/repo/Centos-vault-8.5.2111.repo

# Clean the cache and rebuild it
sudo yum clean all
sudo yum makecache

1.2 Disable the swap partition

Kubernetes requires (or at least strongly recommends) that swap be disabled during installation; if you really cannot turn swap off, you must pass explicit flags during cluster setup to allow it. The swap partition is virtual memory: once physical memory is exhausted, disk space is used as if it were RAM. The upside is more apparent memory; the downside is a significant hit to system performance.

# Edit /etc/fstab and comment out the swap line
# Note: reboot Linux after making this change
vim /etc/fstab
# comment out the line:  /dev/mapper/cl-swap     none                    swap

# Disable swap for the current session
swapoff -a

# The fstab change above is what makes the disable permanent (it takes effect after a reboot)
# Show configured swap devices (should be empty once disabled)
swapon -s

# Verify that swap is disabled
# In the output, the "Swap" line should show 0 used, meaning swap is off and unused
free -h
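If you prefer not to edit /etc/fstab by hand, the swap entry can be commented out with a one-liner. This is only a sketch that assumes the swap line is not already commented and contains a whitespace-separated "swap" field; back the file up first.

# Back up fstab, comment out any swap entries, then turn swap off for this session
cp /etc/fstab /etc/fstab.bak
sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab
swapoff -a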

1.3 Adjust the Linux kernel parameters

# Adjust kernel parameters to enable bridge filtering and IP forwarding
# Edit /etc/sysctl.d/kubernetes.conf and add the following configuration:
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1

# Load the bridge filter module first (the bridge sysctls only exist once it is loaded)
[root@master ~]# modprobe br_netfilter
# Reload the configuration (plain sysctl -p reads only /etc/sysctl.conf, so pass the file explicitly)
[root@master ~]# sysctl -p /etc/sysctl.d/kubernetes.conf
# Check that the bridge filter module is loaded
[root@master ~]# lsmod | grep br_netfilter
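A module loaded with modprobe does not survive a reboot. To make br_netfilter load automatically at boot and apply all sysctl drop-in files (standard systemd mechanisms on CentOS 8):

# Load br_netfilter automatically on every boot
cat <<EOF > /etc/modules-load.d/br_netfilter.conf
br_netfilter
EOF
# Apply every file under /etc/sysctl.d, including kubernetes.conf
sysctl --system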

1.4 Enable IPVS

Kubernetes Services support two proxy modes, one based on iptables and one based on IPVS. IPVS performs noticeably better, but to use it the IPVS kernel modules must be loaded manually.

# 1. Install ipset and ipvsadm
[root@master ~]# yum install ipset ipvsadm -y
# 2. Write the modules to load into a script file
[root@master ~]# cat <<EOF> /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# 3. Make the script executable
[root@master ~]# chmod +x /etc/sysconfig/modules/ipvs.modules
# 4. Run the script
[root@master ~]# /bin/bash /etc/sysconfig/modules/ipvs.modules
# 5. Check that the modules loaded successfully
[root@master ~]# lsmod | grep -e ip_vs -e nf_conntrack_ipv4
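Note: on kernels 4.19 and later the nf_conntrack_ipv4 module was merged into nf_conntrack, so the modprobe above may fail on newer systems. In that case, substitute the merged module:

# On newer kernels, load nf_conntrack instead of nf_conntrack_ipv4
modprobe -- nf_conntrack
lsmod | grep -e ip_vs -e nf_conntrack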

1.5 Disable the firewall

systemctl disable firewalld
systemctl stop firewalld

1.6 Configure hostnames

 # Edit the hosts file
 vi /etc/hosts
 # Add lines of the form <ip> <hostname>, then save
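For example (the second host is a placeholder; list whatever nodes your cluster will have):

 # /etc/hosts entries: <ip> <hostname>
 192.168.0.104 master
 192.168.0.105 node1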

1.7 Keep a laptop running with the lid closed

# Edit the config file
vim /etc/systemd/logind.conf
# Change the HandleLidSwitch value from suspend to lock
HandleLidSwitch=lock

# HandleLidSwitch=ignore        ignore the lid switch
# HandleLidSwitch=poweroff      power off
# HandleLidSwitch=reboot        reboot
# HandleLidSwitch=halt          halt the system
# HandleLidSwitch=suspend       put the machine to sleep
# HandleLidSwitch=hibernate     hibernate (save memory to disk)
# HandleLidSwitch=hybrid-sleep  hybrid sleep, a combination of suspend and hibernate, typically used on desktops
# HandleLidSwitch=lock          only lock the screen; the machine keeps running

# Restart the systemd-logind service
systemctl restart systemd-logind

2. Install Docker

# 1. Switch to the Aliyun mirror of the Docker repo
[root@master ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo

# 2. List the Docker versions available in this repo
[root@master ~]# yum list docker-ce --showduplicates

# 3. Install a specific docker-ce version
# --setopt=obsoletes=0 must be specified, otherwise yum automatically installs a newer version
# If yum reports package conflicts, append --allowerasing to replace the conflicting packages
[root@master ~]# yum install --setopt=obsoletes=0 docker-ce-3:26.1.1-1.el8 -y

# 4. Add a daemon configuration file
# Docker defaults to the cgroupfs cgroup driver, while Kubernetes recommends systemd instead
[root@master ~]# mkdir /etc/docker
[root@master ~]# cat <<EOF> /etc/docker/daemon.json
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "registry-mirrors": ["https://kn0t2bca.mirror.aliyuncs.com"]
}
EOF

# 5. Start Docker
[root@master ~]# systemctl restart docker

# Enable Docker at boot
[root@master ~]# systemctl enable docker
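To confirm that Docker picked up the systemd cgroup driver and the registry mirror from daemon.json:

# Both values should match daemon.json
docker info | grep -i "cgroup driver"
docker info | grep -A1 "Registry Mirrors"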

2.1 Disable IPv6

Add "ipv6": false to the /etc/docker/daemon.json file.

Example file (note that JSON does not allow comments, so keep the real file to plain JSON):

{
    "ipv6": false,
    "exec-opts": [
        "native.cgroupdriver=systemd"
    ],
    "registry-mirrors": [
        "https://kn0t2bca.mirror.aliyuncs.com"
    ]
}
# Restart the service after the change
systemctl restart docker

2.2 Configure an Aliyun registry mirror

You need to log in with an Alibaba Cloud account to obtain a mirror address.

  1. Open 容器镜像服务 (aliyun.com) and log in.

  2. Create a personal instance.

  3. Go to 容器镜像服务 --> 镜像工具 --> 镜像加速器, follow the operation document for your OS, and note the accelerator address.

  4. Edit the config file with vi /etc/docker/daemon.json (the comment below is for illustration only; JSON does not allow comments, so remove it before saving).

    {
    	"exec-opts": [
    		"native.cgroupdriver=systemd"
    	],
    	# mirror addresses; more than one can be listed
    	"registry-mirrors": [
    		"https://16gdank1.mirror.aliyuncs.com"
    	]
    }
    
  5. Restart Docker.

    sudo systemctl daemon-reload  
    sudo systemctl restart docker
    
  6. Verify that the configuration took effect.

    # Run the command and check that Registry Mirrors: shows the configured address
    docker info
    

3. Install the Kubernetes components

3.1 Install kubeadm, kubelet and kubectl

  • kubelet: runs on every cluster node; starts Pods and manages containers
  • kubeadm: a tool for quickly bootstrapping a cluster; used to initialize the cluster
  • kubectl: the Kubernetes command-line tool, used to deploy and manage applications and maintain components

# 1. The Kubernetes images are hosted abroad and download slowly, so switch to a domestic mirror
# 2. Edit /etc/yum.repos.d/kubernetes.repo and add the following configuration
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

# 3. Install kubeadm, kubelet and kubectl
[root@master ~]# yum install --setopt=obsoletes=0 kubeadm-1.17.4-0 kubelet-1.17.4-0 kubectl-1.17.4-0 -y

# 4. Configure the kubelet cgroup driver
# Edit /etc/sysconfig/kubelet and add the following configuration
KUBELET_CGROUP_ARGS="--cgroup-driver=systemd"
KUBE_PROXY_MODE="ipvs"

# 5. Enable kubelet at boot
[root@master ~]# systemctl enable kubelet

Check that the installation succeeded:

kubelet --version
kubectl version
kubeadm version
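Note that kubelet cannot run properly yet: if it is started (or the machine reboots) before kubeadm init has generated its configuration, it will keep restarting. That crash loop is expected and resolves itself after initialization; its status can still be inspected:

systemctl status kubelet
journalctl -u kubelet -n 20 --no-pager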

3.2 Initialize the cluster

# Create the cluster
# --apiserver-advertise-address=192.168.90.100	address the API server advertises; usually the master node's IP
# --image-repository registry.aliyuncs.com/google_containers	repository to pull the cluster's container images from; the default k8s.gcr.io is slow to reach
# --kubernetes-version=v1.17.4	Kubernetes version for the cluster
# --service-cidr=10.96.0.0/12	IP range for cluster-internal Services; every Service gets an IP from this range
# --pod-network-cidr=10.244.0.0/16	IP range for Pods
[root@master ~]# kubeadm init --apiserver-advertise-address=192.168.0.104 --image-repository=registry.aliyuncs.com/google_containers --service-cidr=10.96.0.0/12 --pod-network-cidr=10.244.0.0/16
# Tip: if the error says the version is wrong, upgrade; if it still complains after upgrading, add --kubernetes-version 1.28.2 to pin the version

# kubeadm reset  // reset the cluster
# then re-run the kubeadm init command above

# Create the kubeconfig files
[root@master ~]# mkdir -p ~/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf ~/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

After a successful initialization, the corresponding Pods are created in the kube-system namespace.

# List the Pods created by default
[root@shark k8s-yaml]# kubectl get pods -n kube-system
NAME                            READY   STATUS    RESTARTS   AGE
coredns-66f779496c-72vf6        0/1     Pending   0          3h12m
coredns-66f779496c-mc55h        0/1     Pending   0          3h12m
etcd-shark                      1/1     Running   0          3h12m
kube-apiserver-shark            1/1     Running   0          3h12m
kube-controller-manager-shark   1/1     Running   1          3h12m
kube-proxy-xtmsk                1/1     Running   0          3h12m
kube-scheduler-shark            1/1     Running   1          3h12m

# The two coredns Pods above are Pending; the most likely cause is a missing network plugin
This is expected and by design: kubeadm is network-provider agnostic, so the administrator has to choose and install a Pod network add-on.
The Pod network must be configured before CoreDNS can fully deploy; until then the DNS components stay in the Pending state.

3.3 Install a Pod network add-on

kube-flannel is used here.

# Download kube-flannel.yml and save it on the server
# from https://github.com/flannel-io/flannel/tree/master/Documentation/kube-flannel.yml

# Install it by applying the saved kube-flannel.yml
kubectl apply -f kube-flannel.yml

# Check whether kube-flannel is running
kubectl  get pods -n kube-flannel

# Once kube-flannel is running, wait a few minutes and check whether the built-in coredns Pods become Ready
kubectl  get pods -n kube-system

3.4 Check cluster health

# Check that the node is Ready
kubectl get no

# Check that the built-in components report a healthy status
kubectl get componentstatuses

# Generate a new join token
kubeadm token create --print-join-command

# List existing tokens
kubeadm token list
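The output of --print-join-command is what a worker node runs to join the cluster. Its general shape looks like the sketch below; the token and hash are placeholders, use the values printed by the command above:

# Run on the worker node (placeholder values)
kubeadm join 192.168.0.104:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:<hash-from-the-output-above>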

3.5 Basic deployment test

Deploy an nginx instance as a smoke test.

# Create an nginx deployment
kubectl create deployment nginx  --image=nginx:1.14-alpine

# Expose the port
kubectl expose deploy nginx  --port=80 --target-port=80  --type=NodePort

# Check the Pod and the Service
kubectl get pod,svc
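Once the Service exists, the nginx welcome page should be reachable on the NodePort that kubectl reports (the port below is whatever your cluster assigned):

# Read the assigned NodePort, then hit it from any machine that can reach the node
kubectl get svc nginx
curl http://192.168.0.104:<nodePort>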

3.6 Install a Kubernetes web dashboard

Several dashboards are popular; this guide uses KubeSphere.

3.6.1 Install KubeSphere (multi-node)

Minimal installation of KubeSphere on an existing Kubernetes cluster.

# Install
kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.4.1/kubesphere-installer.yaml
kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.4.1/cluster-configuration.yaml

# Watch the installer logs
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f

# Check whether KubeSphere is running
# Find the console port with the following command (30880 by default)
kubectl get svc/ks-console -n kubesphere-system
# Open the web console via NodePort (IP:30880) with the default account and password (admin/P@88w0rd).

3.6.2 Install KubeSphere (single node)

Install KubeSphere on Linux in All-in-One mode.

【云原生】Kubernetes上安装KubeSphere详细教程_kubesphere教程-CSDN博客

Note: the following components must be installed first:

  • an NFS server (see 3.7)
  • a default StorageClass (see 3.8)

Method 1: install directly

# Download the core files; v3.4.1 is the version number. If github.com cannot be reached, replace it with kkgithub.com
wget https://github.com/kubesphere/ks-installer/releases/download/v3.4.1/cluster-configuration.yaml
wget https://github.com/kubesphere/ks-installer/releases/download/v3.4.1/kubesphere-installer.yaml

# Install
kubectl apply -f kubesphere-installer.yaml
kubectl apply -f cluster-configuration.yaml

# Check whether the deployment is healthy
kubectl get pod -n kubesphere-system

# Find the externally exposed port (30880 by default)
# Open it directly in a browser; user name and password: admin/P@88w0rd
kubectl get services -n kubesphere-system

# Fix the "etcd monitoring certificate not found" issue
kubectl -n kubesphere-monitoring-system create secret generic kube-etcd-client-certs  --from-file=etcd-client-ca.crt=/etc/kubernetes/pki/etcd/ca.crt  --from-file=etcd-client.crt=/etc/kubernetes/pki/apiserver-etcd-client.crt  --from-file=etcd-client.key=/etc/kubernetes/pki/apiserver-etcd-client.key

Method 2: install with kk (KubeKey)

# Download KubeKey
curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.13 sh -

# Make kk executable
chmod +x kk

# Start the installation
# Use the China download zone (for when GitHub access is restricted)
export KKZONE=cn
curl -sfL https://get-kk.kubesphere.io | sh -


[root@ksp-master-1 ~]# mkdir ~/kubekey
[root@ksp-master-1 ~]# cd ~/kubekey/
[root@ksp-master-1 kubekey]# export KKZONE=cn
[root@ksp-master-1 kubekey]# curl -sfL https://get-kk.kubesphere.io | sh -
# Normal output looks like this
Downloading kubekey v3.1.1 from https://kubernetes.pek3b.qingstor.com/kubekey/releases/download/v3.1.1/kubekey-v3.1.1-linux-amd64.tar.gz ...
Kubekey v3.1.1 Download Complete!
[root@ksp-master-1 kubekey]# ll -h
total 114M
-rwxr-xr-x. 1 root root 79M Apr 16 12:30 kk
-rw-r--r--. 1 root root 36M Apr 25 09:37 kubekey-v3.1.1-linux-amd64.tar.gz

# List the Kubernetes versions supported by KubeKey
./kk version --show-supported-k8s

# Check the kubectl version
kubectl version

# Generate the initial config file, specifying the Kubernetes version
./kk create config -f k8s-v1282.yaml --with-kubernetes v1.28.2

# KubeSphere needs a default StorageClass before it can be deployed
kubectl create -f - <<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
EOF

# Deploy
./kk create cluster -f k8s-v1282.yaml

Notes on the k8s-v1282.yaml file

This example uses 3 nodes that serve simultaneously as control-plane, etcd and worker nodes.

Edit the configuration file k8s-v1282.yaml; the changes are mainly in the kind: Cluster section.

Adjust hosts, roleGroups and the related fields in the kind: Cluster section as follows:

hosts: the nodes' IPs, ssh user, ssh password and ssh port
roleGroups: assign 3 etcd and control-plane nodes, and reuse the same machines as the 3 worker nodes
internalLoadbalancer: enable the built-in HAProxy load balancer
domain: custom domain lb.opsxlab.cn; use the default lb.kubesphere.local if you have no special requirement
clusterName: custom name opsxlab.cn; use the default cluster.local if you have no special requirement
autoRenewCerts: automatically renews certificates before they expire; defaults to true
containerManager: use containerd

apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  - {name: ksp-master-1, address: 192.168.9.131, internalAddress: 192.168.9.131, user: root, password: "OpsXlab@2024"}
  - {name: ksp-master-2, address: 192.168.9.132, internalAddress: 192.168.9.132, user: root, password: "OpsXlab@2024"}
  - {name: ksp-master-3, address: 192.168.9.133, internalAddress: 192.168.9.133, user: root, password: "OpsXlab@2024"}
  roleGroups:
    etcd:
    - ksp-master-1
    - ksp-master-2
    - ksp-master-3
    control-plane:
    - ksp-master-1
    - ksp-master-2
    - ksp-master-3
    worker:
    - ksp-master-1
    - ksp-master-2
    - ksp-master-3
  controlPlaneEndpoint:
    ## Internal loadbalancer for apiservers
    internalLoadbalancer: haproxy

    domain: lb.opsxlab.cn
    address: ""
    port: 6443
  kubernetes:
    version: v1.28.8
    clusterName: opsxlab.cn
    autoRenewCerts: true
    containerManager: containerd
  etcd:
    type: kubekey
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
    ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
    multusCNI:
      enabled: false
  registry:
    privateRegistry: ""
    namespaceOverride: ""
    registryMirrors: []
    insecureRegistries: []
  addons: []

Problem: /usr/local/bin/kubectl: No such file or directory

/bin/bash: /usr/local/bin/kubectl: No such file or directory
18:07:47 CST message: [shark]
get kubernetes cluster info failed: Failed to exec command: sudo -E /bin/bash -c "/usr/local/bin/kubectl --no-headers=true get nodes -o custom-columns=:metadata.name,:status.nodeInfo.kubeletVersion,:status.addresses"

Fix

kubectl is installed at /usr/bin/kubectl rather than /usr/local/bin/kubectl.

Create a symbolic link in /usr/local/bin/ pointing to /usr/bin/kubectl:

ln -s /usr/bin/kubectl /usr/local/bin/kubectl

Problem: install Kubernetes v1.24 or later

[Notice]
Incorrect runtime. Please specify a container runtime other than Docker to install Kubernetes v1.24 or later.
You can set "spec.kubernetes.containerManager" in the configuration file to "containerd" or add "--container-manager containerd" to the "./kk create cluster" command.
For more information, see:
https://github.com/kubesphere/kubekey/blob/master/docs/commands/kk-create-cluster.md
https://kubernetes.io/docs/setup/production-environment/container-runtimes/#container-runtimes
https://kubernetes.io/blog/2022/02/17/dockershim-faq/

Fix

Starting with Kubernetes v1.24, Docker is no longer supported as a built-in container runtime. The Kubernetes project deprecated Docker as a runtime in v1.20 and later removed the dockershim component, which had been the bridge between Kubernetes and Docker.

Kubernetes v1.24 or later therefore needs another container runtime, such as containerd or CRI-O. KubeKey (kk) is the lightweight Kubernetes cluster installer provided by KubeSphere.

Add the --container-manager containerd parameter to use containerd as the container runtime:

./kk create cluster --with-kubernetes v1.24.x --container-manager containerd ... [other parameters]

3.7 Install nfs-server

# On every machine
yum install -y nfs-utils
# On the master, export the shared directory
echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports
# Create the shared directory and start the nfs services
mkdir -p /nfs/data
# On the master
systemctl enable rpcbind
systemctl start rpcbind
# Enable at boot and start
systemctl enable nfs-server
systemctl start nfs-server
# Apply the export configuration immediately
exportfs -r
# Show the exported directories
exportfs -v
# Check that the export is visible
showmount -e 192.168.2.248


# To add more NFS paths, edit /etc/exports directly
# and run the following commands afterwards
systemctl restart rpcbind
systemctl restart nfs-server
exportfs -r
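A quick client-side check that the export really works; 192.168.2.248 is the NFS server used above, /mnt is just a temporary mount point, and the client needs nfs-utils installed:

# Mount the export, write a test file, then unmount
mount -t nfs 192.168.2.248:/nfs/data /mnt
touch /mnt/nfs-test && ls -l /mnt
umount /mnt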

3.8 Configure a default StorageClass

# Create the default storage class and provisioner
kubectl create -f storag.yaml

# Confirm that the configuration took effect
kubectl get sc

The storag.yaml file is shown below (change the NFS server address to match your environment):

## StorageClass definition
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "true"  ## whether to keep an archive of the PV contents when the PV is deleted

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/nfs-subdir-external-provisioner:v4.0.2
          # resources:
          #    limits:
          #      cpu: 10m
          #    requests:
          #      cpu: 10m
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 172.31.0.4 ## your NFS server address
            - name: NFS_PATH  
              value: /nfs/data  ## directory exported by the NFS server
      volumes:
        - name: nfs-client-root
          nfs:
            server: 172.31.0.4
            path: /nfs/data
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
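To confirm that dynamic provisioning through the new StorageClass works, a small throwaway PVC can be created (test-pvc is an arbitrary name; the heredoc style mirrors the one used earlier in this guide):

# Create a test PVC against nfs-storage and check that it becomes Bound
kubectl create -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc
spec:
  storageClassName: nfs-storage
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Mi
EOF
kubectl get pvc test-pvc
# Clean up afterwards
kubectl delete pvc test-pvc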

3.9 Install and deploy MySQL 8.0

Overview

  • Create a ConfigMap holding the contents of the MySQL configuration file my.cnf

  • Create a PersistentVolume (PV) for MySQL's persistent storage

  • Create a PersistentVolumeClaim (PVC) that references the PV

  • Create a Deployment that runs the MySQL application

  • Create a Service that exposes the MySQL port for external access

Steps

# In this example everything is placed in the mysql namespace
# (the sample YAML files below set namespace: mydlqcloud; change that to mysql, or adjust the -n flag, so the two agree)

# Create the namespace
kubectl create namespace mysql
# Create the ConfigMap
kubectl create -f mysql-config.yaml -n mysql
# Check that it was created
kubectl get ConfigMap -n mysql

# Create the PV and PVC; an NFS server must already be set up, see section 3.7
kubectl create -f mysql-storage.yaml -n mysql
# Check that they were created
kubectl get pv,pvc -n mysql

# Create the Deployment that runs MySQL
kubectl create -f mysql-deploy.yaml -n mysql
# Check that it was created
kubectl get Deployment -n mysql
# Find the MySQL pod and note its IP
kubectl get pod -n mysql -owide|grep mysql
# Exec into the MySQL pod
kubectl exec -it -n mysql mysql-5f69b977b8-cvmds sh
# Inside the pod, check that a direct connection works
mysql -h 10.244.0.38 -P 3306 --user=root --password=123456

# Create the Service for external access
kubectl create -f mysql-service.yaml -n mysql
# Check that it was created
kubectl get Service -n mysql
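With the Service in place, MySQL is also reachable from outside the cluster through any node IP on the NodePort defined in mysql-service.yaml (30336); for example, from a machine with the mysql client installed (replace the IP with one of your node IPs):

mysql -h 192.168.2.248 -P 30336 --user=root --password=123456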

YAML files used by the commands above

mysql-config.yaml

vi mysql-config.yaml

apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql-config
  labels:
    app: mysql
data:
  my.cnf: |-
    [client]
    default-character-set=utf8mb4
    [mysql]
    default-character-set=utf8mb4
    [mysqld] 
    max_connections = 2000
    secure_file_priv=/var/lib/mysql
    sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION

mysql-storage.yaml

vi mysql-storage.yaml

## PV
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql
  namespace: mydlqcloud
  labels:
    app: mysql             # label on the PV
spec:
  storageClassName: ml-pv1
  capacity:          
    storage: 10Gi          # size of the PV
  accessModes:       
  - ReadWriteOnce
  nfs:                     # use the NFS storage driver
    server: 192.168.2.248   # NFS server IP address
    path: /nfs/mysql       # NFS shared path; create the mysql directory there in advance
  persistentVolumeReclaimPolicy: Retain  
  volumeMode: Filesystem
---
## PVC
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: mysql
  namespace: mydlqcloud
spec:
  storageClassName: ml-pv1
  resources:
    requests:
      storage: 10Gi        # requested PVC size
  accessModes:
  - ReadWriteOnce


mysql-deploy.yaml

vi mysql-deploy.yaml

## Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
  namespace: mydlqcloud
  labels:
    app: mysql
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:     
      containers:
      - name: mysql
        image: mysql:8.0.19
        ports:
        - containerPort: 3306
        env:
        - name: MYSQL_ROOT_PASSWORD    ## default password for the root user
          value: "123456"
        resources:
          limits:
            cpu: 2000m
            memory: 512Mi
          requests:
            cpu: 2000m
            memory: 512Mi
        livenessProbe:
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3
          exec:
            command: ["mysqladmin", "-uroot", "-p${MYSQL_ROOT_PASSWORD}", "ping"]
        readinessProbe:  
          initialDelaySeconds: 10
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3
          exec:
            command: ["mysqladmin", "-uroot", "-p${MYSQL_ROOT_PASSWORD}", "ping"]
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
        - name: config
          mountPath: /etc/mysql/conf.d/my.cnf
          subPath: my.cnf
        - name: localtime
          readOnly: true
          mountPath: /etc/localtime
      volumes:
      - name: data
        persistentVolumeClaim:
          claimName: mysql
      - name: config      
        configMap:
          name: mysql-config
      - name: localtime
        hostPath:
          type: File
          path: /etc/localtime

mysql-service.yaml

vi mysql-service.yaml

# Expose the MySQL port for external connections
apiVersion: v1
kind: Service
metadata:
  name: mysql
  namespace: mydlqcloud
  labels:
    app: mysql
spec:
  type: NodePort # ClusterIP: reachable only inside the cluster; NodePort: reachable from outside
  ports:
  - name: mysql
    port: 3306
    targetPort: 3306
    nodePort: 30336 # externally exposed port
  selector:
    app: mysql

3.10 Install Helm for repository and chart management

Go to Releases · helm/helm · GitHub and find the release you want to install.

Copy the download link labeled Linux amd64.

# Download the archive (using the version shown in the listing below)
wget https://get.helm.sh/helm-v3.2.4-linux-amd64.tar.gz

# Unpack the archive
tar xf helm-v3.2.4-linux-amd64.tar.gz

[root@k8s-master software]# ll
total 12624
-rw-r--r-- 1 root root 12926032 Jun 16 06:55 helm-v3.2.4-linux-amd64.tar.gz
drwxr-xr-x 2 3434 3434       50 Jun 16 06:55 linux-amd64

# Copy the helm binary to /usr/bin
[root@k8s-master software]# cp -a linux-amd64/helm /usr/bin/helm

# Verify by checking the version number
helm version

# Replace the stable repo with a domestic mirror (Azure China)
helm repo remove stable
helm repo add stable http://mirror.azure.cn/kubernetes/charts/
helm repo update

# Search for Redis charts
helm search repo redis
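As a quick usage check (Helm 3 syntax, matching the version installed above), a chart from the newly added repo can be installed and removed; my-redis is an arbitrary release name:

# Install a Redis chart, list releases, then remove it again
helm install my-redis stable/redis
helm list
helm uninstall my-redis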

4. Remote access via an intranet tunnel

cpolar is used to expose the intranet service.

Website: www.cpolar.com/

Intranet address: http://ip:9200/

Account: xxxxxx

Password: xxxxx

# Install cpolar
curl -L https://www.cpolar.com/static/downloads/install-release-cpolar.sh | sudo bash

# Check the version number; if it prints normally, the installation succeeded
cpolar version

# Obtain the tunnel Authtoken
# Log in at https://dashboard.cpolar.com/auth
# Enter your account ---> verify

5. Common installation problems

5.1 docker pull fails with a 443 error

5.1.1 Error details

Error response from daemon: Head "registry-1.docker.io/v2/library/…": dial tcp [2600:1f18:2148:bc02:445d:9ace:d20b:c303]:443: connect: network is unreachable

5.1.2 Fix

# Add a registry mirror to the /etc/docker/daemon.json file
{
  "registry-mirrors": ["https://liadaibh.mirror.aliyuncs.com"]
}

# Configure a DNS server for name resolution
# Add the following to /etc/resolv.conf, with 8.8.4.4 as the first nameserver
search localdomain
# Google's public DNS resolver
nameserver 8.8.4.4

# Restart Docker
systemctl daemon-reload
systemctl restart docker.service

5.2 kubeadm init fails with a CRI v1 runtime error

5.2.1 Error details

[preflight] Some fatal errors occurred:
        [ERROR CRI]: container runtime is not running: output: time="2024-05-14T10:29:07+08:00" level=fatal msg="validate service connection: CRI v1 runtime API is not implemented for endpoint \"unix:///var/run/containerd/containerd.sock\": rpc error: code = Unimplemented desc = unknown service runtime.v1.RuntimeService"
, error: exit status 1
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`

5.2.2 Fix

# Edit the file
vi /etc/containerd/config.toml

# Comment out this line with #, or remove "cri" from the list
#disabled_plugins = ["cri"]
disabled_plugins = []

# Restart the container runtime
systemctl restart containerd 
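If config.toml has been trimmed down too far, it can also be regenerated from containerd's built-in defaults; it is common at the same time to switch runc to the systemd cgroup driver so it matches the kubelet. A sketch, assuming containerd 1.6+ where SystemdCgroup sits under the runc options:

# Regenerate the full default config, enable the systemd cgroup driver, restart
containerd config default > /etc/containerd/config.toml
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
systemctl restart containerd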

5.3 kubeadm init fails with "kube-apiserver.yaml already exists"

5.3.1 Error details

[preflight] Running pre-flight checks
error execution phase preflight: [preflight] Some fatal errors occurred:
        [ERROR FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml]: /etc/kubernetes/manifests/kube-apiserver.yaml already exists
        [ERROR FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml]: /etc/kubernetes/manifests/kube-controller-manager.yaml already exists
        [ERROR FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml]: /etc/kubernetes/manifests/kube-scheduler.yaml already exists
        [ERROR FileAvailable--etc-kubernetes-manifests-etcd.yaml]: /etc/kubernetes/manifests/etcd.yaml already exists
        [ERROR Port-10250]: Port 10250 is in use
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher

5.3.2 Fix

# Reset kubeadm, then re-run kubeadm init
kubeadm reset

5.4 kubeadm init fails with "failed to pull image"

5.4.1 Error details

beadm. It is recommended that using "registry.k8s.io/pause:3.9" as the CRI sandbox image.
error execution phase preflight: [preflight] Some fatal errors occurred:
        [ERROR ImagePull]: failed to pull image registry.k8s.io/kube-apiserver:v1.28.2: output: E0515 19:01:48.718719 1002044 remote_image.go:171] "PullImage from image service failed" err="rpc error: code = Unknown desc = failed to pull and unpack image \"registry.k8s.io/kube-apiserver:v1.28.2\": failed to resolve reference \"registry.k8s.io/kube-apiserver:v1.28.2\": failed to do request: Head \"https://registry.k8s.io/v2/kube-apiserver/manifests/v1.28.2\": dial tcp [2600:1901:0:bbc4::]:443: connect: network is unreachable" image="registry.k8s.io/kube-apiserver:v1.28.2"
time="2024-05-15T19:01:48+08:00" level=fatal msg="pulling image: rpc error: code = Unknown desc = failed to pull and unpack image \"registry.k8s.io/kube-apiserver:v1.28.2\": failed to resolve reference \"registry.k8s.io/kube-apiserver:v1.28.2\": failed to do request: Head \"https://registry.k8s.io/v2/kube-apiserver/manifests/v1.28.2\": dial tcp [2600:1901:0:bbc4::]:443: connect: network is unreachable"
, error: exit status 1
        [ERROR ImagePull]: failed to pull image registry.k8s.io/kube-controller-manager:v1.28.2: output: E0515 19:01:48.939277 1002081 remote_image.go:171] "PullImage from image service failed" err="rpc error: code = Unknown desc = failed to pull and unpack image \"registry.k8s.io/kube-controller-manager:v1.28.2\": failed to resolve reference \"registry.k8s.io/kube-controller-manager:v1.28.2\": failed to do request: Head \"https://registry.k8s.io/v2/kube-controller-manager/manifests/v1.28.2\": dial tcp [2600:1901:0:bbc4::]:443: connect: network is unreachable" image="registry.k8s.io/kube-controller-manager:v1.28.2"
time="2024-05-15T19:01:48+08:00" level=fatal msg="pulling image: rpc error: code = Unknown desc = failed to pull and unpack image \"registry.k8s.io/kube-controller-manager:v1.28.2\": failed to resolve reference \"registry.k8s.io/kube-controller-manager:v1.28.2\": failed to do request: Head \"https://registry.k8s.io/v2/kube-controller-manager/manifests/v1.28.2\": dial tcp [2600:1901:0:bbc4::]:443: connect: network is unreachable"
, error: exit status 1
        [ERROR ImagePull]: failed to pull image registry.k8s.io/kube-scheduler:v1.28.2: output: E0515 19:01:49.143536 1002118 remote_image.go:171] "PullImage from image service failed" err="rpc error: code = Unknown desc = failed to pull and unpack image \"registry.k8s.io/kube-scheduler:v1.28.2\": failed to resolve reference \"registry.k8s.io/kube-scheduler:v1.28.2\": failed to do request: Head \"https://registry.k8s.io/v2/kube-scheduler/manifests/v1.28.2\": dial tcp [2600:1901:0:bbc4::]:443: connect: network is unreachable" image="registry.k8s.io/kube-scheduler:v1.28.2"

5.4.2 Fix

# kubeadm init pulls the required base images during initialization; pulls from registry.k8s.io fail, so init fails
# Point init at a domestic image repository instead
kubeadm init --image-repository=registry.aliyuncs.com/google_containers

5.5 Pods on the master node stay Pending

5.5.1 Error details

# A Pod stays in the Pending state
# kubectl describe pod <podName> reports an event like the following
Warning FailedScheduling 40s (x28 over 28m) default-scheduler 0/1 nodes are available: 1 node(s) had taint {node-role.kubernetes.io/master: }, that the pod didn’t tolerate.


Warning  FailedScheduling  2m43s  default-scheduler  0/1 nodes are available: 1 node(s) had untolerated taint {node-role.kubernetes.io/control-plane: }. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling..

5.5.2 Cause

The Pod was scheduled onto the master node and could not start. By default Kubernetes does not run Pods on the master: the master node carries a taint, and tainted nodes do not run ordinary Pods.

5.5.3 Fix

Manually remove the taint from the master.

# Show the node's taints; the taint key is node-role.kubernetes.io/control-plane
[root@shark ~]# kubectl get no -o yaml | grep taint -A 5
    taints:
    - effect: NoSchedule
      key: node-role.kubernetes.io/control-plane
  status:
    addresses:
    - address: 192.168.2.248

# Remove the taint from the master nodes by appending - to the taint key
[root@shark ~]# kubectl taint nodes --all node-role.kubernetes.io/control-plane-
node/shark untainted

# Check again whether the Pods are running
kubectl get po --all-namespaces
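If the node should later go back to being a dedicated control-plane node, the taint can be restored (shark is the node name from the output above):

kubectl taint nodes shark node-role.kubernetes.io/control-plane:NoSchedule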

5.6 /etc/kubernetes/kubeadm-config.yaml is missing

5.6.1 Fix

# Generate a kubeadm-config.yaml file
kubeadm config print init-defaults  > /etc/kubernetes/kubeadm-config.yaml

# Then edit it as follows
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.200.3     # this machine's IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master1        # this machine's hostname
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.200.16:16443"    # virtual IP and haproxy port
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io    # change the image repository to match your environment
kind: ClusterConfiguration
kubernetesVersion: v1.18.2     # Kubernetes version
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}

---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
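Once edited, the file is passed to kubeadm init instead of the individual command-line flags:

kubeadm init --config /etc/kubernetes/kubeadm-config.yaml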

5.7 crictl pull fails

Error details

pull image failed: Failed to exec command: sudo -E /bin/bash -c "env PATH=$PATH crictl pull registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.9 --platform amd64"
WARN[0000] image connect using default endpoints: [unix:///var/run/dockershim.sock unix:///run/containerd/containerd.sock unix:///run/crio/crio.sock unix:///var/run/cri-dockerd.sock]. As the default settings are now deprecated, you should set the endpoint instead.
E0516 23:34:12.452918   37845 remote_image.go:171] "PullImage from image service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial unix /var/run/dockershim.sock: connect: no such file or directory\"" image="registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.9"
FATA[0000] pulling image: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing dial unix /var/run/dockershim.sock: connect: no such file or directory": Process exited with status 1
23:34:12 CST retry: [shark]
23:34:17 CST message: [shark]
downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.9
23:34:17 CST message: [shark]
pull image failed: Failed to exec command: sudo -E /bin/bash -c "env PATH=$PATH crictl pull registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.9 --platform amd64"
WARN[0000] image connect using default endpoints: [unix:///var/run/dockershim.sock unix:///run/containerd/containerd.sock unix:///run/crio/crio.sock unix:///var/run/cri-dockerd.sock]. As the default settings are now deprecated, you should set the endpoint instead.
E0516 23:34:17.563476   37927 remote_image.go:171] "PullImage from image service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial unix /var/run/dockershim.sock: connect: no such file or directory\"" image="registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.9"
FATA[0000] pulling image: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing dial unix /var/run/dockershim.sock: connect: no such file or directory": Process exited with status 1
23:34:17 CST retry: [shark]
23:34:22 CST message: [shark]
downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.9
23:34:22 CST message: [shark]
pull image failed: Failed to exec command: sudo -E /bin/bash -c "env PATH=$PATH crictl pull registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.9 --platform amd64"
WARN[0000] image connect using default endpoints: [unix:///var/run/dockershim.sock unix:///run/containerd/containerd.sock unix:///run/crio/crio.sock unix:///var/run/cri-dockerd.sock]. As the default settings are now deprecated, you should set the endpoint instead.
E0516 23:34:22.673913   38010 remote_image.go:171] "PullImage from image service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial unix /var/run/dockershim.sock: connect: no such file or directory\"" image="registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.9"
FATA[0000] pulling image: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing dial unix /var/run/dockershim.sock: connect: no such file or directory": Process exited with status 1
23:34:22 CST failed: [shark]
error: Pipeline[CreateClusterPipeline] execute failed: Module[PullModule] exec failed:
failed: [shark] [PullImages] exec failed after 3 retries: pull image failed: Failed to exec command: sudo -E /bin/bash -c "env PATH=$PATH crictl pull registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.9 --platform amd64"
WARN[0000] image connect using default endpoints: [unix:///var/run/dockershim.sock unix:///run/containerd/containerd.sock unix:///run/crio/crio.sock unix:///var/run/cri-dockerd.sock]. As the default settings are now deprecated, you should set the endpoint instead.
E0516 23:34:22.673913   38010 remote_image.go:171] "PullImage from image service failed" err="rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial unix /var/run/dockershim.sock: connect: no such file or directory\"" image="registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.9"
FATA[0000] pulling image: rpc error: code = Unavailable desc = connection error: desc = "transport: Error while dialing dial unix /var/run/dockershim.sock: connect: no such file or directory": Process exited with status 1

Fix

cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///var/run/containerd/containerd.sock
image-endpoint: unix:///var/run/containerd/containerd.sock
timeout: 0
debug: false
pull-image-on-create: false
EOF

5.8 Default StorageClass was not found

Error details

fatal: [localhost]: FAILED! => {
    "assertion": "\"(default)\" in default_storage_class_check.stdout",
    "changed": false,
    "evaluated_to": false,
    "msg": "Default StorageClass was not found !"
}

Fix

The message says Default StorageClass was not found, i.e. the cluster has no default StorageClass. Take the default StorageClass definition from a cluster where KubeSphere installed successfully, save it as a YAML file, and apply it to this cluster. Then retry the KubeSphere installation by running the commands below.

# Create the StorageClass yaml file
cat >> default-storage-class.yaml <<-EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: local
  annotations:
    cas.openebs.io/config: |
      - name: StorageType
        value: "hostpath"
      - name: BasePath
        value: "/var/openebs/local/"
    kubectl.kubernetes.io/last-applied-configuration: >
      {"apiVersion":"storage.k8s.io/v1","kind":"StorageClass","metadata":{"annotations":{"cas.openebs.io/config":"-
      name: StorageType\n  value: \"hostpath\"\n- name: BasePath\n  value:
      \"/var/openebs/local/\"\n","openebs.io/cas-type":"local","storageclass.beta.kubernetes.io/is-default-class":"true","storageclass.kubesphere.io/supported-access-modes":"[\"ReadWriteOnce\"]"},"name":"local"},"provisioner":"openebs.io/local","reclaimPolicy":"Delete","volumeBindingMode":"WaitForFirstConsumer"}
    openebs.io/cas-type: local
    storageclass.beta.kubernetes.io/is-default-class: 'true'
    storageclass.kubesphere.io/supported-access-modes: '["ReadWriteOnce"]'
provisioner: openebs.io/local
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
EOF

# Apply it
kubectl apply -f default-storage-class.yaml

# Reinstall KubeSphere

# Remove the previous installation
kubectl delete -f cluster-configuration.yaml
kubectl delete -f kubesphere-installer.yaml
# Reinstall
kubectl apply -f kubesphere-installer.yaml
kubectl apply -f cluster-configuration.yaml