Kubernetes Cluster Deployment Guide

  1. Preparation (run on all nodes)

  1.1 Prepare the virtual machines

After creating the VMs (or configuring physical servers), apply the optimizations below.

Three nodes: k8s-1-master, k8s-1-node1, and k8s-1-node2.

Set the system hostnames and the hosts file:

cat <<EOF | sudo tee -a /etc/hosts
192.168.18.221 k8s-1-master
192.168.18.222 k8s-1-node1
192.168.18.223 k8s-1-node2
EOF
# Run on the matching node
sudo hostnamectl set-hostname k8s-1-master
sudo hostnamectl set-hostname k8s-1-node1
sudo hostnamectl set-hostname k8s-1-node2
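
A quick sanity check (optional) that every node resolves the names from /etc/hosts; the loop below just assumes the three hostnames defined above:

# Each name should answer from every node
for h in k8s-1-master k8s-1-node1 k8s-1-node2; do ping -c 1 "$h"; done
hostname   # should print this node's own name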
  1.2 Update yum

# The update can take a long time
    sudo yum update -y
# Set up the repository helper
    sudo yum install -y yum-utils
    sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
  1.3 System settings

  1.3.1 Disable the firewalld and iptables services
systemctl stop firewalld
systemctl disable firewalld

systemctl stop iptables
systemctl disable iptables
  1.3.2 Disable SELinux
# Permanent (takes effect after reboot)
        sed -i 's/enforcing/disabled/' /etc/selinux/config
# Temporary (current session only)
        setenforce 0
  1.3.3 Disable the swap partition
# Temporary
        swapoff -a
# Permanent
vim /etc/fstab
        Comment out the line:
        /dev/mapper/xxx    swap   xxx

  1.3.4 Tune kernel parameters for Kubernetes
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF
# Run the following in order (load the module first, or the bridge sysctls cannot apply;
# note that sysctl -p does not read /etc/sysctl.d/, so use sysctl --system)
modprobe br_netfilter
sysctl --system
lsmod | grep br_netfilter

Expected output: lsmod lists the br_netfilter and bridge modules.
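
The modprobe above does not persist across reboots. A minimal way to make it persist, assuming a systemd host (systemd-modules-load reads /etc/modules-load.d/ at boot):

cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF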

  1.3.5 Configure IPVS support
# Install ipset and ipvsadm (note: the package is ipvsadm, not ipvsadmin)
        yum install ipset ipvsadm -y
# Write the modules that need loading into a script
cat <<EOF > /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# Make the script executable
        chmod +x /etc/sysconfig/modules/ipvs.modules
# Run the script
         /bin/bash /etc/sysconfig/modules/ipvs.modules
# Check that the modules loaded (on kernels >= 4.19, nf_conntrack_ipv4 is replaced by nf_conntrack)
        lsmod | grep -e ip_vs -e nf_conntrack_ipv4

Reboot to apply everything:
reboot
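
After the reboot, a quick check (optional) that the settings survived:

getenforce                  # should print Disabled
free -h                     # the Swap line should show 0B
lsmod | grep br_netfilter   # module loaded again at boot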
  2. Install Docker and cri-dockerd (run on all nodes)

  2.1 Install Docker

  2.1.1 Remove old Docker versions (not needed on a freshly installed VM)
sudo yum remove docker \
     docker-client \
     docker-client-latest \
     docker-common \
     docker-latest \
     docker-latest-logrotate \
     docker-logrotate \
     docker-engine
  2.1.2 Install dependency packages
yum install -y yum-utils device-mapper-persistent-data lvm2
  2.1.3 Add the Aliyun Docker package repository
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
  2.1.4 Refresh the local package cache
yum makecache fast

rpm --import https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  2.1.5 Install Docker and its dependencies
sudo yum install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
  2.1.6 Start Docker and enable it at boot
# Start docker
sudo systemctl start docker
# Enable docker at boot
sudo systemctl enable docker
# Verify
sudo systemctl status docker

  2.2 Install cri-dockerd

Kubernetes removed dockershim in 1.24, so cri-dockerd is required for the kubelet to communicate with Docker.

  2.2.1 Download and install cri-dockerd
# If wget is missing, install it first
        sudo yum install -y wget
# Download
        sudo wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.4/cri-dockerd-0.3.4-3.el7.x86_64.rpm
# Install
        sudo rpm -ivh cri-dockerd-0.3.4-3.el7.x86_64.rpm
# Reload the systemd daemon
        sudo systemctl daemon-reload
  2.2.2 Configure registry mirrors
mkdir -p /home/docker-images
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://q9n10oke.mirror.aliyuncs.com","https://registry.docker-cn.com","http://hub-mirror.c.163.com","https://docker.m.daocloud.io"],
  "insecure-registries": ["8.141.94.237:5000"],
  "data-root": "/home/docker-images"
}

EOF
# daemon.json changes require a Docker restart (daemon-reload alone is not enough)
sudo systemctl restart docker
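
One caveat worth checking here: kubelet 1.28 defaults to the systemd cgroup driver, while Docker on CentOS 7 often defaults to cgroupfs, and a mismatch keeps the kubelet from starting. A hedged sketch of the check and the extra daemon.json key (the exec-opts line is an addition of mine, not part of the original config; merge it into the file above):

# Inspect Docker's active cgroup driver
docker info | grep -i 'cgroup driver'
# If it reports cgroupfs, add this key to /etc/docker/daemon.json and restart Docker:
#   "exec-opts": ["native.cgroupdriver=systemd"]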
  2.2.3 Edit the cri-dockerd service file
vim /usr/lib/systemd/system/cri-docker.service

# Change line 10:  ExecStart=
# to:              ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.7
# (kubeadm 1.28 expects pause:3.9; bump the tag if kubeadm warns about the sandbox image)
  2.2.4 Enable and start the Docker components
# Reload the systemd daemon
        sudo systemctl daemon-reload
# Enable cri-dockerd at boot
        sudo systemctl enable cri-docker.socket cri-docker
# Start cri-dockerd
        sudo systemctl start cri-docker.socket cri-docker
# Check the components' status
        sudo systemctl status docker cri-docker.socket cri-docker

Expected output: all three units report active (running).

  3. Install Kubernetes

  3.1 Install kubectl (run on all nodes)

# This fetches the current stable release, v1.28.2 at the time of writing
# Download
        curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
# Verify the checksum
        curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"
        echo "$(cat kubectl.sha256)  kubectl" | sha256sum --check
# Install
        sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
# Test
        kubectl version --client
  3.2 Install kubeadm (run on all nodes)

# Switch to a domestic mirror
vim /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg


yum clean all
yum makecache


# Install (pin all three packages to the same version)
        sudo yum install -y kubeadm-1.28.2-0 kubelet-1.28.2-0 kubectl-1.28.2-0 --disableexcludes=kubernetes
# Enable and start kubelet
        sudo systemctl enable --now kubelet
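
A quick check (optional) that matching versions landed on every node:

kubeadm version -o short   # expect v1.28.2
kubelet --version          # expect Kubernetes v1.28.2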
  3.3 Install runc (run on all nodes)

# Download runc.amd64
        sudo wget https://github.com/opencontainers/runc/releases/download/v1.1.9/runc.amd64
# Install
        sudo install -m 755 runc.amd64 /usr/local/bin/runc
# Verify
        runc -v

  4. Deploy the cluster

  4.1 Initialize the cluster (run on the master node)
# Run the kubeadm init command
kubeadm init  --node-name=k8s-1-master --image-repository=registry.aliyuncs.com/google_containers --cri-socket=unix:///var/run/cri-dockerd.sock --apiserver-advertise-address=192.168.18.221 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12

# Parameter you must adapt
        --apiserver-advertise-address # the address the API server advertises; set it to the master node's IP

# After init succeeds, run the following
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
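
kubectl should now reach the API server; the master stays NotReady until the CNI plugin (flannel, installed below) is running:

kubectl get nodes   # k8s-1-master shows NotReady for now; that is expected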

# On the master, copy the admin config to the workers (so kubectl also works on the node machines)
        scp /etc/kubernetes/admin.conf 192.168.18.222:/etc/kubernetes/
        scp /etc/kubernetes/admin.conf 192.168.18.223:/etc/kubernetes/

Expected output: the init summary, ending with the kubeadm join command for the worker nodes.

  4.2 Join the worker nodes (run on the node machines)
# On the node, check that admin.conf arrived
        ls /etc/kubernetes/
        admin.conf  manifests
# Add admin.conf to the environment so it takes effect permanently
        echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >>  ~/.bash_profile
# Load it
        source ~/.bash_profile

# ---------------------------------Join the cluster-------------------------------------
# 1. After kubeadm init succeeds on the master, it prints a "kubeadm join xxx xxx" command; copy it to the node and run it.
# 2. If you did not save the kubeadm join command, or you want to add a new node to the cluster,
#    first obtain the token and discovery-token-ca-cert-hash on the master.
#    Get the token
                kubeadm token list   # list existing tokens
                kubeadm token create # if there is no token, create a new one
#    Get the discovery-token-ca-cert-hash
                openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null |  openssl dgst -sha256 -hex | sed 's/^.* //'

# 3. Run kubeadm join on the node
# Substitute the token and discovery-token-ca-cert-hash you obtained, then run
kubeadm join 192.168.18.221:6443 --token yb0az0.nf850ct0dt22j9mo \
        --discovery-token-ca-cert-hash sha256:9c49fe6307284298f3761aedf1126adf4bde2d54b85f9acaedd0590a29f19a2e --cri-socket unix:///var/run/cri-dockerd.sock

# Adding a new node to the cluster
# Generate the token and hash together as a ready-made join command (on the master)
kubeadm token create --print-join-command

# Join the cluster (on the node), appending the cri-socket flag to the printed command
kubeadm join 192.168.18.221:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash> --cri-socket unix:///var/run/cri-dockerd.sock

kubeadm join 192.168.18.221:6443 --token 8dajys.zncqdmwd0sku9klq --discovery-token-ca-cert-hash sha256:aa1c62c46ba97ebfb0bf1cc910ef7e1602ce7f9e94d75f9501e0bef7b9ca1b1c     --cri-socket unix:///var/run/cri-dockerd.sock
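
Back on the master, every joined node should now be listed (NotReady is normal until flannel is deployed):

kubectl get nodes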

  4.3 Rejoin the cluster (run on the node)
# First reset the node, removing it from the cluster
        kubeadm reset --cri-socket unix:///var/run/cri-dockerd.sock
# Then obtain the token and discovery-token-ca-cert-hash as above, and finally run
kubeadm join 192.168.18.221:6443 --token yb0az0.nf850ct0dt22j9mo \
        --discovery-token-ca-cert-hash sha256:9c49fe6307284298f3761aedf1126adf4bde2d54b85f9acaedd0590a29f19a2e  --cri-socket unix:///var/run/cri-dockerd.sock
  4.4 Install the network plugin: download, then apply (run on the master node)
# Download; if the network misbehaves, copy the kube-flannel.yml below instead
        sudo wget https://github.com/flannel-io/flannel/releases/download/v0.22.3/kube-flannel.yml
# Apply
        kubectl apply -f kube-flannel.yml

Or:

vi kube-flannel.yml
# kube-flannel.yml
apiVersion: v1
kind: Namespace
metadata:
  labels:
    k8s-app: flannel
    pod-security.kubernetes.io/enforce: privileged
  name: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: flannel
  name: flannel
  namespace: kube-flannel
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: flannel
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - networking.k8s.io
  resources:
  - clustercidrs
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: flannel
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
kind: ConfigMap
metadata:
  labels:
    app: flannel
    k8s-app: flannel
    tier: node
  name: kube-flannel-cfg
  namespace: kube-flannel
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app: flannel
    k8s-app: flannel
    tier: node
  name: kube-flannel-ds
  namespace: kube-flannel
spec:
  selector:
    matchLabels:
      app: flannel
      k8s-app: flannel
  template:
    metadata:
      labels:
        app: flannel
        k8s-app: flannel
        tier: node
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      containers:
      - args:
        - --ip-masq
        - --kube-subnet-mgr
        command:
        - /opt/bin/flanneld
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        image: docker.io/flannel/flannel:v0.22.3
        name: kube-flannel
        resources:
          requests:
            cpu: 100m
            memory: 50Mi
        securityContext:
          capabilities:
            add:
            - NET_ADMIN
            - NET_RAW
          privileged: false
        volumeMounts:
        - mountPath: /run/flannel
          name: run
        - mountPath: /etc/kube-flannel/
          name: flannel-cfg
        - mountPath: /run/xtables.lock
          name: xtables-lock
      hostNetwork: true
      initContainers:
      - args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        command:
        - cp
        image: docker.io/flannel/flannel-cni-plugin:v1.2.0
        name: install-cni-plugin
        volumeMounts:
        - mountPath: /opt/cni/bin
          name: cni-plugin
      - args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        command:
        - cp
        image: docker.io/flannel/flannel:v0.22.3
        name: install-cni
        volumeMounts:
        - mountPath: /etc/cni/net.d
          name: cni
        - mountPath: /etc/kube-flannel/
          name: flannel-cfg
      priorityClassName: system-node-critical
      serviceAccountName: flannel
      tolerations:
      - effect: NoSchedule
        operator: Exists
      volumes:
      - hostPath:
          path: /run/flannel
        name: run
      - hostPath:
          path: /opt/cni/bin
        name: cni-plugin
      - hostPath:
          path: /etc/cni/net.d
        name: cni
      - configMap:
          name: kube-flannel-cfg
        name: flannel-cfg
      - hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
        name: xtables-lock
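
Once applied, the flannel DaemonSet should start on every node and the nodes flip to Ready; a quick check (optional):

kubectl get pods -n kube-flannel   # one kube-flannel-ds pod per node, Running
kubectl get nodes                  # all nodes Ready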
  4.5 kubectl command completion

# Requires the bash-completion package (yum install -y bash-completion)
# Append to ~/.bashrc
vim ~/.bashrc
# Add the line below
source <(kubectl completion bash)

source ~/.bashrc
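
A common convenience on top of this (optional; the alias name k is just a habit, not required):

echo "alias k=kubectl" >> ~/.bashrc
echo "complete -o default -F __start_kubectl k" >> ~/.bashrc
source ~/.bashrc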
  4.6 Test the Kubernetes cluster

# Usually run on the master; if kubectl works on the node machines, these can be run there too
kubectl get nodes
kubectl get pod -A

  4.7 Test with Nginx
vim nginx-deployment.yaml
kubectl apply -f nginx-deployment.yaml
# nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:latest
          ports:
            - containerPort: 80

---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  selector:
    app: nginx
  ports:
    - name: http 
      port: 80
      targetPort: 80
      nodePort: 30080
  type: NodePort

Output:

[root@k8s-1-master ~]# kubectl get pod,svc | grep nginx
pod/nginx-deployment-7c79c4bf97-hmmr8   1/1     Running   0          7m32s
pod/nginx-deployment-7c79c4bf97-k2626   1/1     Running   0          7m32s
pod/nginx-deployment-7c79c4bf97-r5ln4   1/1     Running   0          7m32s
service/nginx-service   NodePort    10.111.164.231   <none>        80:30080/TCP   7m32s

Visit http://192.168.18.221:30080/; if the Nginx welcome page appears, everything works.
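
The same check from a shell (any node IP works, since a NodePort listens on every node):

curl -I http://192.168.18.221:30080   # expect HTTP/1.1 200 OK from nginx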

  5. Cluster version upgrade

Upgrade approach:

  • Upgrade the master1 node first, then the other control-plane nodes (master2, master3, ...), and only then the worker nodes (node1, node2, ...).
  • Upgrades carry risk, so back up the data on each node before upgrading it.
  • When upgrading, first drain the pods off the node, then mark it unschedulable. That way, if the upgrade fails, business workloads are not affected.

Notes:

  • Before upgrading, back up all components and data, e.g. etcd (see the sketch after this list).
  • Never skip multiple minor versions in one jump, e.g. from 1.16 straight to 1.19.
  • Rehearse the upgrade several times in a test environment; only touch production once it runs cleanly.
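
A minimal etcd backup sketch for this kubeadm cluster, assuming stacked etcd with the default kubeadm certificate paths and an etcdctl binary on the master (otherwise the same command can be run inside the etcd static pod):

mkdir -p /backup
# Snapshot etcd before the upgrade
ETCDCTL_API=3 etcdctl snapshot save /backup/etcd-snapshot-$(date +%F).db \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt \
  --key=/etc/kubernetes/pki/etcd/healthcheck-client.key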
  5.1 Upgrade the master node

  5.1.1 Find the available version numbers.
[root@k8s-1-master bck]# yum list --showduplicates kubeadm

  5.1.2 Upgrade kubeadm first.
[root@k8s-1-master bck]# yum install -y kubeadm-1.28.2-0 
  5.1.3 Drain the pods off the node and mark it unschedulable.
# --ignore-daemonsets: skip DaemonSet-managed pods, which cannot be evicted.
[root@k8s-1-master ~]# kubectl drain k8s-1-master --ignore-daemonsets
  5.1.4 Check the node's pod status to confirm the eviction succeeded. With no business pods left on the node, the upgrade can begin.

  5.1.5 Check whether the cluster can be upgraded.
[root@k8s-1-master bck]# kubeadm upgrade plan

  5.1.6 Apply the upgrade.
[root@k8s-1-master bck]# kubeadm upgrade apply v1.28.2
  5.1.7 Upgrade kubelet and kubectl; they must match the kubeadm version.
[root@k8s-1-master bck]# yum install -y kubelet-1.28.2-0 kubectl-1.28.2-0
  5.1.8 Restart kubelet, then confirm the version now shows the new release.
[root@k8s-1-master bck]# systemctl daemon-reload
[root@k8s-1-master bck]# systemctl restart kubelet

  5.1.9 Make the node schedulable again and bring it back online.
[root@k8s-1-master bck]# kubectl uncordon k8s-1-master
  5.2 Upgrade the worker nodes

  5.2.1 Check the current version first.

  5.2.2 Upgrade the kubeadm version.
[root@k8s-1-node2 ~]# yum install -y kubeadm-1.28.2-0
  5.2.3 Drain the pods off node1 and mark it unschedulable (run wherever kubectl works, e.g. the master).
kubectl drain k8s-1-node1 --ignore-daemonsets
  5.2.4 Upgrade the kubelet configuration
kubeadm upgrade node
  5.2.5 Upgrade kubelet and kubectl
yum install -y kubelet-1.28.2-0 kubectl-1.28.2-0
  5.2.6 Restart kubelet
systemctl daemon-reload
systemctl restart kubelet
  5.2.7 Make the node schedulable again and bring it back online.
kubectl uncordon k8s-1-node1
  6. Dashboard deployment

  6.1 Deploy the Dashboard

  6.1.1 Download the manifest (from a domestic mirror) and prepare to apply it:
wget https://gitee.com/qinziteng/K8S/raw/master/YMAL/recommended.yaml
  6.1.2 By default the Dashboard is only reachable from inside the cluster; expose it by adding a Service type:
vim recommended.yaml

# In the kubernetes-dashboard Service's spec, add
  type: NodePort

  6.1.3 Once the edit is double-checked, apply the file
kubectl apply -f recommended.yaml
  6.1.4 Check the status; by default everything lives in the kubernetes-dashboard namespace:
kubectl get pods -n kubernetes-dashboard

  6.1.5 Find the port the Service exposes; this is the port used for access:
kubectl get svc -n kubernetes-dashboard

  6.1.6 Open the page

https://<node-IP>:<NodePort>

https://192.168.18.221:32716/#/login

  6.2 Configure a Dashboard login user

  6.2.1 Log in with a token
  1) Create a ClusterRoleBinding object and grant it cluster-admin, i.e. access to the whole cluster, including permission to view and modify every resource
# Create the user

kubectl create serviceaccount dashboard-cluster-admin -n kubernetes-dashboard

# Grant the user permissions

kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:dashboard-cluster-admin

# Look for the user's token Secret
kubectl get secret -n kubernetes-dashboard | grep dashboard-cluster-admin

# Create a new token
kubectl create token dashboard-cluster-admin -n kubernetes-dashboard
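
Tokens minted by kubectl create token are short-lived (one hour by default). For a longer-lived login token, pass --duration:

kubectl create token dashboard-cluster-admin -n kubernetes-dashboard --duration=24h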


# Command breakdown:

create: the command for creating resources
clusterrolebinding: the resource type, i.e. create a ClusterRoleBinding object
dashboard-cluster-admin: the name of the ClusterRoleBinding object
clusterrole=cluster-admin: the ClusterRole to bind; here cluster-admin, the ClusterRole with full access
serviceaccount=kubernetes-dashboard:dashboard-cluster-admin: the ServiceAccount to bind; here the dashboard-cluster-admin ServiceAccount in the kubernetes-dashboard namespace
  2) Inspect the created Secrets
kubectl get secret -n kubernetes-dashboard

No token-bearing Secret is listed, confirming none was auto-created (on 1.24+ ServiceAccount token Secrets are no longer generated automatically), so create one by hand

# Create a token:

kubectl create token dashboard-cluster-admin -n kubernetes-dashboard

# Create a Secret:
Save the token generated in the previous step into a new Secret:

kubectl create secret generic dashboard-cluster-admin-token -n kubernetes-dashboard --from-literal=token=<token-value>

# Note: <token-value> is the token generated in the previous step
# Update the ServiceAccount:
Associate the newly created Secret with the ServiceAccount:

kubectl patch serviceaccount dashboard-cluster-admin -n kubernetes-dashboard -p '{"secrets": [{"name": "dashboard-cluster-admin-token"}]}'

  3) Retrieve the token:
kubectl get secret dashboard-cluster-admin-token -n kubernetes-dashboard -o jsonpath='{.data.token}' | base64 --decode

  4) Paste the token into the browser's login field to start managing the K8S cluster

  6.2.2 Dashboard error

This is a Kubernetes permissions problem. The error message says the service account system:serviceaccount:kubernetes-dashboard:dashboard-cluster-admin lacks permission to list namespaces at cluster scope.

Grant the permission:

vim rbac.yaml 

# Add the following (server-generated metadata such as creationTimestamp, resourceVersion, and uid must not appear in the manifest)
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-cluster-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: dashboard-cluster-admin
  namespace: kubernetes-dashboard

# Apply the update
kubectl apply -f rbac.yaml 
  6.2.3 Log in with a kubeconfig file
  1) Create the cluster entry
cd /etc/kubernetes/pki

kubectl config set-cluster kubernetes --certificate-authority=./ca.crt --server="https://192.168.18.221:6443" --embed-certs=true --kubeconfig=/root/dashboard-admin.conf
  2) When this completes, the file /root/dashboard-admin.conf is generated
cat /root/dashboard-admin.conf
  3) Create the credentials
# On 1.24+ there is no auto-created kubernetes-dashboard-token-* Secret; reuse the dashboard-cluster-admin-token Secret created above
TOKEN=$(kubectl get secret dashboard-cluster-admin-token -n kubernetes-dashboard -o jsonpath={.data.token} | base64 -d)

kubectl config set-credentials dashboard-admin --token=$TOKEN --kubeconfig=/root/dashboard-admin.conf
  4) Create the context
kubectl config set-context dashboard-admin@kubernetes --cluster=kubernetes --user=dashboard-admin --kubeconfig=/root/dashboard-admin.conf
  5) Switch the current-context to dashboard-admin@kubernetes
kubectl config use-context dashboard-admin@kubernetes --kubeconfig=/root/dashboard-admin.conf
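
A quick way (optional) to confirm the kubeconfig works before handing it to the browser:

kubectl get nodes --kubeconfig=/root/dashboard-admin.conf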
  6) Download /root/dashboard-admin.conf to your local machine, then upload it on the web login page

  7. Kuboard visual console deployment [master]

  7.1 Deploy

Kuboard official docs: kuboard.cn/install/v3/…

wget https://kuboard.cn/install-script/kuboard.yaml

# Replace the image in kuboard.yaml with 8.141.94.237:5000/qingxi/kuboard:v3 (the private registry configured earlier)
kubectl apply -f kuboard.yaml

kubectl get pod -A
kube-system            kuboard-858d5cb67b-qjhrb                     1/1     Running   0               4m32s
kubectl get svc -A
kube-system            kuboard                     NodePort    10.96.73.234    <none>        80:32567/TCP             4m41s

Access URL: http://192.168.18.221:32567 (any node IP works for the NodePort)
Username: admin
Password: Kuboard123


Alternatively, run Kuboard as a standalone Docker container:

docker pull swr.cn-east-2.myhuaweicloud.com/kuboard/kuboard:v3

mkdir -p /home/docker/kuboard
# Start the kuboard container
docker run -d \
  --restart=unless-stopped \
  --name=kuboard \
  -p 8081:80/tcp \
  -p 30081:10081/tcp \
  -e KUBOARD_ENDPOINT="http://192.168.18.221:8081" \
  -e KUBOARD_AGENT_SERVER_TCP_PORT="30081" \
  -v /home/docker/kuboard:/data/kuboard \
  swr.cn-east-2.myhuaweicloud.com/kuboard/kuboard:v3

Access URL: http://192.168.18.221:8081
Username: admin
Password: Kuboard123

Log in to Kuboard

  7.2 Add the k8s cluster

Usage guide: 【云原生】Kubernetes 多集群管理工具 Kuboard v3 (CSDN blog)

  8. Quick Istio deployment

Official deployment docs: istio.io
