Notes from Deploying a Recent MinIO Cluster on K8s with a Single Script


As our business systems' architecture evolved, we needed a file storage service on the company's existing PaaS platform, and the popular open-source MinIO was clearly the best fit. I had assumed an official K8s deployment script existed, but could not find one on the MinIO site, so I adapted an earlier nacos-on-NFS deployment into the script below. The process had plenty of pitfalls. For example, renaming the Service from minio to minio-svc while the server argument still pointed at http://minio-{0...3}.minio.minio.svc.cluster.local/miniodata left the nodes unable to reach one another and broke the cluster. I am sharing the script so that others with the same need can skip those detours. Note that the following versions must match:

  • Kubernetes: 1.20.9
  • NFS: 4.2
  • MinIO: RELEASE.2022-06-20T23-13-45Z
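
The naming pitfall deserves a word of explanation before the script: every pod of a StatefulSet is addressable as <pod-name>.<service-name>.<namespace>.svc.cluster.local, so the Service name is baked into the address list passed to minio server, and renaming the Service silently breaks inter-node traffic. A quick way to confirm the records resolve after deployment (a sketch; the busybox image tag is my assumption):

# Resolve the first MinIO pod's DNS record from a throwaway pod.
kubectl -n minio run dns-check --rm -it --restart=Never --image=busybox:1.35 \
  -- nslookup minio-0.minio.minio.svc.cluster.local
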
#!/bin/bash
set -e
cat 1>app-deployment.yaml <<EOF
### 1. Create the namespace ###
---
apiVersion: v1
kind: Namespace
metadata:
  name: minio

### 2. Create the ServiceAccount and RBAC rules ###
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: minio        # set to the namespace the provisioner is deployed in; the same applies below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: minio
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
    # replace with namespace where provisioner is deployed
  namespace: minio
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: minio
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: minio
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
  
### 3. Deploy the NFS provisioner ###
---  
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: minio  # must match the namespace used in the RBAC objects above
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      imagePullSecrets:
        - name: harbor-pro-registry
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: harbor.xxxx.com/infrastructure/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-volume
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: minio-nfs-storage  # provisioner name; must match the provisioner field of the StorageClass in section 4
            - name: NFS_SERVER
              value: XX.XX.XX.XX   # NFS server IP address
            - name: NFS_PATH
              value: /appdata/minio     # this directory must already exist on the NFS server
      volumes:
        - name: nfs-client-volume
          nfs:
            server: XX.XX.XX.XX  # NFS server IP address
            path: /appdata/minio     # this directory must already exist on the NFS server
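# Note: for every PVC it provisions, nfs-subdir-external-provisioner creates a
# subdirectory under /appdata/minio named <namespace>-<pvcName>-<pvName>.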

### 4. Create the NFS-backed StorageClass ###
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: minio-managed-nfs-storage
provisioner: minio-nfs-storage # must match the PROVISIONER_NAME environment variable of the provisioner Deployment above
reclaimPolicy: "Retain"
parameters:
  archiveOnDelete: "false"
mountOptions:
  - hard
  - vers=4.2 # NFS protocol version; set this to match your NFS server

### 5. Create the minio Service ###
---
apiVersion: v1
kind: Service
metadata:
  name: minio
  namespace: minio
  labels:
    app: minio
spec:
  selector:
    app: minio
  type: NodePort
  ports:
    - name: minio-api
      port: 9000
      targetPort: 9000
      nodePort: 30090
      protocol: TCP
    - name: minio-console
      port: 9001
      targetPort: 9001
      nodePort: 30091
      protocol: TCP
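# With NodePort, the S3 API is reachable at <node-ip>:30090 and the web console
# at <node-ip>:30091, even before the Ingress in section 7 is created.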

### 6. Create the minio StatefulSet ###
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: minio
  namespace: minio
spec:
  serviceName: minio
  podManagementPolicy: Parallel
  replicas: 4
  selector:
    matchLabels:
      app: minio
  template:
    metadata:
      labels:
        app: minio
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - minio
            topologyKey: kubernetes.io/hostname
      imagePullSecrets:
        - name: harbor-pro-registry
      containers:
      - name: minio
        env:
        # MINIO_ACCESS_KEY/MINIO_SECRET_KEY are deprecated in this MinIO release;
        # MINIO_ROOT_USER/MINIO_ROOT_PASSWORD are their replacements.
        - name: MINIO_ROOT_USER
          value: "admin"
        - name: MINIO_ROOT_PASSWORD
          value: "Minio123"
        image: harbor.xxxx.com/infrastructure/minio/minio:RELEASE.2022-06-20T23-13-45Z
        imagePullPolicy: Always
        args:
        - server
        - http://minio-{0...3}.minio.minio.svc.cluster.local/miniodata
        - --console-address
        - ":9001"
        - --address
        - ":9000"
        ports:
        - containerPort: 9000
        - containerPort: 9001
        volumeMounts:
        - name: data
          mountPath: /miniodata
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 200Gi
      storageClassName: minio-managed-nfs-storage
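# volumeClaimTemplates gives each of the four replicas its own 200Gi PVC,
# provisioned dynamically through the StorageClass from section 4.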

### 7. Create the Ingress ###
---
apiVersion: networking.k8s.io/v1beta1  # fine on K8s 1.20.x; switch to networking.k8s.io/v1 on 1.22+
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/proxy-body-size: 5000m
    nginx.ingress.kubernetes.io/proxy-connect-timeout: "300"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-http-version: "1.1"
  name: minio-ingress
  namespace: minio
spec:
  rules:
    - host: minio-api-test.xxxx.com
      http:
        paths:
          - backend:
              serviceName: minio
              servicePort: 9000
    - host: minio-test.xxxx.com
      http:
        paths:
          - backend:
              serviceName: minio
              servicePort: 9001
          
EOF
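
# Optional: validate the generated manifest with a client-side dry run before applying.
kubectl apply -f app-deployment.yaml --dry-run=client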

kubectl apply -f app-deployment.yaml --record
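
After the script runs, a few sanity checks are worth doing. The commands below are a sketch: kubectl verifies that all four pods and their PVCs came up, and mc (the MinIO client, assumed to be installed locally; the alias name miniotest is arbitrary) confirms cluster health through the API host defined in the Ingress:

kubectl -n minio get pods,pvc -o wide
mc alias set miniotest http://minio-api-test.xxxx.com admin Minio123
mc admin info miniotest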