k8s实现基于NFS StorageClass的存储动态供应

257 阅读8分钟

NFS实现k8s基于StorageClass的存储动态供应(2)

NFS flex-volume动态供应

NFS csi动态供应

参考

在NFS实现K8s存储(1)中讲述了存储的重要概念和NFS动态供应,如果还没有相应的了解,请参考我的文章

NFS flex-volume动态供应

  1. 获取flex volume插件,点我
  2. 创建ServiceAccount、ClusterRole、ClusterRoleBinding等,为nfs-client-provisioner授权 本人rbac.yaml文件如下:
# RBAC for nfs-client-provisioner: the provisioner pod runs under this
# ServiceAccount and needs cluster-wide rights to manage PVs and watch
# PVCs/StorageClasses, plus a namespaced Role for leader-election endpoints.
apiVersion: v1
kind: ServiceAccount
metadata:
 name: nfs-client-provisioner
 # replace with namespace where provisioner is deployed
 namespace: default
---
# Cluster-scoped permissions: PVs are cluster resources, so a ClusterRole
# is required even though the provisioner itself runs in one namespace.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
 name: nfs-client-provisioner-runner
rules:
 # create/delete PersistentVolumes when PVCs are provisioned or reclaimed
 - apiGroups: [""]
   resources: ["persistentvolumes"]
   verbs: ["get", "list", "watch", "create", "delete"]
 # watch PVCs to react to new claims; update them to bind provisioned PVs
 - apiGroups: [""]
   resources: ["persistentvolumeclaims"]
   verbs: ["get", "list", "watch", "update"]
 # read StorageClasses to find ones whose provisioner matches PROVISIONER_NAME
 - apiGroups: ["storage.k8s.io"]
   resources: ["storageclasses"]
   verbs: ["get", "list", "watch"]
 # emit provisioning success/failure events visible in `kubectl describe pvc`
 - apiGroups: [""]
   resources: ["events"]
   verbs: ["create", "update", "patch"]
---
# Bind the cluster-wide permissions to the provisioner's ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
 name: run-nfs-client-provisioner
subjects:
 - kind: ServiceAccount
   name: nfs-client-provisioner
   # replace with namespace where provisioner is deployed
   namespace: default
roleRef:
 kind: ClusterRole
 name: nfs-client-provisioner-runner
 apiGroup: rbac.authorization.k8s.io
---
# Namespaced Role for leader election: multiple provisioner replicas
# coordinate through an Endpoints lock object in this namespace.
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
 name: leader-locking-nfs-client-provisioner
 # replace with namespace where provisioner is deployed
 namespace: default
rules:
 - apiGroups: [""]
   resources: ["endpoints"]
   verbs: ["get", "list", "watch", "create", "update", "patch"]
---
# Bind the leader-election Role to the same ServiceAccount.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
 name: leader-locking-nfs-client-provisioner
 # replace with namespace where provisioner is deployed
 namespace: default
subjects:
 - kind: ServiceAccount
   name: nfs-client-provisioner
   # replace with namespace where provisioner is deployed
   namespace: default
roleRef:
 kind: Role
 name: leader-locking-nfs-client-provisioner
 apiGroup: rbac.authorization.k8s.io

部署rbac.yaml

root@master:/home/guanwu/k8s/nfs/flex-volume# k apply -f rbac.yaml 
serviceaccount/nfs-client-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
role.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
  1. 部署nfs-client-provisioner
    请注意,部署provisioner之前,请确保k8s集群节点已经安装了nfs server和nfs client 本人安装的nfs如下
root@master:/home/guanwu/k8s/nfs/flex-volume# systemctl status nfs-server
● nfs-server.service - NFS server and services
     Loaded: loaded (/lib/systemd/system/nfs-server.service; enabled; vendor preset: enabled)
    Drop-In: /run/systemd/generator/nfs-server.service.d
             └─order-with-mounts.conf
     Active: active (exited) since Sat 2024-01-06 21:20:30 CST; 19min ago
   Main PID: 1222 (code=exited, status=0/SUCCESS)
        CPU: 69ms

1月 06 21:20:29 master systemd[1]: Starting NFS server and services...
1月 06 21:20:29 master exportfs[1220]: exportfs: /etc/exports [1]: Neither 'subtree_check' or 'no_subtree_check' spec>
1月 06 21:20:29 master exportfs[1220]:   Assuming default behaviour ('no_subtree_check').
1月 06 21:20:29 master exportfs[1220]:   NOTE: this default has changed since nfs-utils version 1.0.x
1月 06 21:20:30 master systemd[1]: Finished NFS server and services.
root@master:/home/guanwu/k8s/nfs/flex-volume# showmount -e
Export list for master:
/usr/share/nfs *
root@master:/home/guanwu/k8s/nfs/flex-volume# 

provider的deployment文件如下

# Deployment for the NFS subdir external provisioner. It mounts the NFS
# export itself (volume nfs-client-root) and creates one subdirectory per
# provisioned PV under that export.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    # Recreate avoids two provisioner instances writing to the share at once
    # during a rollout.
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      # must match the ServiceAccount created in rbac.yaml
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-beijing.aliyuncs.com/pylixm/nfs-subdir-external-provisioner:v4.0.0

          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            # must match the `provisioner:` field of the StorageClass
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 192.168.201.129 # NOTE: NFS server IP — replace with your real server address
            - name: NFS_PATH
              value: /usr/share/nfs # the exported NFS share directory
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.201.129 # keep consistent with NFS_SERVER above
            path: /usr/share/nfs # keep consistent with NFS_PATH above

部署deployment.yaml

root@master:/home/guanwu/k8s/nfs/flex-volume# k apply -f depolyment.yaml 
deployment.apps/nfs-client-provisioner created
root@master:/home/guanwu/k8s/nfs/flex-volume# k describe deployments.apps nfs-client-provisioner -n default
Name:               nfs-client-provisioner
Namespace:          default
CreationTimestamp:  Sat, 06 Jan 2024 21:52:59 +0800
Labels:             app=nfs-client-provisioner
Annotations:        deployment.kubernetes.io/revision: 1
Selector:           app=nfs-client-provisioner
Replicas:           1 desired | 1 updated | 1 total | 1 available | 0 unavailable
StrategyType:       Recreate
MinReadySeconds:    0
Pod Template:
 Labels:           app=nfs-client-provisioner
 Service Account:  nfs-client-provisioner
 Containers:
  nfs-client-provisioner:
   Image:      registry.cn-beijing.aliyuncs.com/pylixm/nfs-subdir-external-provisioner:v4.0.0
   Port:       <none>
   Host Port:  <none>
   Environment:
     PROVISIONER_NAME:  fuseim.pri/ifs
     NFS_SERVER:        192.168.201.129
     NFS_PATH:          /usr/share/nfs
   Mounts:
     /persistentvolumes from nfs-client-root (rw)
 Volumes:
  nfs-client-root:
   Type:      NFS (an NFS mount that lasts the lifetime of a pod)
   Server:    192.168.201.129
   Path:      /usr/share/nfs
   ReadOnly:  false
Conditions:
 Type           Status  Reason
 ----           ------  ------
 Available      True    MinimumReplicasAvailable
 Progressing    True    NewReplicaSetAvailable
OldReplicaSets:  <none>
NewReplicaSet:   nfs-client-provisioner-6cfc66f876 (1/1 replicas created)
Events:
 Type    Reason             Age   From                   Message
 ----    ------             ----  ----                   -------
 Normal  ScalingReplicaSet  8s    deployment-controller  Scaled up replica set nfs-client-provisioner-6cfc66f876 to 1
root@master:/home/guanwu/k8s/nfs/flex-volume# k get pods -n default
NAME                                      READY   STATUS    RESTARTS   AGE
nfs-client-provisioner-6cfc66f876-tnn7d   1/1     Running   0          13s
root@master:/home/guanwu/k8s/nfs/flex-volume# 

4.创建StorageClass

# StorageClass wired to the flex-volume provisioner deployed above.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs # or choose another name, but it must match the deployment's PROVISIONER_NAME env var
parameters:
  # "false": the PV's backing directory is deleted on reclaim;
  # "true" would rename it to archived-* instead of deleting.
  archiveOnDelete: "false"

创建class

root@master:/home/guanwu/k8s/nfs/flex-volume# k apply -f class.yaml 
storageclass.storage.k8s.io/managed-nfs-storage created
root@master:/home/guanwu/k8s/nfs/flex-volume# k get storageclasses.storage.k8s.io 
NAME                  PROVISIONER      RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
managed-nfs-storage   fuseim.pri/ifs   Delete          Immediate           false                  8s
  1. 创建Stateful应用验证 这里创建了三个replicas的nginx statefulSet,并且使用了managed-nfs-storage实现动态供应,不需要额外指定PV了
# Headless Service + StatefulSet to validate dynamic provisioning:
# each replica gets its own PVC from the volumeClaimTemplates, and the
# managed-nfs-storage class provisions the backing PV automatically.
apiVersion: v1
kind: Service
metadata:
 name: nginx
 labels:
   app: nginx
spec:
 ports:
 - port: 80
   name: web
 # headless service (no cluster IP) gives each pod a stable DNS name
 clusterIP: None
 selector:
   app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
 name: nfs-web
spec:
 serviceName: "nginx"
 replicas: 3
 selector:
   matchLabels:
     app: nfs-web # has to match .spec.template.metadata.labels
 template:
   metadata:
     labels:
       app: nfs-web
   spec:
     terminationGracePeriodSeconds: 10
     containers:
     - name: nginx
       image: nginx:1.9
       ports:
       - containerPort: 80
         name: web
       volumeMounts:
       - name: www
         mountPath: /usr/share/nginx/html
 volumeClaimTemplates:
 - metadata:
     name: www
   spec:
     accessModes: [ "ReadWriteOnce" ]
     # storageClassName replaces the deprecated
     # volume.beta.kubernetes.io/storage-class annotation (deprecated since
     # Kubernetes 1.6); it must match the StorageClass created earlier.
     storageClassName: managed-nfs-storage
     resources:
       requests:
         storage: 1Gi

执行kubectl apply -f statefulset-web.yaml,可以看到,/usr/share/nfs目录创建了三个test-www-nfs-web-0-pvc-前缀的目录,证明创建成功,执行以下命令可以验证

ls /usr/share/nfs/test-www-nfs*/*.html | xargs

root@master:/home/guanwu/k8s/nfs/flex-volume# k apply -f statefulset-nfs.yaml 
service/nginx created
statefulset.apps/nfs-web created
root@master:/home/guanwu/k8s/nfs/flex-volume# k get pods -l app=nfs-web
NAME        READY   STATUS    RESTARTS   AGE
nfs-web-0   1/1     Running   0          19s
nfs-web-1   1/1     Running   0          16s
nfs-web-2   1/1     Running   0          11s
root@master:/home/guanwu/k8s/nfs/pv/provisor/csi# k get pvc
NAME            STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS          AGE
nfs-pvc         Bound    pv0001                                     5Gi        RWX                                  23h
www-nfs-web-0   Bound    pvc-11e1c1c9-ef65-4417-9042-2eca08688e6e   1Gi        RWO            managed-nfs-storage   31m
www-nfs-web-1   Bound    pvc-82c042de-217a-4449-a5e7-8b5e023996ee   1Gi        RWO            managed-nfs-storage   31m
www-nfs-web-2   Bound    pvc-dcd767d6-49e4-42ac-8d35-53a01efe1a62   1Gi        RWO            managed-nfs-storage   31m
root@master:/home/guanwu/k8s/nfs/flex-volume# ls /usr/share/nfs
date.txt
pvc-324d5c94-27c6-4d6d-8af0-a08adc9a8de6
test-www-nfs-web-0-pvc-11e1c1c9-ef65-4417-9042-2eca08688e6e
test-www-nfs-web-1-pvc-82c042de-217a-4449-a5e7-8b5e023996ee
test-www-nfs-web-2-pvc-dcd767d6-49e4-42ac-8d35-53a01efe1a62

验证创建的存储卷,对每个Container的挂载目录,创建一个index.html文件,并填充当前日期,最后使用 ls /usr/share/nfs/test-www-nfs*/*.html | xargs 命令验证

root@master:/home/guanwu/k8s/nfs/flex-volume# echo $(date) > /usr/share/nfs/test-www-nfs-web-0-pvc-11e1c1c9-ef65-4417-9042-2eca08688e6e/index.html
root@master:/home/guanwu/k8s/nfs/flex-volume# k get pods -l app=nfs-web -owide
NAME        READY   STATUS    RESTARTS   AGE     IP            NODE      NOMINATED NODE   READINESS GATES
nfs-web-0   1/1     Running   0          3m44s   10.10.2.125   worker2   <none>           <none>
nfs-web-1   1/1     Running   0          3m41s   10.10.1.124   worker1   <none>           <none>
nfs-web-2   1/1     Running   0          3m36s   10.10.0.90    master    <none>           <none>
root@master:/home/guanwu/k8s/nfs/flex-volume# echo $(date) > /usr/share/nfs/test-www-nfs-web-1-pvc-82c042de-217a-4449-a5e7-8b5e023996ee/index.html
root@master:/home/guanwu/k8s/nfs/flex-volume# curl 10.10.1.124
2024年 01月 06日 星期六 22:03:25 CST
root@master:/home/guanwu/k8s/nfs/flex-volume# echo $(date) > /usr/share/nfs/test-www-nfs-web-2-pvc-dcd767d6-49e4-42ac-8d35-53a01efe1a62/index.html
root@master:/home/guanwu/k8s/nfs/flex-volume# curl 10.10.0.90
2024年 01月 06日 星期六 22:03:55 CST
root@master:/home/guanwu/k8s/nfs/flex-volume# ls /usr/share/nfs/test-www-nfs*/*.html | xargs cat
2024年 01月 06日 星期六 22:02:32 CST
2024年 01月 06日 星期六 22:03:25 CST
2024年 01月 06日 星期六 22:03:55 CST

破坏性测试,delete nfs-web-0后,新生成的nfs-web-0重新访问还是能获取到之前创建的index.html的日期

root@master:/home/guanwu/k8s/nfs/flex-volume# k delete pod nfs-web-0
pod "nfs-web-0" deleted

root@master:/home/guanwu/k8s/nfs/flex-volume# k get pods -l app=nfs-web -owide
NAME        READY   STATUS    RESTARTS   AGE   IP            NODE      NOMINATED NODE   READINESS GATES
nfs-web-0   1/1     Running   0          13s   10.10.2.127   worker2   <none>           <none>
nfs-web-1   1/1     Running   0          16m   10.10.1.124   worker1   <none>           <none>
nfs-web-2   1/1     Running   0          16m   10.10.0.90    master    <none>           <none>
root@master:/home/guanwu/k8s/nfs/flex-volume# curl 10.10.2.127
2024年 01月 06日 星期六 22:02:32 CST

NFS csi动态供应

  1. csi插件安装 可以参考官方文档安装,点我 主要文件如下
$ kubectl apply -f rbac-csi.yaml

$ kubectl apply -f csi-nfs-driverinfo.yaml

$ kubectl apply -f csi-nfs-node.yaml

$ kubectl apply -f csi-nfs-controller.yaml

执行四个命令后

root@master:/home/guanwu/k8s/nfs/pv/provisor/csi# k get pod -n kube-system | grep csi
csi-nfs-controller-6866b7dfbf-dt587   4/4     Running   5 (69m ago)    4d
csi-nfs-node-8l5pl                    3/3     Running   2 (4d ago)     4d
csi-nfs-node-m6gps                    3/3     Running   2 (4d ago)     4d
csi-nfs-node-wlgfp                    3/3     Running   5 (67m ago)    4d

创建storageClass,storageClass.yaml文件如下


# StorageClass backed by the NFS CSI driver (csi-driver-nfs). Unlike the
# flex-volume provisioner, the NFS server/share are configured here in the
# class parameters rather than in the provisioner's environment.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
  server: 192.168.201.129 # NOTE: replace with your own NFS server IP
  share: /usr/share/nfs
  # csi.storage.k8s.io/provisioner-secret is only needed for providing mountOptions in DeleteVolume
  # csi.storage.k8s.io/provisioner-secret-name: "mount-options"
  # csi.storage.k8s.io/provisioner-secret-namespace: "default"
reclaimPolicy: Delete
volumeBindingMode: Immediate
mountOptions:
  # force NFSv4.1 when mounting provisioned volumes
  - nfsvers=4.1

执行k apply -f storageClass.yaml后,可以看到

root@master:/home/guanwu/k8s/nfs/pv/provisor/csi# k apply -f storageClass.yaml 
storageclass.storage.k8s.io/nfs-csi created
root@master:/home/guanwu/k8s/nfs/pv/provisor/csi# k get storageclass
NAME                  PROVISIONER      RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
managed-nfs-storage   fuseim.pri/ifs   Delete          Immediate           false                  39m
nfs-csi               nfs.csi.k8s.io   Delete          Immediate           false                  4s

验证,创建一个pvc-sci的yaml文件,如下所示,请注意,这里使用的storageClass是nfs-csi

# PVC dynamically provisioned by the nfs-csi StorageClass, plus a Deployment
# that mounts it and appends a timestamped line to /mnt/nfs/outfile each
# second so the mount can be verified from the NFS server side.
apiVersion: v1   # was missing: without apiVersion the PVC manifest is rejected by kubectl
kind: PersistentVolumeClaim
metadata:
  name: pvc-deployment-nfs
spec:
  accessModes:
    - ReadWriteMany  # In this example, multiple Pods consume the same PVC.
  resources:
    requests:
      storage: 1Gi
  storageClassName: nfs-csi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deployment-nfs
spec:
  replicas: 1
  selector:
    matchLabels:
      name: deployment-nfs
  template:
    metadata:
      name: deployment-nfs
      labels:
        name: deployment-nfs
    spec:
      containers:
        - name: deployment-nfs
          image: nginx:1.9
          # override nginx entrypoint: write hostname+date to the NFS mount
          # once per second so writes are observable on the server
          command:
            - "/bin/bash"
            - "-c"
            - set -euo pipefail; while true; do echo $(hostname) $(date) >> /mnt/nfs/outfile; sleep 1; done
          volumeMounts:
            - name: nfs
              mountPath: "/mnt/nfs"
      volumes:
        - name: nfs
          persistentVolumeClaim:
            claimName: pvc-deployment-nfs
root@master:/home/guanwu/k8s/nfs/pv/provisor/csi# k apply -f pvc-sci.yaml 
persistentvolumeclaim/pvc-deployment-nfs created
root@master:/home/guanwu/k8s/nfs/pv/provisor/csi# k get pod -l name=deployment-nfs
NAME                             READY   STATUS    RESTARTS   AGE
deployment-nfs-ddc874f7f-6krmt   1/1     Running   0          2m2s
root@master:/home/guanwu/k8s/nfs/pv/provisor/csi# k get pvc pvc-deployment-nfs
NAME                 STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
pvc-deployment-nfs   Bound    pvc-3267d735-b368-43e7-b18c-f37627b39786   1Gi        RWX            nfs-csi        4m59s

根据k get pvc的结果,可以看到分配的卷名称是pvc-3267d735-b368-43e7-b18c-f37627b39786,下面是通过tail -f 命令查看outfile的结果,可以看到命令的输出是对应yaml文件的配置的

root@master:/home/guanwu/k8s/nfs/pv/provisor/csi# ls -l /usr/share/nfs
总计 24
-rw-r--r-- 1 root root   28  1月  5 23:31 date.txt
drwxr-xr-x 2 root root 4096  1月  2 22:16 pvc-324d5c94-27c6-4d6d-8af0-a08adc9a8de6
drwxr-xr-x 2 root root 4096  1月  6 22:36 pvc-3267d735-b368-43e7-b18c-f37627b39786
root@master:/home/guanwu/k8s/nfs/pv/provisor/csi# tail -f -n 1 /usr/share/nfs/pvc-3267d735-b368-43e7-b18c-f37627b39786/outfile
deployment-nfs-ddc874f7f-6krmt Sat Jan 6 14:44:53 UTC 2024
deployment-nfs-ddc874f7f-6krmt Sat Jan 6 14:44:55 UTC 2024
deployment-nfs-ddc874f7f-6krmt Sat Jan 6 14:44:56 UTC 2024
deployment-nfs-ddc874f7f-6krmt Sat Jan 6 14:44:57 UTC 2024
deployment-nfs-ddc874f7f-6krmt Sat Jan 6 14:44:58 UTC 2024
deployment-nfs-ddc874f7f-6krmt Sat Jan 6 14:44:59 UTC 2024
deployment-nfs-ddc874f7f-6krmt Sat Jan 6 14:45:00 UTC 2024
deployment-nfs-ddc874f7f-6krmt Sat Jan 6 14:45:01 UTC 2024
deployment-nfs-ddc874f7f-6krmt Sat Jan 6 14:45:02 UTC 2024
deployment-nfs-ddc874f7f-6krmt Sat Jan 6 14:45:03 UTC 2024
deployment-nfs-ddc874f7f-6krmt Sat Jan 6 14:45:04 UTC 2024
deployment-nfs-ddc874f7f-6krmt Sat Jan 6 14:45:05 UTC 2024
deployment-nfs-ddc874f7f-6krmt Sat Jan 6 14:45:06 UTC 2024
...

参考

kubernetes.io/zh-cn/docs/…
blog.csdn.net/gxf1027/art…