一、安装NFS
集群每个节点均安装nfs
yum install -y nfs-utils
二、master执行命令暴露出目录
[root@server253 /]# mkdir -p /nfs/data
[root@server253 /]# echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports
# 设置开机启动
[root@server253 nfs]# systemctl enable rpcbind --now
[root@server253 nfs]# systemctl enable nfs-server --now
Created symlink from /etc/systemd/system/multi-user.target.wants/nfs-server.service to /usr/lib/systemd/system/nfs-server.service.
#使配置立即生效
[root@server253 nfs]# exportfs -r
三、从master节点和node节点均执行命令挂载
#查看主节点IP下共享目录
[root@node249 ~]# showmount -e 192.168.2.253
Export list for 192.168.2.253:
/nfs/data *
#创建并挂载
[root@node249 ~]# mkdir -p /nfs/data/
[root@node249 /]# mount -t nfs 192.168.2.253:/nfs/data/ /nfs/data/
四、配置StorageClass动态生成PV
#storageclass.yaml
# Combined manifest: ServiceAccount + StorageClass + provisioner Deployment.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with the namespace where the provisioner is deployed
  namespace: default
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
  annotations:
    # make this the cluster default StorageClass
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "true"  # when a PV is deleted, archive (back up) its contents instead of discarding them
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate  # stop the old pod before starting a new one (single NFS client)
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/nfs-subdir-external-provisioner:v4
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME  # must match the StorageClass "provisioner" field above
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.2.253  # your NFS server address
            - name: NFS_PATH
              value: /nfs/data      # directory exported by the NFS server
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.2.253  # your NFS server address
            path: /nfs/data        # directory exported by the NFS server
!!上面是完整yaml文件,下面4.1 4.3 4.4是分开执行
4.1创建运行nfs-client-provisioner需要使用的SA账号
#serviceaccount.yaml
# ServiceAccount used by the nfs-client-provisioner Deployment.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with the namespace where the provisioner is deployed
  namespace: default
[root@server253 kubernetes]# kubectl apply -f serviceaccount.yaml
serviceaccount/nfs-client-provisioner created
[root@server253 kubernetes]# kubectl get sa nfs-client-provisioner
NAME SECRETS AGE
nfs-client-provisioner 1 36s
4.2针对SA账号进行授权(注意:此处为演示方便直接绑定 cluster-admin,权限过大;生产环境建议使用 nfs-subdir-external-provisioner 官方提供的最小权限 RBAC 清单)
[root@server253 kubernetes]# kubectl delete clusterrolebinding nfs-provisioner-clusterrolebinding
clusterrolebinding.rbac.authorization.k8s.io "nfs-provisioner-clusterrolebinding" deleted
[root@server253 kubernetes]# kubectl create clusterrolebinding nfs-provisioner-clusterrolebinding --clusterrole=cluster-admin --serviceaccount=default:nfs-client-provisioner
clusterrolebinding.rbac.authorization.k8s.io/nfs-provisioner-clusterrolebinding created
[root@server253 kubernetes]# kubectl get clusterrolebinding nfs-provisioner-clusterrolebinding
NAME ROLE AGE
nfs-provisioner-clusterrolebinding ClusterRole/cluster-admin 26s
[root@server253 kubernetes]# kubectl get clusterrolebinding nfs-provisioner-clusterrolebinding -oyaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: "2023-10-11T05:36:33Z"
managedFields:
- apiVersion: rbac.authorization.k8s.io/v1
fieldsType: FieldsV1
fieldsV1:
f:roleRef:
f:apiGroup: {}
f:kind: {}
f:name: {}
f:subjects: {}
manager: kubectl
operation: Update
time: "2023-10-11T05:36:33Z"
name: nfs-provisioner-clusterrolebinding
resourceVersion: "8448551"
selfLink: /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/nfs-provisioner-clusterrolebinding
uid: b67d80b4-8a76-4d72-8848-5d3fc3575dfc
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
namespace: default
4.3创建pod
#deploy.yaml
# Deployment running the external NFS provisioner pod.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate  # stop the old pod before starting a new one (single NFS client)
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner  # the SA created in step 4.1
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/nfs-subdir-external-provisioner:v4
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME  # provisioner name; the StorageClass must reference the same value
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.2.253  # your NFS server address
            - name: NFS_PATH
              value: /nfs/data      # directory exported by the NFS server
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.2.253  # your NFS server address
            path: /nfs/data        # directory exported by the NFS server
4.4创建StorageClass存储类
# StorageClass backed by the NFS provisioner; marked as the cluster default.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner  # must match PROVISIONER_NAME in the Deployment
parameters:
  archiveOnDelete: "true"  # when a PV is deleted, archive (back up) its contents instead of discarding them
[root@server253 kubernetes]# kubectl apply -f sc.yaml
storageclass.storage.k8s.io/nfs-storage created
[root@server253 kubernetes]# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
nfs-storage (default) k8s-sigs.io/nfs-subdir-external-provisioner Delete Immediate false 24s
[root@server253 kubernetes]# kubectl delete sc nfs-storage
storageclass.storage.k8s.io "nfs-storage" deleted
五、创建测试动态PV分配
#pvc-demo.yaml
# Test claim: no storageClassName is set, so it uses the cluster's default
# StorageClass (nfs-storage) and a PV is provisioned dynamically.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nginx-pvc
spec:
  accessModes:
    - ReadWriteMany  # NFS supports RWX (many nodes mounting read-write)
  resources:
    requests:
      storage: 10Mi
[root@server253 k8s]# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
nfs-storage (default) k8s-sigs.io/nfs-subdir-external-provisioner Delete Immediate false 56s
[root@server253 k8s]# kubectl get pvc
No resources found in default namespace.
[root@server253 k8s]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
nginx-pvc Pending nfs-storage 9s
!!!注意:pending状态
如果创建动态 PVC 失败,PVC 会一直如上处于 Pending 状态
首先看看4.2那一步执行没有!!!
#针对SA账号进行授权
[root@server253 k8s]# kubectl create clusterrolebinding nfs-provisioner-clusterrolebinding --clusterrole=cluster-admin --serviceaccount=default:nfs-client-provisioner
clusterrolebinding.rbac.authorization.k8s.io/nfs-provisioner-clusterrolebinding created
[root@server253 k8s]# kubectl get clusterrolebinding nfs-provisioner-clusterrolebinding
NAME ROLE AGE
nfs-provisioner-clusterrolebinding ClusterRole/cluster-admin 11s
#删除pvc重新执行
[root@server253 k8s]# kubectl apply -f pvc-demo.yaml
persistentvolumeclaim/nginx-pvc created
[root@server253 k8s]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
nginx-pvc Bound pvc-36142ace-e6ef-430d-ad92-6219a7250bae 10Mi RWX nfs-storage 2s
[root@server253 k8s]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-36142ace-e6ef-430d-ad92-6219a7250bae 10Mi RWX Delete Bound default/nginx-pvc nfs-storage 5s