k8s-4.4 Storage backends: nfs/ceph/glusterfs


Creating an NFS StorageClass in k8s

  1. Set up NFS: configure the NFS server
yum install nfs-utils
mkdir /var/nfs   # shared directory
chmod 777 /var/nfs
cat <<EOF >>/etc/exports
/var/nfs    192.168.0.199/24(no_root_squash,rw,sync,no_subtree_check) # no_root_squash is required, otherwise you get permission errors
EOF
systemctl enable rpcbind
systemctl start rpcbind
systemctl enable nfs-server
systemctl start nfs-server
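
To confirm the directory is actually exported, a quick check on the server (both commands ship with nfs-utils):

exportfs -v                # list active exports and their options
showmount -e localhost     # ask mountd which directories are exported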

If you modify /etc/exports, apply the change with exportfs -a and list the exported directories with exportfs. Then configure the NFS client:

yum install nfs-utils
mkdir /mnt/nfs
mount -t nfs 192.168.0.100:/var/nfs /mnt/nfs

List all current mount points with mount. When you are done, unmount the share with umount /mnt/nfs. To mount the NFS share automatically at boot, add a line to /etc/fstab:

vi /etc/fstab
192.168.0.100:/var/nfs    /mnt/nfs  nfs defaults 0 0
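
To confirm the new fstab entry is well-formed without rebooting:

mount -a            # re-reads /etc/fstab and mounts anything not yet mounted
df -h /mnt/nfs      # the share should show up here
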
  2. Create the StorageClass. Create the resources from the official nfs-client deploy directory at github.com/kubernetes-… — class.yaml:
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-25
provisioner: fuseim.pri/ifs25 # or choose another name, must match the deployment's env PROVISIONER_NAME

deployment.yaml

kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner25
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner25
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner25
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner25
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs25
            - name: NFS_SERVER
              value: 10.221.129.25
            - name: NFS_PATH
              value: /nfsstorage
      volumes:
        - name: nfs-client-root
          nfs:
            server: 10.221.129.25
            path: /nfsstorage

sa.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner

clusterrolebinding.yaml

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
#  kind: ClusterRole
#  name: nfs-client-provisioner-runner
#  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
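
With the four manifests in place, a minimal sketch of applying them and exercising the class with a throw-away PVC (the claim name test-claim is arbitrary):

kubectl apply -f sa.yaml -f clusterrolebinding.yaml -f class.yaml -f deployment.yaml

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: nfs-25
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
EOF

kubectl get pvc test-claim    # should turn Bound once the provisioner creates a PV on the NFS share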

ceph storageclass

install

Mounting a ceph rbd through a StorageClass requires two cephx identities: one to create and manage rbd images (here we use admin), and another to mount the rbd into the Pod, which we can create manually in ceph.

  • Prerequisites: assume an existing ceph cluster whose Monitors are 192.168.1.1, 192.168.1.2 and 192.168.1.3

Make sure the Monitor port 6789 and the OSD ports (6800 and up) are reachable from the kubelet nodes.

Install ceph-common on every kubelet node.
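
On the CentOS/RHEL nodes assumed in this article (a ceph yum repository must already be configured):

yum install -y ceph-common    # provides the rbd CLI that kubelet uses to map images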

  • 1. Create a storage pool named k8s for the data volumes

    ceph osd pool create k8s 64 64

  • 2. Create a cephx user that will mount the rbd images created by admin

    ceph auth get-or-create client.k8s mon 'allow r' osd 'allow * pool=k8s'

  • 3. Get the key of the k8s user, base64-encoded

    ceph auth get-key client.k8s|base64

    Record the output, e.g.: QVFEQjlmcFpjUE5lS0JBQU40NnZxZ2dIT0dRTEtaeUx1blNjR1E9PQ==

  • 4. Get the key of the admin user, base64-encoded

    ceph auth get-key client.admin|base64

    Record the output, e.g.: QVFDUDNrQllRRVBJR0JBQXFuVXJQbHZQaC9xZEQ2ZGVZOXRoVXc9PQ==

  • 5. Create storage-class-rbd.yaml

---
apiVersion: v1
data:
  key: QVFBaFJLQmR2MnZzRFJBQWRkbDgxT1FZUXNMRldRb1BXSVY4elE9PQ==
kind: Secret
metadata:
  name: ceph-secret-user
  namespace: logging
type: kubernetes.io/rbd
---
apiVersion: v1
data:
  key: QVFBaFJLQmR2MnZzRFJBQWRkbDgxT1FZUXNMRldRb1BXSVY4elE9PQ==
kind: Secret
metadata:
  name: ceph-secret-user
  namespace: kube-system
type: kubernetes.io/rbd
---
apiVersion: v1
data:
  key: QVFDeldhMWNuMHBKQkJBQXp2YlQzVXBoOWNPWUpOTzFKZTlCM0E9PQ==
kind: Secret
metadata:
  name: ceph-secret-admin
  namespace: kube-system
type: kubernetes.io/rbd
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rbd
provisioner: ceph.com/rbd
parameters:
  monitors: 10.220.54.6,10.220.54.1,10.220.54.2
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: kube-system
  pool: k8s
  userId: k8s
  userSecretName: ceph-secret-user
  fsType: xfs
  imageFormat: "2"
  imageFeatures: "layering"
  • 6. Create the StorageClass

    kubectl create -f ./storage-class-rbd.yaml
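
Before wiring the class into a workload, provisioning can be sanity-checked with a standalone PVC; a sketch (the claim name is arbitrary, and the namespace must be one that contains the ceph-secret-user Secret, logging here):

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-test
  namespace: logging
spec:
  storageClassName: rbd
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
EOF

kubectl get pvc -n logging rbd-test    # should become Bound once an rbd image has been created in the k8s pool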

  • 7. Create nginx-statefulset.yaml; the StatefulSet provisions and mounts its volumes dynamically

---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: "nginx"
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        volumeMounts:
        - mountPath: "/usr/share/nginx/html/"
          name: html
  volumeClaimTemplates:
  - metadata:
      name: html            # must match the volumeMount name above
      annotations:
        volume.beta.kubernetes.io/storage-class: "rbd" # the StorageClass created above
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 2Gi

The StatefulSet creates one rbd volume per Pod and mounts it at /usr/share/nginx/html/. New volumes are provisioned automatically as the StatefulSet scales up; note that the PVCs (and their rbd images) are not deleted automatically when it scales back down.
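
A quick way to check the result, assuming the StatefulSet was created in the logging namespace (the one that holds ceph-secret-user); the PVC names follow the <template name>-<pod name> pattern:

kubectl get pvc -n logging    # html-web-0 and html-web-1 should be Bound
kubectl get pv                # one dynamically provisioned PV per claim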

Problem

[k8s@YZ-25-58-1 rbd]$ kubectl describe pvc -n logging data-es-elasticsearch-master-0
Warning    ProvisioningFailed  101s (x23 over 38m)  persistentvolume-controller  Failed to provision volume with StorageClass "rbd": failed to create rbd image: executable file not found in $PATH, command output:

Cause:

Some users deploy the cluster with kubeadm, or run kube-controller-manager as a container. In that setup, statically created ceph rbd PVs/PVCs work without problems, but dynamic provisioning fails with "rbd: create volume failed, err: failed to create rbd image: executable file not found in $PATH", because the in-tree provisioner runs inside kube-controller-manager and its container image does not ship the rbd binary.

Fix:

One workaround given there is to add ceph-common to the hyperkube image: build a new hyperkube-amd64 image with ceph-common installed and use it in place of the official one. Alternatively, define a new external provisioner, rbd-provisioner; see the referenced issue and jimmysong.io/kubernetes-… A sketch of the second option follows.
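
A minimal sketch of such a provisioner, assuming the quay.io/external_storage/rbd-provisioner image from the same external-storage project and keeping its default provisioner name ceph.com/rbd, which is what the StorageClass above references (RBAC objects are omitted here but are needed in practice, similar to the nfs-client example):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-provisioner
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rbd-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      serviceAccountName: rbd-provisioner          # bound to the RBAC rules the provisioner needs
      containers:
      - name: rbd-provisioner
        image: quay.io/external_storage/rbd-provisioner:latest
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/rbd                      # must match the provisioner field of the StorageClass

Once provisioning works, the dynamically created images can be seen on the ceph side: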

[root@k8s01 ~]# rbd ls --pool rbd
kubernetes-dynamic-pvc-ea390cbf-cef7-11e8-aa22-0a580af40202
kubernetes-dynamic-pvc-eef5814f-cef7-11e8-aa22-0a580af40202

[root@k8s01 ~]# rbd info rbd/kubernetes-dynamic-pvc-ea390cbf-cef7-11e8-aa22-0a580af40202
rbd image 'kubernetes-dynamic-pvc-ea390cbf-cef7-11e8-aa22-0a580af40202':
    size 2048 MB in 512 objects
    order 22 (4096 kB objects)
    block_name_prefix: rbd_data.456876b8b4567
    format: 2
    features: layering
    flags:
    create_timestamp: Sat Oct 13 22:54:41 2018

glusterfs

To use an existing glusterfs cluster, create a Service and an Endpoints object that point kubernetes at the gluster servers (the port value is not used by the gluster mount, it just has to be a non-zero placeholder):

apiVersion: v1
kind: Service
metadata:
  name: glusterfs-ai
spec:
  ports:
    - port: 1
      protocol: TCP
      targetPort: 1
  sessionAffinity: None
  type: ClusterIP

apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-ai
subsets:
  - addresses:
    - ip: 192.168.0.71
    - ip: 192.168.0.72
    ports:
      - port: 1
        protocol: TCP
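
These two objects only tell kubernetes where the gluster servers are; a volume still has to be referenced explicitly. A sketch of a static PV that uses them, assuming a gluster volume named gv0 already exists on those servers:

apiVersion: v1
kind: PersistentVolume
metadata:
  name: glusterfs-ai-pv
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  glusterfs:
    endpoints: glusterfs-ai    # the Endpoints/Service created above
    path: gv0                  # hypothetical gluster volume name
    readOnly: false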