### Set the PG count according to how many disks you have; it can be adjusted later.
### Each CephFS filesystem needs one data pool and one metadata pool.
ceph osd pool create cephfs_data 128
ceph osd pool create cephfs_metadata 128
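# The PG counts above are only examples; pg_num can be checked and raised later,
# e.g. (256 is an assumed target; on pre-Nautilus releases also raise pgp_num):
ceph osd pool get cephfs_data pg_num
ceph osd pool set cephfs_data pg_num 256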
######################## Side note: grouping OSDs by device class ##########################
# Create CRUSH rules that target the ssd and hdd device classes
ceph osd crush rule create-replicated rule-ssd default host ssd
ceph osd crush rule create-replicated rule-hdd default host hdd
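# These rules only pick OSDs carrying the matching device class. The class is usually
# detected automatically; a sketch of setting it by hand (osd.0 is an example id):
ceph osd crush class ls
ceph osd crush rm-device-class osd.0
ceph osd crush set-device-class ssd osd.0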
# Create pools bound to the corresponding rule
ceph osd pool create ssdpool 64 64 rule-ssd
ceph osd pool create hddpool 64 64 rule-hdd
# Change the rule an existing pool is bound to
ceph osd pool set cephfs_data crush_rule rule-ssd
ceph osd pool set cephfs_metadata crush_rule rule-ssd
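# Optional check: confirm which rule each pool ended up with, e.g.:
ceph osd crush rule ls
ceph osd pool get cephfs_data crush_rule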
2. Create the filesystem (fs)
# Create the filesystem: the metadata pool is given first, then the data pool
ceph fs new cephfs cephfs_metadata cephfs_data
# List existing filesystems
ceph fs ls
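# Optional check after creation (cephfs is the fs name created above):
ceph fs status cephfs
ceph mds stat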
# Remove a filesystem (mark it down / fail its MDS first); cfs4ech is an example name
ceph fs rm cfs4ech --yes-i-really-mean-it
# Remove a pool (the pool name must be typed twice to confirm)
ceph osd pool rm gtscfs_metadata gtscfs_metadata --yes-i-really-really-mean-it
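# Note: pool deletion is refused unless the monitors allow it, and pools still attached
# to a filesystem cannot be removed. A sketch of enabling deletion temporarily:
ceph config set mon mon_allow_pool_delete true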
3. Deploy an MDS; each filesystem needs one MDS
### Run the following on the node that will host the MDS
mkdir /var/lib/ceph/mds/ceph-ceph1
chown ceph:ceph /var/lib/ceph/mds/ceph-ceph1
#ceph auth get-or-create mds.${id} mon 'profile mds' mgr 'profile mds' mds 'allow *' osd 'allow *' > /var/lib/ceph/mds/ceph-${id}/keyring
ceph auth get-or-create mds.ceph1 mon 'profile mds' mgr 'profile mds' mds 'allow *' osd 'allow *' > /var/lib/ceph/mds/ceph-ceph1/keyring
systemctl start ceph-mds@ceph1
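# Optional: enable the daemon at boot and confirm it registered (ceph1 is the node name used above):
systemctl enable ceph-mds@ceph1
ceph mds stat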
####################### The number of MDS daemons must be no fewer than the number of CephFS filesystems ###################
Otherwise the cluster reports the warning:
1 filesystem is online with fewer MDS than max_mds
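# If that warning appears, either start more MDS daemons or lower max_mds, e.g.:
ceph fs get cephfs | grep max_mds
ceph fs set cephfs max_mds 1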
######### View the CephFS filesystems that have been created #####################
ceph fs volume ls
ceph fs dump
4. Mount CephFS
# Read the client key from the admin keyring (needed for the kernel mounts below)
cat /etc/ceph/ceph.client.admin.keyring
mkdir /cephfs
# Kernel mount with the key passed inline ("password" stands for the key shown in the keyring)
mount -t ceph 192.168.100.2:6789:/ /cephfs -o name=admin,secret=password
# Kernel mount with the key stored in a secret file
mount -t ceph 192.168.100.2:6789:/ /cephfs -o name=admin,secretfile=/etc/ceph/admin.secret
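# The secret file referenced above can be generated from the keyring, e.g.:
ceph auth get-key client.admin > /etc/ceph/admin.secret
chmod 600 /etc/ceph/admin.secret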
# /etc/fstab entry so the mount survives a reboot
192.168.100.2:6789:/ /cephfs ceph name=admin,secretfile=/etc/ceph/admin.secret,noatime,_netdev 0 2
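# The fstab entry can be tested without rebooting (unmount any test mount first), e.g.:
umount /cephfs
mount -a
df -hT /cephfs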
# FUSE (userspace) client as an alternative to the kernel mount
yum install ceph-fuse
ceph-fuse -m 192.168.100.2:6789 --no-mon-config -n client.admin -k /etc/ceph/ceph.client.admin.keyring -r / /cephfs
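# A boot-time FUSE mount can also go in /etc/fstab; a sketch based on the mount.fuse.ceph
# helper (ceph.id is the client name without the "client." prefix):
none /cephfs fuse.ceph ceph.id=admin,_netdev,defaults 0 0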