See also: www.cnblogs.com/huangyanqi/…
Environment preparation:
| OS | IP (hostname) |
|---|---|
| CentOS Linux release 7.9 | 192.168.1.200 (node01) |
| CentOS Linux release 7.9 | 192.168.1.102 (node02) |
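The steps below address the nodes by hostname (node01, node02), so both machines need to resolve each other's names. A minimal sketch, assuming the IP-to-hostname mapping from the table above; run on both nodes:
cat >>/etc/hosts <<EOF
192.168.1.200 node01
192.168.1.102 node02
EOF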
Configure the yum repo:
cat >/etc/yum.repos.d/CentOS-Gluster.repo <<EOF
[gluster6]
name=CentOS
baseurl=http://buildlogs.centos.org/centos/\$releasever/storage/\$basearch/gluster-6/
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-Storage
EOF
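To confirm yum picks up the new repo (the repo id gluster6 comes from the file just written), you can check the repo list:
yum clean all && yum repolist | grep -i gluster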
Install the glusterfs repo package (this sets up essentially the same repo as the manual file above):
yum install centos-release-gluster6.noarch
Install glusterfs:
yum install glusterfs-server glusterfs-cli glusterfs-geo-replication
Check the glusterfs version:
[root@node01 yum.repos.d]# glusterfs -V
glusterfs 6.10
Repository revision: git://git.gluster.org/glusterfs.git
Copyright (c) 2006-2016 Red Hat, Inc. <https://www.gluster.org/>
GlusterFS comes with ABSOLUTELY NO WARRANTY.
It is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3
or later), or the GNU General Public License, version 2 (GPLv2),
in all cases as published by the Free Software Foundation.
[root@node01 yum.repos.d]#
Start the glusterd service, enable it at boot, and check its status:
systemctl start glusterd.service && systemctl enable glusterd.service && systemctl status glusterd.service
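If firewalld is running on either node, peer probing and client mounts can fail. glusterd's management port is 24007/tcp and brick daemons use ports from 49152 upward; the range below is an assumption sized for a handful of bricks, so adjust it to your setup (or disable the firewall in a lab):
firewall-cmd --permanent --add-port=24007/tcp
firewall-cmd --permanent --add-port=49152-49251/tcp
firewall-cmd --reload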
Add node02 to the trusted storage pool and check the peer status.
#run on node01
[root@node01 yum.repos.d]# gluster peer status
Number of Peers: 0
[root@node01 yum.repos.d]# gluster peer probe node02
peer probe: success.
[root@node01 yum.repos.d]# gluster peer status
Number of Peers: 1
Hostname: node02
Uuid: 4d775a60-1588-434a-97fd-8ac4d0993860
State: Peer in Cluster (Connected)
[root@node01 yum.repos.d]#
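Membership can also be confirmed from either node with the pool listing, which shows every node in the pool including localhost:
gluster pool list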
Distributed volume type (Distribute)
New data written under the mount point is spread across the bricks; placement is by filename hash, so it looks random.
mkdir -p /data/brick/d1 && gluster volume create testvol node01:/data/brick/d1 && gluster volume start testvol # the volume must be started before it can be mounted
Mount it on the client:
mount -t glusterfs node01:testvol /mnt # at this point the capacity equals the size of /data/brick/d1
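To verify the mount and its capacity (the client must be able to resolve node01, e.g. via /etc/hosts as above):
df -h /mnt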
Add a brick:
gluster volume add-brick testvol node02:/data/brick/d1
[root@node01 d1]# gluster volume info
Volume Name: testvol
Type: Distribute
Volume ID: fe3edfe4-190f-41a7-8c7a-b21f7e96e8ff
Status: Started
Snapshot Count: 0
Number of Bricks: 2
Transport-type: tcp
Bricks:
Brick1: node01:/data/brick/d1
Brick2: node02:/data/brick/d1
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
Looking at /mnt again, the mounted capacity is now twice the size of /data/brick/d1: online expansion works.
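Note that add-brick only extends capacity; files already on the volume are not moved to the new brick automatically. To spread existing data across both bricks, trigger a rebalance:
gluster volume rebalance testvol start
gluster volume rebalance testvol status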
Replicated volume type (Replicate): the type to use in production
All bricks hold identical copies of the data (mirror mode).
#create a replicated volume; with replica 2 across two bricks this is pure Replicate, not distributed-replicated (gluster warns that replica 2 is prone to split-brain; answer y in a test setup, and prefer replica 3 or an arbiter in production)
gluster volume create certfiles replica 2 node01:/data/brick/t1 node02:/data/brick/t1
[root@node01 ~]# gluster volume info
Volume Name: certfiles
Type: Replicate
Volume ID: eb702bea-6ff2-477b-9d13-e43b6aac479d
Status: Created
Snapshot Count: 0
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: node01:/data/brick/t1
Brick2: node02:/data/brick/t1
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
#start the volume
gluster volume start certfiles
#mount
mkdir -pv /certfiles && mount -t glusterfs node01:certfiles /certfiles/
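A quick way to see the mirroring in action, assuming the mount above and the brick paths from the volume info: write a file through the mount, then look at the brick directory on each node.
echo hello > /certfiles/test.txt
ls /data/brick/t1/ # on node01: test.txt is here
ssh node02 ls /data/brick/t1/ # the same file is on node02's brick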
Recovering volume metadata: delete the volume info on node01, then sync it back from a peer (this example uses two pre-existing volumes, gv2 and gv3)
[root@node01 ~]# ls /var/lib/glusterd/vols/
gv2 gv3
[root@node01 ~]# rm -rf /var/lib/glusterd/vols/gv3 #delete volume gv3's metadata
[root@node01 ~]# ls /var/lib/glusterd/vols/ #check again: gv3's volume info is gone
gv2
[root@node01 ~]# gluster volume sync node02 #the volume info on the other nodes is intact, so sync all volume info from node02:
Sync volume may make data inaccessible while the sync is in progress. Do you want to continue? (y/n) y
volume sync: success
[root@node01 ~]# ls /var/lib/glusterd/vols/ #verify the volume info was synced back
gv2 gv3
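As a final check, confirm the recovered volume is intact; restarting glusterd on the repaired node (an extra step some guides recommend, not always strictly required) ensures the daemon reloads the restored metadata:
systemctl restart glusterd
gluster volume info gv3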