May 27, 2020
Server nodes
DigitalOcean / 2 Core / 2G RAM / 60G + 100G disk
165.227.27.221  server1
159.89.152.41   server2
159.89.151.236  server3
167.172.118.183 server4
167.172.126.43  server5
64.225.47.139   server6
Client node
DigitalOcean / 2 Core / 2G RAM / 60G disk
64.225.47.123   server7
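All of the commands that follow refer to the nodes by hostname (server1 through server7), so every machine, including the client, must be able to resolve those names. Assuming no internal DNS is configured for these droplets, a minimal /etc/hosts block built from the IPs listed above can be appended on each node, for example:

cat >> /etc/hosts <<'EOF'
165.227.27.221  server1
159.89.152.41   server2
159.89.151.236  server3
167.172.118.183 server4
167.172.126.43  server5
64.225.47.139   server6
64.225.47.123   server7
EOF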
Check the available disks on each node
[root@server1 ~]# df -h
Filesystem            Size  Used Avail Use% Mounted on
/dev/vda1              60G  901M   60G   2% /
devtmpfs              897M     0  897M   0% /dev
tmpfs                 920M     0  920M   0% /dev/shm
tmpfs                 920M   17M  903M   2% /run
tmpfs                 920M     0  920M   0% /sys/fs/cgroup
/dev/sda              100G   33M  100G   1% /mnt/volume_sfo2_01
tmpfs                 184M     0  184M   0% /run/user/0
[root@server1 ~]#

[root@server2 ~]# df -h
Filesystem            Size  Used Avail Use% Mounted on
/dev/vda1              60G  901M   60G   2% /
devtmpfs              897M     0  897M   0% /dev
tmpfs                 920M     0  920M   0% /dev/shm
tmpfs                 920M   17M  903M   2% /run
tmpfs                 920M     0  920M   0% /sys/fs/cgroup
/dev/sda              100G   33M  100G   1% /mnt/volume_sfo2_02
tmpfs                 184M     0  184M   0% /run/user/0
[root@server2 ~]#

[root@server3 ~]# df -h
Filesystem            Size  Used Avail Use% Mounted on
/dev/vda1              60G  901M   60G   2% /
devtmpfs              897M     0  897M   0% /dev
tmpfs                 920M     0  920M   0% /dev/shm
tmpfs                 920M   17M  903M   2% /run
tmpfs                 920M     0  920M   0% /sys/fs/cgroup
/dev/sda              100G   33M  100G   1% /mnt/volume_sfo2_03
tmpfs                 184M     0  184M   0% /run/user/0
[root@server3 ~]#

[root@server4 ~]# df -h
Filesystem            Size  Used Avail Use% Mounted on
/dev/vda1              60G  901M   60G   2% /
devtmpfs              897M     0  897M   0% /dev
tmpfs                 920M     0  920M   0% /dev/shm
tmpfs                 920M   17M  903M   2% /run
tmpfs                 920M     0  920M   0% /sys/fs/cgroup
/dev/sda              100G   33M  100G   1% /mnt/volume_sfo2_04
tmpfs                 184M     0  184M   0% /run/user/0
[root@server4 ~]#

[root@server5 ~]# df -h
Filesystem            Size  Used Avail Use% Mounted on
/dev/vda1              60G  974M   60G   2% /
devtmpfs              897M     0  897M   0% /dev
tmpfs                 920M     0  920M   0% /dev/shm
tmpfs                 920M   17M  903M   2% /run
tmpfs                 920M     0  920M   0% /sys/fs/cgroup
/dev/sda              100G   33M  100G   1% /mnt/volume_sfo2_05
tmpfs                 184M     0  184M   0% /run/user/0
[root@server5 ~]#

[root@server6 ~]# df -h
Filesystem            Size  Used Avail Use% Mounted on
/dev/vda1              60G  973M   60G   2% /
devtmpfs              897M     0  897M   0% /dev
tmpfs                 920M     0  920M   0% /dev/shm
tmpfs                 920M   17M  903M   2% /run
tmpfs                 920M     0  920M   0% /sys/fs/cgroup
/dev/sda              100G   33M  100G   1% /mnt/volume_sfo2_06
tmpfs                 184M     0  184M   0% /run/user/0
[root@server6 ~]#
Install and start the GlusterFS service on the server nodes
Run the following on every server node (the GlusterFS management daemon is glusterd):

sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
yum -y install centos-release-gluster
yum -y install glusterfs-server
systemctl enable glusterd
systemctl start glusterd
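The sed edit above only disables SELinux after the next reboot; setenforce 0 turns enforcement off in the running session as well. GlusterFS also needs its management port (24007/tcp) and the brick ports (49152 and up, one per brick) reachable between the nodes, so if firewalld happens to be active, something along these lines can be run on every server node (the brick port range here is an assumption, adjust to your brick count):

setenforce 0                                        # apply immediately; the config change takes effect after reboot
firewall-cmd --permanent --add-port=24007-24008/tcp --add-port=49152-49251/tcp   # management + brick ports
firewall-cmd --reload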
Add the nodes to a trusted storage pool (Trusted Pool)
Once the trusted storage pool has been established and the nodes can communicate with one another, only nodes that are already members of the pool can probe new nodes into it; a new node cannot itself probe nodes that already belong to the trusted storage pool.
[root@server1 ~]# gluster peer probe server2
peer probe: success.
[root@server1 ~]# gluster peer probe server3
peer probe: success.
[root@server1 ~]# gluster peer probe server4
peer probe: success.
[root@server1 ~]# gluster peer probe server5
peer probe: success.
[root@server1 ~]# gluster peer probe server6
peer probe: success.
[root@server1 ~]# gluster peer status
Number of Peers: 5

Hostname: server2
Uuid: 6231013f-07cc-4701-93b3-34d4c623a890
State: Peer in Cluster (Connected)

Hostname: server3
Uuid: aa808d87-4e7c-4ecd-bcf0-13ea03f844a8
State: Peer in Cluster (Connected)

Hostname: server4
Uuid: d153d847-ad46-4c85-8336-f8e553d5aab6
State: Peer in Cluster (Connected)

Hostname: server5
Uuid: a90c2969-67eb-4792-b5ce-6b4b3d782675
State: Peer in Cluster (Connected)

Hostname: server6
Uuid: 3ed5adc9-d3f7-40eb-8bbd-45f0882f55cd
State: Peer in Cluster (Connected)
[root@server1 ~]#
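Because the whole pool was assembled from server1, the other peers may still record server1 by the IP address it probed them from. The usual fix, per the upstream GlusterFS quick-start notes, is to probe server1 back once from any other pool member so that its hostname is stored as well; the command should simply report success (host already in the peer list):

[root@server2 ~]# gluster peer probe server1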
Create the brick directories on each node
[root@server1 ~]# mkdir -p /mnt/volume_sfo2_01/brick1
[root@server2 ~]# mkdir -p /mnt/volume_sfo2_02/brick2
[root@server3 ~]# mkdir -p /mnt/volume_sfo2_03/brick3
[root@server4 ~]# mkdir -p /mnt/volume_sfo2_04/brick4
[root@server5 ~]# mkdir -p /mnt/volume_sfo2_05/brick5
[root@server6 ~]# mkdir -p /mnt/volume_sfo2_06/brick6
Create a 3-replica distributed-replicated volume across the 6 nodes

With replica 3 and six bricks, GlusterFS groups the bricks, in the order given, into two replica sets of three bricks each; files are distributed across the two sets, so the usable capacity is 2 × 100G = 200G.
gluster volume create data-volume replica 3 transport tcp \
server1:/mnt/volume_sfo2_01/brick1 server2:/mnt/volume_sfo2_02/brick2 \
server3:/mnt/volume_sfo2_03/brick3 server4:/mnt/volume_sfo2_04/brick4 \
server5:/mnt/volume_sfo2_05/brick5 server6:/mnt/volume_sfo2_06/brick6

[root@server1 ~]# gluster volume create data-volume replica 3 transport tcp \
> server1:/mnt/volume_sfo2_01/brick1 server2:/mnt/volume_sfo2_02/brick2 \
> server3:/mnt/volume_sfo2_03/brick3 server4:/mnt/volume_sfo2_04/brick4 \
> server5:/mnt/volume_sfo2_05/brick5 server6:/mnt/volume_sfo2_06/brick6
volume create: data-volume: success: please start the volume to access data
[root@server1 ~]#
View the volume information
[root@server1 ~]# gluster volume info

Volume Name: data-volume
Type: Distributed-Replicate
Volume ID: 2a2103ab-17e4-47b5-9d4c-96e460ac419c
Status: Created
Snapshot Count: 0
Number of Bricks: 2 x 3 = 6
Transport-type: tcp
Bricks:
Brick1: server1:/mnt/volume_sfo2_01/brick1
Brick2: server2:/mnt/volume_sfo2_02/brick2
Brick3: server3:/mnt/volume_sfo2_03/brick3
Brick4: server4:/mnt/volume_sfo2_04/brick4
Brick5: server5:/mnt/volume_sfo2_05/brick5
Brick6: server6:/mnt/volume_sfo2_06/brick6
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
performance.client-io-threads: off
[root@server1 ~]#
Start the volume, then check its info and status
[root@server1 ~]# gluster volume start data-volume
volume start: data-volume: success
[root@server1 ~]# gluster volume info

Volume Name: data-volume
Type: Distributed-Replicate
Volume ID: 2a2103ab-17e4-47b5-9d4c-96e460ac419c
Status: Started
Snapshot Count: 0
Number of Bricks: 2 x 3 = 6
Transport-type: tcp
Bricks:
Brick1: server1:/mnt/volume_sfo2_01/brick1
Brick2: server2:/mnt/volume_sfo2_02/brick2
Brick3: server3:/mnt/volume_sfo2_03/brick3
Brick4: server4:/mnt/volume_sfo2_04/brick4
Brick5: server5:/mnt/volume_sfo2_05/brick5
Brick6: server6:/mnt/volume_sfo2_06/brick6
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
performance.client-io-threads: off
[root@server1 ~]# gluster volume status
Status of volume: data-volume
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick server1:/mnt/volume_sfo2_01/brick1    49152     0          Y       9805
Brick server2:/mnt/volume_sfo2_02/brick2    49152     0          Y       9843
Brick server3:/mnt/volume_sfo2_03/brick3    49152     0          Y       9690
Brick server4:/mnt/volume_sfo2_04/brick4    49152     0          Y       9734
Brick server5:/mnt/volume_sfo2_05/brick5    49152     0          Y       10285
Brick server6:/mnt/volume_sfo2_06/brick6    49152     0          Y       10470
Self-heal Daemon on localhost               N/A       N/A        Y       9826
Self-heal Daemon on server5                 N/A       N/A        Y       10306
Self-heal Daemon on server2                 N/A       N/A        Y       9864
Self-heal Daemon on server6                 N/A       N/A        Y       10491
Self-heal Daemon on server3                 N/A       N/A        Y       9711
Self-heal Daemon on server4                 N/A       N/A        Y       9755

Task Status of Volume data-volume
------------------------------------------------------------------------------
There are no active volume tasks

[root@server1 ~]#
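Once the volume is started, the self-heal daemons shown above keep the three copies in each replica set in sync. A quick way to confirm that no files are pending replication is the heal-info query, which can be run on any pool member:

gluster volume heal data-volume info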
Install the required GlusterFS components on the client
[root@server7 ~]# yum -y install centos-release-gluster
[root@server7 ~]# yum -y install glusterfs glusterfs-fuse glusterfs-rdma
Mount the data-volume volume and check the disk information (actual usable capacity: 200GB)
[root@server7 ~]# mount -t glusterfs server6:/data-volume /mnt/
[root@server7 ~]# df -h
Filesystem            Size  Used Avail Use% Mounted on
/dev/vda1              60G 1003M   60G   2% /
devtmpfs              897M     0  897M   0% /dev
tmpfs                 920M     0  920M   0% /dev/shm
tmpfs                 920M   17M  903M   2% /run
tmpfs                 920M     0  920M   0% /sys/fs/cgroup
tmpfs                 184M     0  184M   0% /run/user/0
server6:/data-volume  200G  2.1G  198G   2% /mnt
[root@server7 ~]#
[root@server7 ~]# mount | grep server6
server6:/data-volume on /mnt type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072)
[root@server7 ~]#
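The server named in the mount command is only used to fetch the volume definition; after that the FUSE client talks to every brick directly. To make the mount persist across reboots, and to let the client fall back to other volfile servers if server6 is unreachable at mount time, an /etc/fstab entry along these lines can be used on the client (the choice of backup servers here is just an example):

server6:/data-volume  /mnt  glusterfs  defaults,_netdev,backup-volfile-servers=server1:server2  0 0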
Check the communication status between the client and the server nodes
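One simple way to do this from the client is to list the glusterfs FUSE process's established TCP sessions: there should be one connection to the volfile server's management port (24007) and one to each brick port (49152 on these nodes). A minimal check, assuming net-tools (or iproute2 for ss) is installed:

netstat -tnp | grep glusterfs
# or, equivalently:
ss -tnp | grep glusterfs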
Write test files from the client
[root@server7 ~]# for i in `seq -w 1 20`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
[root@server7 ~]# ls /mnt/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@server7 ~]#
Check how the written files are distributed across the server node bricks

Bricks 1–3 (server1–server3) form one replica set and bricks 4–6 (server4–server6) form the other, so each file lands on exactly one set and appears on all three of its bricks.
[root@server1 ~]# ls /mnt/volume_sfo2_01/brick1/
copy-test-04  copy-test-05  copy-test-09  copy-test-15  copy-test-17  copy-test-18  copy-test-20
[root@server1 ~]#

[root@server2 ~]# ls /mnt/volume_sfo2_02/brick2/
copy-test-04  copy-test-05  copy-test-09  copy-test-15  copy-test-17  copy-test-18  copy-test-20
[root@server2 ~]#

[root@server3 ~]# ls /mnt/volume_sfo2_03/brick3/
copy-test-04  copy-test-05  copy-test-09  copy-test-15  copy-test-17  copy-test-18  copy-test-20
[root@server3 ~]#

[root@server4 ~]# ls /mnt/volume_sfo2_04/brick4/
copy-test-01  copy-test-03  copy-test-07  copy-test-10  copy-test-12  copy-test-14  copy-test-19
copy-test-02  copy-test-06  copy-test-08  copy-test-11  copy-test-13  copy-test-16
[root@server4 ~]#

[root@server5 ~]# ls /mnt/volume_sfo2_05/brick5/
copy-test-01  copy-test-03  copy-test-07  copy-test-10  copy-test-12  copy-test-14  copy-test-19
copy-test-02  copy-test-06  copy-test-08  copy-test-11  copy-test-13  copy-test-16
[root@server5 ~]#

[root@server6 ~]# ls /mnt/volume_sfo2_06/brick6/
copy-test-01  copy-test-03  copy-test-07  copy-test-10  copy-test-12  copy-test-14  copy-test-19
copy-test-02  copy-test-06  copy-test-08  copy-test-11  copy-test-13  copy-test-16
[root@server6 ~]#