Jan 29, 2015
https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/
https://wiki.centos.org/SpecialInterestGroup/Storage/gluster-Quickstart
GlusterFS is a scalable distributed file system that aggregates disk resources from multiple server nodes into a single global namespace.

Distributed file system nodes
Hostname        Public IP        Private IP
glusterfs-01    138.197.217.220  10.138.18.152
glusterfs-02    157.245.169.92   10.138.146.225
glusterfs-03    165.227.21.222   10.138.178.108
Configure the hosts file on all nodes
[root@glusterfs-01 ~]# vi /etc/hosts
10.138.18.152   glusterfs-01
10.138.146.225  glusterfs-02
10.138.178.108  glusterfs-03
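Before going further, it is worth confirming that each hostname resolves and is reachable over the private network. A quick sanity check along these lines (hostnames taken from the table above) can be run from any node:

[root@glusterfs-01 ~]# for h in glusterfs-01 glusterfs-02 glusterfs-03; do ping -c 1 -W 1 $h > /dev/null && echo "$h OK"; done
glusterfs-01 OK
glusterfs-02 OK
glusterfs-03 OK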
View the currently available disks and partitions
[root@glusterfs-01 ~]# fdisk -l

Disk /dev/vda: 64.4 GB, 64424509440 bytes, 125829120 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x000b6061

   Device Boot      Start         End      Blocks   Id  System
/dev/vda1   *        2048   125829086    62913519+  83  Linux

Disk /dev/vdb: 0 MB, 466944 bytes, 912 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

Disk /dev/sda: 107.4 GB, 107374182400 bytes, 209715200 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

[root@glusterfs-01 ~]#
Create a partition
[root@glusterfs-01 ~]# fdisk /dev/sda
Welcome to fdisk (util-linux 2.23.2).

Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.

Device does not contain a recognized partition table
Building a new DOS disklabel with disk identifier 0x99c4ee31.

Command (m for help): n
Partition type:
   p   primary (0 primary, 0 extended, 4 free)
   e   extended
Select (default p): e
Partition number (1-4, default 1):
First sector (2048-209715199, default 2048):
Using default value 2048
Last sector, +sectors or +size{K,M,G} (2048-209715199, default 209715199):
Using default value 209715199
Partition 1 of type Extended and of size 100 GiB is set

Command (m for help): n
Partition type:
   p   primary (0 primary, 1 extended, 3 free)
   l   logical (numbered from 5)
Select (default p): l
Adding logical partition 5
First sector (4096-209715199, default 4096):
Using default value 4096
Last sector, +sectors or +size{K,M,G} (4096-209715199, default 209715199):
Using default value 209715199
Partition 5 of type Linux and of size 100 GiB is set

Command (m for help): w
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.
[root@glusterfs-01 ~]#
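The interactive fdisk session above has to be repeated on glusterfs-02 and glusterfs-03. For scripted provisioning, the same extended + logical layout can also be created non-interactively; a minimal sketch using parted, assuming /dev/sda is the blank data disk on each node:

# Non-interactive equivalent of the fdisk session above (sketch)
parted -s /dev/sda mklabel msdos
parted -s /dev/sda mkpart extended 1MiB 100%
parted -s /dev/sda mkpart logical xfs 2MiB 100%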
View the disk and partition information again to confirm the new layout
[root@glusterfs-01 ~]# fdisk -l

Disk /dev/vda: 64.4 GB, 64424509440 bytes, 125829120 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x000b6061

   Device Boot      Start         End      Blocks   Id  System
/dev/vda1   *        2048   125829086    62913519+  83  Linux

Disk /dev/vdb: 0 MB, 466944 bytes, 912 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes

Disk /dev/sda: 107.4 GB, 107374182400 bytes, 209715200 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0xbb370b51

   Device Boot      Start         End      Blocks   Id  System
/dev/sda1            2048   209715199   104856576    5  Extended
/dev/sda5            4096   209715199   104855552   83  Linux
[root@glusterfs-01 ~]#
Format and mount the data disk on all nodes
# mkfs.xfs -i size=512 /dev/sda5
# mkdir -p /data/brick1
# echo '/dev/sda5 /data/brick1 xfs defaults 1 2' >> /etc/fstab
# mount -a && mount

[root@glusterfs-01 ~]# mkfs.xfs -i size=512 /dev/sda5
meta-data=/dev/sda5              isize=512    agcount=4, agsize=6553472 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=26213888, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=12799, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@glusterfs-01 ~]# mkdir -p /data/brick1
[root@glusterfs-01 ~]# echo '/dev/sda5 /data/brick1 xfs defaults 1 2' >> /etc/fstab
[root@glusterfs-01 ~]# mount -a && mount
sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime,seclabel)
proc on /proc type proc (rw,nosuid,nodev,noexec,relatime)
devtmpfs on /dev type devtmpfs (rw,nosuid,seclabel,size=917804k,nr_inodes=229451,mode=755)
securityfs on /sys/kernel/security type securityfs (rw,nosuid,nodev,noexec,relatime)
tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev,seclabel)
devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,seclabel,gid=5,mode=620,ptmxmode=000)
tmpfs on /run type tmpfs (rw,nosuid,nodev,seclabel,mode=755)
tmpfs on /sys/fs/cgroup type tmpfs (ro,nosuid,nodev,noexec,seclabel,mode=755)
cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd)
pstore on /sys/fs/pstore type pstore (rw,nosuid,nodev,noexec,relatime)
cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,hugetlb)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,freezer)
cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,cpuset)
cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,perf_event)
cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,pids)
cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,net_prio,net_cls)
cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,cpuacct,cpu)
cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,memory)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,blkio)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,seclabel,devices)
configfs on /sys/kernel/config type configfs (rw,relatime)
/dev/vda1 on / type xfs (rw,relatime,seclabel,attr2,inode64,noquota)
rpc_pipefs on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw,relatime)
selinuxfs on /sys/fs/selinux type selinuxfs (rw,relatime)
hugetlbfs on /dev/hugepages type hugetlbfs (rw,relatime,seclabel)
mqueue on /dev/mqueue type mqueue (rw,relatime,seclabel)
debugfs on /sys/kernel/debug type debugfs (rw,relatime)
systemd-1 on /proc/sys/fs/binfmt_misc type autofs (rw,relatime,fd=32,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=13335)
tmpfs on /run/user/0 type tmpfs (rw,nosuid,nodev,relatime,seclabel,size=188220k,mode=700)
/dev/sda5 on /data/brick1 type xfs (rw,relatime,seclabel,attr2,inode64,noquota)
[root@glusterfs-01 ~]#
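One caveat: the fstab entry above mounts by device name, and names like /dev/sda5 can change if disks are re-enumerated after a reboot. Mounting by filesystem UUID is more robust; a sketch (the UUID below is a placeholder for whatever blkid actually reports):

[root@glusterfs-01 ~]# blkid -s UUID -o value /dev/sda5
xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
[root@glusterfs-01 ~]# echo 'UUID=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx /data/brick1 xfs defaults 1 2' >> /etc/fstab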
Install the GlusterFS packages on all nodes
[root@glusterfs-01 ~]# yum -y install centos-release-gluster
[root@glusterfs-01 ~]# yum -y install glusterfs-server

[root@glusterfs-02 ~]# yum -y install centos-release-gluster
[root@glusterfs-02 ~]# yum -y install glusterfs-server

[root@glusterfs-03 ~]# yum -y install centos-release-gluster
[root@glusterfs-03 ~]# yum -y install glusterfs-server
Enable and start the glusterfsd system service on all nodes
[root@glusterfs-01 ~]# systemctl enable glusterfsd
Created symlink from /etc/systemd/system/multi-user.target.wants/glusterfsd.service to /usr/lib/systemd/system/glusterfsd.service.
[root@glusterfs-01 ~]# systemctl start glusterfsd
[root@glusterfs-01 ~]# systemctl status glusterfsd
● glusterfsd.service - GlusterFS brick processes (stopping only)
   Loaded: loaded (/usr/lib/systemd/system/glusterfsd.service; enabled; vendor preset: disabled)
   Active: active (exited) since Tue 2020-05-26 07:28:17 UTC; 8s ago
  Process: 10737 ExecStart=/bin/true (code=exited, status=0/SUCCESS)
 Main PID: 10737 (code=exited, status=0/SUCCESS)

May 26 07:28:17 glusterfs-01 systemd[1]: Starting GlusterFS brick processes (stopping only)...
May 26 07:28:17 glusterfs-01 systemd[1]: Started GlusterFS brick processes (stopping only).
[root@glusterfs-01 ~]#

[root@glusterfs-02 ~]# systemctl enable glusterfsd
Created symlink from /etc/systemd/system/multi-user.target.wants/glusterfsd.service to /usr/lib/systemd/system/glusterfsd.service.
[root@glusterfs-02 ~]# systemctl start glusterfsd
[root@glusterfs-02 ~]# systemctl status glusterfsd
● glusterfsd.service - GlusterFS brick processes (stopping only)
   Loaded: loaded (/usr/lib/systemd/system/glusterfsd.service; enabled; vendor preset: disabled)
   Active: active (exited) since Tue 2020-05-26 07:29:21 UTC; 11s ago
  Process: 18817 ExecStart=/bin/true (code=exited, status=0/SUCCESS)
 Main PID: 18817 (code=exited, status=0/SUCCESS)

May 26 07:29:20 glusterfs-02 systemd[1]: Starting GlusterFS brick processes (stopping only)...
May 26 07:29:21 glusterfs-02 systemd[1]: Started GlusterFS brick processes (stopping only).
[root@glusterfs-02 ~]#

[root@glusterfs-03 ~]# systemctl enable glusterfsd
Created symlink from /etc/systemd/system/multi-user.target.wants/glusterfsd.service to /usr/lib/systemd/system/glusterfsd.service.
[root@glusterfs-03 ~]# systemctl start glusterfsd
[root@glusterfs-03 ~]# systemctl status glusterfsd
● glusterfsd.service - GlusterFS brick processes (stopping only)
   Loaded: loaded (/usr/lib/systemd/system/glusterfsd.service; enabled; vendor preset: disabled)
   Active: active (exited) since Tue 2020-05-26 07:30:27 UTC; 7s ago
  Process: 18444 ExecStart=/bin/true (code=exited, status=0/SUCCESS)
 Main PID: 18444 (code=exited, status=0/SUCCESS)

May 26 07:30:27 glusterfs-03 systemd[1]: Starting GlusterFS brick processes (stopping only)...
May 26 07:30:27 glusterfs-03 systemd[1]: Started GlusterFS brick processes (stopping only).
[root@glusterfs-03 ~]#
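Note that glusterfsd.service is only a stop-helper for brick processes; its ExecStart is /bin/true, which is why it shows as "active (exited)". The actual management daemon is glusterd, which must also be enabled and started on every node (the port check below shows it listening on 24007):

[root@glusterfs-01 ~]# systemctl enable glusterd
[root@glusterfs-01 ~]# systemctl start glusterd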
Check the listening ports
[root@glusterfs-01 ~]# netstat -lntuop
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address      Foreign Address   State    PID/Program name   Timer
tcp        0      0 127.0.0.1:25       0.0.0.0:*         LISTEN   1195/master        off (0.00/0/0)
tcp        0      0 0.0.0.0:24007      0.0.0.0:*         LISTEN   1047/glusterd      off (0.00/0/0)
tcp        0      0 0.0.0.0:111        0.0.0.0:*         LISTEN   1/systemd          off (0.00/0/0)
tcp        0      0 0.0.0.0:22         0.0.0.0:*         LISTEN   1247/sshd          off (0.00/0/0)
tcp6       0      0 ::1:25             :::*              LISTEN   1195/master        off (0.00/0/0)
tcp6       0      0 :::111             :::*              LISTEN   1/systemd          off (0.00/0/0)
tcp6       0      0 :::22              :::*              LISTEN   1247/sshd          off (0.00/0/0)
udp        0      0 0.0.0.0:111        0.0.0.0:*                  1/systemd          off (0.00/0/0)
udp        0      0 127.0.0.1:323      0.0.0.0:*                  647/chronyd        off (0.00/0/0)
udp        0      0 0.0.0.0:802        0.0.0.0:*                  629/rpcbind        off (0.00/0/0)
udp6       0      0 :::111             :::*                       1/systemd          off (0.00/0/0)
udp6       0      0 ::1:323            :::*                       647/chronyd        off (0.00/0/0)
udp6       0      0 :::802             :::*                       629/rpcbind        off (0.00/0/0)
[root@glusterfs-01 ~]#
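glusterd accepts management traffic on TCP port 24007, and each brick process listens on its own port starting at 49152 (visible later in gluster volume status). If firewalld is active, these ports need to be open between the nodes and toward any clients; a sketch, with the port range sized as an assumption for a handful of bricks:

[root@glusterfs-01 ~]# firewall-cmd --permanent --add-port=24007/tcp
[root@glusterfs-01 ~]# firewall-cmd --permanent --add-port=49152-49162/tcp
[root@glusterfs-01 ~]# firewall-cmd --reload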
Check the version
[root@glusterfs-01 ~]# glusterfs -V
glusterfs 7.5
Repository revision: git://git.gluster.org/glusterfs.git
Copyright (c) 2006-2016 Red Hat, Inc. <https://www.gluster.org/>
GlusterFS comes with ABSOLUTELY NO WARRANTY.
It is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3
or later), or the GNU General Public License, version 2 (GPLv2),
in all cases as published by the Free Software Foundation.
[root@glusterfs-01 ~]#
Add the nodes to the trusted storage pool
[root@glusterfs-01 ~]# gluster peer probe glusterfs-02
peer probe: success.
[root@glusterfs-01 ~]# gluster peer probe glusterfs-03
peer probe: success.
[root@glusterfs-01 ~]#
Check the peer status
[root@glusterfs-01 ~]# gluster peer status
Number of Peers: 2

Hostname: glusterfs-02
Uuid: 9375a552-1cce-414c-8850-997800dd1f6e
State: Peer in Cluster (Connected)

Hostname: glusterfs-03
Uuid: c490e4ee-03f7-4b83-9456-6cccd101020f
State: Peer in Cluster (Connected)
[root@glusterfs-01 ~]#

[root@glusterfs-02 ~]# gluster peer status
Number of Peers: 2

Hostname: glusterfs-01
Uuid: 605bacf2-abb4-4083-be2b-0d17c843bc68
State: Peer in Cluster (Connected)

Hostname: glusterfs-03
Uuid: c490e4ee-03f7-4b83-9456-6cccd101020f
State: Peer in Cluster (Connected)
[root@glusterfs-02 ~]#

[root@glusterfs-03 ~]# gluster peer status
Number of Peers: 2

Hostname: glusterfs-01
Uuid: 605bacf2-abb4-4083-be2b-0d17c843bc68
State: Peer in Cluster (Connected)

Hostname: glusterfs-02
Uuid: 9375a552-1cce-414c-8850-997800dd1f6e
State: Peer in Cluster (Connected)
[root@glusterfs-03 ~]#
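For a more compact view that also includes the local node, gluster pool list prints one line per pool member (UUID, hostname, and connection state):

[root@glusterfs-01 ~]# gluster pool list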
Create a storage volume (replica 3)
[root@glusterfs-01 ~]# mkdir -p /data/brick1/gv0
[root@glusterfs-02 ~]# mkdir -p /data/brick1/gv0
[root@glusterfs-03 ~]# mkdir -p /data/brick1/gv0

[root@glusterfs-01 ~]# gluster volume create gv0 replica 3 glusterfs-01:/data/brick1/gv0 glusterfs-02:/data/brick1/gv0 glusterfs-03:/data/brick1/gv0
volume create: gv0: success: please start the volume to access data
[root@glusterfs-01 ~]#
[root@glusterfs-01 ~]# gluster volume start gv0
volume start: gv0: success
[root@glusterfs-01 ~]#
View the volume information
[root@glusterfs-01 ~]# gluster volume info

Volume Name: gv0
Type: Replicate
Volume ID: aaa143ff-c7db-4b12-9d2f-4199c2cf76c9
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: glusterfs-01:/data/brick1/gv0
Brick2: glusterfs-02:/data/brick1/gv0
Brick3: glusterfs-03:/data/brick1/gv0
Options Reconfigured:
transport.address-family: inet
storage.fips-mode-rchecksum: on
nfs.disable: on
performance.client-io-threads: off
[root@glusterfs-01 ~]#
View the volume status
[root@glusterfs-01 ~]# gluster volume status
Status of volume: gv0
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick glusterfs-01:/data/brick1/gv0         49152     0          Y       1580
Brick glusterfs-02:/data/brick1/gv0         49152     0          Y       10275
Brick glusterfs-03:/data/brick1/gv0         49152     0          Y       10248
Self-heal Daemon on localhost               N/A       N/A        Y       1601
Self-heal Daemon on glusterfs-03            N/A       N/A        Y       10269
Self-heal Daemon on glusterfs-02            N/A       N/A        Y       10296

Task Status of Volume gv0
------------------------------------------------------------------------------
There are no active volume tasks

[root@glusterfs-01 ~]#
Mount the replicated volume via any node

In a GlusterFS cluster, the server named in the mount command is used only to fetch the volume's configuration file (the volfile). After that, the client communicates directly with the servers listed in the volfile, which need not even include the server used for mounting.
[root@glusterfs-01 ~]# mount -t glusterfs glusterfs-03:/gv0 /mnt
[root@glusterfs-01 ~]# mount
sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime)
proc on /proc type proc (rw,nosuid,nodev,noexec,relatime)
devtmpfs on /dev type devtmpfs (rw,nosuid,size=917804k,nr_inodes=229451,mode=755)
securityfs on /sys/kernel/security type securityfs (rw,nosuid,nodev,noexec,relatime)
tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev)
devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000)
tmpfs on /run type tmpfs (rw,nosuid,nodev,mode=755)
tmpfs on /sys/fs/cgroup type tmpfs (ro,nosuid,nodev,noexec,mode=755)
cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd)
pstore on /sys/fs/pstore type pstore (rw,nosuid,nodev,noexec,relatime)
cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices)
cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids)
cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,net_prio,net_cls)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer)
cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpuacct,cpu)
cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset)
cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb)
cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory)
configfs on /sys/kernel/config type configfs (rw,relatime)
/dev/vda1 on / type xfs (rw,relatime,attr2,inode64,noquota)
rpc_pipefs on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw,relatime)
systemd-1 on /proc/sys/fs/binfmt_misc type autofs (rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=12616)
hugetlbfs on /dev/hugepages type hugetlbfs (rw,relatime)
mqueue on /dev/mqueue type mqueue (rw,relatime)
debugfs on /sys/kernel/debug type debugfs (rw,relatime)
/dev/sda5 on /data/brick1 type xfs (rw,relatime,attr2,inode64,noquota)
tmpfs on /run/user/0 type tmpfs (rw,nosuid,nodev,relatime,size=188220k,mode=700)
glusterfs-03:/gv0 on /mnt type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072)
[root@glusterfs-01 ~]#
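Because the server named in the mount command is only consulted for the volfile, it is a single point of failure at mount time. For a persistent mount that can fall back to the other nodes, an fstab entry along these lines can be used; backup-volfile-servers is a standard GlusterFS FUSE mount option, and _netdev defers the mount until the network is up:

glusterfs-03:/gv0  /mnt  glusterfs  defaults,_netdev,backup-volfile-servers=glusterfs-01:glusterfs-02  0 0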
Write 20 files
[root@glusterfs-01 ~]# for i in `seq -w 1 20`; do cp -rp /var/log/messages /mnt/copy-test-$i; done
[root@glusterfs-01 ~]#
Confirm the number of files written
[root@glusterfs-01 ~]# ls -lA /mnt/copy* | wc -l
20
[root@glusterfs-01 ~]#
Inspect the written files on each node's local brick mount point
[root@glusterfs-01 ~]# ls /data/brick1/gv0/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@glusterfs-01 ~]#

[root@glusterfs-02 ~]# ls /data/brick1/gv0/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@glusterfs-02 ~]#

[root@glusterfs-03 ~]# ls /data/brick1/gv0/
copy-test-01  copy-test-03  copy-test-05  copy-test-07  copy-test-09  copy-test-11  copy-test-13  copy-test-15  copy-test-17  copy-test-19
copy-test-02  copy-test-04  copy-test-06  copy-test-08  copy-test-10  copy-test-12  copy-test-14  copy-test-16  copy-test-18  copy-test-20
[root@glusterfs-03 ~]#
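Because gv0 is a replica 3 volume, every brick holds a full copy of all 20 files; on a plain distribute volume they would instead be spread across the bricks. As a final check, the replicas can be compared by checksum on each node; the three outputs should be identical:

[root@glusterfs-01 ~]# md5sum /data/brick1/gv0/copy-test-01
[root@glusterfs-02 ~]# md5sum /data/brick1/gv0/copy-test-01
[root@glusterfs-03 ~]# md5sum /data/brick1/gv0/copy-test-01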