1. Creating a Distributed Striped Volume
1.1 Preparation
# Add another disk (sdc) to each of the four VMs and format it
[root@mystorage1 ~]# mkfs.xfs -f /dev/sdc
# On all four machines, create the directory the block device will be mounted on
[root@mystorage1 ~]# mkdir -p /storage/brick2
# Mount the disk on that directory
[root@mystorage1 ~]# mount /dev/sdc /storage/brick2
[root@mystorage1 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda2        38G   15G   21G  42% /
tmpfs           935M     0  935M   0% /dev/shm
/dev/sda1       283M   85M  183M  32% /boot
/dev/sdc         50G   33M   50G   1% /storage/brick2
/dev/sdb         50G   33M   50G   1% /storage/brick1
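These mounts will not survive a reboot. A minimal sketch of making them persistent through /etc/fstab, assuming the same devices and mount points as above (run on each node):

# Append fstab entries so the bricks come back after a reboot
echo '/dev/sdb /storage/brick1 xfs defaults 0 0' >> /etc/fstab
echo '/dev/sdc /storage/brick2 xfs defaults 0 0' >> /etc/fstab
# Verify the new entries mount cleanly before relying on them
mount -a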
1.2 Creating the distributed striped volume
# stripe 2 means each file's data is split into 2 pieces spread across different machines
[root@mystorage1 ~]# gluster volume create gv3 stripe 2 mystorage3:/storage/brick2 mystorage4:/storage/brick2 force
volume create: gv3: success: please start the volume to access data
[root@mystorage1 ~]# gluster volume start gv3
volume start: gv3: success
[root@mystorage1 ~]# gluster volume info

Volume Name: gv1
Type: Distribute
Volume ID: d721ad47-3bfb-45fe-bc47-67ce11d19af9
Status: Stopped
Number of Bricks: 2
Transport-type: tcp
Bricks:
Brick1: mystorage1:/storage/brick1
Brick2: mystorage2:/storage/brick1
Options Reconfigured:
performance.readdir-ahead: on

Volume Name: gv2
Type: Replicate
Volume ID: 228f63c4-0219-4c39-8e87-f3ae237ff6d9
Status: Stopped
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: mystorage3:/storage/brick1
Brick2: mystorage4:/storage/brick1
Options Reconfigured:
performance.readdir-ahead: on

Volume Name: gv3
Type: Stripe          # striped volume
Volume ID: e7566631-7b79-433a-97d0-c98db746f017
Status: Started
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: mystorage3:/storage/brick2
Brick2: mystorage4:/storage/brick2
Options Reconfigured:
performance.readdir-ahead: on

# Create the 3 mount directories on every machine, then run the following
[root@mystorage4 ~]# mkdir -p /gv1 /gv2 /gv3
[root@mystorage1 ~]# gluster volume start gv1
volume start: gv1: success
[root@mystorage1 ~]# gluster volume start gv2
volume start: gv2: success
[root@mystorage1 ~]# mount -t glusterfs 127.0.0.1:gv1 /gv1
[root@mystorage1 ~]# mount -t glusterfs 127.0.0.1:gv2 /gv2
[root@mystorage1 ~]# mount -t glusterfs 127.0.0.1:gv3 /gv3
[root@mystorage1 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda2        38G   15G   21G  42% /
tmpfs           935M     0  935M   0% /dev/shm
/dev/sda1       283M   85M  183M  32% /boot
/dev/sdc         50G   33M   50G   1% /storage/brick2
/dev/sdb         50G   33M   50G   1% /storage/brick1
127.0.0.1:gv1   100G   65M  100G   1% /gv1
127.0.0.1:gv2    50G   33M   50G   1% /gv2
127.0.0.1:gv3   100G   65M  100G   1% /gv3
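The mounts above go through 127.0.0.1 only because every node here is also a server. On a dedicated client you would mount from one of the server hostnames; a sketch, assuming a client with the glusterfs-fuse package and a version recent enough to support the backup-volfile-servers mount option:

# Fetch the volfile from mystorage1; fall back to the listed servers if it is down
mount -t glusterfs -o backup-volfile-servers=mystorage2:mystorage3 mystorage1:/gv3 /gv3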
1.3 Testing and verifying
# From storage4, write a 10 MB file and a 20 MB file into gv3 to see how
# the distributed striped volume lays data out
[root@mystorage4 gv3]# dd if=/dev/zero bs=1024 count=10000 of=/gv3/10M.file
10000+0 records in
10000+0 records out
10240000 bytes (10 MB) copied, 0.530019 s, 19.3 MB/s
[root@mystorage4 gv3]# dd if=/dev/zero bs=1024 count=20000 of=/gv3/20M.file
20000+0 records in
20000+0 records out
20480000 bytes (20 MB) copied, 1.26475 s, 16.2 MB/s
# In the gv3 mount the files show their full 10M and 20M sizes, while on the
# underlying bricks each file is split into 2 halves stored on storage3 and storage4
[root@mystorage4 gv3]# ls -lh
total 30M
-rw-r--r-- 1 root root 9.8M Mar 20 15:51 10M.file
-rw-r--r-- 1 root root  20M Mar 20 15:52 20M.file
[root@mystorage4 gv3]# cd /storage/brick2 && ls -lh
total 15M
-rw-r--r-- 2 root root 4.9M Mar 20 15:51 10M.file
-rw-r--r-- 2 root root 9.8M Mar 20 15:52 20M.file
# The same result on storage3
[root@mystorage3 ~]# cd /storage/brick2 && ls -lh
total 15M
-rw-r--r-- 2 root root 4.9M Mar 20 15:51 10M.file
-rw-r--r-- 2 root root 9.8M Mar 20 15:52 20M.file
[root@mystorage3 brick2]# cd /gv3 && ls -lh
total 30M
-rw-r--r-- 1 root root 9.8M Mar 20 15:51 10M.file
-rw-r--r-- 1 root root  20M Mar 20 15:52 20M.file
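The roughly half-and-half split comes from the stripe translator writing fixed-size chunks round-robin across the two bricks (commonly 128KB per chunk by default). A sketch of tuning the chunk size, assuming this GlusterFS version still exposes the cluster.stripe-block-size option:

# Larger chunks mean fewer round-trips for big sequential files;
# the setting only affects files created after the change
gluster volume set gv3 cluster.stripe-block-size 256KB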
2. Distributed Replicated Volume Test
2.1 Testing the replicated volume
[root@mystorage4 brick2]# cd /gv2
[root@mystorage4 gv2]# ls
[root@mystorage4 gv2]# dd if=/dev/zero bs=1024 count=10000 of=/gv2/10M.file
10000+0 records in
10000+0 records out
10240000 bytes (10 MB) copied, 0.521815 s, 19.6 MB/s
[root@mystorage4 gv2]# dd if=/dev/zero bs=1024 count=20000 of=/gv2/20M.file
20000+0 records in
20000+0 records out
20480000 bytes (20 MB) copied, 1.18621 s, 17.3 MB/s
[root@mystorage4 gv2]# dd if=/dev/zero bs=1024 count=30000 of=/gv2/30M.file
30000+0 records in
30000+0 records out
30720000 bytes (31 MB) copied, 2.33485 s, 13.2 MB/s
[root@mystorage4 gv2]# cd /storage/brick1 && ls -lh
total 59M
-rw-r--r-- 2 root root 9.8M Mar 20 16:01 10M.file
-rw-r--r-- 2 root root  20M Mar 20 16:01 20M.file
-rw-r--r-- 2 root root  30M Mar 20 16:01 30M.file
[root@mystorage3 brick2]# cd /storage/brick1 && ls -lh
total 59M
-rw-r--r-- 2 root root 9.8M Mar 20 16:01 10M.file
-rw-r--r-- 2 root root  20M Mar 20 16:01 20M.file
-rw-r--r-- 2 root root  30M Mar 20 16:01 30M.file
# Unlike the striped volume, the replicated volume does not split files on the
# underlying bricks; it stores a full copy on each of the two bricks
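A quick way to confirm the two copies are byte-identical is to checksum the file on both bricks; a sketch run from mystorage4, assuming passwordless ssh between the nodes:

# The local copy...
md5sum /storage/brick1/10M.file
# ...should produce exactly the same checksum as the replica on the peer
ssh mystorage3 md5sum /storage/brick1/10M.file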
3. Expanding a Volume
3.1 Expanding a volume with the add-brick command
[root@mystorage4 brick1]# gluster volume stop gv2
Stopping volume will make its data inaccessible. Do you want to continue? (y/n) y
volume stop: gv2: success
# Add the two remaining brick2 disks on mystorage1 and mystorage2
[root@mystorage4 brick1]# gluster volume add-brick gv2 replica 2 mystorage1:/storage/brick2 mystorage2:/storage/brick2 force
volume add-brick: success
[root@mystorage4 brick1]# gluster volume start gv2
volume start: gv2: success
[root@mystorage4 brick1]# df -h
Filesystem                    Size  Used Avail Use% Mounted on
/dev/mapper/VolGroup-lv_root   18G  2.4G   14G  15% /
tmpfs                         931M     0  931M   0% /dev/shm
/dev/sda1                     477M   40M  412M   9% /boot
/dev/sdc                       50G   47M   50G   1% /storage/brick2
/dev/sdb                       50G   91M   50G   1% /storage/brick1
127.0.0.1:gv1                 100G   65M  100G   1% /gv1
127.0.0.1:gv2                  50G   91M   50G   1% /gv2
127.0.0.1:gv3                 100G   94M  100G   1% /gv3
# Remount gv2 so the client picks up the new size
[root@mystorage4 brick1]# umount /gv2
[root@mystorage4 brick1]# mount -t glusterfs 127.0.0.1:gv2 /gv2
[root@mystorage4 brick1]# df -h
Filesystem                    Size  Used Avail Use% Mounted on
/dev/mapper/VolGroup-lv_root   18G  2.4G   14G  15% /
tmpfs                         931M     0  931M   0% /dev/shm
/dev/sda1                     477M   40M  412M   9% /boot
/dev/sdc                       50G   47M   50G   1% /storage/brick2
/dev/sdb                       50G   91M   50G   1% /storage/brick1
127.0.0.1:gv1                 100G   65M  100G   1% /gv1
127.0.0.1:gv3                 100G   94M  100G   1% /gv3
127.0.0.1:gv2                 100G  124M  100G   1% /gv2
[root@mystorage4 brick1]# gluster volume info gv2

Volume Name: gv2
Type: Distributed-Replicate
Volume ID: 228f63c4-0219-4c39-8e87-f3ae237ff6d9
Status: Started
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: mystorage3:/storage/brick1
Brick2: mystorage4:/storage/brick1
Brick3: mystorage1:/storage/brick2
Brick4: mystorage2:/storage/brick2
Options Reconfigured:
performance.readdir-ahead: on

# After the add-brick, gv2 has grown from 50G to 100G, but the newly added
# bricks contain none of the existing data
[root@mystorage1 ~]# ls -al /storage/brick2
total 4
drwxr-xr-x 4 root root   39 Mar 20 01:03 .
drwxr-xr-x 4 root root 4096 Mar 20 00:17 ..
drw------- 8 root root  127 Mar 20 01:04 .glusterfs
drwxr-xr-x 3 root root   24 Mar 20 01:03 .trashcan
# Testing after the expansion shows that all files still live on the
# pre-expansion bricks
[root@mystorage4 brick1]# ll
total 60000
-rw-r--r-- 2 root root 10240000 Mar 20 16:01 10M.file
-rw-r--r-- 2 root root 20480000 Mar 20 16:01 20M.file
-rw-r--r-- 2 root root 30720000 Mar 20 16:01 30M.file
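Before trusting the new capacity, it is worth checking that all four brick processes actually came online; a sketch:

# Every brick should report Online = Y along with its port and PID
gluster volume status gv2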
4. Rebalancing the Storage
4.1 Creating test data
# For safety, Gluster will not place data on newly added bricks on its own;
# that only happens after a rebalance (use with caution in production!)
[root@mystorage4 brick1]# cd /gv2
[root@mystorage4 gv2]# dd if=/dev/zero bs=1024 count=10000 of=/gv2/10M-1.file
10000+0 records in
10000+0 records out
10240000 bytes (10 MB) copied, 0.514694 s, 19.9 MB/s
[root@mystorage4 gv2]# dd if=/dev/zero bs=1024 count=10000 of=/gv2/10M-2.file
10000+0 records in
10000+0 records out
10240000 bytes (10 MB) copied, 0.663699 s, 15.4 MB/s
[root@mystorage4 gv2]# dd if=/dev/zero bs=1024 count=10000 of=/gv2/10M-3.file
10000+0 records in
10000+0 records out
10240000 bytes (10 MB) copied, 0.752133 s, 13.6 MB/s
[root@mystorage4 gv2]# dd if=/dev/zero bs=1024 count=10000 of=/gv2/10M-4.file
10000+0 records in
10000+0 records out
10240000 bytes (10 MB) copied, 0.716057 s, 14.3 MB/s
# All the new files land on the old bricks; the new bricks stay empty
[root@mystorage4 gv2]# cd /storage/brick1 && ls -lh
total 98M
-rw-r--r-- 2 root root 9.8M Mar 20 16:26 10M-1.file
-rw-r--r-- 2 root root 9.8M Mar 20 16:27 10M-2.file
-rw-r--r-- 2 root root 9.8M Mar 20 16:27 10M-3.file
-rw-r--r-- 2 root root 9.8M Mar 20 16:27 10M-4.file
-rw-r--r-- 2 root root 9.8M Mar 20 16:01 10M.file
-rw-r--r-- 2 root root  20M Mar 20 16:01 20M.file
-rw-r--r-- 2 root root  30M Mar 20 16:01 30M.file
[root@mystorage2 brick2]# cd /storage/brick2 && ls -lh
total 0
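The four dd invocations above can be compressed into a small loop when generating test data; a sketch producing the same files:

# Write four 10 MB files onto the gv2 mount
for i in 1 2 3 4; do
    dd if=/dev/zero bs=1024 count=10000 of=/gv2/10M-$i.file
done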
4.2 Running the rebalance
# With a large amount of data, the rebalance takes a while
[root@mystorage3 brick2]# gluster volume rebalance gv2 start
volume rebalance: gv2: success: Rebalance on gv2 has been started successfully. Use rebalance status command to check status of the rebalance process.
ID: c2c85706-9db9-4286-b20f-69e12b2ce946
# Check the progress of the rebalance
[root@mystorage3 brick2]# gluster volume rebalance gv2 status
       Node  Rebalanced-files      size   scanned  failures  skipped     status  run time in h:m:s
  ---------  ----------------  --------  --------  --------  -------  ---------  -----------------
  localhost                 4    48.8MB         7         0        0  completed              0:0:2
 mystorage2                 0    0Bytes         0         0        0  completed              0:0:1
 mystorage4                 0    0Bytes         0         0        0  completed              0:0:1
 mystorage1                 0    0Bytes         4         0        0  completed              0:0:0
volume rebalance: gv2: success
# Compare the bricks before and after the rebalance
# storage4 before the rebalance
[root@mystorage4 gv2]# cd /storage/brick1 && ls -lh
total 98M
-rw-r--r-- 2 root root 9.8M Mar 20 16:26 10M-1.file
-rw-r--r-- 2 root root 9.8M Mar 20 16:27 10M-2.file
-rw-r--r-- 2 root root 9.8M Mar 20 16:27 10M-3.file
-rw-r--r-- 2 root root 9.8M Mar 20 16:27 10M-4.file
-rw-r--r-- 2 root root 9.8M Mar 20 16:01 10M.file
-rw-r--r-- 2 root root  20M Mar 20 16:01 20M.file
-rw-r--r-- 2 root root  30M Mar 20 16:01 30M.file
# storage4 after the rebalance
[root@mystorage4 brick1]# ls -lh
total 49M
-rw-r--r-- 2 root root 9.8M Mar 20 16:27 10M-2.file
-rw-r--r-- 2 root root 9.8M Mar 20 16:01 10M.file
-rw-r--r-- 2 root root  30M Mar 20 16:01 30M.file
# storage2 before the rebalance
[root@mystorage2 brick2]# cd /storage/brick2 && ls -lh
total 0
# storage2 after the rebalance
[root@mystorage2 brick2]# ls -lh
total 49M
-rw-r--r-- 2 root root 9.8M Mar 20 16:26 10M-1.file
-rw-r--r-- 2 root root 9.8M Mar 20 16:27 10M-3.file
-rw-r--r-- 2 root root 9.8M Mar 20 16:27 10M-4.file
-rw-r--r-- 2 root root  20M Mar 20 16:01 20M.file
Note: rebalancing the layout is necessary because the layout is static. When new bricks are added to an existing volume, newly created files are still distributed to the old bricks, so the layout must be rebalanced for the new bricks to take effect. A layout rebalance only makes the new layout effective; it does not move existing data onto the new layout. If you also want the data spread out once the new layout is in effect, you must rebalance the volume's data as well, as the sketch below shows.
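GlusterFS exposes these two steps as separate commands: fix-layout only recomputes the hash ranges so that new files can land on the new bricks, while a plain rebalance start also migrates existing files. A sketch:

# Step 1: make the new layout effective without moving any existing data
gluster volume rebalance gv2 fix-layout start
# Step 2 (optional and heavier): migrate existing files to match the new layout
gluster volume rebalance gv2 start
gluster volume rebalance gv2 status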
Of course, if you cannot risk the problems a rebalance might cause, you can simply add the new bricks and leave the existing data where it is.
5. Removing Bricks and Deleting Volumes
Note: you may want to shrink a volume online, for example when hardware breaks or the network fails, by removing the affected bricks. Once bricks are removed, their data is no longer accessible through the gluster mount point; only after the bricks have been removed from the volume's configuration can you access that data directly on the bricks again. When removing bricks from a distributed replicated or distributed striped volume, the number of bricks removed must be a multiple of the replica or stripe count. For example, for a distributed striped volume with stripe 2, bricks must be removed 2, 4, 6, 8, and so on at a time.
5.1 Removing bricks
[root@mystorage4 gv2]# gluster volume stop gv2
Stopping volume will make its data inaccessible. Do you want to continue? (y/n) y
volume stop: gv2: success
# replica is 2, so bricks must also be removed in pairs
[root@mystorage4 gv2]# gluster volume remove-brick gv2 replica 2 mystorage3:/storage/brick1 mystorage4:/storage/brick1 force
Removing brick(s) can result in data loss. Do you want to Continue? (y/n) y
volume remove-brick commit force: success
# Check the size of gv2: it has shrunk from 100G back to 50G
[root@mystorage4 gv2]# gluster volume start gv2
volume start: gv2: success
[root@mystorage4 gv2]# df -h
Filesystem                    Size  Used Avail Use% Mounted on
/dev/mapper/VolGroup-lv_root   18G  2.4G   14G  15% /
tmpfs                         931M     0  931M   0% /dev/shm
/dev/sda1                     477M   40M  412M   9% /boot
/dev/sdc                       50G   47M   50G   1% /storage/brick2
/dev/sdb                       50G   82M   50G   1% /storage/brick1
127.0.0.1:gv1                 100G   65M  100G   1% /gv1
127.0.0.1:gv3                 100G   94M  100G   1% /gv3
127.0.0.1:gv2                  50G   82M   50G   1% /gv2
# gv2 no longer contains the data that lived on the storage3/storage4 bricks
[root@mystorage4 gv2]# ll
total 50000
-rw-r--r-- 1 root root 10240000 Mar 20 16:26 10M-1.file
-rw-r--r-- 1 root root 10240000 Mar 20 16:27 10M-3.file
-rw-r--r-- 1 root root 10240000 Mar 20 16:27 10M-4.file
-rw-r--r-- 1 root root 20480000 Mar 20 16:01 20M.file
# The removed bricks' data has not been deleted from the underlying disks,
# so it can still be re-added if the removal was a mistake
[root@mystorage4 gv2]# cd /storage/brick1 && ls -lh
total 49M
-rw-r--r-- 2 root root 9.8M Mar 20 16:27 10M-2.file
-rw-r--r-- 2 root root 9.8M Mar 20 16:01 10M.file
-rw-r--r-- 2 root root  30M Mar 20 16:01 30M.file
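The force variant used above drops the bricks without migrating anything. If the volume is kept online, newer GlusterFS versions also support a gentler start/status/commit sequence that moves the data off the bricks first; a sketch, assuming the remaining bricks have room for the migrated data:

# Begin draining the bricks while the volume stays online
gluster volume remove-brick gv2 replica 2 mystorage3:/storage/brick1 mystorage4:/storage/brick1 start
# Wait until the migration reports completed
gluster volume remove-brick gv2 replica 2 mystorage3:/storage/brick1 mystorage4:/storage/brick1 status
# Only then make the removal final
gluster volume remove-brick gv2 replica 2 mystorage3:/storage/brick1 mystorage4:/storage/brick1 commit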
Brick removal is rarely done in production: a distributed volume scatters files across bricks by hash algorithm, so removing bricks can easily lose data.
5.2 Deleting a volume
[root@mystorage4 brick1]# umount /gv1
[root@mystorage4 brick1]# df -h
Filesystem                    Size  Used Avail Use% Mounted on
/dev/mapper/VolGroup-lv_root   18G  2.4G   14G  15% /
tmpfs                         931M     0  931M   0% /dev/shm
/dev/sda1                     477M   40M  412M   9% /boot
/dev/sdc                       50G   47M   50G   1% /storage/brick2
/dev/sdb                       50G   82M   50G   1% /storage/brick1
127.0.0.1:gv3                 100G   94M  100G   1% /gv3
127.0.0.1:gv2                  50G   82M   50G   1% /gv2
# After unmounting, stop the volume, then delete it
[root@mystorage4 brick1]# gluster volume stop gv1
Stopping volume will make its data inaccessible. Do you want to continue? (y/n) y
volume stop: gv1: success
[root@mystorage4 brick1]# gluster volume delete gv1
Deleting volume will erase all information about the volume. Do you want to continue? (y/n) y
volume delete: gv1: success
# Check the volume info after the deletion
[root@mystorage4 brick1]# gluster volume info gv1
Volume gv1 does not exist
[root@mystorage4 brick1]# gluster volume info

Volume Name: gv2
Type: Replicate
Volume ID: 228f63c4-0219-4c39-8e87-f3ae237ff6d9
Status: Started
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: mystorage1:/storage/brick2
Brick2: mystorage2:/storage/brick2
Options Reconfigured:
performance.readdir-ahead: on

Volume Name: gv3
Type: Stripe
Volume ID: e7566631-7b79-433a-97d0-c98db746f017
Status: Started
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: mystorage3:/storage/brick2
Brick2: mystorage4:/storage/brick2
Options Reconfigured:
performance.readdir-ahead: on

# Even after the volume is deleted, the data still exists on the underlying
# bricks, as long as you do not reformat the disks
[root@mystorage1 brick2]# cd /storage/brick1
[root@mystorage1 brick1]# ls
aa
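If you later want to reuse one of these brick directories in a new volume, gluster will usually refuse with a "brick is already part of a volume" error until the old metadata is cleared. A sketch, assuming you have verified the leftover data is no longer needed:

# Strip the old volume's extended attributes from the brick root
setfattr -x trusted.glusterfs.volume-id /storage/brick1
setfattr -x trusted.gfid /storage/brick1
# Remove the hidden metadata directory left behind by the old volume
rm -rf /storage/brick1/.glusterfs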
In production, deleting a volume is done only when starting over, for example after a misconfiguration or a poorly chosen naming scheme.
Note:
Do not casually change the directories GlusterFS mounts its bricks on. Once a brick directory holds data, GlusterFS creates two hidden directories under it that record metadata about the stored data, and swapping the directory around risks breaking the volume.
[root@mystorage1 brick1]# ls -a /storage/brick1
.  ..  aa  .glusterfs  .trashcan
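Those hidden entries, together with extended attributes on the brick root, are how gluster ties a directory to a volume. A sketch of inspecting them, assuming the attr package (which provides getfattr) is installed:

# Dump all extended attributes on the brick root in hex
getfattr -d -m . -e hex /storage/brick1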