zfs用来顶替Raid控制卡,有相当强悍的性能,TrueNAS用的就是这玩意。
官方安装文档: https://openzfs.github.io/openzfs-docs/Getting%20Started/RHEL%20and%20CentOS.html
CentOS7安装
yum install -y epel-release
yum install -y https://zfsonlinux.org/epel/zfs-release.el7_9.noarch.rpm
yum install -y kernel-devel zfs
加载模块
[root@localhost ~]# lsmod|grep zfs
[root@localhost ~]# modprobe zfs
[root@localhost ~]# lsmod|grep zfs
zfs 3986850 0
zunicode 331170 1 zfs
zlua 151525 1 zfs
zcommon 89551 1 zfs
znvpair 94388 2 zfs,zcommon
zavl 15167 1 zfs
icp 301854 1 zfs
spl 104299 5 icp,zfs,zavl,zcommon,znvpair
看看文件系统
[root@localhost ~]# zfs list
no datasets available
192.168.85.100的本地磁盘从sdb 一直到 sdg,首先zpool建立池子,类似raid卡的功能 然后再zfs建立文件系统
[root@localhost ~]# zpool create -f zfspool sdb sdc sdd sde sdf sdg
[root@localhost ~]# zpool status
pool: zfspool
state: ONLINE
scan: none requested
config:
NAME STATE READ WRITE CKSUM
zfspool ONLINE 0 0 0
sdb ONLINE 0 0 0
sdc ONLINE 0 0 0
sdd ONLINE 0 0 0
sde ONLINE 0 0 0
sdf ONLINE 0 0 0
sdg ONLINE 0 0 0
errors: No known data errors
[root@localhost ~]# df -h
文件系统 容量 已用 可用 已用% 挂载点
devtmpfs 63G 0 63G 0% /dev
tmpfs 63G 0 63G 0% /dev/shm
tmpfs 63G 9.9M 63G 1% /run
tmpfs 63G 0 63G 0% /sys/fs/cgroup
/dev/mapper/centos-root 50G 1.7G 49G 4% /
/dev/sda1 1014M 189M 826M 19% /boot
/dev/mapper/centos-home 392G 33M 392G 1% /home
tmpfs 13G 0 13G 0% /run/user/0
zfspool 53T 128K 53T 1% /zfspool
上面对生产无意义,没有任何冗余的配置在生产是行不通的
破坏掉先
zpool destroy zfspool
条带(stripe)没有任何冗余,对机房生产环境基本无意义;做mirror即Raid 1(镜像)
[root@localhost ~]# zpool create -f zfspool mirror sdb sdc
[root@localhost ~]# zpool status
pool: zfspool
state: ONLINE
scan: none requested
config:
NAME STATE READ WRITE CKSUM
zfspool ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
sdb ONLINE 0 0 0
sdc ONLINE 0 0 0
errors: No known data errors
mirror的话往里面增加盘必须成对,单盘禁止往里面加
[root@localhost ~]# zpool add -f zfspool mirror sde
invalid vdev specification: mirror requires at least 2 devices
增加一对盘进去,这样就做成Raid10了
[root@localhost ~]# zpool add -f zfspool mirror sde sdf
[root@localhost ~]# zpool status
pool: zfspool
state: ONLINE
scan: none requested
config:
NAME STATE READ WRITE CKSUM
zfspool ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
sdb ONLINE 0 0 0
sdc ONLINE 0 0 0
mirror-1 ONLINE 0 0 0
sde ONLINE 0 0 0
sdf ONLINE 0 0 0
errors: No known data errors
另外zfs有个特殊的raidz1 raidz2 raidz3 分别代表允许1块盘坏,2块盘坏,3块盘坏 需要的盘最小数量分别是2块,3块,4块
在生产,比较适合的是允许2块盘坏(raidz2),并加1块hot spare热备盘
[root@localhost /]# zpool create -f zfspool raidz2 sdb sdc sde sdf
[root@localhost /]# zpool status
pool: zfspool
state: ONLINE
scan: none requested
config:
NAME STATE READ WRITE CKSUM
zfspool ONLINE 0 0 0
raidz2-0 ONLINE 0 0 0
sdb ONLINE 0 0 0
sdc ONLINE 0 0 0
sde ONLINE 0 0 0
sdf ONLINE 0 0 0
errors: No known data errors
增加热备盘spare
[root@localhost /]# zpool add zfspool spare sdg
[root@localhost /]# zpool status
pool: zfspool
state: ONLINE
scan: none requested
config:
NAME STATE READ WRITE CKSUM
zfspool ONLINE 0 0 0
raidz2-0 ONLINE 0 0 0
sdb ONLINE 0 0 0
sdc ONLINE 0 0 0
sde ONLINE 0 0 0
sdf ONLINE 0 0 0
spares
sdg AVAIL
errors: No known data errors
如果出现坏盘,用空闲好盘换掉坏盘sde(注意:替换盘必须是池外的空闲盘;本例中sdf在上面建池时已是raidz2-0的成员,实际操作应换成未使用的盘,此处命令与输出疑为记录时笔误——待确认)
[root@localhost /]# zpool replace zfspool sde sdf
[root@localhost /]# zpool status
pool: zfspool
state: ONLINE
scan: resilvered 1.17M in 0 days 00:00:00 with 0 errors on Thu Mar 11 20:02:19 2021
config:
NAME STATE READ WRITE CKSUM
zfspool ONLINE 0 0 0
raidz2-0 ONLINE 0 0 0
sdb ONLINE 0 0 0
sdc ONLINE 0 0 0
sdf ONLINE 0 0 0
spares
sdg AVAIL
errors: No known data errors
检查zpool磁盘组的完整性(注意池名要与实际一致,本文池名为zfspool)
zpool scrub zfspool
增加cache(即L2ARC二级读缓存)会提高读速度,类似盘阵热点SSD自动落盘技术。注意zpool create必须指定池名:
$ zpool create zfspool mirror /dev/sda /dev/sdb cache /dev/sdk /dev/sdl
增加log(即SLOG,独立的ZIL意向日志盘)会提高同步写速度,通常用SSD承担。同样必须指定池名:
$ zpool create zfspool mirror /dev/sda /dev/sdb log /dev/sdk /dev/sdl
zpool 建立好zfspool后,可以建文件系统
zfs create zfspool/dba-vol
[root@localhost /]# zfs list
NAME USED AVAIL REFER MOUNTPOINT
zfspool 1.47M 35.2T 153K /zfspool
zfspool/dba-vol 262K 35.2T 160K /zfspool/dba-vol
修改挂载点
zfs set mountpoint=/path/to/mount zpool-name/dataset-name
快照
zfs snapshot [pool]/[dataset name]@[snapshot name]
zfs snapshot zfspool/dba-vol@dba-vol-20210315
zfs list -t snapshot
注意,快照恢复后,该时间点之后的快照会全部丢失
zfs rollback -r [pool]/[dataset]@[snapshot name]
zfs rollback -r zfspool/dba-vol@dba-vol-20210315