日志审计加装了两块硬盘,加上自带的系统盘共三块,但是在全局设置里面没有磁盘挂载的功能,是需要重装配置raid还是把新装的两块盘做下raid后能出现挂载的配置吗?
目前发现日志审计不做raid的情况下全局设置中似乎不会出现磁盘挂载的功能,如果保持原系统盘不动,新增的两块硬盘做raid,是否全局设置中也会出现磁盘挂载的功能
(0)
最佳答案
需要做raid
CSAP-SA综合日志审计平台,为了能够存储更多日志,需要额外扩容5块硬盘
1、执行lvdisplay h3linux_cyber查看home分区是否为逻辑卷,如果有如下回显证明可以进行扩容操作,如提示不支持此命令,但必须要扩容,需要联系400确认
[root@cyber ~]# lvdisplay h3linux_cyber
--- Logical volume ---
LV Path /dev/h3linux_cyber/swap
LV Name swap
VG Name h3linux_cyber
LV UUID KAQMMN-fj6O-pUQJ-1g2h-af4c-3AJF-LOACqW
LV Write Access read/write
LV Creation host, time localhost, 2022-05-26 03:07:24 +0800
LV Status available
# open 2
LV Size 15.56 GiB
Current LE 3984
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 1024
Block device 253:1
--- Logical volume ---
LV Path /dev/h3linux_cyber/home
LV Name home
VG Name h3linux_cyber
LV UUID WezwI1-re3X-3KK0-HZy5-u0R7-8bJ5-MuRgZh
LV Write Access read/write
LV Creation host, time localhost, 2022-05-26 03:07:25 +0800
LV Status available
# open 1
LV Size 7.21 TiB
Current LE 1890629
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 1024
Block device 253:2
--- Logical volume ---
LV Path /dev/h3linux_cyber/root
LV Name root
VG Name h3linux_cyber
LV UUID R7wQ2m-gxAD-O3hY-7XBX-lOT8-Qyc4-7i5Ke2
LV Write Access read/write
LV Creation host, time localhost, 2022-05-26 03:31:15 +0800
LV Status available
# open 1
LV Size 50.00 GiB
Current LE 12800
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 1024
Block device 253:0
[root@cyber ~]#
2、插入新的硬盘,一次扩容至少三块,且单个硬盘的大小应该相同。本次插入五块,可以看到新的硬盘为sdd、sde、sdf、sdg和sdh
[root@cyber ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 3.7T 0 disk
md126 9:126 0 7.3T 0 raid5
md126p1 259:0 0 200M 0 md /boot/efi
md126p2 259:1 0 1G 0 md /boot
md126p3 259:2 0 7.3T 0 md
h3linux_cyber-root 253:0 0 50G 0 lvm /
h3linux_cyber-swap 253:1 0 15.6G 0 lvm [SWAP]
h3linux_cyber-home 253:2 0 7.2T 0 lvm /home
sdb 8:16 0 3.7T 0 disk
md126 9:126 0 7.3T 0 raid5
md126p1 259:0 0 200M 0 md /boot/efi
md126p2 259:1 0 1G 0 md /boot
md126p3 259:2 0 7.3T 0 md
h3linux_cyber-root 253:0 0 50G 0 lvm /
h3linux_cyber-swap 253:1 0 15.6G 0 lvm [SWAP]
h3linux_cyber-home 253:2 0 7.2T 0 lvm /home
sdc 8:32 0 3.7T 0 disk
md126 9:126 0 7.3T 0 raid5
md126p1 259:0 0 200M 0 md /boot/efi
md126p2 259:1 0 1G 0 md /boot
md126p3 259:2 0 7.3T 0 md
h3linux_cyber-root 253:0 0 50G 0 lvm /
h3linux_cyber-swap 253:1 0 15.6G 0 lvm [SWAP]
h3linux_cyber-home 253:2 0 7.2T 0 lvm /home
sdd 8:48 0 3.7T 0 disk
sde 8:64 0 3.7T 0 disk
sdf 8:80 0 3.7T 0 disk
sdg 8:96 0 3.7T 0 disk
sdh 8:112 0 3.7T 0 disk
3、格式化磁盘,依次执行mkfs.xfs -f /dev/sdd、mkfs.xfs -f /dev/sde、mkfs.xfs -f /dev/sdf、mkfs.xfs -f /dev/sdg、mkfs.xfs -f /dev/sdh将新插入的硬盘全部格式化
[root@cyber ~]# mkfs.xfs -f /dev/sdd
meta-data=/dev/sdd isize=512 agcount=4, agsize=244188662 blks
= sectsz=4096 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=976754646, imaxpct=5
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=476930, version=2
= sectsz=4096 sunit=1 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@cyber ~]#
[root@cyber ~]# mkfs.xfs -f /dev/sde
meta-data=/dev/sde isize=512 agcount=4, agsize=244188662 blks
= sectsz=4096 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=976754646, imaxpct=5
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=476930, version=2
= sectsz=4096 sunit=1 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@cyber ~]#
[root@cyber ~]# mkfs.xfs -f /dev/sdf
meta-data=/dev/sdf isize=512 agcount=4, agsize=244188662 blks
= sectsz=4096 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=976754646, imaxpct=5
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=476930, version=2
= sectsz=4096 sunit=1 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@cyber ~]#
[root@cyber ~]# mkfs.xfs -f /dev/sdg
meta-data=/dev/sdg isize=512 agcount=4, agsize=244188662 blks
= sectsz=4096 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=976754646, imaxpct=5
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=476930, version=2
= sectsz=4096 sunit=1 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@cyber ~]#
[root@cyber ~]# mkfs.xfs -f /dev/sdh
meta-data=/dev/sdh isize=512 agcount=4, agsize=244188662 blks
= sectsz=4096 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=976754646, imaxpct=5
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=476930, version=2
= sectsz=4096 sunit=1 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@cyber ~]#
4、创建Raid阵列,建议使用Raid 5,执行:mdadm -C /dev/md0 -l5 -n5 --consistency-policy=ppl /dev/sdd /dev/sde /dev/sdf /dev/sdg /dev/sdh,执行完成后可以cat /proc/mdstat查看阵列状态。(mdadm -C /dev/md0 -l5 -n5 --consistency-policy=ppl /dev/sdd /dev/sde /dev/sdf /dev/sdg /dev/sdh 本次配置RAID 5阵列名称为md0,可根据需要自定义阵列名;-n5代表有5块硬盘; --consistency-policy=ppl表示阵列使用ppl模式,该模式可解决write hole问题,提高稳定性,但是阵列读写性能会稍降低,扩容时建议使用该模式;/dev/sdd /dev/sde /dev/sdf /dev/sdg /dev/sdh是本次新插入的硬盘)
[root@cyber ~]# cat /proc/mdstat
Personalities : [raid6] [raid5] [raid4]
md126 : active raid5 sdb[2] sdc[1] sda[0]
7814025216 blocks super external:/md127/0 level 5, 128k chunk, algorithm 0 [3/3] [UUU]
md127 : inactive sda[2](S) sdc[1](S) sdb[0](S)
9459 blocks super external:imsm
unused devices: <none>
[root@cyber ~]# mdadm -C /dev/md0 -l5 -n5 --consistency-policy=ppl /dev/sdd /dev/sde /dev/sdf /dev/sdg /dev/sdh
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
[root@cyber ~]#
[root@cyber ~]# cat /proc/mdstat
Personalities : [raid6] [raid5] [raid4]
md0 : active raid5 sdh[5] sdg[3] sdf[2] sde[1] sdd[0]
15627540480 blocks super 1.2 level 5, 512k chunk, algorithm 2 [5/4] [UUUU_]
[>....................] recovery = 0.0% (1405568/3906885120) finish=602.0min speed=108120K/sec
md126 : active raid5 sdb[2] sdc[1] sda[0]
7814025216 blocks super external:/md127/0 level 5, 128k chunk, algorithm 0 [3/3] [UUU]
md127 : inactive sda[2](S) sdc[1](S) sdb[0](S)
9459 blocks super external:imsm
unused devices: <none>
5、执行lvm pvcreate -ff /dev/md0命令创建PV,md0为上面创建的Raid5阵列名
[root@cyber ~]# lvm pvcreate -ff /dev/md0
Physical volume "/dev/md0" successfully created.
/var/spool/mail/root
[root@cyber ~]#
6、将创建的PV加入到系统的VG中。实际操作时先使用vgdisplay命令可以查看当前系统中VG名称,本次名称为h3linux_cyber,再使用vgextend命令将PV加入到VG中
[root@cyber ~]# vgdisplay
--- Volume group ---
VG Name h3linux_cyber
System ID
Format lvm2
Metadata Areas 1
Metadata Sequence No 4
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 3
Open LV 3
Max PV 0
Cur PV 1
Act PV 1
VG Size <7.28 TiB
PE Size 4.00 MiB
Total PE 1907414
Alloc PE / Size 1907413 / <7.28 TiB
Free PE / Size 1 / 4.00 MiB
VG UUID hVID0l-8576-IjlI-exti-uABO-9wQR-uh2pm8
[root@cyber ~]#
[root@cyber ~]# vgextend h3linux_cyber /dev/md0
Volume group "h3linux_cyber" successfully extended
[root@cyber ~]#
7、再次使用vgdisplay命令查看到该VG下空闲PE增加了
[root@cyber ~]# vgdisplay
--- Volume group ---
VG Name h3linux_cyber
System ID
Format lvm2
Metadata Areas 2
Metadata Sequence No 5
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 3
Open LV 3
Max PV 0
Cur PV 2
Act PV 2
VG Size 21.83 TiB
PE Size 4.00 MiB
Total PE 5722731
Alloc PE / Size 1907413 / <7.28 TiB
Free PE / Size 3815318 / 14.55 TiB
VG UUID hVID0l-8576-IjlI-exti-uABO-9wQR-uh2pm8
8、使用lvextend命令将空闲PE加入到home分区中。如:lvextend -l +100%FREE /dev/mapper/h3linux_cyber-home,其中分区名称可通过fdisk -l查询
[root@cyber ~]# fdisk -l
……
Disk /dev/mapper/h3linux_cyber-home: 23932.5 GB, 23932476325888 bytes, 46743117824 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 4096 bytes
I/O size (minimum/optimal): 524288 bytes / 2097152 bytes
…………..
[root@cyber ~]# lvextend -l +100%FREE /dev/mapper/h3linux_cyber-home
Size of logical volume h3linux_cyber/home changed from 7.21 TiB (1890629 extents) to <21.77 TiB (5705947 extents).
Logical volume h3linux_cyber/home successfully resized.
[root@cyber ~]#
9、使用xfs_growfs /dev/mapper/h3linux_cyber-home命令将home分区扩展,分区扩展完成后,使用df -h命令查看home可用空间由7.34TB扩展至22TB
[root@cyber ~]# xfs_growfs /dev/mapper/h3linux_cyber-home
meta-data=/dev/mapper/h3linux_cyber-home isize=512 agcount=32, agsize=60500128 blks
= sectsz=4096 attr=2, projid32bit=1
= crc=1 finobt=0 spinodes=0
data = bsize=4096 blocks=1936004096, imaxpct=5
= sunit=32 swidth=64 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal bsize=4096 blocks=521728, version=2
= sectsz=4096 sunit=1 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
data blocks changed from 1936004096 to 5842889728
[root@cyber ~]#
[root@cyber ~]# df -h
Filesystem                      Size  Used Avail Use% Mounted on
/dev/mapper/h3linux_cyber-root 50G 5.7G 45G 12% /
devtmpfs 110G 0 110G 0% /dev
tmpfs 110G 0 110G 0% /dev/shm
tmpfs 110G 707M 110G 1% /run
tmpfs 110G 0 110G 0% /sys/fs/cgroup
/dev/md126p2 1017M 158M 860M 16% /boot
/dev/md126p1 200M 9.8M 191M 5% /boot/efi
/dev/mapper/h3linux_cyber-home 22T 8.9G 22T 1% /home
tmpfs 22G 0 22G 0% /run/user/0
overlay 50G 5.7G 45G 12% /var/lib/docker/overlay2/29099e61670fa97abf190fef51f55e18cf6e8c06c5fc4e42d55b3218b85774c9/merged
shm 64M 0 64M 0% /var/lib/docker/containers/5904b12885980e39209a115e392dbf42056a488d7c2edc91525ed6d46b45cd49/mounts/shm
[root@cyber ~]#
1、执行lvdisplay h3linux_cyber查看home分区是否为逻辑卷,如果有如下回显证明可以进行扩容操作,如提示不支持此命令,但必须要扩容,需要联系400确认
2、一次扩容至少三块硬盘,且单个硬盘的大小应该相同
3、建议使用Raid5
(0)
亲~登录后才可以操作哦!
确定你的邮箱还未认证,请认证邮箱或绑定手机后进行当前操作
举报
×
侵犯我的权益
×
侵犯了我企业的权益
×
抄袭了我的内容
×
原文链接或出处
诽谤我
×
对根叔社区有害的内容
×
不规范转载
×
举报说明
暂无评论