Create the EBS volumes and attach them to the EC2 instance, then verify:
[root@ip-127-0-0-1 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
nvme0n1 259:0 0 40G 0 disk
├─nvme0n1p1 259:1 0 40G 0 part /
├─nvme0n1p127 259:2 0 1M 0 part
└─nvme0n1p128 259:3 0 10M 0 part /boot/efi
nvme1n1 259:4 0 15G 0 disk
nvme2n1 259:5 0 15G 0 disk
nvme3n1 259:6 0 15G 0 disk
nvme4n1 259:7 0 15G 0 disk
nvme5n1 259:8 0 15G 0 disk
nvme6n1 259:9 0 15G 0 disk
nvme7n1 259:10 0 15G 0 disk
nvme8n1 259:11 0 15G 0 disk
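The eight 15 GiB volumes above were created and attached through the console. For reference, a minimal AWS CLI sketch of the same step; the Availability Zone, volume ID, instance ID, and device name below are placeholders, not values from this session:

# Create one 15 GiB gp3 volume (repeat eight times, or loop)
aws ec2 create-volume --size 15 --volume-type gp3 --availability-zone us-east-1a
# Attach it to the instance; pick a free device name (/dev/sdf, /dev/sdg, ...)
aws ec2 attach-volume --volume-id vol-0123456789abcdef0 --instance-id i-0123456789abcdef0 --device /dev/sdf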
Install the mdadm tool
[root@ip-127-0-0-1 ~]# yum update -y
......
Dependencies resolved.
Nothing to do.
Complete!
[root@ip-172-31-26-146 ~]# yum install mdadm -y
......
Installed:
  mdadm-4.2-3.amzn2023.0.5.x86_64

Complete!
Create the RAID 0 array
[root@ip-127-0-0-1 ~]# mdadm --create /dev/md0 --level=0 --raid-devices=8 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/nvme4n1 /dev/nvme5n1 /dev/nvme6n1 /dev/nvme7n1 /dev/nvme8n1
mdadm: Defaulting to version 1.2 metadata
[1031053.926574] md0: detected capacity change from 0 to 251510784
mdadm: array /dev/md0 started.
Creating the array is usually fast. You can check its status with watch cat /proc/mdstat or sudo mdadm --detail /dev/md0.
[root@ip-127-0-0-1 ~]# mdadm --detail /dev/md0
/dev/md0:
           Version : 1.2
     Creation Time : Tue Apr 29 06:19:53 2025
        Raid Level : raid0
        Array Size : 125755392 (119.93 GiB 128.77 GB)
      Raid Devices : 8
     Total Devices : 8
       Persistence : Superblock is persistent

       Update Time : Tue Apr 29 06:19:53 2025
             State : clean
    Active Devices : 8
   Working Devices : 8
    Failed Devices : 0
     Spare Devices : 0

            Layout : -unknown-
        Chunk Size : 512K

Consistency Policy : none

              Name : 0
              UUID : ab7af972:85386ba3:4de7de66:c4efcf91
            Events : 0

    Number   Major   Minor   RaidDevice State
       0     259        4        0      active sync   /dev/sdb
       1     259        5        1      active sync   /dev/sdc
       2     259        6        2      active sync   /dev/sdd
       3     259        7        3      active sync   /dev/sde
       4     259        8        4      active sync   /dev/sdf
       5     259        9        5      active sync   /dev/sdg
       6     259       10        6      active sync   /dev/sdh
       7     259       11        7      active sync   /dev/sdi
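As a lighter-weight check, /proc/mdstat (mentioned above) can be read directly; a minimal sketch, output not reproduced here:

cat /proc/mdstat          # md0 should be listed as active raid0 with its 8 nvme members
watch cat /proc/mdstat    # refresh periodically while an array is assembling or rebuilding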
Create a filesystem on the new /dev/md0 device. Common choices are ext4 and xfs (xfs usually performs better on large volumes).
[root@ip-127-0-0-1 ~]# mkfs.xfs /dev/md0
log stripe unit (524288 bytes) is too large (maximum is 256KiB)
log stripe unit adjusted to 32KiB
meta-data=/dev/md0               isize=512    agcount=16, agsize=1964928 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1    bigtime=1 inobtcount=1
data     =                       bsize=4096   blocks=31438848, imaxpct=25
         =                       sunit=128    swidth=1024 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=16384, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
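mkfs.xfs picked up the stripe geometry from the md device automatically (sunit=128, swidth=1024 above). If ext4 were used instead, the equivalent geometry can be passed explicitly; a sketch, assuming the same 512K chunk and 8 data disks (stride = 512K / 4K block = 128, stripe-width = 128 × 8 = 1024):

mkfs.ext4 -E stride=128,stripe-width=1024 /dev/md0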
Check the block device layout again
[root@ip-127-0-0-1 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
nvme0n1 259:0 0 40G 0 disk
├─nvme0n1p1 259:1 0 40G 0 part /
├─nvme0n1p127 259:2 0 1M 0 part
└─nvme0n1p128 259:3 0 10M 0 part /boot/efi
nvme1n1 259:4 0 15G 0 disk
└─md0 9:0 0 119.9G 0 raid0
nvme2n1 259:5 0 15G 0 disk
└─md0 9:0 0 119.9G 0 raid0
nvme3n1 259:6 0 15G 0 disk
└─md0 9:0 0 119.9G 0 raid0
nvme4n1 259:7 0 15G 0 disk
└─md0 9:0 0 119.9G 0 raid0
nvme5n1 259:8 0 15G 0 disk
└─md0 9:0 0 119.9G 0 raid0
nvme6n1 259:9 0 15G 0 disk
└─md0 9:0 0 119.9G 0 raid0
nvme7n1 259:10 0 15G 0 disk
└─md0 9:0 0 119.9G 0 raid0
nvme8n1 259:11 0 15G 0 disk
└─md0 9:0 0 119.9G 0 raid0
Configure automatic mounting (persistence)
Update the mdadm configuration
[root@ip-127-0-0-1 ~]# mdadm --detail --scan | sudo tee -a /etc/mdadm.conf
ARRAY /dev/md0 metadata=1.2 name=0 UUID=ab7af972:85386ba3:4de7de66:c4efcf91
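On Amazon Linux, AWS documentation also suggests rebuilding the initramfs after updating /etc/mdadm.conf so the array is assembled under the expected name at boot; a sketch of that step, worth verifying against the current docs for your AMI:

dracut -H -f /boot/initramfs-$(uname -r).img $(uname -r)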
Update /etc/fstab
[root@ip-127-0-0-1 ~]# blkid /dev/md0
/dev/md0: UUID="33e3c305-fcc7-472f-b583-ac29e5b998b8" BLOCK_SIZE="512" TYPE="xfs"
[root@ip-172-31-26-146 ~]# tail -1 /etc/fstab
UUID=33e3c305-fcc7-472f-b583-ac29e5b998b8 /data/raid0-storge/ xfs defaults,nofail 0 0
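The mount point named in this fstab entry must exist before running mount -a; that step is not shown in the session, so presumably it was created along these lines:

mkdir -p /data/raid0-storge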
Mount
[root@ip-127-0-0-1 ~]# mount -a
[1031693.035943] XFS (md0): Mounting V5 Filesystem
[1031693.132845] XFS (md0): Ending clean mount
[root@ip-127-0-0-1 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 4.0M 0 4.0M 0% /dev
tmpfs 3.9G 0 3.9G 0% /dev/shm
tmpfs 1.6G 636K 1.6G 1% /run
/dev/nvme0n1p1 40G 5.4G 35G 14% /
tmpfs 3.9G 0 3.9G 0% /tmp
/dev/nvme0n1p128 10M 1.3M 8.7M 13% /boot/efi
overlay 40G 5.4G 35G 14% /var/lib/docker/overlay2/84699b7470c48b0c4a1cb8b91b868be21f96c388de173f25df9ac741be7d0d0e/merged
tmpfs 782M 0 782M 0% /run/user/1000
/dev/md0 120G 889M 119G 1% /data/raid0-storge
Expanding the RAID 0 array
Manually store some data first
[root@ip-127-0-0-1 data]# du -sh ./*
1.7G ./raid0-storge
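The session does not show how this data was written; any bulk write will do, for example a sketch with a hypothetical file name:

dd if=/dev/zero of=/data/raid0-storge/testfile bs=1M count=1700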
Manually expand each EBS volume by 10 GB in the console, from 15 GB to 25 GB.
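The console resize is equivalent to one modify-volume call per volume; a sketch with a placeholder volume ID:

aws ec2 modify-volume --volume-id vol-0123456789abcdef0 --size 25
# Optionally watch the modification until it reaches the optimizing/completed state
aws ec2 describe-volumes-modifications --volume-ids vol-0123456789abcdef0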
Confirm whether the disks are partitioned and that all eight RAID 0 member disks are online
[root@ip-127-0-0-1 data]# fdisk -l /dev/nvme1n1
Disk /dev/nvme1n1: 25 GiB, 26843545600 bytes, 52428800 sectors
Disk model: Amazon Elastic Block Store
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4096 bytes / 4096 bytes
Check the RAID details
[root@ip-127-0-0-1 data]# mdadm --detail /dev/md0
/dev/md0:
           Version : 1.2
     Creation Time : Tue Apr 29 06:19:53 2025
        Raid Level : raid0
        Array Size : 125755392 (119.93 GiB 128.77 GB)
      Raid Devices : 8
     Total Devices : 8
       Persistence : Superblock is persistent

       Update Time : Tue Apr 29 06:19:53 2025
             State : clean
    Active Devices : 8
   Working Devices : 8
    Failed Devices : 0
     Spare Devices : 0

            Layout : -unknown-
        Chunk Size : 512K

Consistency Policy : none

              Name : 0
              UUID : ab7af972:85386ba3:4de7de66:c4efcf91
            Events : 0

    Number   Major   Minor   RaidDevice State
       0     259        4        0      active sync   /dev/sdb
       1     259        5        1      active sync   /dev/sdc
       2     259        6        2      active sync   /dev/sdd
       3     259        7        3      active sync   /dev/sde
       4     259        8        4      active sync   /dev/sdf
       5     259        9        5      active sync   /dev/sdg
       6     259       10        6      active sync   /dev/sdh
       7     259       11        7      active sync   /dev/sdi
Grow the RAID 0 logical volume
RAID 0 is striping; the expectation at this point was that the extra space could be picked up by growing the volume, attempted below with xfs_growfs:
[root@ip-127-0-0-1 data]# df -hT
Filesystem Type Size Used Avail Use% Mounted on
devtmpfs devtmpfs 4.0M 0 4.0M 0% /dev
tmpfs tmpfs 3.9G 0 3.9G 0% /dev/shm
tmpfs tmpfs 1.6G 636K 1.6G 1% /run
/dev/nvme0n1p1 xfs 40G 5.4G 35G 14% /
tmpfs tmpfs 3.9G 0 3.9G 0% /tmp
/dev/nvme0n1p128 vfat 10M 1.3M 8.7M 13% /boot/efi
overlay overlay 40G 5.4G 35G 14% /var/lib/docker/overlay2/84699b7470c48b0c4a1cb8b91b868be21f96c388de173f25df9ac741be7d0d0e/merged
tmpfs tmpfs 782M 0 782M 0% /run/user/1000
/dev/md0 xfs 120G 2.6G 118G 3% /data/raid0-storge
[root@ip-127-0-0-1 data]# xfs_growfs /data/raid0-storge/
meta-data=/dev/md0               isize=512    agcount=16, agsize=1964928 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1    bigtime=1 inobtcount=1
data     =                       bsize=4096   blocks=31438848, imaxpct=25
         =                       sunit=128    swidth=1024 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=16384, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
Note
I expected this step to make the RAID 0 array pick up the new space on each disk. It does not: the array cannot be expanded dynamically this way, because RAID 0 does not support online growth.
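A quick way to confirm that the md device itself never picked up the new space (a sketch; output not captured in this session):

lsblk /dev/md0                               # still ~119.9G even though each member is now 25G
mdadm --detail /dev/md0 | grep 'Array Size'  # unchanged: 125755392 (119.93 GiB 128.77 GB)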
Another idea was to grow the array by adding a new disk. The new volume /dev/nvme9n1 is first wiped of any old signatures:
[root@ip-127-0-0-1 data]# wipefs -a /dev/nvme9n1
[root@ip-127-0-0-1 data]# dd if=/dev/zero of=/dev/nvme9n1 bs=1M count=100
100+0 records in
100+0 records out
104857600 bytes (105 MB, 100 MiB) copied, 0.21278 s, 493 MB/s
[root@ip-127-0-0-1 data]# mdadm --add /dev/md0 /dev/nvme9n1
[1101946.857081] md0: personality does not support diskops!
mdadm: add new device failed for /dev/nvme9n1 as 4: Invalid argument
Analysis
This shows that using mdadm --add to add a new disk to a RAID 0 array is not supported: RAID 0 itself does not allow disks to be added online for expansion.
Explanation
Why can't RAID 0 be expanded directly?
RAID 0 is a striping mode: data is spread evenly across all member disks with no redundancy. Once created, the RAID 0 layout is fixed, so a new disk cannot be inserted into the existing stripes.
Conclusion
🔴 RAID 0 cannot be expanded by adding a new disk with mdadm --add.
Expansion support by RAID level
| RAID level | How to expand | Notes |
|---|---|---|
| RAID 0 | Cannot be expanded directly | Requires rebuilding, or use LVM on top (see the sketch below) |
| RAID 1 | Replace members with larger disks, then re-sync | Expandable but tedious |
| RAID 5 | Supports adding new disks | Can also be combined with LVM |
| RAID 10 | Similar to RAID 1, expandable but complicated | Usually requires a rebuild or LVM |
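On the "use with LVM" note for RAID 0: one common alternative is to skip md entirely and build a striped LVM logical volume, which can be grown online after the EBS volumes are resized. A minimal sketch, assuming the same eight disks and the hypothetical names vg_data / lv_stripe:

# Turn the disks into physical volumes and group them into one volume group
pvcreate /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/nvme4n1 /dev/nvme5n1 /dev/nvme6n1 /dev/nvme7n1 /dev/nvme8n1
vgcreate vg_data /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 /dev/nvme4n1 /dev/nvme5n1 /dev/nvme6n1 /dev/nvme7n1 /dev/nvme8n1
# 8-way striped LV with a 512K stripe size, using all free space
lvcreate -n lv_stripe -i 8 -I 512k -l 100%FREE vg_data
mkfs.xfs /dev/vg_data/lv_stripe

# After enlarging each EBS volume in the console:
pvresize /dev/nvme1n1            # repeat for every member disk
lvextend -l +100%FREE /dev/vg_data/lv_stripe
xfs_growfs /data/raid0-storge    # grow the filesystem, assuming the LV is mounted there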