use_pty:FALSE /usr/share/restraint/plugins/run_task_plugins bash ./runtest.sh
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
::   Test
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

:: [ 15:22:19 ] :: [ BEGIN ] :: Running 'uname -a'
Linux hpe-ml350egen8-01.hpe2.lab.eng.bos.redhat.com 6.0.0-rc6 #1 SMP PREEMPT_DYNAMIC Sun Sep 25 16:31:18 UTC 2022 x86_64 GNU/Linux
:: [ 15:22:19 ] :: [ PASS ] :: Command 'uname -a' (Expected 0, got 0)
:: [ 15:22:20 ] :: [ BEGIN ] :: Running 'rpm -q mdadm || dnf install -y mdadm'
mdadm-4.2-2.fc37.x86_64
:: [ 15:22:21 ] :: [ PASS ] :: Command 'rpm -q mdadm || dnf install -y mdadm' (Expected 0, got 0)
:: [ 15:22:21 ] :: [ LOG ] :: ./runtest.sh
:: [ 15:22:21 ] :: [ BEGIN ] :: Running 'modprobe raid456 devices_handle_discard_safely=Y'
:: [ 15:22:22 ] :: [ PASS ] :: Command 'modprobe raid456 devices_handle_discard_safely=Y' (Expected 0, got 0)
:: [ 15:22:23 ] :: [ BEGIN ] :: Running 'echo Y >/sys/module/raid456/parameters/devices_handle_discard_safely'
:: [ 15:22:23 ] :: [ PASS ] :: Command 'echo Y >/sys/module/raid456/parameters/devices_handle_discard_safely' (Expected 0, got 0)
/usr/sbin/mkfs.xfs
INFO: Executing MD_Create_RAID() to create raid 0
INFO: Created md raid with these raid devices " /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4".
:: [ 15:22:55 ] :: [ BEGIN ] :: Running 'mdadm --create --run /dev/md1 --level 0 --metadata 1.2 --raid-devices 5 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --chunk 512'
mdadm: array /dev/md1 started.
:: [ 15:23:00 ] :: [ PASS ] :: Command 'mdadm --create --run /dev/md1 --level 0 --metadata 1.2 --raid-devices 5 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --chunk 512' (Expected 0, got 0)
INFO:cat /proc/mdstat######################
:: [ 15:23:00 ] :: [ BEGIN ] :: Running 'cat /proc/mdstat'
Personalities : [raid6] [raid5] [raid4] [raid0]
md1 : active raid0 loop4[4] loop3[3] loop2[2] loop1[1] loop0[0]
      2549760 blocks super 1.2 512k chunks

unused devices: <none>
:: [ 15:23:00 ] :: [ PASS ] :: Command 'cat /proc/mdstat' (Expected 0, got 0)
:: [ 15:23:01 ] :: [ BEGIN ] :: Running 'lsblk'
NAME                                MAJ:MIN RM   SIZE RO TYPE   MOUNTPOINTS
loop0                                 7:0    0   500M  0 loop
└─md1                                 9:1    0   2.4G  0 raid0
loop1                                 7:1    0   500M  0 loop
└─md1                                 9:1    0   2.4G  0 raid0
loop2                                 7:2    0   500M  0 loop
└─md1                                 9:1    0   2.4G  0 raid0
loop3                                 7:3    0   500M  0 loop
└─md1                                 9:1    0   2.4G  0 raid0
loop4                                 7:4    0   500M  0 loop
└─md1                                 9:1    0   2.4G  0 raid0
loop5                                 7:5    0   500M  0 loop
sda                                   8:0    0 465.8G  0 disk
├─sda1                                8:1    0     1M  0 part
├─sda2                                8:2    0     1G  0 part   /boot
└─sda3                                8:3    0 464.8G  0 part
  └─fedora_hpe--ml350egen8--01-root 253:0    0 930.5G  0 lvm    /
sdb                                   8:16   0 465.8G  0 disk
└─sdb1                                8:17   0 465.8G  0 part
  └─fedora_hpe--ml350egen8--01-root 253:0    0 930.5G  0 lvm    /
sr0                                  11:0    1  1024M  0 rom
zram0                               252:0    0     8G  0 disk   [SWAP]
:: [ 15:23:01 ] :: [ PASS ] :: Command 'lsblk' (Expected 0, got 0)
egrep: warning: egrep is obsolescent; using grep -E
/dev/md1
INFO:mdadm -D /dev/md1 #########################
:: [ 15:23:02 ] :: [ BEGIN ] :: Running 'mdadm --detail /dev/md1'
/dev/md1:
           Version : 1.2
     Creation Time : Sun Sep 25 15:22:55 2022
        Raid Level : raid0
        Array Size : 2549760 (2.43 GiB 2.61 GB)
      Raid Devices : 5
     Total Devices : 5
       Persistence : Superblock is persistent

       Update Time : Sun Sep 25 15:22:55 2022
             State : clean
    Active Devices : 5
   Working Devices : 5
    Failed Devices : 0
     Spare Devices : 0

            Layout : -unknown-
        Chunk Size : 512K

Consistency Policy : none

              Name : 1
              UUID : 99f813e8:9ef31e36:cef93336:e3e85dd9
            Events : 0

    Number   Major   Minor   RaidDevice State
       0       7        0        0      active sync   /dev/loop0
       1       7        1        1      active sync   /dev/loop1
       2       7        2        2      active sync   /dev/loop2
       3       7        3        3      active sync   /dev/loop3
       4       7        4        4      active sync   /dev/loop4
:: [ 15:23:02 ] :: [ PASS ] :: Command 'mdadm --detail /dev/md1' (Expected 0, got 0)
:: [ 15:23:02 ] :: [ LOG ] :: INFO: Successfully created md raid /dev/md1
1664133783 start_time against this md array: /dev/md1 state is clean
:: [ 15:23:03 ] :: [ LOG ] :: mkfs -t xfs /dev/md1
log stripe unit (524288 bytes) is too large (maximum is 256KiB)
log stripe unit adjusted to 32KiB
meta-data=/dev/md1               isize=512    agcount=8, agsize=79744 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1    bigtime=1 inobtcount=1 nrext64=0
data     =                       bsize=4096   blocks=637440, imaxpct=25
         =                       sunit=128    swidth=640 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=16384, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
Discarding blocks...Done.
:: [ 15:23:05 ] :: [ BEGIN ] :: Running 'mount -t xfs /dev/md1 /mnt/md_test '
:: [ 15:23:05 ] :: [ PASS ] :: Command 'mount -t xfs /dev/md1 /mnt/md_test ' (Expected 0, got 0)
:: [ 15:23:05 ] :: [ BEGIN ] :: Running 'fstrim -v /mnt/md_test'
/mnt/md_test: 2.4 GiB (2543484928 bytes) trimmed
:: [ 15:23:06 ] :: [ PASS ] :: Command 'fstrim -v /mnt/md_test' (Expected 0, got 0)
:: [ 15:23:06 ] :: [ BEGIN ] :: Running 'umount /dev/md1'
:: [ 15:23:07 ] :: [ PASS ] :: Command 'umount /dev/md1' (Expected 0, got 0)
INFO: Executing MD_Clean_RAID() against this md device:
mdadm --stop /dev/md1
mdadm: stopped /dev/md1
clean devs : /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4
mdadm --zero-superblock /dev/loop0
mdadm --zero-superblock /dev/loop1
mdadm --zero-superblock /dev/loop2
mdadm --zero-superblock /dev/loop3
mdadm --zero-superblock /dev/loop4
ret is 0
ls /dev/md1
ls: cannot access '/dev/md1': No such file or directory
mdadm --stop can delete md node name /dev/md1 in /dev
INFO: Executing MD_Create_RAID() to create raid 1
INFO: Created md raid with these raid devices " /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4".
INFO: Created md raid with these spare disks " /dev/loop5".
:: [ 15:23:33 ] :: [ BEGIN ] :: Running 'mdadm --create --run /dev/md1 --level 1 --metadata 1.2 --raid-devices 5 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --spare-devices 1 /dev/loop5 --bitmap=internal --bitmap-chunk=64M'
mdadm: array /dev/md1 started.
:: [ 15:23:34 ] :: [ PASS ] :: Command 'mdadm --create --run /dev/md1 --level 1 --metadata 1.2 --raid-devices 5 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --spare-devices 1 /dev/loop5 --bitmap=internal --bitmap-chunk=64M' (Expected 0, got 0)
INFO:cat /proc/mdstat######################
:: [ 15:23:35 ] :: [ BEGIN ] :: Running 'cat /proc/mdstat'
Personalities : [raid6] [raid5] [raid4] [raid0] [raid1]
md1 : active raid1 loop5[5](S) loop4[4] loop3[3] loop2[2] loop1[1] loop0[0]
      510976 blocks super 1.2 [5/5] [UUUUU]
      [=>...................]  resync = 6.2% (32000/510976) finish=0.2min speed=32000K/sec
      bitmap: 1/1 pages [4KB], 65536KB chunk

unused devices: <none>
:: [ 15:23:35 ] :: [ PASS ] :: Command 'cat /proc/mdstat' (Expected 0, got 0)
:: [ 15:23:35 ] :: [ BEGIN ] :: Running 'lsblk'
NAME                                MAJ:MIN RM   SIZE RO TYPE   MOUNTPOINTS
loop0                                 7:0    0   500M  0 loop
└─md1                                 9:1    0   499M  0 raid1
loop1                                 7:1    0   500M  0 loop
└─md1                                 9:1    0   499M  0 raid1
loop2                                 7:2    0   500M  0 loop
└─md1                                 9:1    0   499M  0 raid1
loop3                                 7:3    0   500M  0 loop
└─md1                                 9:1    0   499M  0 raid1
loop4                                 7:4    0   500M  0 loop
└─md1                                 9:1    0   499M  0 raid1
loop5                                 7:5    0   500M  0 loop
└─md1                                 9:1    0   499M  0 raid1
sda                                   8:0    0 465.8G  0 disk
├─sda1                                8:1    0     1M  0 part
├─sda2                                8:2    0     1G  0 part   /boot
└─sda3                                8:3    0 464.8G  0 part
  └─fedora_hpe--ml350egen8--01-root 253:0    0 930.5G  0 lvm    /
sdb                                   8:16   0 465.8G  0 disk
└─sdb1                                8:17   0 465.8G  0 part
  └─fedora_hpe--ml350egen8--01-root 253:0    0 930.5G  0 lvm    /
sr0                                  11:0    1  1024M  0 rom
zram0                               252:0    0     8G  0 disk   [SWAP]
:: [ 15:23:36 ] :: [ PASS ] :: Command 'lsblk' (Expected 0, got 0)
egrep: warning: egrep is obsolescent; using grep -E
/dev/md1
INFO:mdadm -D /dev/md1 #########################
:: [ 15:23:36 ] :: [ BEGIN ] :: Running 'mdadm --detail /dev/md1'
/dev/md1:
           Version : 1.2
     Creation Time : Sun Sep 25 15:23:33 2022
        Raid Level : raid1
        Array Size : 510976 (499.00 MiB 523.24 MB)
     Used Dev Size : 510976 (499.00 MiB 523.24 MB)
      Raid Devices : 5
     Total Devices : 6
       Persistence : Superblock is persistent

     Intent Bitmap : Internal

       Update Time : Sun Sep 25 15:23:35 2022
             State : clean, resyncing
    Active Devices : 5
   Working Devices : 6
    Failed Devices : 0
     Spare Devices : 1

Consistency Policy : bitmap

     Resync Status : 6% complete

              Name : 1
              UUID : f7188e3d:5ac82826:152e4cdd:ab071a29
            Events : 1

    Number   Major   Minor   RaidDevice State
       0       7        0        0      active sync   /dev/loop0
       1       7        1        1      active sync   /dev/loop1
       2       7        2        2      active sync   /dev/loop2
       3       7        3        3      active sync   /dev/loop3
       4       7        4        4      active sync   /dev/loop4

       5       7        5        -      spare   /dev/loop5
:: [ 15:23:37 ] :: [ PASS ] :: Command 'mdadm --detail /dev/md1' (Expected 0, got 0)
:: [ 15:23:37 ] :: [ LOG ] :: INFO: Successfully created md raid /dev/md1
1664133817 start_time against this md array: /dev/md1 state is clean,
1664133822 start_time against this md array: /dev/md1 state is clean,
1664133828 start_time against this md array: /dev/md1 state is clean,
1664133834 start_time against this md array: /dev/md1 state is clean,
1664133839 start_time against this md array: /dev/md1 state is clean,
1664133844 start_time against this md array: /dev/md1 state is clean,
1664133850 start_time against this md array: /dev/md1 state is clean
:: [ 15:24:10 ] :: [ LOG ] :: mkfs -t xfs /dev/md1
meta-data=/dev/md1               isize=512    agcount=4, agsize=31936 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1    bigtime=1 inobtcount=1 nrext64=0
data     =                       bsize=4096   blocks=127744, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=16384, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
Discarding blocks...Done.
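The repeated "start_time against this md array: /dev/md1 state is clean" lines above come from the harness polling the array between creation and mkfs. A minimal sketch of such a wait loop, assuming a hypothetical helper name wait_md_clean (not the test's actual function):

#!/usr/bin/env bash
# Hypothetical helper: poll an md array until mdadm no longer reports a
# resync/recovery in progress (or a timeout expires), echoing a status line
# per poll much like the "state is clean" messages in this log.
wait_md_clean() {
    local md_dev=$1 timeout=${2:-600} state start=$SECONDS
    while (( SECONDS - start < timeout )); do
        state=$(mdadm --detail "$md_dev" | awk -F': ' '/^ *State :/ {print $2}')
        echo "$(date +%s) start_time against this md array: $md_dev state is $state"
        # Stop waiting once no resync/recovery is in progress.
        [[ $state != *resync* && $state != *recover* ]] && return 0
        sleep 5
    done
    return 1
}
# Example: wait_md_clean /dev/md1 600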
:: [ 15:24:15 ] :: [ BEGIN ] :: Running 'mount -t xfs /dev/md1 /mnt/md_test '
:: [ 15:24:16 ] :: [ PASS ] :: Command 'mount -t xfs /dev/md1 /mnt/md_test ' (Expected 0, got 0)
:: [ 15:24:16 ] :: [ BEGIN ] :: Running 'fstrim -v /mnt/md_test'
/mnt/md_test: 434.8 MiB (455933952 bytes) trimmed
:: [ 15:24:17 ] :: [ PASS ] :: Command 'fstrim -v /mnt/md_test' (Expected 0, got 0)
:: [ 15:24:17 ] :: [ BEGIN ] :: Running 'umount /dev/md1'
:: [ 15:24:18 ] :: [ PASS ] :: Command 'umount /dev/md1' (Expected 0, got 0)
INFO: Executing MD_Clean_RAID() against this md device:
mdadm --stop /dev/md1
mdadm: stopped /dev/md1
clean devs : /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 /dev/loop5
mdadm --zero-superblock /dev/loop0
mdadm --zero-superblock /dev/loop1
mdadm --zero-superblock /dev/loop2
mdadm --zero-superblock /dev/loop3
mdadm --zero-superblock /dev/loop4
mdadm --zero-superblock /dev/loop5
ret is 0
ls /dev/md1
ls: cannot access '/dev/md1': No such file or directory
mdadm --stop can delete md node name /dev/md1 in /dev
INFO: Executing MD_Create_RAID() to create raid 4
INFO: Created md raid with these raid devices " /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4".
INFO: Created md raid with these spare disks " /dev/loop5".
:: [ 15:24:45 ] :: [ BEGIN ] :: Running 'mdadm --create --run /dev/md1 --level 4 --metadata 1.2 --raid-devices 5 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --spare-devices 1 /dev/loop5 --chunk 512 --bitmap=internal --bitmap-chunk=64M'
mdadm: array /dev/md1 started.
:: [ 15:24:45 ] :: [ PASS ] :: Command 'mdadm --create --run /dev/md1 --level 4 --metadata 1.2 --raid-devices 5 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --spare-devices 1 /dev/loop5 --chunk 512 --bitmap=internal --bitmap-chunk=64M' (Expected 0, got 0)
INFO:cat /proc/mdstat######################
:: [ 15:24:46 ] :: [ BEGIN ] :: Running 'cat /proc/mdstat'
Personalities : [raid6] [raid5] [raid4] [raid0] [raid1]
md1 : active raid4 loop4[6] loop5[5](S) loop3[3] loop2[2] loop1[1] loop0[0]
      2039808 blocks super 1.2 level 4, 512k chunk, algorithm 0 [5/4] [UUUU_]
      [>....................]  recovery = 1.4% (7508/509952) finish=1.1min speed=7508K/sec
      bitmap: 1/1 pages [4KB], 65536KB chunk

unused devices: <none>
:: [ 15:24:46 ] :: [ PASS ] :: Command 'cat /proc/mdstat' (Expected 0, got 0)
:: [ 15:24:46 ] :: [ BEGIN ] :: Running 'lsblk'
NAME                                MAJ:MIN RM   SIZE RO TYPE   MOUNTPOINTS
loop0                                 7:0    0   500M  0 loop
└─md1                                 9:1    0   1.9G  0 raid4
loop1                                 7:1    0   500M  0 loop
└─md1                                 9:1    0   1.9G  0 raid4
loop2                                 7:2    0   500M  0 loop
└─md1                                 9:1    0   1.9G  0 raid4
loop3                                 7:3    0   500M  0 loop
└─md1                                 9:1    0   1.9G  0 raid4
loop4                                 7:4    0   500M  0 loop
└─md1                                 9:1    0   1.9G  0 raid4
loop5                                 7:5    0   500M  0 loop
└─md1                                 9:1    0   1.9G  0 raid4
sda                                   8:0    0 465.8G  0 disk
├─sda1                                8:1    0     1M  0 part
├─sda2                                8:2    0     1G  0 part   /boot
└─sda3                                8:3    0 464.8G  0 part
  └─fedora_hpe--ml350egen8--01-root 253:0    0 930.5G  0 lvm    /
sdb                                   8:16   0 465.8G  0 disk
└─sdb1                                8:17   0 465.8G  0 part
  └─fedora_hpe--ml350egen8--01-root 253:0    0 930.5G  0 lvm    /
sr0                                  11:0    1  1024M  0 rom
zram0                               252:0    0     8G  0 disk   [SWAP]
:: [ 15:24:47 ] :: [ PASS ] :: Command 'lsblk' (Expected 0, got 0)
egrep: warning: egrep is obsolescent; using grep -E
/dev/md1
INFO:mdadm -D /dev/md1 #########################
:: [ 15:24:47 ] :: [ BEGIN ] :: Running 'mdadm --detail /dev/md1'
/dev/md1:
           Version : 1.2
     Creation Time : Sun Sep 25 15:24:45 2022
        Raid Level : raid4
        Array Size : 2039808 (1992.00 MiB 2088.76 MB)
     Used Dev Size : 509952 (498.00 MiB 522.19 MB)
      Raid Devices : 5
     Total Devices : 6
       Persistence : Superblock is persistent

     Intent Bitmap : Internal

       Update Time : Sun Sep 25 15:24:47 2022
             State : clean, degraded, recovering
    Active Devices : 4
   Working Devices : 6
    Failed Devices : 0
     Spare Devices : 2

        Chunk Size : 512K

Consistency Policy : bitmap

    Rebuild Status : 6% complete

              Name : 1
              UUID : 38515c59:f24d73dc:f5494eae:5eed4dbc
            Events : 1

    Number   Major   Minor   RaidDevice State
       0       7        0        0      active sync   /dev/loop0
       1       7        1        1      active sync   /dev/loop1
       2       7        2        2      active sync   /dev/loop2
       3       7        3        3      active sync   /dev/loop3
       6       7        4        4      spare rebuilding   /dev/loop4

       5       7        5        -      spare   /dev/loop5
:: [ 15:24:48 ] :: [ PASS ] :: Command 'mdadm --detail /dev/md1' (Expected 0, got 0)
:: [ 15:24:48 ] :: [ LOG ] :: INFO: Successfully created md raid /dev/md1
1664133888 start_time against this md array: /dev/md1 state is clean,
1664133894 start_time against this md array: /dev/md1 state is clean,
1664133899 start_time against this md array: /dev/md1 state is clean,
1664133904 start_time against this md array: /dev/md1 state is clean,
1664133910 start_time against this md array: /dev/md1 state is clean,
1664133915 start_time against this md array: /dev/md1 state is clean,
1664133920 start_time against this md array: /dev/md1 state is clean,
1664133926 start_time against this md array: /dev/md1 state is clean,
1664133931 start_time against this md array: /dev/md1 state is clean,
1664133937 start_time against this md array: /dev/md1 state is clean
:: [ 15:25:37 ] :: [ LOG ] :: mkfs -t xfs /dev/md1
log stripe unit (524288 bytes) is too large (maximum is 256KiB)
log stripe unit adjusted to 32KiB
meta-data=/dev/md1               isize=512    agcount=8, agsize=63744 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1    bigtime=1 inobtcount=1 nrext64=0
data     =                       bsize=4096   blocks=509952, imaxpct=25
         =                       sunit=128    swidth=512 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=16384, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
Discarding blocks...Done.
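The MD_Clean_RAID() steps logged between the cycles boil down to stopping the array, zeroing each member's superblock, and confirming the /dev/md1 node is gone. A minimal sketch of that teardown (the function name and argument handling here are illustrative, not the test's actual source):

#!/usr/bin/env bash
# Illustrative teardown mirroring the logged MD_Clean_RAID() steps.
md_clean_raid() {
    local md_dev=$1; shift
    mdadm --stop "$md_dev" || return 1
    echo "clean devs : $*"
    local dev
    for dev in "$@"; do
        mdadm --zero-superblock "$dev"
    done
    # The device node should disappear once the array is stopped.
    if ls "$md_dev" 2>/dev/null; then
        echo "ERROR: $md_dev still present after mdadm --stop" >&2
        return 1
    fi
    echo "mdadm --stop can delete md node name $md_dev in /dev"
}
# Example: md_clean_raid /dev/md1 /dev/loop{0..5}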
:: [ 15:28:32 ] :: [ BEGIN ] :: Running 'mount -t xfs /dev/md1 /mnt/md_test '
:: [ 15:28:32 ] :: [ PASS ] :: Command 'mount -t xfs /dev/md1 /mnt/md_test ' (Expected 0, got 0)
:: [ 15:28:33 ] :: [ BEGIN ] :: Running 'fstrim -v /mnt/md_test'
/mnt/md_test: 1.9 GiB (2020802560 bytes) trimmed
:: [ 15:29:21 ] :: [ PASS ] :: Command 'fstrim -v /mnt/md_test' (Expected 0, got 0)
:: [ 15:29:21 ] :: [ BEGIN ] :: Running 'umount /dev/md1'
:: [ 15:29:22 ] :: [ PASS ] :: Command 'umount /dev/md1' (Expected 0, got 0)
INFO: Executing MD_Clean_RAID() against this md device:
mdadm --stop /dev/md1
mdadm: stopped /dev/md1
clean devs : /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 /dev/loop5
mdadm --zero-superblock /dev/loop0
mdadm --zero-superblock /dev/loop1
mdadm --zero-superblock /dev/loop2
mdadm --zero-superblock /dev/loop3
mdadm --zero-superblock /dev/loop4
mdadm --zero-superblock /dev/loop5
ret is 0
ls /dev/md1
ls: cannot access '/dev/md1': No such file or directory
mdadm --stop can delete md node name /dev/md1 in /dev
INFO: Executing MD_Create_RAID() to create raid 5
INFO: Created md raid with these raid devices " /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4".
INFO: Created md raid with these spare disks " /dev/loop5".
:: [ 15:29:50 ] :: [ BEGIN ] :: Running 'mdadm --create --run /dev/md1 --level 5 --metadata 1.2 --raid-devices 5 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --spare-devices 1 /dev/loop5 --chunk 512 --bitmap=internal --bitmap-chunk=64M'
mdadm: array /dev/md1 started.
:: [ 15:29:51 ] :: [ PASS ] :: Command 'mdadm --create --run /dev/md1 --level 5 --metadata 1.2 --raid-devices 5 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --spare-devices 1 /dev/loop5 --chunk 512 --bitmap=internal --bitmap-chunk=64M' (Expected 0, got 0)
INFO:cat /proc/mdstat######################
:: [ 15:29:51 ] :: [ BEGIN ] :: Running 'cat /proc/mdstat'
Personalities : [raid6] [raid5] [raid4] [raid0] [raid1]
md1 : active raid5 loop4[6] loop5[5](S) loop3[3] loop2[2] loop1[1] loop0[0]
      2039808 blocks super 1.2 level 5, 512k chunk, algorithm 2 [5/4] [UUUU_]
      [>....................]  recovery = 1.6% (8200/509952) finish=1.0min speed=8200K/sec
      bitmap: 1/1 pages [4KB], 65536KB chunk

unused devices: <none>
:: [ 15:29:52 ] :: [ PASS ] :: Command 'cat /proc/mdstat' (Expected 0, got 0)
:: [ 15:29:52 ] :: [ BEGIN ] :: Running 'lsblk'
NAME                                MAJ:MIN RM   SIZE RO TYPE   MOUNTPOINTS
loop0                                 7:0    0   500M  0 loop
└─md1                                 9:1    0   1.9G  0 raid5
loop1                                 7:1    0   500M  0 loop
└─md1                                 9:1    0   1.9G  0 raid5
loop2                                 7:2    0   500M  0 loop
└─md1                                 9:1    0   1.9G  0 raid5
loop3                                 7:3    0   500M  0 loop
└─md1                                 9:1    0   1.9G  0 raid5
loop4                                 7:4    0   500M  0 loop
└─md1                                 9:1    0   1.9G  0 raid5
loop5                                 7:5    0   500M  0 loop
└─md1                                 9:1    0   1.9G  0 raid5
sda                                   8:0    0 465.8G  0 disk
├─sda1                                8:1    0     1M  0 part
├─sda2                                8:2    0     1G  0 part   /boot
└─sda3                                8:3    0 464.8G  0 part
  └─fedora_hpe--ml350egen8--01-root 253:0    0 930.5G  0 lvm    /
sdb                                   8:16   0 465.8G  0 disk
└─sdb1                                8:17   0 465.8G  0 part
  └─fedora_hpe--ml350egen8--01-root 253:0    0 930.5G  0 lvm    /
sr0                                  11:0    1  1024M  0 rom
zram0                               252:0    0     8G  0 disk   [SWAP]
:: [ 15:29:53 ] :: [ PASS ] :: Command 'lsblk' (Expected 0, got 0)
egrep: warning: egrep is obsolescent; using grep -E
/dev/md1
INFO:mdadm -D /dev/md1 #########################
:: [ 15:29:53 ] :: [ BEGIN ] :: Running 'mdadm --detail /dev/md1'
/dev/md1:
           Version : 1.2
     Creation Time : Sun Sep 25 15:29:50 2022
        Raid Level : raid5
        Array Size : 2039808 (1992.00 MiB 2088.76 MB)
     Used Dev Size : 509952 (498.00 MiB 522.19 MB)
      Raid Devices : 5
     Total Devices : 6
       Persistence : Superblock is persistent

     Intent Bitmap : Internal

       Update Time : Sun Sep 25 15:29:53 2022
             State : clean, degraded, recovering
    Active Devices : 4
   Working Devices : 6
    Failed Devices : 0
     Spare Devices : 2

            Layout : left-symmetric
        Chunk Size : 512K

Consistency Policy : bitmap

    Rebuild Status : 6% complete

              Name : 1
              UUID : 329e6a3a:085d6f27:d7565697:0032498d
            Events : 2

    Number   Major   Minor   RaidDevice State
       0       7        0        0      active sync   /dev/loop0
       1       7        1        1      active sync   /dev/loop1
       2       7        2        2      active sync   /dev/loop2
       3       7        3        3      active sync   /dev/loop3
       6       7        4        4      spare rebuilding   /dev/loop4

       5       7        5        -      spare   /dev/loop5
:: [ 15:29:54 ] :: [ PASS ] :: Command 'mdadm --detail /dev/md1' (Expected 0, got 0)
:: [ 15:29:54 ] :: [ LOG ] :: INFO: Successfully created md raid /dev/md1
1664134195 start_time against this md array: /dev/md1 state is clean,
1664134201 start_time against this md array: /dev/md1 state is clean,
1664134206 start_time against this md array: /dev/md1 state is clean,
1664134212 start_time against this md array: /dev/md1 state is clean,
1664134218 start_time against this md array: /dev/md1 state is clean,
1664134224 start_time against this md array: /dev/md1 state is clean,
1664134229 start_time against this md array: /dev/md1 state is clean,
1664134235 start_time against this md array: /dev/md1 state is clean,
1664134241 start_time against this md array: /dev/md1 state is clean,
1664134247 start_time against this md array: /dev/md1 state is clean
:: [ 15:30:47 ] :: [ LOG ] :: mkfs -t xfs /dev/md1
mkfs.xfs: /dev/md1 appears to contain an existing filesystem (xfs).
mkfs.xfs: Use the -f option to force overwrite.
log stripe unit (524288 bytes) is too large (maximum is 256KiB)
log stripe unit adjusted to 32KiB
meta-data=/dev/md1               isize=512    agcount=8, agsize=63744 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1    bigtime=1 inobtcount=1 nrext64=0
data     =                       bsize=4096   blocks=509952, imaxpct=25
         =                       sunit=128    swidth=512 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=16384, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
Discarding blocks...Done.
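The fstrim -v runs in this log report how many bytes were discarded (for example "/mnt/md_test: 1.9 GiB (2020802560 bytes) trimmed"). A hedged sketch of how that output could be checked for a nonzero trim, using a hypothetical helper name:

#!/usr/bin/env bash
# Hypothetical check: run fstrim -v and assert a nonzero byte count was trimmed.
check_fstrim() {
    local mnt=$1 out bytes
    out=$(fstrim -v "$mnt") || return 1
    echo "$out"
    # fstrim -v prints e.g. "/mnt/md_test: 1.9 GiB (2020802560 bytes) trimmed"
    bytes=$(printf '%s\n' "$out" | sed -n 's/.*(\([0-9]\+\) bytes) trimmed/\1/p')
    [[ -n $bytes && $bytes -gt 0 ]]
}
# Example: check_fstrim /mnt/md_test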
:: [ 15:33:33 ] :: [ BEGIN ] :: Running 'mount -t xfs /dev/md1 /mnt/md_test '
:: [ 15:33:34 ] :: [ PASS ] :: Command 'mount -t xfs /dev/md1 /mnt/md_test ' (Expected 0, got 0)
:: [ 15:33:34 ] :: [ BEGIN ] :: Running 'fstrim -v /mnt/md_test'
/mnt/md_test: 1.9 GiB (2020802560 bytes) trimmed
:: [ 15:34:22 ] :: [ PASS ] :: Command 'fstrim -v /mnt/md_test' (Expected 0, got 0)
:: [ 15:34:22 ] :: [ BEGIN ] :: Running 'umount /dev/md1'
:: [ 15:34:23 ] :: [ PASS ] :: Command 'umount /dev/md1' (Expected 0, got 0)
INFO: Executing MD_Clean_RAID() against this md device:
mdadm --stop /dev/md1
mdadm: stopped /dev/md1
clean devs : /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 /dev/loop5
mdadm --zero-superblock /dev/loop0
mdadm --zero-superblock /dev/loop1
mdadm --zero-superblock /dev/loop2
mdadm --zero-superblock /dev/loop3
mdadm --zero-superblock /dev/loop4
mdadm --zero-superblock /dev/loop5
ret is 0
ls /dev/md1
ls: cannot access '/dev/md1': No such file or directory
mdadm --stop can delete md node name /dev/md1 in /dev
INFO: Executing MD_Create_RAID() to create raid 6
INFO: Created md raid with these raid devices " /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4".
INFO: Created md raid with these spare disks " /dev/loop5".
:: [ 15:34:51 ] :: [ BEGIN ] :: Running 'mdadm --create --run /dev/md1 --level 6 --metadata 1.2 --raid-devices 5 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --spare-devices 1 /dev/loop5 --chunk 512 --bitmap=internal --bitmap-chunk=64M'
mdadm: array /dev/md1 started.
:: [ 15:34:52 ] :: [ PASS ] :: Command 'mdadm --create --run /dev/md1 --level 6 --metadata 1.2 --raid-devices 5 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --spare-devices 1 /dev/loop5 --chunk 512 --bitmap=internal --bitmap-chunk=64M' (Expected 0, got 0)
INFO:cat /proc/mdstat######################
:: [ 15:34:52 ] :: [ BEGIN ] :: Running 'cat /proc/mdstat'
Personalities : [raid6] [raid5] [raid4] [raid0] [raid1]
md1 : active raid6 loop5[5](S) loop4[4] loop3[3] loop2[2] loop1[1] loop0[0]
      1529856 blocks super 1.2 level 6, 512k chunk, algorithm 2 [5/5] [UUUUU]
      [=>...................]  resync = 5.0% (25884/509952) finish=0.3min speed=25884K/sec
      bitmap: 1/1 pages [4KB], 65536KB chunk

unused devices: <none>
:: [ 15:34:52 ] :: [ PASS ] :: Command 'cat /proc/mdstat' (Expected 0, got 0)
:: [ 15:34:53 ] :: [ BEGIN ] :: Running 'lsblk'
NAME                                MAJ:MIN RM   SIZE RO TYPE   MOUNTPOINTS
loop0                                 7:0    0   500M  0 loop
└─md1                                 9:1    0   1.5G  0 raid6
loop1                                 7:1    0   500M  0 loop
└─md1                                 9:1    0   1.5G  0 raid6
loop2                                 7:2    0   500M  0 loop
└─md1                                 9:1    0   1.5G  0 raid6
loop3                                 7:3    0   500M  0 loop
└─md1                                 9:1    0   1.5G  0 raid6
loop4                                 7:4    0   500M  0 loop
└─md1                                 9:1    0   1.5G  0 raid6
loop5                                 7:5    0   500M  0 loop
└─md1                                 9:1    0   1.5G  0 raid6
sda                                   8:0    0 465.8G  0 disk
├─sda1                                8:1    0     1M  0 part
├─sda2                                8:2    0     1G  0 part   /boot
└─sda3                                8:3    0 464.8G  0 part
  └─fedora_hpe--ml350egen8--01-root 253:0    0 930.5G  0 lvm    /
sdb                                   8:16   0 465.8G  0 disk
└─sdb1                                8:17   0 465.8G  0 part
  └─fedora_hpe--ml350egen8--01-root 253:0    0 930.5G  0 lvm    /
sr0                                  11:0    1  1024M  0 rom
zram0                               252:0    0     8G  0 disk   [SWAP]
:: [ 15:34:54 ] :: [ PASS ] :: Command 'lsblk' (Expected 0, got 0)
egrep: warning: egrep is obsolescent; using grep -E
/dev/md1
INFO:mdadm -D /dev/md1 #########################
:: [ 15:34:55 ] :: [ BEGIN ] :: Running 'mdadm --detail /dev/md1'
/dev/md1:
           Version : 1.2
     Creation Time : Sun Sep 25 15:34:51 2022
        Raid Level : raid6
        Array Size : 1529856 (1494.00 MiB 1566.57 MB)
     Used Dev Size : 509952 (498.00 MiB 522.19 MB)
      Raid Devices : 5
     Total Devices : 6
       Persistence : Superblock is persistent

     Intent Bitmap : Internal

       Update Time : Sun Sep 25 15:34:55 2022
             State : clean, resyncing
    Active Devices : 5
   Working Devices : 6
    Failed Devices : 0
     Spare Devices : 1

            Layout : left-symmetric
        Chunk Size : 512K

Consistency Policy : bitmap

     Resync Status : 19% complete

              Name : 1
              UUID : cd47ff86:36a0bd3f:502a8ade:97041ef0
            Events : 3

    Number   Major   Minor   RaidDevice State
       0       7        0        0      active sync   /dev/loop0
       1       7        1        1      active sync   /dev/loop1
       2       7        2        2      active sync   /dev/loop2
       3       7        3        3      active sync   /dev/loop3
       4       7        4        4      active sync   /dev/loop4

       5       7        5        -      spare   /dev/loop5
:: [ 15:34:55 ] :: [ PASS ] :: Command 'mdadm --detail /dev/md1' (Expected 0, got 0)
:: [ 15:34:56 ] :: [ LOG ] :: INFO: Successfully created md raid /dev/md1
1664134496 start_time against this md array: /dev/md1 state is clean,
1664134501 start_time against this md array: /dev/md1 state is clean,
1664134507 start_time against this md array: /dev/md1 state is clean,
1664134513 start_time against this md array: /dev/md1 state is clean
:: [ 15:35:13 ] :: [ LOG ] :: mkfs -t xfs /dev/md1
log stripe unit (524288 bytes) is too large (maximum is 256KiB)
log stripe unit adjusted to 32KiB
meta-data=/dev/md1               isize=512    agcount=8, agsize=47872 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1    bigtime=1 inobtcount=1 nrext64=0
data     =                       bsize=4096   blocks=382464, imaxpct=25
         =                       sunit=128    swidth=384 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=16384, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
Discarding blocks...Done.
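The arrays in this run sit on six 500M loop devices (loop0 through loop5 in the lsblk output), which the harness detaches with losetup -d at the very end. The log does not show how those devices were attached; one plausible setup, using illustrative backing-file paths, would be:

#!/usr/bin/env bash
# Assumed setup (not shown in the log): attach six 500M sparse files as
# loop devices for mdadm to use. Paths and sizes are illustrative.
for i in $(seq 0 5); do
    truncate -s 500M "/var/tmp/md_trim_disk$i.img"
    losetup "/dev/loop$i" "/var/tmp/md_trim_disk$i.img"
done
losetup --list    # verify /dev/loop0 .. /dev/loop5 are attached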
:: [ 15:36:12 ] :: [ BEGIN ] :: Running 'mount -t xfs /dev/md1 /mnt/md_test '
:: [ 15:36:13 ] :: [ PASS ] :: Command 'mount -t xfs /dev/md1 /mnt/md_test ' (Expected 0, got 0)
:: [ 15:36:14 ] :: [ BEGIN ] :: Running 'fstrim -v /mnt/md_test'
/mnt/md_test: 1.4 GiB (1498611712 bytes) trimmed
:: [ 15:37:09 ] :: [ PASS ] :: Command 'fstrim -v /mnt/md_test' (Expected 0, got 0)
:: [ 15:37:09 ] :: [ BEGIN ] :: Running 'umount /dev/md1'
:: [ 15:37:10 ] :: [ PASS ] :: Command 'umount /dev/md1' (Expected 0, got 0)
INFO: Executing MD_Clean_RAID() against this md device:
mdadm --stop /dev/md1
mdadm: stopped /dev/md1
clean devs : /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 /dev/loop5
mdadm --zero-superblock /dev/loop0
mdadm --zero-superblock /dev/loop1
mdadm --zero-superblock /dev/loop2
mdadm --zero-superblock /dev/loop3
mdadm --zero-superblock /dev/loop4
mdadm --zero-superblock /dev/loop5
ret is 0
ls /dev/md1
ls: cannot access '/dev/md1': No such file or directory
mdadm --stop can delete md node name /dev/md1 in /dev
INFO: Executing MD_Create_RAID() to create raid 10
INFO: Created md raid with these raid devices " /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4".
INFO: Created md raid with these spare disks " /dev/loop5".
:: [ 15:37:38 ] :: [ BEGIN ] :: Running 'mdadm --create --run /dev/md1 --level 10 --metadata 1.2 --raid-devices 5 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --spare-devices 1 /dev/loop5 --chunk 512 --bitmap=internal --bitmap-chunk=64M'
mdadm: array /dev/md1 started.
:: [ 15:37:39 ] :: [ PASS ] :: Command 'mdadm --create --run /dev/md1 --level 10 --metadata 1.2 --raid-devices 5 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --spare-devices 1 /dev/loop5 --chunk 512 --bitmap=internal --bitmap-chunk=64M' (Expected 0, got 0)
INFO:cat /proc/mdstat######################
:: [ 15:37:40 ] :: [ BEGIN ] :: Running 'cat /proc/mdstat'
Personalities : [raid6] [raid5] [raid4] [raid0] [raid1] [raid10]
md1 : active raid10 loop5[5](S) loop4[4] loop3[3] loop2[2] loop1[1] loop0[0]
      1274880 blocks super 1.2 512K chunks 2 near-copies [5/5] [UUUUU]
      [=>...................]  resync = 9.6% (123008/1274880) finish=0.1min speed=123008K/sec
      bitmap: 1/1 pages [4KB], 65536KB chunk

unused devices: <none>
:: [ 15:37:40 ] :: [ PASS ] :: Command 'cat /proc/mdstat' (Expected 0, got 0)
:: [ 15:37:41 ] :: [ BEGIN ] :: Running 'lsblk'
NAME                                MAJ:MIN RM   SIZE RO TYPE   MOUNTPOINTS
loop0                                 7:0    0   500M  0 loop
└─md1                                 9:1    0   1.2G  0 raid10
loop1                                 7:1    0   500M  0 loop
└─md1                                 9:1    0   1.2G  0 raid10
loop2                                 7:2    0   500M  0 loop
└─md1                                 9:1    0   1.2G  0 raid10
loop3                                 7:3    0   500M  0 loop
└─md1                                 9:1    0   1.2G  0 raid10
loop4                                 7:4    0   500M  0 loop
└─md1                                 9:1    0   1.2G  0 raid10
loop5                                 7:5    0   500M  0 loop
└─md1                                 9:1    0   1.2G  0 raid10
sda                                   8:0    0 465.8G  0 disk
├─sda1                                8:1    0     1M  0 part
├─sda2                                8:2    0     1G  0 part   /boot
└─sda3                                8:3    0 464.8G  0 part
  └─fedora_hpe--ml350egen8--01-root 253:0    0 930.5G  0 lvm    /
sdb                                   8:16   0 465.8G  0 disk
└─sdb1                                8:17   0 465.8G  0 part
  └─fedora_hpe--ml350egen8--01-root 253:0    0 930.5G  0 lvm    /
sr0                                  11:0    1  1024M  0 rom
zram0                               252:0    0     8G  0 disk   [SWAP]
:: [ 15:37:42 ] :: [ PASS ] :: Command 'lsblk' (Expected 0, got 0)
egrep: warning: egrep is obsolescent; using grep -E
/dev/md1
INFO:mdadm -D /dev/md1 #########################
:: [ 15:37:43 ] :: [ BEGIN ] :: Running 'mdadm --detail /dev/md1'
/dev/md1:
           Version : 1.2
     Creation Time : Sun Sep 25 15:37:39 2022
        Raid Level : raid10
        Array Size : 1274880 (1245.00 MiB 1305.48 MB)
     Used Dev Size : 509952 (498.00 MiB 522.19 MB)
      Raid Devices : 5
     Total Devices : 6
       Persistence : Superblock is persistent

     Intent Bitmap : Internal

       Update Time : Sun Sep 25 15:37:42 2022
             State : clean, resyncing
    Active Devices : 5
   Working Devices : 6
    Failed Devices : 0
     Spare Devices : 1

            Layout : near=2
        Chunk Size : 512K

Consistency Policy : bitmap

     Resync Status : 37% complete

              Name : 1
              UUID : 5f5eec95:a2e3d32f:54e35255:1f98a5b3
            Events : 6

    Number   Major   Minor   RaidDevice State
       0       7        0        0      active sync   /dev/loop0
       1       7        1        1      active sync   /dev/loop1
       2       7        2        2      active sync   /dev/loop2
       3       7        3        3      active sync   /dev/loop3
       4       7        4        4      active sync   /dev/loop4

       5       7        5        -      spare   /dev/loop5
:: [ 15:37:43 ] :: [ PASS ] :: Command 'mdadm --detail /dev/md1' (Expected 0, got 0)
:: [ 15:37:44 ] :: [ LOG ] :: INFO: Successfully created md raid /dev/md1
1664134664 start_time against this md array: /dev/md1 state is clean,
1664134670 start_time against this md array: /dev/md1 state is clean
:: [ 15:37:50 ] :: [ LOG ] :: mkfs -t xfs /dev/md1
log stripe unit (524288 bytes) is too large (maximum is 256KiB)
log stripe unit adjusted to 32KiB
meta-data=/dev/md1               isize=512    agcount=8, agsize=39936 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1    bigtime=1 inobtcount=1 nrext64=0
data     =                       bsize=4096   blocks=318720, imaxpct=25
         =                       sunit=128    swidth=640 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=16384, version=2
         =                       sectsz=512   sunit=8 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
Discarding blocks...Done.
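After the last teardown, the test greps dmesg for kernel call traces and treats grep's exit code 1 (no match) as the expected result, which is what the final "check the errors (Expected 1, got 1)" assertion records. A bare-shell sketch of an equivalent check (the test itself wraps this in beakerlib's rlRun):

#!/usr/bin/env bash
# Equivalent of the final "check the errors" step: grep exits 1 when no
# "Call Trace:" lines are present, which is the passing outcome.
if dmesg | grep -i 'Call Trace:'; then
    echo "FAIL: kernel call trace found during the TRIM test" >&2
    exit 1
else
    echo "PASS: no kernel call traces in dmesg"
fi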
:: [ 15:37:54 ] :: [ BEGIN ] :: Running 'mount -t xfs /dev/md1 /mnt/md_test '
:: [ 15:37:54 ] :: [ PASS ] :: Command 'mount -t xfs /dev/md1 /mnt/md_test ' (Expected 0, got 0)
:: [ 15:37:55 ] :: [ BEGIN ] :: Running 'fstrim -v /mnt/md_test'
/mnt/md_test: 1.2 GiB (1238007808 bytes) trimmed
:: [ 15:37:55 ] :: [ PASS ] :: Command 'fstrim -v /mnt/md_test' (Expected 0, got 0)
:: [ 15:37:55 ] :: [ BEGIN ] :: Running 'umount /dev/md1'
:: [ 15:37:56 ] :: [ PASS ] :: Command 'umount /dev/md1' (Expected 0, got 0)
INFO: Executing MD_Clean_RAID() against this md device:
mdadm --stop /dev/md1
mdadm: stopped /dev/md1
clean devs : /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 /dev/loop5
mdadm --zero-superblock /dev/loop0
mdadm --zero-superblock /dev/loop1
mdadm --zero-superblock /dev/loop2
mdadm --zero-superblock /dev/loop3
mdadm --zero-superblock /dev/loop4
mdadm --zero-superblock /dev/loop5
ret is 0
ls /dev/md1
ls: cannot access '/dev/md1': No such file or directory
mdadm --stop can delete md node name /dev/md1 in /dev
losetup -d /dev/loop0
losetup -d /dev/loop1
losetup -d /dev/loop2
losetup -d /dev/loop3
losetup -d /dev/loop4
losetup -d /dev/loop5
:: [ 15:38:19 ] :: [ BEGIN ] :: check the errors :: actually running 'dmesg | grep -i 'Call Trace:''
:: [ 15:38:19 ] :: [ PASS ] :: check the errors (Expected 1, got 1)
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
::   Duration: 962s
::   Assertions: 47 good, 0 bad
::   RESULT: PASS (Test)

** Test PASS Score:0
Uploading resultoutputfile.log .done

::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
::   TEST PROTOCOL
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

    Test run ID   : 6
    Package       : unknown
    beakerlib RPM : beakerlib-1.29.2-1.fc38.noarch
    bl-redhat RPM : beakerlib-redhat-1-33.fc37eng.noarch
    Test name     : unknown
    Test started  : 2022-09-25 15:22:15 EDT
    Test finished : 2022-09-25 15:38:25 EDT (still running)
    Test duration : 970 seconds
    Distro        : Fedora release 38 (Rawhide)
    Hostname      : hpe-ml350egen8-01.hpe2.lab.eng.bos.redhat.com
    Architecture  : unknown
    CPUs          : 32 x Intel(R) Xeon(R) CPU E5-2450 0 @ 2.10GHz
    RAM size      : 13575 MB
    HDD size      : 931.39 GB

::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
::   Test description
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

PURPOSE of trim
Description: Test the function of TRIM, which quickly erases invalid data on RAID arrays created by mdadm.
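Per the description above and the command log, every RAID level goes through the same create, mkfs.xfs, mount, fstrim, umount, clean cycle. A condensed sketch of that flow, with option handling simplified (the real runtest.sh and its helpers may differ):

#!/usr/bin/env bash
# Condensed sketch of the per-level cycle seen in this log; details of the
# real runtest.sh (helpers, waits, logging) may differ.
set -e
modprobe raid456 devices_handle_discard_safely=Y
echo Y > /sys/module/raid456/parameters/devices_handle_discard_safely
mkdir -p /mnt/md_test

for level in 0 1 4 5 6 10; do
    opts=()
    [[ $level -ne 1 ]] && opts+=(--chunk 512)
    [[ $level -ne 0 ]] && opts+=(--spare-devices 1 /dev/loop5 --bitmap=internal --bitmap-chunk=64M)
    mdadm --create --run /dev/md1 --level "$level" --metadata 1.2 \
          --raid-devices 5 /dev/loop{0..4} "${opts[@]}"
    mkfs -t xfs -f /dev/md1            # -f in case a previous filesystem remains
    mount -t xfs /dev/md1 /mnt/md_test
    fstrim -v /mnt/md_test
    umount /dev/md1
    mdadm --stop /dev/md1
    for dev in /dev/loop{0..5}; do mdadm --zero-superblock "$dev" || true; done
done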
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
::   Test
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

:: [ 15:22:19 ] :: [ PASS ] :: Command 'uname -a' (Expected 0, got 0)
:: [ 15:22:21 ] :: [ PASS ] :: Command 'rpm -q mdadm || dnf install -y mdadm' (Expected 0, got 0)
:: [ 15:22:21 ] :: [ LOG ] :: ./runtest.sh
:: [ 15:22:22 ] :: [ PASS ] :: Command 'modprobe raid456 devices_handle_discard_safely=Y' (Expected 0, got 0)
:: [ 15:22:23 ] :: [ PASS ] :: Command 'echo Y >/sys/module/raid456/parameters/devices_handle_discard_safely' (Expected 0, got 0)
:: [ 15:23:00 ] :: [ PASS ] :: Command 'mdadm --create --run /dev/md1 --level 0 --metadata 1.2 --raid-devices 5 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --chunk 512' (Expected 0, got 0)
:: [ 15:23:00 ] :: [ PASS ] :: Command 'cat /proc/mdstat' (Expected 0, got 0)
:: [ 15:23:01 ] :: [ PASS ] :: Command 'lsblk' (Expected 0, got 0)
:: [ 15:23:02 ] :: [ PASS ] :: Command 'mdadm --detail /dev/md1' (Expected 0, got 0)
:: [ 15:23:02 ] :: [ LOG ] :: INFO: Successfully created md raid /dev/md1
:: [ 15:23:03 ] :: [ LOG ] :: mkfs -t xfs /dev/md1
:: [ 15:23:05 ] :: [ PASS ] :: Command 'mount -t xfs /dev/md1 /mnt/md_test ' (Expected 0, got 0)
:: [ 15:23:06 ] :: [ PASS ] :: Command 'fstrim -v /mnt/md_test' (Expected 0, got 0)
:: [ 15:23:07 ] :: [ PASS ] :: Command 'umount /dev/md1' (Expected 0, got 0)
:: [ 15:23:34 ] :: [ PASS ] :: Command 'mdadm --create --run /dev/md1 --level 1 --metadata 1.2 --raid-devices 5 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --spare-devices 1 /dev/loop5 --bitmap=internal --bitmap-chunk=64M' (Expected 0, got 0)
:: [ 15:23:35 ] :: [ PASS ] :: Command 'cat /proc/mdstat' (Expected 0, got 0)
:: [ 15:23:36 ] :: [ PASS ] :: Command 'lsblk' (Expected 0, got 0)
:: [ 15:23:37 ] :: [ PASS ] :: Command 'mdadm --detail /dev/md1' (Expected 0, got 0)
:: [ 15:23:37 ] :: [ LOG ] :: INFO: Successfully created md raid /dev/md1
:: [ 15:24:10 ] :: [ LOG ] :: mkfs -t xfs /dev/md1
:: [ 15:24:16 ] :: [ PASS ] :: Command 'mount -t xfs /dev/md1 /mnt/md_test ' (Expected 0, got 0)
:: [ 15:24:17 ] :: [ PASS ] :: Command 'fstrim -v /mnt/md_test' (Expected 0, got 0)
:: [ 15:24:18 ] :: [ PASS ] :: Command 'umount /dev/md1' (Expected 0, got 0)
:: [ 15:24:45 ] :: [ PASS ] :: Command 'mdadm --create --run /dev/md1 --level 4 --metadata 1.2 --raid-devices 5 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --spare-devices 1 /dev/loop5 --chunk 512 --bitmap=internal --bitmap-chunk=64M' (Expected 0, got 0)
:: [ 15:24:46 ] :: [ PASS ] :: Command 'cat /proc/mdstat' (Expected 0, got 0)
:: [ 15:24:47 ] :: [ PASS ] :: Command 'lsblk' (Expected 0, got 0)
:: [ 15:24:48 ] :: [ PASS ] :: Command 'mdadm --detail /dev/md1' (Expected 0, got 0)
:: [ 15:24:48 ] :: [ LOG ] :: INFO: Successfully created md raid /dev/md1
:: [ 15:25:37 ] :: [ LOG ] :: mkfs -t xfs /dev/md1
:: [ 15:28:32 ] :: [ PASS ] :: Command 'mount -t xfs /dev/md1 /mnt/md_test ' (Expected 0, got 0)
:: [ 15:29:21 ] :: [ PASS ] :: Command 'fstrim -v /mnt/md_test' (Expected 0, got 0)
:: [ 15:29:22 ] :: [ PASS ] :: Command 'umount /dev/md1' (Expected 0, got 0)
:: [ 15:29:51 ] :: [ PASS ] :: Command 'mdadm --create --run /dev/md1 --level 5 --metadata 1.2 --raid-devices 5 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --spare-devices 1 /dev/loop5 --chunk 512 --bitmap=internal --bitmap-chunk=64M' (Expected 0, got 0)
:: [ 15:29:52 ] :: [ PASS ] :: Command 'cat /proc/mdstat' (Expected 0, got 0)
:: [ 15:29:53 ] :: [ PASS ] :: Command 'lsblk' (Expected 0, got 0)
:: [ 15:29:54 ] :: [ PASS ] :: Command 'mdadm --detail /dev/md1' (Expected 0, got 0)
:: [ 15:29:54 ] :: [ LOG ] :: INFO: Successfully created md raid /dev/md1
:: [ 15:30:47 ] :: [ LOG ] :: mkfs -t xfs /dev/md1
:: [ 15:33:34 ] :: [ PASS ] :: Command 'mount -t xfs /dev/md1 /mnt/md_test ' (Expected 0, got 0)
:: [ 15:34:22 ] :: [ PASS ] :: Command 'fstrim -v /mnt/md_test' (Expected 0, got 0)
:: [ 15:34:23 ] :: [ PASS ] :: Command 'umount /dev/md1' (Expected 0, got 0)
:: [ 15:34:52 ] :: [ PASS ] :: Command 'mdadm --create --run /dev/md1 --level 6 --metadata 1.2 --raid-devices 5 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --spare-devices 1 /dev/loop5 --chunk 512 --bitmap=internal --bitmap-chunk=64M' (Expected 0, got 0)
:: [ 15:34:52 ] :: [ PASS ] :: Command 'cat /proc/mdstat' (Expected 0, got 0)
:: [ 15:34:54 ] :: [ PASS ] :: Command 'lsblk' (Expected 0, got 0)
:: [ 15:34:55 ] :: [ PASS ] :: Command 'mdadm --detail /dev/md1' (Expected 0, got 0)
:: [ 15:34:56 ] :: [ LOG ] :: INFO: Successfully created md raid /dev/md1
:: [ 15:35:13 ] :: [ LOG ] :: mkfs -t xfs /dev/md1
:: [ 15:36:13 ] :: [ PASS ] :: Command 'mount -t xfs /dev/md1 /mnt/md_test ' (Expected 0, got 0)
:: [ 15:37:09 ] :: [ PASS ] :: Command 'fstrim -v /mnt/md_test' (Expected 0, got 0)
:: [ 15:37:10 ] :: [ PASS ] :: Command 'umount /dev/md1' (Expected 0, got 0)
:: [ 15:37:39 ] :: [ PASS ] :: Command 'mdadm --create --run /dev/md1 --level 10 --metadata 1.2 --raid-devices 5 /dev/loop0 /dev/loop1 /dev/loop2 /dev/loop3 /dev/loop4 --spare-devices 1 /dev/loop5 --chunk 512 --bitmap=internal --bitmap-chunk=64M' (Expected 0, got 0)
:: [ 15:37:40 ] :: [ PASS ] :: Command 'cat /proc/mdstat' (Expected 0, got 0)
:: [ 15:37:42 ] :: [ PASS ] :: Command 'lsblk' (Expected 0, got 0)
:: [ 15:37:43 ] :: [ PASS ] :: Command 'mdadm --detail /dev/md1' (Expected 0, got 0)
:: [ 15:37:44 ] :: [ LOG ] :: INFO: Successfully created md raid /dev/md1
:: [ 15:37:50 ] :: [ LOG ] :: mkfs -t xfs /dev/md1
:: [ 15:37:54 ] :: [ PASS ] :: Command 'mount -t xfs /dev/md1 /mnt/md_test ' (Expected 0, got 0)
:: [ 15:37:55 ] :: [ PASS ] :: Command 'fstrim -v /mnt/md_test' (Expected 0, got 0)
:: [ 15:37:56 ] :: [ PASS ] :: Command 'umount /dev/md1' (Expected 0, got 0)
:: [ 15:38:19 ] :: [ PASS ] :: check the errors (Expected 1, got 1)
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
::   Duration: 962s
::   Assertions: 47 good, 0 bad
::   RESULT: PASS (Test)
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
::   unknown
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

:: [ 15:38:26 ] :: [ LOG ] :: Phases fingerprint: L5rLAvqh
:: [ 15:38:27 ] :: [ LOG ] :: Asserts fingerprint: IcvkrVvU
Uploading journal.xml .done
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
::   Duration: 971s
::   Phases: 1 good, 0 bad
::   OVERALL RESULT: PASS (unknown)