1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68
|
#!/bin/bash
# Clustered-RAID1 grow test, part 1: grow a 2-disk clustered array to
# 3 disks by adding a brand-new device in the same --grow command.
# NOTE(review): $md0, $dev*, $NODE1/$NODE2, check and stop_md are
# provided by the sourced mdadm test harness — this script is not
# standalone. Variables are intentionally unquoted per harness style.

# Create a clustered (bitmap) RAID1 on this node, assemble on node2.
mdadm -CR $md0 -l1 -b clustered -n2 $dev0 $dev1 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1
check all nosync
check all raid1
check all bitmap
check all state UU
check all dmesg

# Grow to 3 raid devices, supplying the extra disk in one command.
mdadm --grow $md0 --raid-devices=3 --add $dev2
sleep 0.3
# Recovery runs on whichever node claimed it: if the local mdstat
# shows recovery, wait here; otherwise the remote node is recovering.
if grep recovery /proc/mdstat; then
	check $NODE1 wait
else
	check $NODE2 recovery
	check $NODE2 wait
fi
check all state UUU
check all dmesg
stop_md all $md0
# Clustered-RAID1 grow test, part 2: create a 2-disk array that
# already carries one spare ($dev2), then grow to 3 raid devices while
# adding yet another device ($dev3).
# (Harness-provided: $md0, $dev*, $NODE*, check, stop_md.)
mdadm -CR $md0 -l1 -b clustered -n2 -x1 $dev0 $dev1 $dev2 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2
check all nosync
check all raid1
check all bitmap
check all spares 1
check all state UU
check all dmesg

# Grow to 3 raid devices while contributing a fourth disk; the spare
# or the new disk can be promoted by md.
mdadm --grow $md0 --raid-devices=3 --add $dev3
sleep 0.3
# Wait on whichever node actually runs the recovery (see part 1).
if grep recovery /proc/mdstat; then
	check $NODE1 wait
else
	check $NODE2 recovery
	check $NODE2 wait
fi
check all state UUU
check all dmesg
stop_md all $md0
# Clustered-RAID1 grow test, part 3: create a 2-disk array with one
# spare, then grow to 3 raid devices WITHOUT --add, so the existing
# spare must be promoted into the new slot.
# (Harness-provided: $md0, $dev*, $NODE*, check, stop_md.)
mdadm -CR $md0 -l1 -b clustered -n2 -x1 $dev0 $dev1 $dev2 --assume-clean
ssh $NODE2 mdadm -A $md0 $dev0 $dev1 $dev2
check all nosync
check all raid1
check all bitmap
check all spares 1
check all state UU
check all dmesg

# No --add here: the grow must consume the pre-existing spare.
mdadm --grow $md0 --raid-devices=3
sleep 0.3
# Wait on whichever node actually runs the recovery (see part 1).
if grep recovery /proc/mdstat; then
	check $NODE1 wait
else
	check $NODE2 recovery
	check $NODE2 wait
fi
check all state UUU
check all dmesg
stop_md all $md0
exit 0
|