8941 zpool add: assertion failed in get_replication() with nested interior VDEVs
[unleashed.git] usr/src/test/zfs-tests/tests/functional/cli_root/zpool_add/add_nested_replacing_spare.ksh
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_create/zpool_create.shlib

#
# DESCRIPTION:
#	'zpool add' works with nested replacing/spare vdevs
#
# STRATEGY:
#	1. Create a redundant pool with a spare device
#	2. Manually fault a device, wait for the hot-spare and then replace it:
#	   this creates a situation where replacing and spare vdevs are nested.
#	3. Verify 'zpool add' is able to add new devices to the pool.
#
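#
# With the faulted disk sitting under both a hot-spare and a replacement
# device, the config reported by 'zpool status' is expected to look roughly
# like this (illustrative sketch, not verbatim output; <type> is one of the
# pool types exercised below):
#
#	NAME                    STATE
#	testpool                DEGRADED
#	  <type>-0              DEGRADED
#	    spare-0             DEGRADED
#	      replacing-0       DEGRADED
#	        fault-dev       UNAVAIL
#	        replace-dev     ONLINE
#	      spare-dev1        ONLINE
#	    safe-dev1           ONLINE
#	    ...
#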

verify_runnable "global"

function cleanup
{
	log_must zinject -c all
	poolexists $TESTPOOL && \
	    destroy_pool $TESTPOOL
	log_must rm -f $DATA_DEVS $SPARE_DEVS
}

log_assert "'zpool add' works with nested replacing/spare vdevs"
log_onexit cleanup

TMPDIR='/var/tmp'
FAULT_DEV="$TMPDIR/fault-dev"
SAFE_DEV1="$TMPDIR/safe-dev1"
SAFE_DEV2="$TMPDIR/safe-dev2"
SAFE_DEV3="$TMPDIR/safe-dev3"
SAFE_DEVS="$SAFE_DEV1 $SAFE_DEV2 $SAFE_DEV3"
REPLACE_DEV="$TMPDIR/replace-dev"
ADD_DEV="$TMPDIR/add-dev"
DATA_DEVS="$FAULT_DEV $SAFE_DEVS $REPLACE_DEV $ADD_DEV"
SPARE_DEV1="$TMPDIR/spare-dev1"
SPARE_DEV2="$TMPDIR/spare-dev2"
SPARE_DEVS="$SPARE_DEV1 $SPARE_DEV2"

for type in "mirror" "raidz1" "raidz2" "raidz3"
do
	# 1. Create a redundant pool with a spare device
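	# (truncate -s creates sparse backing files of the minimum supported
	# vdev size to stand in for real disks)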
	log_must truncate -s $SPA_MINDEVSIZE $DATA_DEVS $SPARE_DEVS
	log_must zpool create $TESTPOOL $type $FAULT_DEV $SAFE_DEVS
	log_must zpool add $TESTPOOL spare $SPARE_DEV1

	# 2.1 Fault a device, verify the spare is kicked in
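	# (zinject flags: -d selects the target vdev, -e nxio makes its I/O
	# fail with ENXIO, -T all covers every I/O type, and -f 100 injects
	# the error on 100% of I/Os)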
	log_must zinject -d $FAULT_DEV -e nxio -T all -f 100 $TESTPOOL
	log_must zpool scrub $TESTPOOL
	log_must wait_vdev_state $TESTPOOL $FAULT_DEV "UNAVAIL" 60
	log_must wait_vdev_state $TESTPOOL $SPARE_DEV1 "ONLINE" 60
	log_must wait_hotspare_state $TESTPOOL $SPARE_DEV1 "INUSE"
	log_must check_state $TESTPOOL "$type-0" "DEGRADED"

	# 2.2 Replace the faulted device: this creates a replacing vdev inside a
	# spare vdev
	log_must zpool replace $TESTPOOL $FAULT_DEV $REPLACE_DEV
	log_must wait_vdev_state $TESTPOOL $REPLACE_DEV "ONLINE" 60
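	# The nawk script below walks the 'zpool status' config: seeing the
	# pool name arms a four-line window, the first field of each line in
	# the window is collected, and the result must read
	# pool:<type>-0:spare-0:replacing-0, i.e. the replacing vdev must be
	# nested inside the spare vdev.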
	zpool status | nawk -v poolname="$TESTPOOL" -v type="$type" 'BEGIN {s=""}
	    $1 ~ poolname {c=4}; (c && c--) { s=s$1":" }
	    END { if (s != poolname":"type"-0:spare-0:replacing-0:") exit 1; }'
	if [[ $? -ne 0 ]]; then
		log_fail "Pool does not contain nested replacing/spare vdevs"
	fi

	# 3. Verify 'zpool add' is able to add new devices
	log_must zpool add $TESTPOOL spare $SPARE_DEV2
	log_must wait_hotspare_state $TESTPOOL $SPARE_DEV2 "AVAIL"
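	# -f is required here: a single-disk top-level vdev does not match the
	# pool's mirror/raidz replication level, and 'zpool add' refuses the
	# mismatch without the force flag.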
	log_must zpool add -f $TESTPOOL $ADD_DEV
	log_must wait_vdev_state $TESTPOOL $ADD_DEV "ONLINE" 60

	# Cleanup
	cleanup
done

log_pass "'zpool add' works with nested replacing/spare vdevs"