7290 ZFS test suite needs to control what utilities it can run
[unleashed.git] / usr / src / test / zfs-tests / tests / functional / inuse / inuse_005_pos.ksh
blob882ae2557648e32476b4b70796cf3de1edb0292a
1 #!/usr/bin/ksh -p
3 # CDDL HEADER START
5 # The contents of this file are subject to the terms of the
6 # Common Development and Distribution License (the "License").
7 # You may not use this file except in compliance with the License.
9 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 # or http://www.opensolaris.org/os/licensing.
11 # See the License for the specific language governing permissions
12 # and limitations under the License.
14 # When distributing Covered Code, include this CDDL HEADER in each
15 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 # If applicable, add the following below this CDDL HEADER, with the
17 # fields enclosed by brackets "[]" replaced with your own identifying
18 # information: Portions Copyright [yyyy] [name of copyright owner]
20 # CDDL HEADER END
24 # Copyright 2009 Sun Microsystems, Inc. All rights reserved.
25 # Use is subject to license terms.
29 # Copyright (c) 2013, 2016 by Delphix. All rights reserved.
32 . $STF_SUITE/include/libtest.shlib
33 . $STF_SUITE/tests/functional/inuse/inuse.cfg
36 # DESCRIPTION:
37 # newfs will not interfere with devices and spare devices that are in use
38 # by active pool.
40 # STRATEGY:
41 # 1. Create a regular|mirror|raidz|raidz2 pool with the given disk
# 2. Try to newfs against the disk, verify that it fails as expected.
45 verify_runnable "global"
#
# Test teardown (registered via log_onexit): destroy the test pool if it
# still exists, then wipe the labels/partitions left on the disks we used.
#
function cleanup
{
	poolexists $TESTPOOL1 && destroy_pool $TESTPOOL1

	#
	# Tidy up the disks we used.
	#
	cleanup_devices $vdisks $sdisks
}
#
# Attempt to newfs each target device and verify the command fails,
# since every target belongs to an active pool (or is an active spare).
#
# $1 - whitespace-separated list of raw slice devices to newfs
#
function verify_assertion #slices
{
	typeset targets=$1

	for t in $targets; do
		# Feed "y" to answer newfs's confirmation prompt; we only
		# care about the exit status, so discard all output.
		echo "y" | newfs -v $t > /dev/null 2>&1
		(( $? != 0 )) || \
		    log_fail "newfs over active pool " \
			"unexpected return code of 0"
	done

	return 0
}
log_assert "Verify newfs over active pool fails."

log_onexit cleanup

# Pool layouts to exercise: plain disk, mirror, and the raidz variants.
set -A vdevs "" "mirror" "raidz" "raidz1" "raidz2"

typeset -i i=0

# Make sure the in-use checking we are testing is not disabled.
unset NOINUSE_CHECK
while (( i < ${#vdevs[*]} )); do
	# (Re)create the test partitions on each disk before every pass.
	for num in 0 1 2 3 ; do
		eval typeset slice=\${FS_SIDE$num}
		disk=${slice%s*}
		slice=${slice##*s}
		log_must set_partition $slice "" $FS_SIZE $disk
	done

	# A redundant layout needs more than one disk; skip it when the
	# test only has a single disk available.
	if [[ -n $SINGLE_DISK && -n ${vdevs[i]} ]]; then
		(( i = i + 1 ))
		continue
	fi

	# First pass: pool built from slices, spare slices attached.
	create_pool $TESTPOOL1 ${vdevs[i]} $vslices spare $sslices
	verify_assertion "$rawtargets"
	destroy_pool $TESTPOOL1

	# Whole-disk redundant layouts need distinct disks; skip when
	# the configured disks overlap.
	if [[ ( $FS_DISK0 == $FS_DISK2 ) && -n ${vdevs[i]} ]]; then
		(( i = i + 1 ))
		continue
	fi

	# raidz2 needs yet another distinct disk.
	if [[ ( $FS_DISK0 == $FS_DISK3 ) && ( ${vdevs[i]} == "raidz2" ) ]]; then
		(( i = i + 1 ))
		continue
	fi

	# Second pass: pool built from whole disks, spare disks attached.
	create_pool $TESTPOOL1 ${vdevs[i]} $vdisks spare $sdisks
	verify_assertion "$rawtargets"
	destroy_pool $TESTPOOL1

	(( i = i + 1 ))
done

log_pass "Newfs over active pool fails."