4 # The contents of this file are subject to the terms of the
5 # Common Development and Distribution License (the "License").
6 # You may not use this file except in compliance with the License.
8 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 # or http://www.opensolaris.org/os/licensing.
10 # See the License for the specific language governing permissions
11 # and limitations under the License.
13 # When distributing Covered Code, include this CDDL HEADER in each
14 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 # If applicable, add the following below this CDDL HEADER, with the
16 # fields enclosed by brackets "[]" replaced with your own identifying
17 # information: Portions Copyright [yyyy] [name of copyright owner]
23 # Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 # Use is subject to license terms.
28 # Copyright (c) 2012, 2014 by Delphix. All rights reserved.
31 . ${STF_TOOLS}/contrib/include/logapi.shlib
33 ZFS=${ZFS:-/usr/sbin/zfs}
34 ZPOOL=${ZPOOL:-/usr/sbin/zpool}
36 # Determine whether a dataset is mounted
39 # $2 filesystem type; optional - defaulted to zfs
41 # Return 0 if dataset is mounted; 1 if unmounted; 2 on error
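# Usage sketch (assumes the suite defaults $TESTPOOL/$TESTFS and $TESTDIR):
#	ismounted $TESTPOOL/$TESTFS && log_note "$TESTPOOL/$TESTFS is mounted"
#	ismounted $TESTDIR || log_note "$TESTDIR is not mounted"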
46 [[ -z $fstype ]] && fstype=zfs
47 typeset out dir name ret
51 if [[ "$1" == "/"* ]] ; then
52 for out in $($ZFS mount | $AWK '{print $2}'); do
53 [[ $1 == $out ]] && return 0
56 for out in $($ZFS mount | $AWK '{print $1}'); do
57 [[ $1 == $out ]] && return 0
62 out=$($DF -F $fstype $1 2>/dev/null)
64 (($ret != 0)) && return $ret
72 [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
79 # Return 0 if a dataset is mounted; 1 otherwise
82 # $2 filesystem type; optional - defaulted to zfs
87 (($? == 0)) && return 0
91 # Return 0 if a dataset is unmounted; 1 otherwise
94 # $2 filesystem type; optional - defaulted to zfs
99 (($? == 1)) && return 0
109 $ECHO $1 | $SED "s/,/ /g"
112 function default_setup
114 default_setup_noexit "$@"
122 # Given a list of disks, set up storage pools and datasets.
122 function default_setup_noexit
128 if is_global_zone; then
129 if poolexists $TESTPOOL ; then
130 destroy_pool $TESTPOOL
132 [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
133 log_must $ZPOOL create -f $TESTPOOL $disklist
138 $RM -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
139 $MKDIR -p $TESTDIR || log_unresolved Could not create $TESTDIR
141 log_must $ZFS create $TESTPOOL/$TESTFS
142 log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
144 if [[ -n $container ]]; then
145 $RM -rf $TESTDIR1 || \
146 log_unresolved Could not remove $TESTDIR1
147 $MKDIR -p $TESTDIR1 || \
148 log_unresolved Could not create $TESTDIR1
150 log_must $ZFS create $TESTPOOL/$TESTCTR
151 log_must $ZFS set canmount=off $TESTPOOL/$TESTCTR
152 log_must $ZFS create $TESTPOOL/$TESTCTR/$TESTFS1
153 log_must $ZFS set mountpoint=$TESTDIR1 \
154 $TESTPOOL/$TESTCTR/$TESTFS1
157 if [[ -n $volume ]]; then
158 if is_global_zone ; then
159 log_must $ZFS create -V $VOLSIZE $TESTPOOL/$TESTVOL
161 log_must $ZFS create $TESTPOOL/$TESTVOL
167 # Given a list of disks, set up a storage pool, file system and
170 function default_container_setup
174 default_setup "$disklist" "true"
178 # Given a list of disks, set up a storage pool, file system
181 function default_volume_setup
185 default_setup "$disklist" "" "true"
189 # Given a list of disks, set up a storage pool, file system,
190 # a container and a volume.
192 function default_container_volume_setup
196 default_setup "$disklist" "true" "true"
200 # Create a snapshot on a filesystem or volume. By default, create a snapshot on
203 # $1 Existing filesystem or volume name. Default, $TESTFS
204 # $2 snapshot name. Default, $TESTSNAP
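# Usage sketch (assumes the suite defaults; this creates $TESTPOOL/$TESTFS@$TESTSNAP):
#	create_snapshot $TESTPOOL/$TESTFS $TESTSNAP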
206 function create_snapshot
208 typeset fs_vol=${1:-$TESTFS}
209 typeset snap=${2:-$TESTSNAP}
211 [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
212 [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
214 if snapexists $fs_vol@$snap; then
215 log_fail "$fs_vol@$snap already exists."
217 datasetexists $fs_vol || \
218 log_fail "$fs_vol must exist."
220 log_must $ZFS snapshot $fs_vol@$snap
224 # Create a clone from a snapshot, default clone name is $TESTCLONE.
226 # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
227 # $2 Clone name, $TESTPOOL/$TESTCLONE is default.
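# Usage sketch (assumes the suite default snapshot and clone names):
#	create_clone $TESTPOOL/$TESTFS@$TESTSNAP $TESTPOOL/$TESTCLONE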
229 function create_clone # snapshot clone
231 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
232 typeset clone=${2:-$TESTPOOL/$TESTCLONE}
235 log_fail "Snapshot name is undefined."
237 log_fail "Clone name is undefined."
239 log_must $ZFS clone $snap $clone
242 function default_mirror_setup
244 default_mirror_setup_noexit $1 $2 $3
250 # Given a pair of disks, set up a storage pool and dataset for the mirror
251 # @parameters: $1 the primary side of the mirror
252 # $2 the secondary side of the mirror
253 # @uses: ZPOOL ZFS TESTPOOL TESTFS
254 function default_mirror_setup_noexit
256 readonly func="default_mirror_setup_noexit"
260 [[ -z $primary ]] && \
261 log_fail "$func: No parameters passed"
262 [[ -z $secondary ]] && \
263 log_fail "$func: No secondary partition passed"
264 [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
265 log_must $ZPOOL create -f $TESTPOOL mirror $@
266 log_must $ZFS create $TESTPOOL/$TESTFS
267 log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
271 # create a number of mirrors.
272 # We create a number ($1) of 2-way mirrors using the pairs of disks named
273 # on the command line. These mirrors are *not* mounted
274 # @parameters: $1 the number of mirrors to create
275 # $... the devices to use to create the mirrors on
276 # @uses: ZPOOL ZFS TESTPOOL
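# Usage sketch (disk names are hypothetical):
#	setup_mirrors 2 c1t0d0 c1t1d0 c2t0d0 c2t1d0
# would create ${TESTPOOL}2 and ${TESTPOOL}1, each a 2-way mirror of one disk pair.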
277 function setup_mirrors
279 typeset -i nmirrors=$1
282 while ((nmirrors > 0)); do
283 log_must test -n "$1" -a -n "$2"
284 [[ -d /$TESTPOOL$nmirrors ]] && $RM -rf /$TESTPOOL$nmirrors
285 log_must $ZPOOL create -f $TESTPOOL$nmirrors mirror $1 $2
287 ((nmirrors = nmirrors - 1))
292 # create a number of raidz pools.
293 # We create a number ($1) of raidz pools using the pairs of disks named
294 # on the command line. These pools are *not* mounted
295 # @parameters: $1 the number of pools to create
296 # $... the devices to use to create the pools on
297 # @uses: ZPOOL ZFS TESTPOOL
298 function setup_raidzs
300 typeset -i nraidzs=$1
303 while ((nraidzs > 0)); do
304 log_must test -n "$1" -a -n "$2"
305 [[ -d /$TESTPOOL$nraidzs ]] && $RM -rf /$TESTPOOL$nraidzs
306 log_must $ZPOOL create -f $TESTPOOL$nraidzs raidz $1 $2
308 ((nraidzs = nraidzs - 1))
313 # Destroy the configured testpool mirrors.
314 # the mirrors are of the form ${TESTPOOL}{number}
315 # @uses: ZPOOL ZFS TESTPOOL
316 function destroy_mirrors
318 default_cleanup_noexit
324 # Given a minimum of two disks, set up a storage pool and dataset for the raid-z
325 # $1 the list of disks
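# Usage sketch (disk names are hypothetical):
#	default_raidz_setup "c1t0d0 c1t1d0 c1t2d0"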
327 function default_raidz_setup
329 typeset disklist="$*"
330 disks=(${disklist[*]})
332 if [[ ${#disks[*]} -lt 2 ]]; then
333 log_fail "A raid-z requires a minimum of two disks."
336 [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
337 log_must $ZPOOL create -f $TESTPOOL raidz $1 $2 $3
338 log_must $ZFS create $TESTPOOL/$TESTFS
339 log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
345 # Common function used to cleanup storage pools and datasets.
347 # Invoked at the start of the test suite to ensure the system
348 # is in a known state, and also at the end of each set of
349 # sub-tests to ensure errors from one set of tests don't
350 # impact the execution of the next set.
352 function default_cleanup
354 default_cleanup_noexit
359 function default_cleanup_noexit
364 # Destroying the pool will also destroy any
365 # filesystems it contains.
367 if is_global_zone; then
368 $ZFS unmount -a > /dev/null 2>&1
369 exclude=`eval $ECHO \"'(${KEEP})'\"`
370 ALL_POOLS=$($ZPOOL list -H -o name \
371 | $GREP -v "$NO_POOLS" | $EGREP -v "$exclude")
372 # Here, we loop through the pools we're allowed to
373 # destroy, only destroying them if it's safe to do
375 while [ ! -z ${ALL_POOLS} ]
377 for pool in ${ALL_POOLS}
379 if safe_to_destroy_pool $pool ;
383 ALL_POOLS=$($ZPOOL list -H -o name \
384 | $GREP -v "$NO_POOLS" \
385 | $EGREP -v "$exclude")
392 for fs in $($ZFS list -H -o name \
393 | $GREP "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
394 datasetexists $fs && \
395 log_must $ZFS destroy -Rf $fs
398 # Clean up here to avoid leaving garbage directories behind.
399 for fs in $($ZFS list -H -o name); do
400 [[ $fs == /$ZONE_POOL ]] && continue
401 [[ -d $fs ]] && log_must $RM -rf $fs/*
405 # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
408 for fs in $($ZFS list -H -o name); do
409 if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
410 log_must $ZFS set reservation=none $fs
411 log_must $ZFS set recordsize=128K $fs
412 log_must $ZFS set mountpoint=/$fs $fs
414 enc=$(get_prop encryption $fs)
415 if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
416 [[ "$enc" == "off" ]]; then
417 log_must $ZFS set checksum=on $fs
419 log_must $ZFS set compression=off $fs
420 log_must $ZFS set atime=on $fs
421 log_must $ZFS set devices=off $fs
422 log_must $ZFS set exec=on $fs
423 log_must $ZFS set setuid=on $fs
424 log_must $ZFS set readonly=off $fs
425 log_must $ZFS set snapdir=hidden $fs
426 log_must $ZFS set aclmode=groupmask $fs
427 log_must $ZFS set aclinherit=secure $fs
432 [[ -d $TESTDIR ]] && \
433 log_must $RM -rf $TESTDIR
438 # Common function used to cleanup storage pools, file systems
441 function default_container_cleanup
443 if ! is_global_zone; then
447 ismounted $TESTPOOL/$TESTCTR/$TESTFS1
449 log_must $ZFS unmount $TESTPOOL/$TESTCTR/$TESTFS1
451 datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
452 log_must $ZFS destroy -R $TESTPOOL/$TESTCTR/$TESTFS1
454 datasetexists $TESTPOOL/$TESTCTR && \
455 log_must $ZFS destroy -Rf $TESTPOOL/$TESTCTR
457 [[ -e $TESTDIR1 ]] && \
458 log_must $RM -rf $TESTDIR1 > /dev/null 2>&1
464 # Common function used to clean up a snapshot of a file system or volume. Defaults to
465 # deleting the file system's snapshot.
469 function destroy_snapshot
471 typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
473 if ! snapexists $snap; then
474 log_fail "'$snap' does not exist."
478 # The value returned by 'get_prop' may not equal the real mountpoint when
479 # the snapshot is unmounted. So, first check and make sure this snapshot
480 # is mounted on the current system.
483 if ismounted $snap; then
484 mtpt=$(get_prop mountpoint $snap)
486 log_fail "get_prop mountpoint $snap failed."
489 log_must $ZFS destroy $snap
490 [[ $mtpt != "" && -d $mtpt ]] && \
491 log_must $RM -rf $mtpt
495 # Common function used to clean up a clone.
499 function destroy_clone
501 typeset clone=${1:-$TESTPOOL/$TESTCLONE}
503 if ! datasetexists $clone; then
504 log_fail "'$clone' does not exist."
507 # For the same reason as in destroy_snapshot
509 if ismounted $clone; then
510 mtpt=$(get_prop mountpoint $clone)
512 log_fail "get_prop mountpoint $clone failed."
515 log_must $ZFS destroy $clone
516 [[ $mtpt != "" && -d $mtpt ]] && \
517 log_must $RM -rf $mtpt
520 # Return 0 if a snapshot exists; $? otherwise
526 $ZFS list -H -t snapshot "$1" > /dev/null 2>&1
531 # Set a property to a certain value on a dataset.
532 # Sets a property of the dataset to the value as passed in.
534 # $1 dataset whose property is being set
536 # $3 value to set property to
538 # 0 if the property could be set.
539 # non-zero otherwise.
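# Usage sketch (assumes the suite's default dataset):
#	dataset_setprop $TESTPOOL/$TESTFS compression on || log_fail "cannot set compression"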
542 function dataset_setprop
544 typeset fn=dataset_setprop
547 log_note "$fn: Insufficient parameters (need 3, had $#)"
551 output=$($ZFS set $2=$3 $1 2>&1)
554 log_note "Setting property on $1 failed."
555 log_note "property $2=$3"
556 log_note "Return Code: $rv"
557 log_note "Output: $output"
564 # Assign suite defined dataset properties.
565 # This function is used to apply the suite's defined default set of
566 # properties to a dataset.
567 # @parameters: $1 dataset to use
568 # @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
570 # 0 if the dataset has been altered.
571 # 1 if no pool name was passed in.
572 # 2 if the dataset could not be found.
573 # 3 if the dataset could not have its properties set.
575 function dataset_set_defaultproperties
579 [[ -z $dataset ]] && return 1
583 for confset in $($ZFS list); do
584 if [[ $dataset = $confset ]]; then
589 [[ $found -eq 0 ]] && return 2
590 if [[ -n $COMPRESSION_PROP ]]; then
591 dataset_setprop $dataset compression $COMPRESSION_PROP || \
593 log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
595 if [[ -n $CHECKSUM_PROP ]]; then
596 dataset_setprop $dataset checksum $CHECKSUM_PROP || \
598 log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
604 # Check a numeric assertion
605 # @parameter: $@ the assertion to check
606 # @output: big loud notice if assertion failed
611 (($@)) || log_fail "$@"
615 # Function to format partition size of a disk
616 # Given a disk cxtxdx reduces all partitions
619 function zero_partitions #<whole_disk_name>
624 for i in 0 1 3 4 5 6 7
626 set_partition $i "" 0mb $diskname
631 # Given a slice, size and disk, this function
632 # formats the slice to the specified size.
633 # Size should be specified with units as per
634 # the `format` command requirements eg. 100mb 3gb
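# Usage sketch (slice, start and disk name are illustrative):
#	set_partition 0 "" 100mb c1t0d0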
636 function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
638 typeset -i slicenum=$1
642 [[ -z $slicenum || -z $size || -z $disk ]] && \
643 log_fail "The slice, size or disk name is unspecified."
644 typeset format_file=/var/tmp/format_in.$$
646 $ECHO "partition" >$format_file
647 $ECHO "$slicenum" >> $format_file
648 $ECHO "" >> $format_file
649 $ECHO "" >> $format_file
650 $ECHO "$start" >> $format_file
651 $ECHO "$size" >> $format_file
652 $ECHO "label" >> $format_file
653 $ECHO "" >> $format_file
654 $ECHO "q" >> $format_file
655 $ECHO "q" >> $format_file
657 $FORMAT -e -s -d $disk -f $format_file
660 [[ $ret_val -ne 0 ]] && \
661 log_fail "Unable to format $disk slice $slicenum to $size"
666 # Get the end cyl of the given slice
668 function get_endslice #<disk> <slice>
672 if [[ -z $disk || -z $slice ]] ; then
673 log_fail "The disk name or slice number is unspecified."
676 disk=${disk#/dev/dsk/}
677 disk=${disk#/dev/rdsk/}
681 ratio=$($PRTVTOC /dev/rdsk/${disk}s2 | \
682 $GREP "sectors\/cylinder" | \
685 if ((ratio == 0)); then
689 typeset -i endcyl=$($PRTVTOC -h /dev/rdsk/${disk}s2 |
690 $NAWK -v token="$slice" '{if ($1==token) print $6}')
692 ((endcyl = (endcyl + 1) / ratio))
698 # Given a size,disk and total slice number, this function formats the
699 # disk slices from 0 to the total slice number with the same specified
702 function partition_disk #<slice_size> <whole_disk_name> <total_slices>
705 typeset slice_size=$1
707 typeset total_slices=$3
710 zero_partitions $disk_name
711 while ((i < $total_slices)); do
716 set_partition $i "$cyl" $slice_size $disk_name
717 cyl=$(get_endslice $disk_name $i)
723 # This function continues to write filenum files into each of dirnum
724 # directories until either $FILE_WRITE returns an error or the
725 # maximum number of files per directory has been written.
728 # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
730 # Return value: 0 on success
734 # destdir: the directory under which everything is to be created
735 # dirnum: the maximum number of subdirectories to use, -1 no limit
736 # filenum: the maximum number of files per subdirectory
737 # bytes: number of bytes to write
738 # num_writes: number of times to write out bytes
739 # data: the data that will be written
742 # fill_fs /testdir 20 25 1024 256 0
744 # Note: bytes * num_writes equals the size of the testfile
746 function fill_fs # destdir dirnum filenum bytes num_writes data
748 typeset destdir=${1:-$TESTDIR}
749 typeset -i dirnum=${2:-50}
750 typeset -i filenum=${3:-50}
751 typeset -i bytes=${4:-8192}
752 typeset -i num_writes=${5:-10240}
753 typeset -i data=${6:-0}
760 log_must $MKDIR -p $destdir/$idirnum
761 while (($odirnum > 0)); do
762 if ((dirnum >= 0 && idirnum >= dirnum)); then
766 $FILE_WRITE -o create -f $destdir/$idirnum/$TESTFILE.$fn \
767 -b $bytes -c $num_writes -d $data
769 if (($retval != 0)); then
773 if (($fn >= $filenum)); then
775 ((idirnum = idirnum + 1))
776 log_must $MKDIR -p $destdir/$idirnum
785 # Simple function to get the specified property. If unable to
786 # get the property then exits.
788 # Note property is in 'parsable' format (-p)
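# Usage sketch (assumes the suite's default dataset):
#	typeset used=$(get_prop used $TESTPOOL/$TESTFS)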
790 function get_prop # property dataset
796 prop_val=$($ZFS get -pH -o value $prop $dataset 2>/dev/null)
797 if [[ $? -ne 0 ]]; then
798 log_note "Unable to get $prop property for dataset " \
808 # Simple function to get the specified property of pool. If unable to
809 # get the property then exits.
811 function get_pool_prop # property pool
817 if poolexists $pool ; then
818 prop_val=$($ZPOOL get $prop $pool 2>/dev/null | $TAIL -1 | \
820 if [[ $? -ne 0 ]]; then
821 log_note "Unable to get $prop property for pool " \
826 log_note "Pool $pool does not exist."
834 # Return 0 if a pool exists; $? otherwise
842 if [[ -z $pool ]]; then
843 log_note "No pool name given."
847 $ZPOOL get name "$pool" > /dev/null 2>&1
851 # Return 0 if all the specified datasets exist; $? otherwise
854 function datasetexists
857 log_note "No dataset name given."
862 $ZFS get name $1 > /dev/null 2>&1 || \
870 # return 0 if none of the specified datasets exists, otherwise return 1.
873 function datasetnonexists
876 log_note "No dataset name given."
881 $ZFS list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
890 # Given a mountpoint, or a dataset name, determine if it is shared.
892 # Returns 0 if shared, 1 otherwise.
899 if [[ $fs != "/"* ]] ; then
900 if datasetnonexists "$fs" ; then
903 mtpt=$(get_prop mountpoint "$fs")
905 none|legacy|-) return 1
913 for mtpt in `$SHARE | $AWK '{print $2}'` ; do
914 if [[ $mtpt == $fs ]] ; then
919 typeset stat=$($SVCS -H -o STA nfs/server:default)
920 if [[ $stat != "ON" ]]; then
921 log_note "Current nfs/server status: $stat"
928 # Given a mountpoint, determine if it is not shared.
930 # Returns 0 if not shared, 1 otherwise.
945 # Helper function to unshare a mountpoint.
947 function unshare_fs #fs
953 log_must $ZFS unshare $fs
960 # Check NFS server status and trigger it online.
962 function setup_nfs_server
964 # Cannot share directory in non-global zone.
966 if ! is_global_zone; then
967 log_note "Cannot trigger NFS server by sharing in LZ."
971 typeset nfs_fmri="svc:/network/nfs/server:default"
972 if [[ $($SVCS -Ho STA $nfs_fmri) != "ON" ]]; then
974 # Only a real share operation can bring the NFS server
975 # online permanently.
977 typeset dummy=/tmp/dummy
979 if [[ -d $dummy ]]; then
980 log_must $RM -rf $dummy
983 log_must $MKDIR $dummy
984 log_must $SHARE $dummy
987 # Wait for the fmri's status to settle to its final state. Otherwise,
988 # while in transition an asterisk (*) is appended to the instance state,
989 # and unsharing at that point would revert the status to 'DIS' again.
991 # Wait for at least 1 second.
995 while [[ timeout -ne 0 && $($SVCS -Ho STA $nfs_fmri) == *'*' ]]
1002 log_must $UNSHARE $dummy
1003 log_must $RM -rf $dummy
1006 log_note "Current NFS status: '$($SVCS -Ho STA,FMRI $nfs_fmri)'"
1010 # To verify whether calling process is in global zone
1012 # Return 0 if in global zone, 1 in non-global zone
1014 function is_global_zone
1016 typeset cur_zone=$($ZONENAME 2>/dev/null)
1017 if [[ $cur_zone != "global" ]]; then
1024 # Verify whether test is permitted to run from
1025 # global zone, local zone, or both
1027 # $1 zone limit, could be "global", "local", or "both"(no limit)
1029 # Return 0 if permitted, otherwise exit with log_unsupported
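# Usage sketch:
#	verify_runnable "global"	# test may only run in the global zone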
1031 function verify_runnable # zone limit
1035 [[ -z $limit ]] && return 0
1037 if is_global_zone ; then
1041 local) log_unsupported "Test is unable to run from "\
1044 *) log_note "Warning: unknown limit $limit - " \
1052 global) log_unsupported "Test is unable to run from "\
1055 *) log_note "Warning: unknown limit $limit - " \
1066 # Return 0 if created successfully or the pool already exists; $? otherwise
1067 # Note: In local zones, this function should return 0 silently.
1070 # $2-n - [keyword] devs_list
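# Usage sketch (disk names are hypothetical):
#	create_pool $TESTPOOL mirror c1t0d0 c1t1d0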
1072 function create_pool #pool devs_list
1074 typeset pool=${1%%/*}
1078 if [[ -z $pool ]]; then
1079 log_note "Missing pool name."
1083 if poolexists $pool ; then
1087 if is_global_zone ; then
1088 [[ -d /$pool ]] && $RM -rf /$pool
1089 log_must $ZPOOL create -f $pool $@
1095 # Return 0 if destroyed successfully or the pool does not exist; $? otherwise
1096 # Note: In local zones, this function should return 0 silently.
1099 # Destroy pool with the given parameters.
1101 function destroy_pool #pool
1103 typeset pool=${1%%/*}
1106 if [[ -z $pool ]]; then
1107 log_note "No pool name given."
1111 if is_global_zone ; then
1112 if poolexists "$pool" ; then
1113 mtpt=$(get_prop mountpoint "$pool")
1114 log_must $ZPOOL destroy -f $pool
1117 log_must $RM -rf $mtpt
1119 log_note "Pool does not exist. ($pool)"
1128 # First, create a pool with 5 datasets. Then, create a single zone and
1129 # export the 5 datasets to it. In addition, we also add a ZFS filesystem
1130 # and a zvol device to the zone.
1133 # $2 zone root directory prefix
1136 function zfs_zones_setup #zone_name zone_root zone_ip
1138 typeset zone_name=${1:-$(hostname)-z}
1139 typeset zone_root=${2:-"/zone_root"}
1140 typeset zone_ip=${3:-"10.1.1.10"}
1141 typeset prefix_ctr=$ZONE_CTR
1142 typeset pool_name=$ZONE_POOL
1146 # Create a pool and 5 containers within it
1148 [[ -d /$pool_name ]] && $RM -rf /$pool_name
1149 log_must $ZPOOL create -f $pool_name $DISKS
1150 while ((i < cntctr)); do
1151 log_must $ZFS create $pool_name/$prefix_ctr$i
1156 log_must $ZFS create -V 1g $pool_name/zone_zvol
1159 # If the current system supports slog, add a slog device to the pool
1161 if verify_slog_support ; then
1162 typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
1163 log_must $MKFILE 100M $sdevs
1164 log_must $ZPOOL add $pool_name log mirror $sdevs
1167 # this isn't supported just yet.
1168 # Create a filesystem. In order to add this to
1169 # the zone, it must have its mountpoint set to 'legacy'
1170 # log_must $ZFS create $pool_name/zfs_filesystem
1171 # log_must $ZFS set mountpoint=legacy $pool_name/zfs_filesystem
1173 [[ -d $zone_root ]] && \
1174 log_must $RM -rf $zone_root/$zone_name
1175 [[ ! -d $zone_root ]] && \
1176 log_must $MKDIR -p -m 0700 $zone_root/$zone_name
1178 # Create the zone configuration file and configure the zone
1180 typeset zone_conf=/tmp/zone_conf.$$
1181 $ECHO "create" > $zone_conf
1182 $ECHO "set zonepath=$zone_root/$zone_name" >> $zone_conf
1183 $ECHO "set autoboot=true" >> $zone_conf
1185 while ((i < cntctr)); do
1186 $ECHO "add dataset" >> $zone_conf
1187 $ECHO "set name=$pool_name/$prefix_ctr$i" >> \
1189 $ECHO "end" >> $zone_conf
1193 # add our zvol to the zone
1194 $ECHO "add device" >> $zone_conf
1195 $ECHO "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
1196 $ECHO "end" >> $zone_conf
1198 # add a corresponding zvol rdsk to the zone
1199 $ECHO "add device" >> $zone_conf
1200 $ECHO "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf
1201 $ECHO "end" >> $zone_conf
1203 # once it's supported, we'll add our filesystem to the zone
1204 # $ECHO "add fs" >> $zone_conf
1205 # $ECHO "set type=zfs" >> $zone_conf
1206 # $ECHO "set special=$pool_name/zfs_filesystem" >> $zone_conf
1207 # $ECHO "set dir=/export/zfs_filesystem" >> $zone_conf
1208 # $ECHO "end" >> $zone_conf
1210 $ECHO "verify" >> $zone_conf
1211 $ECHO "commit" >> $zone_conf
1212 log_must $ZONECFG -z $zone_name -f $zone_conf
1213 log_must $RM -f $zone_conf
1216 $ZONEADM -z $zone_name install
1217 if (($? == 0)); then
1218 log_note "SUCCESS: $ZONEADM -z $zone_name install"
1220 log_fail "FAIL: $ZONEADM -z $zone_name install"
1223 # Install sysidcfg file
1225 typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
1226 $ECHO "system_locale=C" > $sysidcfg
1227 $ECHO "terminal=dtterm" >> $sysidcfg
1228 $ECHO "network_interface=primary {" >> $sysidcfg
1229 $ECHO "hostname=$zone_name" >> $sysidcfg
1230 $ECHO "}" >> $sysidcfg
1231 $ECHO "name_service=NONE" >> $sysidcfg
1232 $ECHO "root_password=mo791xfZ/SFiw" >> $sysidcfg
1233 $ECHO "security_policy=NONE" >> $sysidcfg
1234 $ECHO "timezone=US/Eastern" >> $sysidcfg
1237 log_must $ZONEADM -z $zone_name boot
1241 # Reexport TESTPOOL & TESTPOOL(1-4)
1243 function reexport_pool
1248 while ((i < cntctr)); do
1250 TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
1251 if ! ismounted $TESTPOOL; then
1252 log_must $ZFS mount $TESTPOOL
1255 eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
1256 if eval ! ismounted \$TESTPOOL$i; then
1257 log_must eval $ZFS mount \$TESTPOOL$i
1265 # Verify a given disk is online or offline
1267 # Return 0 if pool/disk matches expected state, 1 otherwise
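# Usage sketch (disk name is hypothetical):
#	check_state $TESTPOOL c1t0d0 "online" || log_fail "c1t0d0 is not online"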
1269 function check_state # pool disk state{online,offline}
1272 typeset disk=${2#/dev/dsk/}
1275 $ZPOOL status -v $pool | grep "$disk" \
1276 | grep -i "$state" > /dev/null 2>&1
1282 # Get the mountpoint of snapshot
1283 # For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
1286 function snapshot_mountpoint
1288 typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
1290 if [[ $dataset != *@* ]]; then
1291 log_fail "Invalid snapshot name '$dataset'."
1294 typeset fs=${dataset%@*}
1295 typeset snap=${dataset#*@}
1297 if [[ -z $fs || -z $snap ]]; then
1298 log_fail "Invalid snapshot name '$dataset'."
1301 $ECHO $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
1305 # Given a pool and file system, this function will verify the file system
1306 # using the zdb internal tool. Note that the pool is exported and imported
1307 # to ensure it has consistent state.
1309 function verify_filesys # pool filesystem dir
1312 typeset filesys="$2"
1313 typeset zdbout="/tmp/zdbout.$$"
1318 typeset search_path=""
1320 log_note "Calling $ZDB to verify filesystem '$filesys'"
1321 $ZFS unmount -a > /dev/null 2>&1
1322 log_must $ZPOOL export $pool
1324 if [[ -n $dirs ]] ; then
1325 for dir in $dirs ; do
1326 search_path="$search_path -d $dir"
1330 log_must $ZPOOL import $search_path $pool
1332 $ZDB -cudi $filesys > $zdbout 2>&1
1333 if [[ $? != 0 ]]; then
1334 log_note "Output: $ZDB -cudi $filesys"
1336 log_fail "$ZDB detected errors with: '$filesys'"
1339 log_must $ZFS mount -a
1340 log_must $RM -rf $zdbout
1344 # Given a pool, this function lists all disks in the pool
1346 function get_disklist # pool
1350 disklist=$($ZPOOL iostat -v $1 | $NAWK '(NR >4) {print $1}' | \
1351 $GREP -v "\-\-\-\-\-" | \
1352 $EGREP -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
1358 # Destroy all existing metadevices and state database
1360 function destroy_metas
1364 for metad in $($METASTAT -p | $AWK '{print $1}'); do
1365 log_must $METACLEAR -rf $metad
1368 for metad in $($METADB | $CUT -f6 | $GREP dev | $UNIQ); do
1369 log_must $METADB -fd $metad
1374 # This function kills a given list of processes after a time period. We use
1375 # this in the stress tests instead of STF_TIMEOUT so that we can have processes
1376 # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
1377 # would be listed as FAIL, which we don't want : we're happy with stress tests
1378 # running for a certain amount of time, then finishing.
1380 # @param $1 the time in seconds after which we should terminate these processes
1381 # @param $2..$n the processes we wish to terminate.
1383 function stress_timeout
1385 typeset -i TIMEOUT=$1
1389 log_note "Waiting for child processes ($cpids). " \
1390 "This may take many minutes, please be patient ..."
1391 log_must $SLEEP $TIMEOUT
1393 log_note "Killing child processes after ${TIMEOUT} stress timeout."
1395 for pid in $cpids; do
1396 $PS -p $pid > /dev/null 2>&1
1397 if (($? == 0)); then
1398 log_must $KILL -USR1 $pid
1404 # Verify a given hotspare disk is inuse or avail
1406 # Return 0 if pool/disk matches expected state, 1 otherwise
1408 function check_hotspare_state # pool disk state{inuse,avail}
1411 typeset disk=${2#/dev/dsk/}
1414 cur_state=$(get_device_state $pool $disk "spares")
1416 if [[ $state != ${cur_state} ]]; then
1423 # Verify a given slog disk is online, offline or unavail
1425 # Return 0 if pool/disk matches expected state, 1 otherwise
1427 function check_slog_state # pool disk state{online,offline,unavail}
1430 typeset disk=${2#/dev/dsk/}
1433 cur_state=$(get_device_state $pool $disk "logs")
1435 if [[ $state != ${cur_state} ]]; then
1442 # Verify a given vdev disk is online, offline or unavail
1444 # Return 0 if pool/disk matches expected state, 1 otherwise
1446 function check_vdev_state # pool disk state{online,offline,unavail}
1449 typeset disk=${2#/dev/dsk/}
1452 cur_state=$(get_device_state $pool $disk)
1454 if [[ $state != ${cur_state} ]]; then
1461 # Check the output of 'zpool status -v <pool>',
1462 # and see if the content of <token> contains the <keyword> specified.
1464 # Return 0 if it does, 1 otherwise
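# Usage sketch (token and keyword are illustrative):
#	check_pool_status $TESTPOOL "errors" "No known data errors"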
1466 function check_pool_status # pool token keyword
1472 $ZPOOL status -v "$pool" 2>/dev/null | $NAWK -v token="$token:" '
1473 ($1==token) {print $0}' \
1474 | $GREP -i "$keyword" > /dev/null 2>&1
1480 # The following 5 functions are instances of check_pool_status():
1481 # is_pool_resilvering - check if a resilver is in progress on the pool
1482 # is_pool_resilvered - check if a resilver has completed on the pool
1483 # is_pool_scrubbing - check if a scrub is in progress on the pool
1484 # is_pool_scrubbed - check if a scrub has completed on the pool
1485 # is_pool_scrub_stopped - check if a scrub on the pool has been stopped
1487 function is_pool_resilvering #pool
1489 check_pool_status "$1" "scan" "resilver in progress since "
1493 function is_pool_resilvered #pool
1495 check_pool_status "$1" "scan" "resilvered "
1499 function is_pool_scrubbing #pool
1501 check_pool_status "$1" "scan" "scrub in progress since "
1505 function is_pool_scrubbed #pool
1507 check_pool_status "$1" "scan" "scrub repaired"
1511 function is_pool_scrub_stopped #pool
1513 check_pool_status "$1" "scan" "scrub canceled"
1518 # Use create_pool()/destroy_pool() to clean up the information on
1519 # the given disks to avoid slice overlapping.
1521 function cleanup_devices #vdevs
1523 typeset pool="foopool$$"
1525 if poolexists $pool ; then
1529 create_pool $pool $@
1536 # Verify the rsh connectivity to each remote host in RHOSTS.
1538 # Return 0 if remote host is accessible; otherwise 1.
1539 # $1 remote host name
1542 function verify_rsh_connect #rhost, username
1546 typeset rsh_cmd="$RSH -n"
1549 $GETENT hosts $rhost >/dev/null 2>&1
1550 if (($? != 0)); then
1551 log_note "$rhost cannot be found in the" \
1552 "administrative database."
1556 $PING $rhost 3 >/dev/null 2>&1
1557 if (($? != 0)); then
1558 log_note "$rhost is not reachable."
1562 if ((${#username} != 0)); then
1563 rsh_cmd="$rsh_cmd -l $username"
1564 cur_user="given user \"$username\""
1566 cur_user="current user \"`$LOGNAME`\""
1569 if ! $rsh_cmd $rhost $TRUE; then
1570 log_note "$RSH to $rhost is not accessible" \
1579 # Verify the remote host connection via rsh after rebooting
1582 function verify_remote
1587 # The following loop waits for the remote system to reboot.
1588 # Each iteration waits for 150 seconds. There are
1589 # 5 iterations in total, so the total timeout value will
1590 # be 12.5 minutes for the system reboot. This number
1591 # is approximate.
1594 while ! verify_rsh_connect $rhost; do
1596 ((count = count + 1))
1597 if ((count > 5)); then
1605 # Replacement function for /usr/bin/rsh. This function wraps
1606 # /usr/bin/rsh and also returns the execution status of the remote command.
1609 # $1 username passed to the -l option of /usr/bin/rsh
1610 # $2 remote machine hostname
1611 # $3... command string
1625 err_file=/tmp/${rhost}.$$.err
1626 if ((${#ruser} == 0)); then
1629 rsh_str="$RSH -n -l $ruser"
1632 $rsh_str $rhost /usr/bin/ksh -c "'$cmd_str; \
1633 print -u 2 \"status=\$?\"'" \
1634 >/dev/null 2>$err_file
1636 if (($ret != 0)); then
1638 $RM -f $std_file $err_file
1639 log_fail "$RSH itself failed with exit code $ret..."
1642 ret=$($GREP -v 'print -u 2' $err_file | $GREP 'status=' | \
1644 (($ret != 0)) && $CAT $err_file >&2
1646 $RM -f $err_file >/dev/null 2>&1
1651 # Get the SUNWstc-fs-zfs package installation path on a remote host
1652 # $1 remote host name
1654 function get_remote_pkgpath
1659 pkgpath=$($RSH -n $rhost "$PKGINFO -l SUNWstc-fs-zfs | $GREP BASEDIR: |\
1666 # A function to find and locate free disks on a system, or from the given
1667 # disks passed as parameters. It works by locating disks that are in use
1668 # as swap devices, SVM devices, and dump devices, and also disks
1669 # listed in /etc/vfstab, so that they can be excluded.
1671 # $@ given disks to find which are free, default is all disks in
1674 # @return a string containing the list of available disks
1678 sfi=/tmp/swaplist.$$
1679 msi=/tmp/metastat.$$
1680 dmpi=/tmp/dumpdev.$$
1681 max_finddisksnum=${MAX_FINDDISKSNUM:-6}
1684 $METASTAT -c > $msi 2>/dev/null
1685 $DUMPADM > $dmpi 2>/dev/null
1687 # write an awk script that can process the output of format
1688 # to produce a list of disks we know about. Note that we have
1689 # to escape "$2" so that the shell doesn't interpret it while
1690 # we're creating the awk script.
1691 # -------------------
1692 $CAT > /tmp/find_disks.awk <<EOF
1701 if (searchdisks && \$2 !~ "^$"){
1707 /^AVAILABLE DISK SELECTIONS:/{
1711 #---------------------
1713 $CHMOD 755 /tmp/find_disks.awk
1714 disks=${@:-$($ECHO "" | $FORMAT -e 2>/dev/null | /tmp/find_disks.awk)}
1715 $RM /tmp/find_disks.awk
1718 for disk in $disks; do
1720 $GREP "${disk}[sp]" /etc/mnttab >/dev/null
1721 (($? == 0)) && continue
1723 $GREP "${disk}[sp]" $sfi >/dev/null
1724 (($? == 0)) && continue
1726 $GREP "${disk}" $msi >/dev/null
1727 (($? == 0)) && continue
1728 # check for dump device
1729 $GREP "${disk}[sp]" $dmpi >/dev/null
1730 (($? == 0)) && continue
1731 # check to see if this disk hasn't been explicitly excluded
1732 # by a user-set environment variable
1733 $ECHO "${ZFS_HOST_DEVICES_IGNORE}" | $GREP "${disk}" > /dev/null
1734 (($? == 0)) && continue
1735 unused_candidates="$unused_candidates $disk"
1741 # now just check to see if those disks do actually exist
1742 # by looking for a device pointing to the first slice in
1743 # each case. limit the number to max_finddisksnum
1745 for disk in $unused_candidates; do
1746 if [ -b /dev/dsk/${disk}s0 ]; then
1747 if [ $count -lt $max_finddisksnum ]; then
1748 unused="$unused $disk"
1749 # do not impose limit if $@ is provided
1750 [[ -z $@ ]] && ((count = count + 1))
1755 # finally, return our disk list
1760 # Add specified user to specified group
1764 # $3 base of the homedir (optional)
1766 function add_user #<group_name> <user_name> <basedir>
1770 typeset basedir=${3:-"/var/tmp"}
1772 if ((${#gname} == 0 || ${#uname} == 0)); then
1773 log_fail "group name or user name is not defined."
1776 log_must $USERADD -g $gname -d $basedir/$uname -m $uname
1782 # Delete the specified user.
1785 # $2 base of the homedir (optional)
1787 function del_user #<logname> <basedir>
1790 typeset basedir=${2:-"/var/tmp"}
1792 if ((${#user} == 0)); then
1793 log_fail "login name is necessary."
1796 if $ID $user > /dev/null 2>&1; then
1797 log_must $USERDEL $user
1800 [[ -d $basedir/$user ]] && $RM -fr $basedir/$user
1806 # Select valid gid and create specified group.
1810 function add_group #<group_name>
1814 if ((${#group} == 0)); then
1815 log_fail "group name is necessary."
1818 # Assign 100 as the base gid
1821 $GROUPADD -g $gid $group > /dev/null 2>&1
1825 # The gid is not unique
1833 # Delete the specified group.
1837 function del_group #<group_name>
1840 if ((${#grp} == 0)); then
1841 log_fail "group name is necessary."
1844 $GROUPMOD -n $grp $grp > /dev/null 2>&1
1847 # Group does not exist.
1849 # Name already exists as a group name
1850 9) log_must $GROUPDEL $grp ;;
1858 # This function will return true if it's safe to destroy the pool passed
1859 # as argument 1. It checks for pools based on zvols and files, and also
1860 # files contained in a pool that may have a different mountpoint.
1862 function safe_to_destroy_pool { # $1 the pool name
1865 typeset DONT_DESTROY=""
1867 # We check that by deleting the $1 pool, we're not
1868 # going to pull the rug out from under other pools. Do this
1869 # by looking at all other pools, ensuring that they
1870 # aren't built from files or zvols contained in this pool.
1872 for pool in $($ZPOOL list -H -o name)
1876 # this is a list of the top-level directories in each of the
1877 # paths to the files the pool is based on
1878 FILEPOOL=$($ZPOOL status -v $pool | $GREP /$1/ | \
1881 # this is a list of the zvols that make up the pool
1882 ZVOLPOOL=$($ZPOOL status -v $pool | $GREP "/dev/zvol/dsk/$1$" \
1883 | $AWK '{print $1}')
1885 # also want to determine if it's a file-based pool using an
1886 # alternate mountpoint...
1887 POOL_FILE_DIRS=$($ZPOOL status -v $pool | \
1888 $GREP / | $AWK '{print $1}' | \
1889 $AWK -F/ '{print $2}' | $GREP -v "dev")
1891 for pooldir in $POOL_FILE_DIRS
1893 OUTPUT=$($ZFS list -H -r -o mountpoint $1 | \
1894 $GREP "${pooldir}$" | $AWK '{print $1}')
1896 ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
1900 if [ ! -z "$ZVOLPOOL" ]
1903 log_note "Pool $pool is built from $ZVOLPOOL on $1"
1906 if [ ! -z "$FILEPOOL" ]
1909 log_note "Pool $pool is built from $FILEPOOL on $1"
1912 if [ ! -z "$ALTMOUNTPOOL" ]
1915 log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
1919 if [ -z "${DONT_DESTROY}" ]
1923 log_note "Warning: it is not safe to destroy $1!"
1929 # Get IP address of hostname
1932 function getipbyhost
1935 ip=`$ARP $1 2>/dev/null | $AWK -F\) '{print $1}' \
1936 | $AWK -F\( '{print $2}'`
1941 # Setup iSCSI initiator to target
1942 # $1 target hostname
1944 function iscsi_isetup
1946 # check svc:/network/iscsi_initiator:default state, try to enable it
1947 # if the state is not ON
1948 typeset ISCSII_FMRI="svc:/network/iscsi_initiator:default"
1949 if [[ "ON" != $($SVCS -H -o sta $ISCSII_FMRI) ]]; then
1950 log_must $SVCADM enable $ISCSII_FMRI
1953 while [[ "ON" != $($SVCS -H -o sta $ISCSII_FMRI) && \
1956 ((retry = retry - 1))
1960 if [[ "ON" != $($SVCS -H -o sta $ISCSII_FMRI) ]]; then
1961 log_fail "$ISCSII_FMRI service can not be enabled!"
1965 log_must $ISCSIADM add discovery-address $(getipbyhost $1)
1966 log_must $ISCSIADM modify discovery --sendtargets enable
1967 log_must $DEVFSADM -i iscsi
1971 # Check whether iscsi parameter is set as remote
1973 # return 0 if iscsi is set as remote, otherwise 1
1975 function check_iscsi_remote
1977 if [[ $iscsi == "remote" ]] ; then
1985 # Check if a volume is a valid iscsi target
1987 # return 0 if it succeeds, otherwise return 1
1989 function is_iscsi_target
1992 typeset target targets
1994 [[ -z $dataset ]] && return 1
1996 targets=$($ISCSITADM list target | $GREP "Target:" | $AWK '{print $2}')
1997 [[ -z $targets ]] && return 1
1999 for target in $targets; do
2000 [[ $dataset == $target ]] && return 0
2007 # Get the iSCSI name of a target
2015 [[ -z $target ]] && log_fail "No parameter."
2017 if ! is_iscsi_target $target ; then
2018 log_fail "Not a target."
2021 name=$($ISCSITADM list target $target | $GREP "iSCSI Name:" \
2022 | $AWK '{print $2}')
2028 # check svc:/system/iscsitgt:default state, try to enable it if the state is not ON
2031 function iscsitgt_setup
2033 log_must $RM -f $ISCSITGTFILE
2034 if [[ "ON" == $($SVCS -H -o sta $ISCSITGT_FMRI) ]]; then
2035 log_note "iscsitgt is already enabled"
2039 log_must $SVCADM enable -t $ISCSITGT_FMRI
2042 while [[ "ON" != $($SVCS -H -o sta $ISCSITGT_FMRI) && \
2046 ((retry = retry - 1))
2049 if [[ "ON" != $($SVCS -H -o sta $ISCSITGT_FMRI) ]]; then
2050 log_fail "$ISCSITGT_FMRI service can not be enabled!"
2053 log_must $TOUCH $ISCSITGTFILE
2057 # set DISABLED state of svc:/system/iscsitgt:default
2058 # which is the most suitable state if $ISCSITGTFILE exists
2060 function iscsitgt_cleanup
2062 if [[ -e $ISCSITGTFILE ]]; then
2063 log_must $SVCADM disable $ISCSITGT_FMRI
2064 log_must $RM -f $ISCSITGTFILE
2069 # Close iSCSI initiator to target
2070 # $1 target hostname
2072 function iscsi_iclose
2074 log_must $ISCSIADM modify discovery --sendtargets disable
2075 log_must $ISCSIADM remove discovery-address $(getipbyhost $1)
2080 # Get the available ZFS compression options
2081 # $1 option type zfs_set|zfs_compress
2083 function get_compress_opts
2085 typeset COMPRESS_OPTS
2086 typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
2087 gzip-6 gzip-7 gzip-8 gzip-9"
2089 if [[ $1 == "zfs_compress" ]] ; then
2090 COMPRESS_OPTS="on lzjb"
2091 elif [[ $1 == "zfs_set" ]] ; then
2092 COMPRESS_OPTS="on off lzjb"
2094 typeset valid_opts="$COMPRESS_OPTS"
2095 $ZFS get 2>&1 | $GREP gzip >/dev/null 2>&1
2096 if [[ $? -eq 0 ]]; then
2097 valid_opts="$valid_opts $GZIP_OPTS"
2103 # Verify zfs operation with the -p option works as expected
2104 # $1 operation, value could be create, clone or rename
2105 # $2 dataset type, value could be fs or vol
2107 # $4 new dataset name
2109 function verify_opt_p_ops
2114 typeset newdataset=$4
2116 if [[ $datatype != "fs" && $datatype != "vol" ]]; then
2117 log_fail "$datatype is not supported."
2120 # check parameters accordingly
2125 if [[ $datatype == "vol" ]]; then
2126 ops="create -V $VOLSIZE"
2130 if [[ -z $newdataset ]]; then
2131 log_fail "newdataset should not be empty" \
2134 log_must datasetexists $dataset
2135 log_must snapexists $dataset
2138 if [[ -z $newdataset ]]; then
2139 log_fail "newdataset should not be empty" \
2142 log_must datasetexists $dataset
2143 log_mustnot snapexists $dataset
2146 log_fail "$ops is not supported."
2150 # make sure the upper level filesystem does not exist
2151 if datasetexists ${newdataset%/*} ; then
2152 log_must $ZFS destroy -rRf ${newdataset%/*}
2155 # without -p option, operation will fail
2156 log_mustnot $ZFS $ops $dataset $newdataset
2157 log_mustnot datasetexists $newdataset ${newdataset%/*}
2159 # with -p option, operation should succeed
2160 log_must $ZFS $ops -p $dataset $newdataset
2161 if ! datasetexists $newdataset ; then
2162 log_fail "-p option does not work for $ops"
2165 # when $ops is create or clone, redoing the operation still returns zero
2166 if [[ $ops != "rename" ]]; then
2167 log_must $ZFS $ops -p $dataset $newdataset
2174 # Get configuration of pool
2184 if ! poolexists "$pool" ; then
2187 alt_root=$($ZPOOL list -H $pool | $AWK '{print $NF}')
2188 if [[ $alt_root == "-" ]]; then
2189 value=$($ZDB -C $pool | $GREP "$config:" | $AWK -F: \
2192 value=$($ZDB -e $pool | $GREP "$config:" | $AWK -F: \
2195 if [[ -n $value ]] ; then
2205 # Private function. Randomly select one of the items from the arguments.
2210 function _random_get
2217 ((ind = RANDOM % cnt + 1))
2219 typeset ret=$($ECHO "$str" | $CUT -f $ind -d ' ')
2224 # Randomly select one item from the arguments, including the NONE string
2226 function random_get_with_non
2231 _random_get "$cnt" "$@"
2235 # Randomly select one item from the arguments, which do not include the NONE string
2239 _random_get "$#" "$@"
2243 # Detect if the current system supports slog
2245 function verify_slog_support
2247 typeset dir=/tmp/disk.$$
2253 $MKFILE 64M $vdev $sdev
2256 if ! $ZPOOL create -n $pool $vdev log $sdev > /dev/null 2>&1; then
2265 # The function will generate a dataset name with a specific length
2266 # $1, the length of the name
2267 # $2, the base string to construct the name
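# Usage sketch:
#	dsname=$(gen_dataset_name 200 "abcdefg")	# at least 200 characters long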
2269 function gen_dataset_name
2272 typeset basestr="$2"
2273 typeset -i baselen=${#basestr}
2277 if ((len % baselen == 0)); then
2278 ((iter = len / baselen))
2280 ((iter = len / baselen + 1))
2282 while ((iter > 0)); do
2283 l_name="${l_name}$basestr"
2292 # Get cksum tuple of dataset
2295 # sample zdb output:
2296 # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
2297 # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
2298 # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
2299 # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
2300 function datasetcksum
2304 cksum=$($ZDB -vvv $1 | $GREP "^Dataset $1 \[" | $GREP "cksum" \
2305 | $AWK -F= '{print $7}')
2316 cksum=$($CKSUM $1 | $AWK '{print $1}')
2321 # Get the given disk/slice state from the specific field of the pool
2323 function get_device_state #pool disk field("", "spares","logs")
2326 typeset disk=${2#/dev/dsk/}
2327 typeset field=${3:-$pool}
2329 state=$($ZPOOL status -v "$pool" 2>/dev/null | \
2330 $NAWK -v device=$disk -v pool=$pool -v field=$field \
2331 'BEGIN {startconfig=0; startfield=0; }
2332 /config:/ {startconfig=1}
2333 (startconfig==1) && ($1==field) {startfield=1; next;}
2334 (startfield==1) && ($1==device) {print $2; exit;}
2336 ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
2342 # Print the filesystem type of the given directory
2350 if [[ -z $dir ]]; then
2351 log_fail "Usage: get_fstype <directory>"
2358 $DF -n $dir | $AWK '{print $3}'
2362 # Given a disk, label it to VTOC regardless of what label was on the disk
2368 if [[ -z $disk ]]; then
2369 log_fail "The disk name is unspecified."
2371 typeset label_file=/var/tmp/labelvtoc.$$
2372 typeset arch=$($UNAME -p)
2374 if [[ $arch == "i386" ]]; then
2375 $ECHO "label" > $label_file
2376 $ECHO "0" >> $label_file
2377 $ECHO "" >> $label_file
2378 $ECHO "q" >> $label_file
2379 $ECHO "q" >> $label_file
2381 $FDISK -B $disk >/dev/null 2>&1
2382 # wait a while for fdisk to finish
2384 elif [[ $arch == "sparc" ]]; then
2385 $ECHO "label" > $label_file
2386 $ECHO "0" >> $label_file
2387 $ECHO "" >> $label_file
2388 $ECHO "" >> $label_file
2389 $ECHO "" >> $label_file
2390 $ECHO "q" >> $label_file
2392 log_fail "unknown arch type"
2395 $FORMAT -e -s -d $disk -f $label_file
2396 typeset -i ret_val=$?
2399 # wait for format to finish
2402 if ((ret_val != 0)); then
2403 log_fail "unable to label $disk as VTOC."
2410 # check if the system was installed as zfsroot or not
2411 # return: 0 if true, otherwise false
2415 $DF -n / | $GREP zfs > /dev/null 2>&1
2420 # get the root filesystem name if it's zfsroot system.
2422 # return: root filesystem name
2426 rootfs=$($AWK '{if ($2 == "/" && $3 == "zfs") print $1}' \
2428 if [[ -z "$rootfs" ]]; then
2429 log_fail "Cannot get rootfs"
2431 $ZFS list $rootfs > /dev/null 2>&1
2432 if (($? == 0)); then
2435 log_fail "This is not a zfsroot system."
2440 # get the rootfs's pool name
2444 function get_rootpool
2448 rootfs=$($AWK '{if ($2 == "/" && $3 =="zfs") print $1}' \
2450 if [[ -z "$rootfs" ]]; then
2451 log_fail "Cannot get rootpool"
2453 $ZFS list $rootfs > /dev/null 2>&1
2454 if (($? == 0)); then
2455 rootpool=`$ECHO $rootfs | awk -F\/ '{print $1}'`
2458 log_fail "This is not a zfsroot system."
2463 # Get a substring from the specified source string
2466 # $2 start position. Count from 1
2469 function get_substr #src_str pos offset
2474 $NAWK -v pos=$2 -v offset=$3 '{print substr($0, pos, offset)}'
2478 # Check if the given device is a physical device
2480 function is_physical_device #device
2482 typeset device=${1#/dev/dsk/}
2483 device=${device#/dev/rdsk/}
2485 $ECHO $device | $EGREP "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
2490 # Get the directory path of given device
2492 function get_device_dir #device
2496 if ! $(is_physical_device $device) ; then
2497 if [[ $device != "/" ]]; then
2507 # Get the package name
2509 function get_package_name
2511 typeset dirpath=${1:-$STC_NAME}
2513 echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
2517 # Get the number of words in a string separated by white space
2519 function get_word_count
2525 # Verify that the required number of disks is given
2527 function verify_disk_count
2529 typeset -i min=${2:-1}
2531 typeset -i count=$(get_word_count "$1")
2533 if ((count < min)); then
2534 log_untested "A minimum of $min disks is required to run." \
2535 " You specified $count disk(s)"
2539 function ds_is_volume
2541 typeset type=$(get_prop type $1)
2542 [[ $type = "volume" ]] && return 0
2546 function ds_is_filesystem
2548 typeset type=$(get_prop type $1)
2549 [[ $type = "filesystem" ]] && return 0
2553 function ds_is_snapshot
2555 typeset type=$(get_prop type $1)
2556 [[ $type = "snapshot" ]] && return 0
2561 # Check if Trusted Extensions are installed and enabled
2563 function is_te_enabled
2565 $SVCS -H -o state labeld 2>/dev/null | $GREP "enabled"
2566 if (($? != 0)); then
2573 # Utility function to determine if a system has multiple cpus.
2576 (($($PSRINFO | $WC -l) > 1))
2579 # Run the given command as the user provided.
2585 eval \$SU \$user -c \"$@\" > /tmp/out 2>/tmp/err
2590 # Check if the pool contains the specified vdevs
2595 # Return 0 if the vdevs are contained in the pool, 1 if any of the specified
2596 # vdevs is not in the pool, and 2 if pool name is missing.
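# Usage sketch (device name is hypothetical):
#	vdevs_in_pool $TESTPOOL c1t0d0 || log_fail "c1t0d0 is not in $TESTPOOL"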
2598 function vdevs_in_pool
2603 if [[ -z $pool ]]; then
2604 log_note "Missing pool name."
2610 typeset tmpfile=$($MKTEMP)
2611 $ZPOOL list -Hv "$pool" >$tmpfile
2613 $GREP -w ${vdev##*/} $tmpfile >/dev/null 2>&1
2614 [[ $? -ne 0 ]] && return 1