#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
# Use is subject to license terms.
# Copyright (c) 2012, 2017 by Delphix. All rights reserved.
# Copyright 2016 Nexenta Systems, Inc.
# Copyright (c) 2017 Datto Inc.
#
. ${STF_TOOLS}/contrib/include/logapi.shlib
#
# Determine whether a dataset is mounted
#
# $1 dataset name
# $2 filesystem type; optional - defaults to zfs
#
# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
#
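# Example (hypothetical dataset; a mountpoint path such as $TESTDIR may be
# passed instead of a dataset name):
#	ismounted $TESTPOOL/$TESTFS && log_must zfs unmount $TESTPOOL/$TESTFS
#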
function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name ret

	case $fstype in
	zfs)
		if [[ "$1" == "/"* ]] ; then
			# $1 is a mountpoint path
			for out in $(zfs mount | awk '{print $2}'); do
				[[ $1 == $out ]] && return 0
			done
		else
			# $1 is a dataset name
			for out in $(zfs mount | awk '{print $1}'); do
				[[ $1 == $out ]] && return 0
			done
		fi
		;;
	ufs|nfs)
		out=$(df -F $fstype $1 2>/dev/null)
		ret=$?
		(($ret != 0)) && return $ret

		# Parse the mount directory and device name out of the
		# df output, which has the form "<dir> (<name>): ..."
		dir=${out%%\(*}
		dir=${dir%% *}
		name=${out##*\(}
		name=${name%%\)*}
		name=${name%% *}

		[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		;;
	esac

	return 1
}
#
# Return 0 if a dataset is mounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaults to zfs
#
function mounted
{
	ismounted $1 $2
	(($? == 0)) && return 0
	return 1
}

#
# Return 0 if a dataset is unmounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaults to zfs
#
function unmounted
{
	ismounted $1 $2
	(($? == 1)) && return 0
	return 1
}
# Split a comma-delimited line into whitespace-separated words
function splitline
{
	echo $1 | sed "s/,/ /g"
}
function default_setup
{
	default_setup_noexit "$@"

	log_pass
}
#
# Given a list of disks, set up storage pools and datasets.
#
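# Example (a sketch; $DISKS comes from the test environment, and the
# optional second and third arguments request a container and a volume):
#	default_setup_noexit "$DISKS" "true" "true"
#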
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3

	if is_global_zone; then
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		reexport_pool
	fi

	rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	if [[ -n $container ]]; then
		rm -rf $TESTDIR1 || \
		    log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
		    log_unresolved Could not create $TESTDIR1

		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		log_must zfs set mountpoint=$TESTDIR1 \
		    $TESTPOOL/$TESTCTR/$TESTFS1
	fi

	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
		else
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}
#
# Given a list of disks, set up a storage pool, file system and
# a container.
#
function default_container_setup
{
	typeset disklist=$1

	default_setup "$disklist" "true"
}
#
# Given a list of disks, set up a storage pool, file system
# and a volume.
#
function default_volume_setup
{
	typeset disklist=$1

	default_setup "$disklist" "" "true"
}
#
# Given a list of disks, set up a storage pool, file system,
# a container and a volume.
#
function default_container_volume_setup
{
	typeset disklist=$1

	default_setup "$disklist" "true" "true"
}
#
# Create a snapshot on a filesystem or volume. By default, create a snapshot
# on the filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
# $2 snapshot name. Default, $TESTSNAP
#
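# Example (hypothetical snapshot name "presnap"):
#	create_snapshot $TESTPOOL/$TESTFS presnap
# creates $TESTPOOL/$TESTFS@presnap.
#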
function create_snapshot #fs_vol_name snap_name
{
	typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	if snapexists $fs_vol@$snap; then
		log_fail "$fs_vol@$snap already exists."
	fi
	datasetexists $fs_vol || \
	    log_fail "$fs_vol must exist."

	log_must zfs snapshot $fs_vol@$snap
}
#
# Create a clone from a snapshot, default clone name is $TESTCLONE.
#
# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
#
function create_clone # snapshot clone
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset clone=${2:-$TESTPOOL/$TESTCLONE}

	[[ -z $snap ]] && \
	    log_fail "Snapshot name is undefined."
	[[ -z $clone ]] && \
	    log_fail "Clone name is undefined."

	log_must zfs clone $snap $clone
}
#
# Create a bookmark of the given snapshot. By default, create a bookmark on
# the filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 Existing snapshot name. Default, $TESTSNAP
# $3 bookmark name. Default, $TESTBKMARK
#
function create_bookmark
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}
	typeset bkmark=${3:-$TESTBKMARK}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."

	if bkmarkexists $fs_vol#$bkmark; then
		log_fail "$fs_vol#$bkmark already exists."
	fi
	datasetexists $fs_vol || \
	    log_fail "$fs_vol must exist."
	snapexists $fs_vol@$snap || \
	    log_fail "$fs_vol@$snap must exist."

	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
#
# Create a temporary clone result of an interrupted resumable 'zfs receive'
# $1 Destination filesystem name. Must not exist, will be created as the result
#    of this function along with its %recv temporary clone
# $2 Source filesystem name. Must not exist, will be created and destroyed
#
function create_recv_clone
{
	typeset recvfs="$1"
	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
	typeset snap="$sendfs@snap1"
	typeset incr="$sendfs@snap2"
	typeset mountpoint="$TESTDIR/create_recv_clone"
	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"

	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."

	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
	datasetexists $sendfs && log_fail "Send filesystem must not exist."

	log_must zfs create -o mountpoint="$mountpoint" $sendfs
	log_must zfs snapshot $snap
	log_must eval "zfs send $snap | zfs recv -u $recvfs"
	log_must mkfile 1m "$mountpoint/data"
	log_must zfs snapshot $incr
	# Truncate the incremental stream to 10k so that the resumable
	# receive below is interrupted partway through.
	log_must eval "zfs send -i $snap $incr | dd bs=10k count=1 > $sendfile"
	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
	log_must zfs destroy -r $sendfs
	log_must rm -f "$sendfile"

	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
		log_fail "Error creating temporary $recvfs/%recv clone"
	fi
}
function default_mirror_setup
{
	default_mirror_setup_noexit $1 $2 $3

	log_pass
}
#
# Given a pair of disks, set up a storage pool and dataset for the mirror
# @parameters: $1 the primary side of the mirror
#	$2 the secondary side of the mirror
# @uses: ZPOOL ZFS TESTPOOL TESTFS
#
function default_mirror_setup_noexit
{
	readonly func="default_mirror_setup_noexit"
	typeset primary=$1
	typeset secondary=$2

	[[ -z $primary ]] && \
	    log_fail "$func: No parameters passed"
	[[ -z $secondary ]] && \
	    log_fail "$func: No secondary partition passed"
	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	log_must zpool create -f $TESTPOOL mirror $@
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}
#
# Create a number of mirrors.
# We create a number ($1) of two-way mirrors using the pairs of disks named
# on the command line. These mirrors are *not* mounted.
# @parameters: $1 the number of mirrors to create
#	$... the devices to use to create the mirrors on
# @uses: ZPOOL ZFS TESTPOOL
#
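# Example (hypothetical device names):
#	setup_mirrors 2 c1t0d0 c1t1d0 c2t0d0 c2t1d0
# creates ${TESTPOOL}2 from the first pair and ${TESTPOOL}1 from the second.
#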
function setup_mirrors
{
	typeset -i nmirrors=$1

	shift
	while ((nmirrors > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
		log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
		shift 2
		((nmirrors = nmirrors - 1))
	done
}
#
# Create a number of raidz pools.
# We create a number ($1) of raidz pools, each using a pair of disks named
# on the command line. These pools are *not* mounted.
# @parameters: $1 the number of pools to create
#	$... the devices to use to create the pools on
# @uses: ZPOOL ZFS TESTPOOL
#
function setup_raidzs
{
	typeset -i nraidzs=$1

	shift
	while ((nraidzs > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
		log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
		shift 2
		((nraidzs = nraidzs - 1))
	done
}
#
# Destroy the configured testpool mirrors.
# The mirrors are of the form ${TESTPOOL}{number}.
# @uses: ZPOOL ZFS TESTPOOL
#
function destroy_mirrors
{
	default_cleanup_noexit

	log_pass
}
#
# Given a minimum of two disks, set up a storage pool and dataset for the
# raid-z
# $1 the list of disks
#
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	log_must zpool create -f $TESTPOOL raidz $1 $2 $3
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}
#
# Common function used to cleanup storage pools and datasets.
#
# Invoked at the start of the test suite to ensure the system
# is in a known state, and also at the end of each set of
# sub-tests to ensure errors from one set of tests don't
# impact the execution of the next set.
#
function default_cleanup
{
	default_cleanup_noexit

	log_pass
}
function default_cleanup_noexit
{
	typeset exclude=""
	typeset pool=""
	#
	# Destroying the pool will also destroy any
	# filesystems it contains.
	#
	if is_global_zone; then
		zfs unmount -a > /dev/null 2>&1
		exclude=`eval echo \"'(${KEEP})'\"`
		ALL_POOLS=$(zpool list -H -o name \
		    | grep -v "$NO_POOLS" | egrep -v "$exclude")
		# Here, we loop through the pools we're allowed to
		# destroy, only destroying them if it's safe to do
		# so.
		while [ ! -z ${ALL_POOLS} ]
		do
			for pool in ${ALL_POOLS}
			do
				if safe_to_destroy_pool $pool ;
				then
					destroy_pool $pool
				fi
				ALL_POOLS=$(zpool list -H -o name \
				    | grep -v "$NO_POOLS" \
				    | egrep -v "$exclude")
			done
		done

		zfs mount -a
	else
		typeset fs=""
		for fs in $(zfs list -H -o name \
		    | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
			datasetexists $fs && \
			    log_must zfs destroy -Rf $fs
		done

		# Clean up here to avoid leaving garbage directories behind.
		for fs in $(zfs list -H -o name); do
			[[ $fs == /$ZONE_POOL ]] && continue
			[[ -d $fs ]] && log_must rm -rf $fs/*
		done

		#
		# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems
		# properties to their default values.
		#
		for fs in $(zfs list -H -o name); do
			if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
				log_must zfs set reservation=none $fs
				log_must zfs set recordsize=128K $fs
				log_must zfs set mountpoint=/$fs $fs

				typeset enc=""
				enc=$(get_prop encryption $fs)
				if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
				    [[ "$enc" == "off" ]]; then
					log_must zfs set checksum=on $fs
				fi
				log_must zfs set compression=off $fs
				log_must zfs set atime=on $fs
				log_must zfs set devices=off $fs
				log_must zfs set exec=on $fs
				log_must zfs set setuid=on $fs
				log_must zfs set readonly=off $fs
				log_must zfs set snapdir=hidden $fs
				log_must zfs set aclmode=groupmask $fs
				log_must zfs set aclinherit=secure $fs
			fi
		done
	fi

	[[ -d $TESTDIR ]] && \
	    log_must rm -rf $TESTDIR
}
#
# Common function used to cleanup storage pools, file systems
# and containers.
#
function default_container_cleanup
{
	if ! is_global_zone; then
		reexport_pool
	fi

	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
	    log_must zfs destroy -R $TESTPOOL/$TESTCTR/$TESTFS1

	datasetexists $TESTPOOL/$TESTCTR && \
	    log_must zfs destroy -Rf $TESTPOOL/$TESTCTR

	[[ -e $TESTDIR1 ]] && \
	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1

	default_cleanup
}
#
# Common function used to cleanup a snapshot of a file system or volume.
# Defaults to deleting the file system's snapshot.
#
# $1 snapshot name
#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		log_fail "'$snap' does not exist."
	fi

	#
	# The mountpoint value reported by 'get_prop' is not the real
	# mountpoint when the snapshot is unmounted, so first check
	# that the snapshot is actually mounted on the current system.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
		    log_fail "get_prop mountpoint $snap failed."
	fi

	log_must zfs destroy $snap
	[[ $mtpt != "" && -d $mtpt ]] && \
	    log_must rm -rf $mtpt
}
#
# Common function used to cleanup a clone.
#
# $1 clone name
#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		log_fail "'$clone' does not exist."
	fi

	# Same reasoning as in destroy_snapshot: only trust the
	# mountpoint property if the clone is actually mounted.
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
		    log_fail "get_prop mountpoint $clone failed."
	fi

	log_must zfs destroy $clone
	[[ $mtpt != "" && -d $mtpt ]] && \
	    log_must rm -rf $mtpt
}
#
# Common function used to cleanup a bookmark of a file system or volume.
# Defaults to deleting the file system's bookmark.
#
# $1 bookmark name
#
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		log_fail "'$bkmark' does not exist."
	fi

	log_must zfs destroy $bkmark
}
# Return 0 if a snapshot exists; $? otherwise
#
# $1 - snapshot name
#
function snapexists
{
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
	return $?
}
#
# Return 0 if a bookmark exists; $? otherwise
#
# $1 - bookmark name
#
function bkmarkexists
{
	zfs list -H -t bookmark "$1" > /dev/null 2>&1
	return $?
}
#
# Set a property of a dataset to the value as passed in.
# @parameters: $1 dataset whose property is being set
#	$2 property to set
#	$3 value to set property to
# @returns:
#	0 if the property could be set.
#	non-zero otherwise.
# @uses: ZFS
#
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi
	typeset output=
	output=$(zfs set $2=$3 $1 2>&1)
	typeset rv=$?
	if ((rv != 0)); then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $rv"
		log_note "Output: $output"
		return $rv
	fi
	return 0
}
#
# Assign suite-defined dataset properties.
# This function is used to apply the suite's defined default set of
# properties to a dataset.
# @parameters: $1 dataset to use
# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
# @returns:
#	0 if the dataset has been altered.
#	1 if no pool name was passed in.
#	2 if the dataset could not be found.
#	3 if the dataset could not have its properties set.
#
function dataset_set_defaultproperties
{
	typeset dataset="$1"

	[[ -z $dataset ]] && return 1

	typeset confset=
	typeset -i found=0
	for confset in $(zfs list); do
		if [[ $dataset = $confset ]]; then
			found=1
			break
		fi
	done
	[[ $found -eq 0 ]] && return 2
	if [[ -n $COMPRESSION_PROP ]]; then
		dataset_setprop $dataset compression $COMPRESSION_PROP || \
		    return 3
		log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
	fi
	if [[ -n $CHECKSUM_PROP ]]; then
		dataset_setprop $dataset checksum $CHECKSUM_PROP || \
		    return 3
		log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
	fi
	return 0
}
#
# Check a numeric assertion
# @parameter: $@ the assertion to check
# @output: big loud notice if assertion failed
# @use: log_fail
#
function assert
{
	(($@)) || log_fail "$@"
}
#
# Given a disk cxtxdx, reduce all of its partitions to size 0.
#
function zero_partitions #<whole_disk_name>
{
	typeset diskname=$1
	typeset i

	# Slice 2 is the backup slice covering the whole disk, so skip it.
	for i in 0 1 3 4 5 6 7
	do
		set_partition $i "" 0mb $diskname
	done
}
#
# Given a slice, size and disk, this function
# formats the slice to the specified size.
# Size should be specified with units as per
# the `format` command requirements, e.g. 100mb 3gb
#
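# Example (hypothetical disk name; creates a 100mb slice 0 starting at the
# default start cylinder):
#	set_partition 0 "" 100mb c1t0d0
#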
function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
{
	typeset -i slicenum=$1
	typeset start=$2
	typeset size=$3
	typeset disk=$4
	[[ -z $slicenum || -z $size || -z $disk ]] && \
	    log_fail "The slice, size or disk name is unspecified."
	typeset format_file=/var/tmp/format_in.$$

	# Build a command file for format(1M): enter the partition menu,
	# select the slice, accept the default tag and flag, set the start
	# and size, label the disk, and quit.
	echo "partition" >$format_file
	echo "$slicenum" >> $format_file
	echo "" >> $format_file
	echo "" >> $format_file
	echo "$start" >> $format_file
	echo "$size" >> $format_file
	echo "label" >> $format_file
	echo "" >> $format_file
	echo "q" >> $format_file
	echo "q" >> $format_file

	format -e -s -d $disk -f $format_file
	typeset ret_val=$?
	rm -f $format_file
	[[ $ret_val -ne 0 ]] && \
	    log_fail "Unable to format $disk slice $slicenum to $size"
	return 0
}
#
# Get the end cylinder of the given slice
#
function get_endslice #<disk> <slice>
{
	typeset disk=$1
	typeset slice=$2
	if [[ -z $disk || -z $slice ]] ; then
		log_fail "The disk name or slice number is unspecified."
	fi

	disk=${disk#/dev/dsk/}
	disk=${disk#/dev/rdsk/}
	disk=${disk%s*}

	typeset -i ratio=0
	ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
	    grep "sectors\/cylinder" | \
	    awk '{print $2}')

	if ((ratio == 0)); then
		return
	fi

	typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
	    nawk -v token="$slice" '{if ($1==token) print $6}')

	# Convert the slice's last-sector value into a cylinder number.
	((endcyl = (endcyl + 1) / ratio))
	echo $endcyl
}
#
# Given a size, disk and total slice number, this function formats the
# disk slices from 0 to the total slice number with the same specified
# size.
#
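# Example (hypothetical disk name; formats slices 0 through 6, skipping
# the backup slice 2, each 100mb in size):
#	partition_disk 100mb c1t0d0 7
#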
function partition_disk #<slice_size> <whole_disk_name> <total_slices>
{
	typeset -i i=0
	typeset slice_size=$1
	typeset disk_name=$2
	typeset total_slices=$3
	typeset cyl

	zero_partitions $disk_name
	while ((i < $total_slices)); do
		if ((i == 2)); then
			((i = i + 1))
			continue
		fi
		set_partition $i "$cyl" $slice_size $disk_name
		cyl=$(get_endslice $disk_name $i)
		((i = i + 1))
	done
}
#
# This function writes up to filenum files into each of up to dirnum
# directories, until either file_write returns an error or the
# maximum number of files per directory has been written.
#
# Usage:
# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
#
# Return value: 0 on success
#		non 0 on error
#
# Where :
#	destdir:    is the directory where everything is to be created under
#	dirnum:	    the maximum number of subdirectories to use, -1 no limit
#	filenum:    the maximum number of files per subdirectory
#	bytes:	    number of bytes to write
#	num_writes: number of times to write out bytes
#	data:	    the data that will be written
#
#	E.g.
#	fill_fs /testdir 20 25 1024 256 0
#
#	Note: bytes * num_writes equals the size of the testfile
#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
	typeset destdir=${1:-$TESTDIR}
	typeset -i dirnum=${2:-50}
	typeset -i filenum=${3:-50}
	typeset -i bytes=${4:-8192}
	typeset -i num_writes=${5:-10240}
	typeset -i data=${6:-0}

	typeset -i odirnum=1
	typeset -i idirnum=0
	typeset -i fn=0
	typeset -i retval=0

	log_must mkdir -p $destdir/$idirnum
	while (($odirnum > 0)); do
		if ((dirnum >= 0 && idirnum >= dirnum)); then
			odirnum=0
			break
		fi
		file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
		    -b $bytes -c $num_writes -d $data
		retval=$?
		if (($retval != 0)); then
			odirnum=0
			break
		fi
		if (($fn >= $filenum)); then
			fn=0
			((idirnum = idirnum + 1))
			log_must mkdir -p $destdir/$idirnum
		else
			((fn = fn + 1))
		fi
	done
	return $retval
}
#
# Simple function to get the specified property. If unable to
# get the property, log a note and return 1.
#
# Note property is in 'parsable' format (-p)
#
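# Example (hypothetical property check):
#	used=$(get_prop used $TESTPOOL/$TESTFS) || log_fail "get_prop failed"
#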
function get_prop # property dataset
{
	typeset prop_val
	typeset prop=$1
	typeset dataset=$2

	prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
	if [[ $? -ne 0 ]]; then
		log_note "Unable to get $prop property for dataset " \
		    "$dataset"
		return 1
	fi

	echo "$prop_val"
	return 0
}
#
# Simple function to get the specified property of a pool. If unable to
# get the property, log a note and return 1.
#
function get_pool_prop # property pool
{
	typeset prop_val
	typeset prop=$1
	typeset pool=$2

	if poolexists $pool ; then
		prop_val=$(zpool get $prop $pool 2>/dev/null | tail -1 | \
		    awk '{print $3}')
		if [[ $? -ne 0 ]]; then
			log_note "Unable to get $prop property for pool " \
			    "$pool"
			return 1
		fi
	else
		log_note "Pool $pool does not exist."
		return 1
	fi

	echo "$prop_val"
	return 0
}
# Return 0 if a pool exists; $? otherwise
#
# $1 - pool name
#
function poolexists
{
	typeset pool=$1

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	zpool get name "$pool" > /dev/null 2>&1
	return $?
}
# Return 0 if all the specified datasets exist; $? otherwise
#
# $1-n  dataset name
#
function datasetexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	while (($# > 0)); do
		zfs get name $1 > /dev/null 2>&1 || \
		    return $?
		shift
	done

	return 0
}
# Return 0 if none of the specified datasets exists, otherwise return 1.
#
# $1-n  dataset name
#
function datasetnonexists
{
	if (($# == 0)); then
		log_note "No dataset name given."
		return 1
	fi

	while (($# > 0)); do
		zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
		    && return 1
		shift
	done

	return 0
}
#
# Given a mountpoint, or a dataset name, determine if it is shared.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared
{
	typeset fs=$1
	typeset mtpt

	if [[ $fs != "/"* ]] ; then
		if datasetnonexists "$fs" ; then
			return 1
		else
			mtpt=$(get_prop mountpoint "$fs")
			case $mtpt in
				none|legacy|-) return 1
					;;
				*)	fs=$mtpt
					;;
			esac
		fi
	fi

	for mtpt in `share | awk '{print $2}'` ; do
		if [[ $mtpt == $fs ]] ; then
			return 0
		fi
	done

	typeset stat=$(svcs -H -o STA nfs/server:default)
	if [[ $stat != "ON" ]]; then
		log_note "Current nfs/server status: $stat"
	fi

	return 1
}
#
# Given a mountpoint, determine if it is not shared.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared
{
	typeset fs=$1

	is_shared $fs
	if (($? == 0)); then
		return 1
	fi

	return 0
}
#
# Helper function to unshare a mountpoint.
#
function unshare_fs #fs
{
	typeset fs=$1

	is_shared $fs
	if (($? == 0)); then
		log_must zfs unshare $fs
	fi

	return 0
}
#
# Check NFS server status and trigger it online.
#
function setup_nfs_server
{
	# Cannot share directory in non-global zone.
	if ! is_global_zone; then
		log_note "Cannot trigger NFS server by sharing in LZ."
		return
	fi

	typeset nfs_fmri="svc:/network/nfs/server:default"
	if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
		#
		# Only an actual share operation can bring the NFS server
		# online permanently.
		#
		typeset dummy=/tmp/dummy

		if [[ -d $dummy ]]; then
			log_must rm -rf $dummy
		fi

		log_must mkdir $dummy
		log_must share $dummy

		#
		# Wait for the FMRI to reach its final status. While in
		# transition an asterisk (*) is appended to the instance
		# status, and unsharing at that point would revert the
		# status to 'DIS' again.
		#
		# Wait at least 1 second.
		#
		log_must sleep 1
		timeout=10
		while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
		do
			log_must sleep 1

			((timeout -= 1))
		done

		log_must unshare $dummy
		log_must rm -rf $dummy
	fi

	log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
#
# Verify whether the calling process is in the global zone
#
# Return 0 if in global zone, 1 in non-global zone
#
function is_global_zone
{
	typeset cur_zone=$(zonename 2>/dev/null)
	if [[ $cur_zone != "global" ]]; then
		return 1
	fi
	return 0
}
#
# Verify whether test is permitted to run from
# global zone, local zone, or both
#
# $1 zone limit, could be "global", "local", or "both" (no limit)
#
# Return 0 if permitted, otherwise exit with log_unsupported
#
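# Example (typically invoked near the top of a test script):
#	verify_runnable "global"
#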
function verify_runnable # zone limit
{
	typeset limit=$1

	[[ -z $limit ]] && return 0

	if is_global_zone ; then
		case $limit in
			global|both)
				;;
			local)	log_unsupported "Test is unable to run from " \
				    "global zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
				    "use both."
				;;
		esac
	else
		case $limit in
			local|both)
				;;
			global)	log_unsupported "Test is unable to run from " \
				    "local zone."
				;;
			*)	log_note "Warning: unknown limit $limit - " \
				    "use both."
				;;
		esac

		reexport_pool
	fi

	return 0
}
# Return 0 if the pool is created successfully or already exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# $2-n - [keyword] devs_list
#
function create_pool #pool devs_list
{
	typeset pool=${1%%/*}

	shift

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 1
	fi

	if poolexists $pool ; then
		destroy_pool $pool
	fi

	if is_global_zone ; then
		[[ -d /$pool ]] && rm -rf /$pool
		log_must zpool create -f $pool $@
	fi

	return 0
}
# Return 0 if the pool is destroyed successfully; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# Destroy pool with the given parameters.
#
function destroy_pool #pool
{
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd activity can cause attempts to
			# destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			typeset -i wait_time=10 ret=1 count=0
			must=""
			while [[ $ret -ne 0 ]]; do
				$must zpool destroy -f $pool
				ret=$?
				[[ $ret -eq 0 ]] && break
				log_note "zpool destroy failed with $ret"
				[[ count++ -ge 7 ]] && must=log_must
				sleep $wait_time
			done

			[[ -d $mtpt ]] && \
			    log_must rm -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}
#
# First, create a pool with 5 datasets. Then, create a single zone and
# export the 5 datasets to it. In addition, we also add a ZFS filesystem
# and a zvol device to the zone.
#
# $1 zone name
# $2 zone root directory prefix
# $3 zone ip
#
function zfs_zones_setup #zone_name zone_root zone_ip
{
	typeset zone_name=${1:-$(hostname)-z}
	typeset zone_root=${2:-"/zone_root"}
	typeset zone_ip=${3:-"10.1.1.10"}
	typeset prefix_ctr=$ZONE_CTR
	typeset pool_name=$ZONE_POOL
	typeset -i cntctr=5
	typeset -i i=0

	# Create pool and 5 containers within it
	#
	[[ -d /$pool_name ]] && rm -rf /$pool_name
	log_must zpool create -f $pool_name $DISKS
	while ((i < cntctr)); do
		log_must zfs create $pool_name/$prefix_ctr$i
		((i += 1))
	done

	# create a zvol
	log_must zfs create -V 1g $pool_name/zone_zvol

	#
	# If the current system supports slog, add slog devices for the pool
	#
	if verify_slog_support ; then
		typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
		log_must mkfile $MINVDEVSIZE $sdevs
		log_must zpool add $pool_name log mirror $sdevs
	fi

	# this isn't supported just yet.
	# Create a filesystem. In order to add this to
	# the zone, it must have its mountpoint set to 'legacy'
	# log_must zfs create $pool_name/zfs_filesystem
	# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem

	[[ -d $zone_root ]] && \
	    log_must rm -rf $zone_root/$zone_name
	[[ ! -d $zone_root ]] && \
	    log_must mkdir -p -m 0700 $zone_root/$zone_name

	# Create zone configuration file and configure the zone
	#
	typeset zone_conf=/tmp/zone_conf.$$
	echo "create" > $zone_conf
	echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
	echo "set autoboot=true" >> $zone_conf
	i=0
	while ((i < cntctr)); do
		echo "add dataset" >> $zone_conf
		echo "set name=$pool_name/$prefix_ctr$i" >> \
		    $zone_conf
		echo "end" >> $zone_conf
		((i += 1))
	done

	# add our zvol to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# add a corresponding zvol rdsk to the zone
	echo "add device" >> $zone_conf
	echo "set match=/dev/zvol/rdsk/$pool_name/zone_zvol" >> $zone_conf
	echo "end" >> $zone_conf

	# once it's supported, we'll add our filesystem to the zone
	# echo "add fs" >> $zone_conf
	# echo "set type=zfs" >> $zone_conf
	# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
	# echo "set dir=/export/zfs_filesystem" >> $zone_conf
	# echo "end" >> $zone_conf

	echo "verify" >> $zone_conf
	echo "commit" >> $zone_conf
	log_must zonecfg -z $zone_name -f $zone_conf
	log_must rm -f $zone_conf

	# Install the zone
	zoneadm -z $zone_name install
	if (($? == 0)); then
		log_note "SUCCESS: zoneadm -z $zone_name install"
	else
		log_fail "FAIL: zoneadm -z $zone_name install"
	fi

	# Install sysidcfg file
	#
	typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
	echo "system_locale=C" > $sysidcfg
	echo "terminal=dtterm" >> $sysidcfg
	echo "network_interface=primary {" >> $sysidcfg
	echo "hostname=$zone_name" >> $sysidcfg
	echo "}" >> $sysidcfg
	echo "name_service=NONE" >> $sysidcfg
	echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
	echo "security_policy=NONE" >> $sysidcfg
	echo "timezone=US/Eastern" >> $sysidcfg

	# Boot this zone
	log_must zoneadm -z $zone_name boot
}
#
# Reexport TESTPOOL & TESTPOOL(1-4)
#
function reexport_pool
{
	typeset -i cntctr=5
	typeset -i i=0

	while ((i < cntctr)); do
		if ((i == 0)); then
			TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
			if ! ismounted $TESTPOOL; then
				log_must zfs mount $TESTPOOL
			fi
		else
			eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
			if eval ! ismounted \$TESTPOOL$i; then
				log_must eval zfs mount \$TESTPOOL$i
			fi
		fi
		((i += 1))
	done
}
#
# Verify a given disk is online or offline
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_state # pool disk state{online,offline}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	zpool status -v $pool | grep "$disk" \
	    | grep -i "$state" > /dev/null 2>&1

	return $?
}
#
# Get the mountpoint of a snapshot
# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
# as its mountpoint
#
function snapshot_mountpoint
{
	typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if [[ $dataset != *@* ]]; then
		log_fail "Invalid snapshot name '$dataset'."
	fi

	typeset fs=${dataset%@*}
	typeset snap=${dataset#*@}

	if [[ -z $fs || -z $snap ]]; then
		log_fail "Invalid snapshot name '$dataset'."
	fi

	echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
#
# Given a pool and file system, this function will verify the file system
# using the zdb internal tool. Note that the pool is exported and imported
# to ensure it has consistent state.
#
function verify_filesys # pool filesystem dir
{
	typeset pool="$1"
	typeset filesys="$2"
	typeset zdbout="/tmp/zdbout.$$"

	shift
	shift
	typeset dirs=$@
	typeset search_path=""

	log_note "Calling zdb to verify filesystem '$filesys'"
	zfs unmount -a > /dev/null 2>&1
	log_must zpool export $pool

	if [[ -n $dirs ]] ; then
		for dir in $dirs ; do
			search_path="$search_path -d $dir"
		done
	fi

	log_must zpool import $search_path $pool

	zdb -cudi $filesys > $zdbout 2>&1
	if [[ $? != 0 ]]; then
		log_note "Output: zdb -cudi $filesys"
		cat $zdbout
		log_fail "zdb detected errors with: '$filesys'"
	fi

	log_must zfs mount -a
	log_must rm -rf $zdbout
}
#
# Given a pool, list all disks in the pool
#
function get_disklist # pool
{
	typeset disklist=""

	disklist=$(zpool iostat -v $1 | nawk '(NR > 4) {print $1}' | \
	    grep -v "\-\-\-\-\-" | \
	    egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")

	echo $disklist
}
#
# This function kills a given list of processes after a time period. We use
# this in the stress tests instead of STF_TIMEOUT so that we can have processes
# run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
# would be listed as FAIL, which we don't want: we're happy with stress tests
# running for a certain amount of time, then finishing.
#
# @param $1 the time in seconds after which we should terminate these processes
# @param $2..$n the processes we wish to terminate.
#
function stress_timeout
{
	typeset -i TIMEOUT=$1
	shift
	typeset cpids="$@"

	log_note "Waiting for child processes ($cpids). " \
	    "This may take several minutes, please be patient ..."
	log_must sleep $TIMEOUT

	log_note "Killing child processes after ${TIMEOUT} stress timeout."
	typeset pid
	for pid in $cpids; do
		ps -p $pid > /dev/null 2>&1
		if (($? == 0)); then
			log_must kill -USR1 $pid
		fi
	done
}
#
# Verify a given hotspare disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_hotspare_state # pool disk state{inuse,avail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk "spares")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
#
# Wait until a hotspare transitions to a given state or times out.
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_hotspare_state # pool disk state timeout
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	while [[ $i -lt $timeout ]]; do
		if check_hotspare_state $pool $disk $state; then
			return 0
		fi

		i=$((i + 1))
		sleep 1
	done

	return 1
}
#
# Verify a given slog disk is online, offline or unavail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_slog_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk "logs")

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
#
# Verify a given vdev disk is online, offline or unavail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_vdev_state # pool disk state{online,offline,unavail}
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3

	cur_state=$(get_device_state $pool $disk)

	if [[ $state != ${cur_state} ]]; then
		return 1
	fi
	return 0
}
#
# Wait until a vdev transitions to a given state or times out.
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_vdev_state # pool disk state timeout
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset state=$3
	typeset timeout=${4:-60}
	typeset -i i=0

	while [[ $i -lt $timeout ]]; do
		if check_vdev_state $pool $disk $state; then
			return 0
		fi

		i=$((i + 1))
		sleep 1
	done

	return 1
}
#
# Check the output of 'zpool status -v <pool>' to see if the content of
# <token> contains the <keyword> specified.
#
# Return 0 if it does, 1 otherwise
#
function check_pool_status # pool token keyword <verbose>
{
	typeset pool=$1
	typeset token=$2
	typeset keyword=$3
	typeset verbose=${4:-false}

	scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
		($1==token) {print $0}')
	if [[ $verbose == true ]]; then
		log_note $scan
	fi
	echo $scan | grep -i "$keyword" > /dev/null 2>&1

	return $?
}
#
# The following functions are instances of check_pool_status():
#	is_pool_resilvering - to check if the pool resilver is in progress
#	is_pool_resilvered - to check if the pool resilver is completed
#	is_pool_scrubbing - to check if the pool scrub is in progress
#	is_pool_scrubbed - to check if the pool scrub is completed
#	is_pool_scrub_stopped - to check if the pool scrub is stopped
#	is_pool_scrub_paused - to check if the pool has scrub paused
#	is_pool_removing - to check if the pool is removing a vdev
#	is_pool_removed - to check if the pool remove is completed
#
function is_pool_resilvering #pool <verbose>
{
	check_pool_status "$1" "scan" "resilver in progress since " $2
	return $?
}

function is_pool_resilvered #pool <verbose>
{
	check_pool_status "$1" "scan" "resilvered " $2
	return $?
}

function is_pool_scrubbing #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub in progress since " $2
	return $?
}

function is_pool_scrubbed #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub repaired" $2
	return $?
}

function is_pool_scrub_stopped #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub canceled" $2
	return $?
}

function is_pool_scrub_paused #pool <verbose>
{
	check_pool_status "$1" "scan" "scrub paused since " $2
	return $?
}

function is_pool_removing #pool
{
	check_pool_status "$1" "remove" "in progress since "
	return $?
}

function is_pool_removed #pool
{
	check_pool_status "$1" "remove" "completed on"
	return $?
}
function wait_for_degraded
{
	typeset pool=$1
	typeset timeout=${2:-30}
	typeset t0=$SECONDS

	while true; do
		[[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
		log_note "$pool is not yet degraded."
		sleep 1
		if ((SECONDS - t0 > $timeout)); then
			log_note "$pool not degraded after $timeout seconds."
			return 1
		fi
	done

	return 0
}
#
# Use create_pool()/destroy_pool() to clean up the information on
# the given disks to avoid slice overlapping.
#
function cleanup_devices #vdevs
{
	typeset pool="foopool$$"

	if poolexists $pool ; then
		destroy_pool $pool
	fi

	create_pool $pool $@
	destroy_pool $pool

	return 0
}
#
# A function to find and locate free disks on a system or from given
# disks as the parameter. It works by locating disks that are in use
# as swap devices and dump devices, and also disks listed in /etc/vfstab
#
# $@ given disks to find which are free, default is all disks in
# the test system
#
# @return a string containing the list of available disks
#
function find_disks
{
	sfi=/tmp/swaplist.$$
	dmpi=/tmp/dumpdev.$$
	max_finddisksnum=${MAX_FINDDISKSNUM:-6}

	swap -l > $sfi
	dumpadm > $dmpi 2>/dev/null

	# write an awk script that can process the output of format
	# to produce a list of disks we know about. Note that we have
	# to escape "$2" so that the shell doesn't interpret it while
	# we're creating the awk script.
	# -------------------
	cat > /tmp/find_disks.awk <<EOF
#!/bin/nawk -f
	BEGIN { FS="."; }

	/^Specify disk/{
		searchdisks=0;
	}

	{
		if (searchdisks && \$2 !~ "^$"){
			split(\$2,arr," ");
			print arr[1];
		}
	}

	/^AVAILABLE DISK SELECTIONS:/{
		searchdisks=1;
	}
EOF
	#---------------------

	chmod 755 /tmp/find_disks.awk
	disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
	rm /tmp/find_disks.awk

	unused=""
	for disk in $disks; do
		# check for mounted
		grep "${disk}[sp]" /etc/mnttab >/dev/null
		(($? == 0)) && continue
		# check for swap
		grep "${disk}[sp]" $sfi >/dev/null
		(($? == 0)) && continue
		# check for dump device
		grep "${disk}[sp]" $dmpi >/dev/null
		(($? == 0)) && continue
		# check to see if this disk hasn't been explicitly excluded
		# by a user-set environment variable
		echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
		(($? == 0)) && continue
		unused_candidates="$unused_candidates $disk"
	done
	rm $sfi
	rm $dmpi

	# now just check to see if those disks do actually exist
	# by looking for a device pointing to the first slice in
	# each case. limit the number to max_finddisksnum
	count=0
	for disk in $unused_candidates; do
		if [ -b /dev/dsk/${disk}s0 ]; then
			if [ $count -lt $max_finddisksnum ]; then
				unused="$unused $disk"
				# do not impose limit if $@ is provided
				[[ -z $@ ]] && ((count = count + 1))
			fi
		fi
	done

	# finally, return our disk list
	echo $unused
}
#
# Add specified user to specified group
#
# $1 group name
# $2 user name
# $3 base of the homedir (optional)
#
function add_user #<group_name> <user_name> <basedir>
{
	typeset gname=$1
	typeset uname=$2
	typeset basedir=${3:-"/var/tmp"}

	if ((${#gname} == 0 || ${#uname} == 0)); then
		log_fail "group name or user name is not defined."
	fi

	log_must useradd -g $gname -d $basedir/$uname -m $uname

	return 0
}
#
# Delete the specified user.
#
# $1 login name
# $2 base of the homedir (optional)
#
function del_user #<logname> <basedir>
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	if ((${#user} == 0)); then
		log_fail "login name is necessary."
	fi

	if id $user > /dev/null 2>&1; then
		log_must userdel $user
	fi

	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}
#
# Select valid gid and create specified group.
#
# $1 group name
#
function add_group #<group_name>
{
	typeset group=$1

	if ((${#group} == 0)); then
		log_fail "group name is necessary."
	fi

	# Assign 100 as the base gid
	typeset -i gid=100
	while true; do
		groupadd -g $gid $group > /dev/null 2>&1
		typeset -i ret=$?
		case $ret in
			0) return 0 ;;
			# The gid is not unique
			4) ((gid += 1)) ;;
			*) return 1 ;;
		esac
	done
}
#
# Delete the specified group.
#
# $1 group name
#
function del_group #<group_name>
{
	typeset grp=$1

	if ((${#grp} == 0)); then
		log_fail "group name is necessary."
	fi

	groupmod -n $grp $grp > /dev/null 2>&1
	typeset -i ret=$?
	case $ret in
		# Group does not exist.
		6) return 0 ;;
		# Name already exists as a group name
		9) log_must groupdel $grp ;;
		*) return 1 ;;
	esac

	return 0
}
#
# This function will return true if it's safe to destroy the pool passed
# as argument 1. It checks for pools based on zvols and files, and also
# files contained in a pool that may have a different mountpoint.
#
function safe_to_destroy_pool { # $1 the pool name

	typeset pool=""
	typeset DONT_DESTROY=""

	# We check that by deleting the $1 pool, we're not
	# going to pull the rug out from other pools. Do this
	# by looking at all other pools, ensuring that they
	# aren't built from files or zvols contained in this pool.

	for pool in $(zpool list -H -o name)
	do
		ALTMOUNTPOOL=""

		# this is a list of the top-level directories in each of the
		# files that make up the path to the files the pool is based on
		FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
		    awk '{print $1}')

		# this is a list of the zvols that make up the pool
		ZVOLPOOL=$(zpool status -v $pool | grep "/dev/zvol/dsk/$1$" \
		    | awk '{print $1}')

		# also want to determine if it's a file-based pool using an
		# alternate mountpoint...
		POOL_FILE_DIRS=$(zpool status -v $pool | \
		    grep / | awk '{print $1}' | \
		    awk -F/ '{print $2}' | grep -v "dev")

		for pooldir in $POOL_FILE_DIRS
		do
			OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
			    grep "${pooldir}$" | awk '{print $1}')

			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
		done

		if [ ! -z "$ZVOLPOOL" ]
		then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $ZVOLPOOL on $1"
		fi

		if [ ! -z "$FILEPOOL" ]
		then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $FILEPOOL on $1"
		fi

		if [ ! -z "$ALTMOUNTPOOL" ]
		then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
		fi
	done

	if [ -z "${DONT_DESTROY}" ]
	then
		return 0
	else
		log_note "Warning: it is not safe to destroy $1!"
		return 1
	fi
}
#
# Get the available ZFS compression options
# $1 option type zfs_set|zfs_compress
#
function get_compress_opts
{
	typeset COMPRESS_OPTS
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
	    gzip-6 gzip-7 gzip-8 gzip-9"

	if [[ $1 == "zfs_compress" ]] ; then
		COMPRESS_OPTS="on lzjb"
	elif [[ $1 == "zfs_set" ]] ; then
		COMPRESS_OPTS="on off lzjb"
	fi
	typeset valid_opts="$COMPRESS_OPTS"
	zfs get 2>&1 | grep gzip >/dev/null 2>&1
	if [[ $? -eq 0 ]]; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi
	echo "$valid_opts"
}
#
# Verify that the zfs operation with -p option works as expected
# $1 operation, value could be create, clone or rename
# $2 dataset type, value could be fs or vol
# $3 dataset name
# $4 new dataset name
#
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
				    "when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
				    "when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	if datasetexists ${newdataset%/*} ; then
		log_must zfs destroy -rRf ${newdataset%/*}
	fi

	# without -p option, operation will fail
	log_mustnot zfs $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must zfs $ops -p $dataset $newdataset
	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redoing the operation should
	# still return zero
	if [[ $ops != "rename" ]]; then
		log_must zfs $ops -p $dataset $newdataset
	fi

	return 0
}
#
# Get configuration of pool
# $1 pool name
# $2 config name
#
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root
	typeset value

	if ! poolexists "$pool" ; then
		return 1
	fi
	alt_root=$(zpool list -H $pool | awk '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$(zdb -C $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	else
		value=$(zdb -e $pool | grep "$config:" | awk -F: \
		    '{print $2}')
	fi
	if [[ -n $value ]] ; then
		# strip any surrounding single quotes from the value
		value=${value#\'}
		value=${value%\'}
	fi
	echo $value

	return 0
}
#
# Private function. Randomly select one of the items from the arguments.
#
# $1 count
# $2-n string
#
function _random_get
{
	typeset cnt=$1
	shift

	typeset str="$@"
	typeset -i ind
	((ind = RANDOM % cnt + 1))

	typeset ret=$(echo "$str" | cut -f $ind -d ' ')
	echo $ret
}
#
# Randomly select one of the items from the arguments; the extra slot in
# the count makes an empty (NONE) selection possible as well
#
function random_get_with_non
{
	typeset -i cnt=$#
	((cnt += 1))

	_random_get "$cnt" "$@"
}
#
# Randomly select one of the items from the arguments; an empty (NONE)
# selection is not possible
#
function random_get
{
	_random_get "$#" "$@"
}
#
# Detect whether the current system supports slog devices
#
function verify_slog_support
{
	typeset dir=/tmp/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b

	mkdir -p $dir
	mkfile $MINVDEVSIZE $vdev $sdev

	typeset -i ret=0
	# A dry-run (-n) pool creation with a log device succeeds only
	# when slog is supported.
	if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then
		ret=1
	fi
	rm -rf $dir

	return $ret
}
#
# This function generates a dataset name of the specified length by
# repeating a base string.
# $1, the length of the name
# $2, the base string to construct the name
#
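# Example:
#	gen_dataset_name 16 abcd
# prints "abcdabcdabcdabcd"; if the length is not a multiple of the base
# string's length, the result is rounded up to whole copies of the base.
#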
function gen_dataset_name
{
	typeset -i len=$1
	typeset basestr="$2"
	typeset -i baselen=${#basestr}
	typeset -i iter=0
	typeset l_name=""

	if ((len % baselen == 0)); then
		((iter = len / baselen))
	else
		((iter = len / baselen + 1))
	fi
	while ((iter > 0)); do
		l_name="${l_name}$basestr"

		((iter -= 1))
	done

	echo $l_name
}
#
# Get cksum tuple of dataset
# $1 dataset name
#
# sample zdb output:
# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
#
function datasetcksum
{
	typeset cksum
	sync
	cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
	    | awk -F= '{print $7}')
	echo $cksum
}
#
# Get the checksum of a file
# $1 file path
#
function checksum
{
	typeset cksum
	cksum=$(cksum $1 | awk '{print $1}')
	echo $cksum
}
#
# Get the given disk/slice state from the specific field of the pool
#
function get_device_state #pool disk field("", "spares","logs")
{
	typeset pool=$1
	typeset disk=${2#/dev/dsk/}
	typeset field=${3:-$pool}

	state=$(zpool status -v "$pool" 2>/dev/null | \
	    nawk -v device=$disk -v pool=$pool -v field=$field \
	    'BEGIN {startconfig=0; startfield=0; }
	    /config:/ {startconfig=1}
	    (startconfig==1) && ($1==field) {startfield=1; next;}
	    (startfield==1) && ($1==device) {print $2; exit;}
	    (startfield==1) &&
	    ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
	echo $state
}
#
# Print the given directory's filesystem type
#
# $1 directory name
#
function get_fstype
{
	typeset dir=$1

	if [[ -z $dir ]]; then
		log_fail "Usage: get_fstype <directory>"
	fi

	#
	#  $ df -n /
	#  /		  : ufs
	#
	df -n $dir | awk '{print $3}'
}
#
# Given a disk, label it to VTOC regardless of what label was on the disk
# $1 disk
#
function labelvtoc
{
	typeset disk=$1

	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$(uname -p)

	if [[ $arch == "i386" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
		echo "q" >> $label_file

		fdisk -B $disk >/dev/null 2>&1
		# wait a while for fdisk to finish
		sleep 60
	elif [[ $arch == "sparc" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	format -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	rm -f $label_file
	#
	# wait for format to finish
	#
	sleep 60
	if ((ret_val != 0)); then
		log_fail "Unable to label $disk as VTOC."
	fi

	return 0
}
#
# Check if the system was installed as zfsroot or not
# return: 0 if zfsroot, non-zero otherwise
#
function is_zfsroot
{
	df -n / | grep zfs > /dev/null 2>&1
	return $?
}
#
# Get the root filesystem name if this is a zfsroot system.
#
# return: root filesystem name
#
function get_rootfs
{
	typeset rootfs=""

	rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
	    /etc/mnttab)
	if [[ -z "$rootfs" ]]; then
		log_fail "Cannot get rootfs"
	fi
	zfs list $rootfs > /dev/null 2>&1
	if (($? == 0)); then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}
#
# Get the rootfs's pool name
# return: rootpool name
#
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""

	rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
	    /etc/mnttab)
	if [[ -z "$rootfs" ]]; then
		log_fail "Cannot get rootpool"
	fi
	zfs list $rootfs > /dev/null 2>&1
	if (($? == 0)); then
		rootpool=`echo $rootfs | awk -F\/ '{print $1}'`
		echo $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}
#
# Check if the given device is a physical device
#
function is_physical_device #device
{
	typeset device=${1#/dev/dsk/}
	device=${device#/dev/rdsk/}

	echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
	return $?
}
#
# Get the directory path of given device
#
function get_device_dir #device
{
	typeset device=$1

	if ! $(is_physical_device $device) ; then
		if [[ $device != "/" ]]; then
			device=${device%/*}
		fi
		echo $device
	else
		echo "/dev/dsk"
	fi
}
#
# Get the package name
#
function get_package_name
{
	typeset dirpath=${1:-$STC_NAME}

	echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
}
#
# Get the number of words in a string separated by white space
#
function get_word_count
{
	echo $1 | wc -w
}
#
# Verify that the required number of disks is given
#
# $1 the disk list
# $2 the minimum number of disks required (default 1)
#
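# Example (a sketch; $DISKS comes from the test environment):
#	verify_disk_count "$DISKS" 2
#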
function verify_disk_count
{
	typeset -i min=${2:-1}

	typeset -i count=$(get_word_count "$1")

	if ((count < min)); then
		log_untested "A minimum of $min disks is required to run." \
		    " You specified $count disk(s)"
	fi
}
function ds_is_volume
{
	typeset type=$(get_prop type $1)
	[[ $type = "volume" ]] && return 0
	return 1
}

function ds_is_filesystem
{
	typeset type=$(get_prop type $1)
	[[ $type = "filesystem" ]] && return 0
	return 1
}

function ds_is_snapshot
{
	typeset type=$(get_prop type $1)
	[[ $type = "snapshot" ]] && return 0
	return 1
}
#
# Check if Trusted Extensions are installed and enabled
#
function is_te_enabled
{
	svcs -H -o state labeld 2>/dev/null | grep "enabled" > /dev/null 2>&1
	if (($? != 0)); then
		return 1
	else
		return 0
	fi
}
# Utility function to determine if a system has multiple cpus.
function is_mp
{
	(($(psrinfo | wc -l) > 1))
}

function get_cpu_freq
{
	psrinfo -v 0 | awk '/processor operates at/ {print $6}'
}
# Run the given command as the user provided.
function user_run
{
	typeset user=$1
	shift

	eval su \$user -c \"$@\" > /tmp/out 2>/tmp/err
	return $?
}
#
# Check if the pool contains the specified vdevs
#
# $1 pool
# $2..n <vdev> ...
#
# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
# vdevs is not in the pool, and 2 if pool name is missing.
#
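# Example (hypothetical device names):
#	vdevs_in_pool $TESTPOOL c1t0d0 c1t1d0 || log_fail "vdevs missing"
#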
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	typeset tmpfile=$(mktemp)
	zpool list -Hv "$pool" >$tmpfile
	for vdev in $@; do
		grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
		[[ $? -ne 0 ]] && return 1
	done

	rm -f $tmpfile

	return 0
}
function get_max
{
	typeset -l i max=$1
	shift

	for i in "$@"; do
		max=$((max > i ? max : i))
	done

	echo $max
}

function get_min
{
	typeset -l i min=$1
	shift

	for i in "$@"; do
		min=$((min < i ? min : i))
	done

	echo $min
}
#
# Generate a random number between 1 and the argument.
#
function random
{
	typeset max=$1
	echo $(( ($RANDOM % $max) + 1 ))
}
# Write data that can be compressed into a directory
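# Example (a sketch; writes two files of ~66%-compressible data, 16m each,
# into $TESTDIR):
#	write_compressible $TESTDIR 16m 2
#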
function write_compressible
{
	typeset dir=$1
	typeset megs=$2
	typeset nfiles=${3:-1}
	typeset bs=${4:-1024k}
	typeset fname=${5:-file}

	[[ -d $dir ]] || log_fail "No directory: $dir"

	log_must eval "fio \
	    --name=job \
	    --fallocate=0 \
	    --minimal \
	    --randrepeat=0 \
	    --buffer_compress_percentage=66 \
	    --buffer_compress_chunk=4096 \
	    --directory=$dir \
	    --numjobs=$nfiles \
	    --rw=write \
	    --bs=$bs \
	    --filesize=$megs \
	    --filename_format='$fname.\$jobnum' >/dev/null"
}
#
# Get the inode (object) number of a given file
# $1 file path
#
function get_objnum
{
	typeset pathname=$1
	typeset objnum

	[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
	objnum=$(stat -c %i $pathname)
	echo $objnum
}
#
# Prints the current time in seconds since UNIX Epoch.
#
function current_epoch
{
	printf '%(%s)T'
}
#
# Get decimal value of global uint32_t variable using mdb.
#
function mdb_get_uint32
{
	typeset variable=$1
	typeset value

	value=$(mdb -k -e "$variable/X | ::eval .=U")
	if [[ $? -ne 0 ]]; then
		log_fail "Failed to get value of '$variable' from mdb."
		return 1
	fi

	echo $value
	return 0
}
#
# Set global uint32_t variable to a decimal value using mdb.
#
function mdb_set_uint32
{
	typeset variable=$1
	typeset value=$2

	mdb -kw -e "$variable/W 0t$value" > /dev/null
	if [[ $? -ne 0 ]]; then
		echo "Failed to set '$variable' to '$value' in mdb."
		return 1
	fi

	return 0
}
#
# Set global scalar integer variable to a hex value using mdb.
# Note: Target should have CTF data loaded.
#
function mdb_ctf_set_int
{
	typeset variable=$1
	typeset value=$2

	mdb -kw -e "$variable/z $value" > /dev/null
	if [[ $? -ne 0 ]]; then
		echo "Failed to set '$variable' to '$value' in mdb."
		return 1
	fi

	return 0
}