#!/usr/bin/ksh -p

#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright (c) 2017 by Delphix. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib

#
# DESCRIPTION:
#	Ensure that all levels of reserved slop space are
#	enforced by ZFS.
#
# STRATEGY:
#	1. Create testpool with two filesystems
#	2. On the first filesystem create a big file that holds
#	   a large portion of the pool's space. Then overwrite it
#	   in such a way that, if we free it after taking a
#	   checkpoint, it will append a lot of small entries to
#	   the checkpoint's space map
#	3. Checkpoint the pool
#	4. On the second filesystem, create a file and keep writing
#	   to it until we hit the first level of reserved space
#	   (128M)
#	5. Then start adding properties to the second filesystem until
#	   we hit the second level of reserved space (64M)
#	6. Destroy the first filesystem and wait until the async
#	   destroys of this operation hit the last level of
#	   reserved space (32M)
#	7. Attempt to destroy the second filesystem (should fail)
#	8. Discard the checkpoint
#
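#
# NOTE: the limits above assume the default slop sizing, where the slop
# space is roughly 1/32 of pool capacity with a 128M floor; for the 4G
# nested pool created below that works out to 128M, with the 64M and
# 32M levels being the progressively smaller reserves carved out of it.
#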
DISK="$(echo $DISKS | cut -d' ' -f1)"
DISKFS=$TESTPOOL/disks

NESTEDPOOL=nestedpool

FILEDISKSIZE=4g
FILEDISKLOCATION=/$DISKFS
FILEDISK=$FILEDISKLOCATION/dsk1

FS0=$NESTEDPOOL/fs0
FS1=$NESTEDPOOL/fs1

FS0FILE=/$FS0/file
FS1FILE=/$FS1/file

CKPOINTEDFILEBLOCKS=3200
NUMOVERWRITTENBLOCKS=$(($CKPOINTEDFILEBLOCKS * 1024 * 1024 / 512 / 2))
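#
# The file created below is CKPOINTEDFILEBLOCKS MiB (~3.2G), i.e.
# 3200 * 1024 * 1024 / 512 = 6,553,600 blocks at the 512-byte
# recordsize, so overwriting every second block rewrites
# NUMOVERWRITTENBLOCKS = 3,276,800 of them.
#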
verify_runnable "global"

function test_cleanup
{
        log_must mdb_ctf_set_int zfs_async_block_max_blocks 0xffffffffffffffff
        poolexists $NESTEDPOOL && destroy_pool $NESTEDPOOL
        log_must zpool destroy $TESTPOOL
}
function wait_until_extra_reserved
{
        #
        # Loop until we get from gigabytes to megabytes
        #
        size_range=$(zpool list | awk '{print $1,$4}' | \
            grep $NESTEDPOOL | awk '{print $2}' | grep G)
        while [ "" != "$size_range" ]; do
                sleep 5
                size_range=$(zpool list | awk '{print $1,$4}' | \
                    grep $NESTEDPOOL | awk '{print $2}' | grep G)
        done

        #
        # Loop until we hit the 32M limit
        #
        free=$(zpool list | awk '{print $1,$4}' | \
            grep $NESTEDPOOL | awk '{print $2}' | cut -d"M" -f1 | \
            cut -d"." -f1)
        while (( $free > 32 )); do
                sleep 5
                free=$(zpool list | awk '{print $1,$4}' | \
                    grep $NESTEDPOOL | awk '{print $2}' | cut -d"M" -f1 | \
                    cut -d"." -f1)
        done

        #
        # Even though we may have hit the 32M limit we
        # still need to wait to ensure that we are at
        # the stable state where async destroys are suspended.
        #
        sleep 300
}
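#
# The parsing in wait_until_extra_reserved assumes that FREE is the
# fourth column of the default "zpool list" output; on a
# checkpoint-capable illumos build the header looks roughly like:
#
#   NAME  SIZE  ALLOC  FREE  CKPOINT  EXPANDSZ  FRAG  CAP  DEDUP  HEALTH  ALTROOT
#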
log_must zpool create $TESTPOOL $DISK
log_onexit test_cleanup

log_must zfs create $DISKFS

log_must mkfile -n $FILEDISKSIZE $FILEDISK
log_must zpool create $NESTEDPOOL $FILEDISK
log_must zfs create -o recordsize=512 $FS0
log_must zfs create -o recordsize=512 $FS1
#
# Create a ~3.2G file and ensure it is
# synced to disk
#
log_must dd if=/dev/zero of=$FS0FILE bs=1M count=$CKPOINTEDFILEBLOCKS
log_must sync

# for debugging purposes
log_must zpool list $NESTEDPOOL

#
# Overwrite every second block of the file.
# The idea is to make long space map regions
# where subsequent entries alternate between
# ALLOCATED and FREE, keeping the space maps
# long and fragmented.
#
# Later, when there is a checkpoint and we
# destroy the filesystem, all of these entries
# should be copied over to the checkpoint's
# space map, consuming capacity beyond the
# extra reserved slop space.
#
log_must dd if=/dev/zero of=$FS0FILE bs=512 ostride=2 \
    count=$NUMOVERWRITTENBLOCKS conv=notrunc
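#
# With bs=512 and ostride=2, dd advances the output offset by two
# blocks for every block written, so together with conv=notrunc only
# every second 512-byte block of the existing file gets rewritten.
#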
# for debugging purposes
log_must zpool list $NESTEDPOOL

log_must zpool checkpoint $NESTEDPOOL

#
# Keep writing to the pool until we get to
# the first slop space limit.
#
log_mustnot dd if=/dev/zero of=$FS1FILE bs=512
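#
# The dd above has no count, so it keeps writing until it fails with
# ENOSPC, which should happen once only the 128M slop reserve is left;
# that is why it is wrapped in log_mustnot.
#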
# for debugging purposes
log_must zpool list $NESTEDPOOL

#
# Keep adding properties to our second
# filesystem until we hit the second
# slop space limit.
#
for i in {1..100}
do
        #
        # We use this nested loop logic to fit more
        # properties into one zfs command, reducing
        # the overhead caused by the number of times
        # we wait for a txg to sync (equal to the
        # number of times we execute zfs(1m)).
        #
        PROPERTIES=""
        for j in {1..100}
        do
                PROPVAL=$(dd if=/dev/urandom bs=6000 count=1 | base64 -w 0)
                PROP="user:prop-$i-$j=$PROPVAL"
                PROPERTIES="$PROPERTIES $PROP"
        done
        zfs set $PROPERTIES $FS1 || break
        log_note "- setting properties: iteration $i out of 100 -"
done
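#
# Each property value is 6000 random bytes encoded as base64, i.e.
# 8000 characters, which should stay just below the 8K limit that ZFS
# imposes on user property values.
#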
for k in {1..100}
do
        #
        # In case we broke out of the loop above because we
        # couldn't fit 100 props in the space left, make sure
        # to fill up the space that's left by setting one property
        # at a time.
        #
        PROPVAL=$(dd if=/dev/urandom bs=6000 count=1 | base64 -w 0)
        PROP="user:prop-extra-$k=$PROPVAL"
        zfs set $PROP $FS1 || break
done
# for debugging purposes
log_must zpool list $NESTEDPOOL

#
# By the time we are done with the loops above
# we should be getting ENOSPC when trying to add
# new properties. As a sanity check though, try
# again (this time with log_mustnot).
#
log_mustnot zfs set user:proptest="should fail!" $FS0
log_mustnot zfs set user:proptest="should fail!" $FS1

# for debugging purposes
log_must zpool list $NESTEDPOOL
#
# We are about to destroy the first filesystem,
# but we want to do so in a way that generates
# as many entries as possible in the vdev's
# checkpoint space map. Thus, we reduce the
# number of checkpointed blocks that we "free"
# every txg.
#
log_must mdb_ctf_set_int zfs_async_block_max_blocks 0t10000
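#
# "0t10000" is mdb's notation for decimal 10000; with the tunable set,
# each txg of the async destroy frees at most 10000 blocks, spreading
# the destroy of the fragmented file across many txgs.
#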
log_must zfs destroy $FS0

#
# Keep looping until we hit the point where
# we are at the last slop space limit (32.0M)
# and async destroys are suspended.
#
wait_until_extra_reserved

# for debugging purposes
log_must zpool list $NESTEDPOOL

#
# At this point we shouldn't be allowed to
# destroy anything.
#
log_mustnot zfs destroy $FS1
#
# The only operation that should be allowed
# now is discarding the checkpoint.
#
log_must zpool checkpoint -d $NESTEDPOOL

wait_discard_finish $NESTEDPOOL
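#
# wait_discard_finish is provided by pool_checkpoint.kshlib (sourced at
# the top) and should block until the checkpoint discard has completed
# and the checkpointed space has been returned to the pool.
#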
#
# Now that we have space again, we should be
# able to destroy that filesystem.
#
log_must zfs destroy $FS1

log_pass "All levels of slop space work as expected."