4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2013 Steven Hartland. All rights reserved.
29 * The objective of this program is to provide a DMU/ZAP/SPA stress test
30 * that runs entirely in userland, is easy to use, and easy to extend.
32 * The overall design of the ztest program is as follows:
34 * (1) For each major functional area (e.g. adding vdevs to a pool,
35 * creating and destroying datasets, reading and writing objects, etc)
36 * we have a simple routine to test that functionality. These
37 * individual routines do not have to do anything "stressful".
39 * (2) We turn these simple functionality tests into a stress test by
40 * running them all in parallel, with as many threads as desired,
41 * and spread across as many datasets, objects, and vdevs as desired.
43 * (3) While all this is happening, we inject faults into the pool to
44 * verify that self-healing data really works.
46 * (4) Every time we open a dataset, we change its checksum and compression
47 * functions. Thus even individual objects vary from block to block
48 * in which checksum they use and whether they're compressed.
50 * (5) To verify that we never lose on-disk consistency after a crash,
51 * we run the entire test in a child of the main process.
52 * At random times, the child self-immolates with a SIGKILL.
53 * This is the software equivalent of pulling the power cord.
54 * The parent then runs the test again, using the existing
55 * storage pool, as many times as desired. If backwards compatibility
56 * testing is enabled ztest will sometimes run the "older" version
57 * of ztest after a SIGKILL.
59 * (6) To verify that we don't have future leaks or temporal incursions,
60 * many of the functional tests record the transaction group number
61 * as part of their data. When reading old data, they verify that
62 * the transaction group number is less than the current, open txg.
63 * If you add a new test, please do this if applicable.
65 * When run with no arguments, ztest runs for about five minutes and
66 * produces no output if successful. To get a little bit of information,
67 * specify -V. To get more information, specify -VV, and so on.
69 * To turn this into an overnight stress test, use -T to specify run time.
71 * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
72 * to increase the pool capacity, fanout, and overall stress level.
74 * Use the -k option to set the desired frequency of kills.
76 * When ztest invokes itself it passes all relevant information through a
77 * temporary file which is mmap-ed in the child process. This allows shared
78 * memory to survive the exec syscall. The ztest_shared_hdr_t struct is always
79 * stored at offset 0 of this file and contains information on the size and
80 * number of shared structures in the file. The information stored in this file
81 * must remain backwards compatible with older versions of ztest so that
82 * ztest can invoke them during backwards compatibility testing (-B).
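 *
 * (The child maps just the header first to learn the sizes and counts
 * recorded in ztest_shared_hdr_t, then re-maps the file at its full size.)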
85 #include <sys/zfs_context.h>
91 #include <sys/dmu_objset.h>
97 #include <sys/resource.h>
100 #include <sys/zil_impl.h>
101 #include <sys/vdev_impl.h>
102 #include <sys/vdev_file.h>
103 #include <sys/spa_impl.h>
104 #include <sys/metaslab_impl.h>
105 #include <sys/dsl_prop.h>
106 #include <sys/dsl_dataset.h>
107 #include <sys/dsl_destroy.h>
108 #include <sys/dsl_scan.h>
109 #include <sys/zio_checksum.h>
110 #include <sys/refcount.h>
111 #include <sys/zfeature.h>
112 #include <sys/dsl_userhold.h>
114 #include <stdio_ext.h>
122 #include <sys/fs/zfs.h>
123 #include <libnvpair.h>
125 static int ztest_fd_data = -1;
126 static int ztest_fd_rand = -1;

128 typedef struct ztest_shared_hdr {
129 	uint64_t	zh_hdr_size;
130 	uint64_t	zh_opts_size;
132 	uint64_t	zh_stats_size;
133 	uint64_t	zh_stats_count;
135 	uint64_t	zh_ds_count;
136 } ztest_shared_hdr_t;

138 static ztest_shared_hdr_t *ztest_shared_hdr;
140 typedef struct ztest_shared_opts {
141 	char zo_pool[MAXNAMELEN];
142 	char zo_dir[MAXNAMELEN];
143 	char zo_alt_ztest[MAXNAMELEN];
144 	char zo_alt_libpath[MAXNAMELEN];
146 	uint64_t zo_vdevtime;
154 	uint64_t zo_passtime;
155 	uint64_t zo_killrate;
159 	uint64_t zo_maxloops;
160 	uint64_t zo_metaslab_gang_bang;
161 } ztest_shared_opts_t;
163 static const ztest_shared_opts_t ztest_opts_defaults = {
164 	.zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
165 	.zo_dir = { '/', 't', 'm', 'p', '\0' },
166 	.zo_alt_ztest = { '\0' },
167 	.zo_alt_libpath = { '\0' },
169 	.zo_ashift = SPA_MINBLOCKSHIFT,
172 	.zo_raidz_parity = 1,
173 	.zo_vdev_size = SPA_MINDEVSIZE,
176 	.zo_passtime = 60,		/* 60 seconds */
177 	.zo_killrate = 70,		/* 70% kill rate */
180 	.zo_time = 300,			/* 5 minutes */
181 	.zo_maxloops = 50,		/* max loops during spa_freeze() */
182 	.zo_metaslab_gang_bang = 32 << 10
};

185 extern uint64_t metaslab_gang_bang;
186 extern uint64_t metaslab_df_alloc_threshold;
187 extern uint64_t zfs_deadman_synctime_ms;
188 extern int metaslab_preload_limit;

190 static ztest_shared_opts_t *ztest_shared_opts;
191 static ztest_shared_opts_t ztest_opts;
193 typedef struct ztest_shared_ds {

197 static ztest_shared_ds_t *ztest_shared_ds;
198 #define ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])

200 #define BT_MAGIC 0x123456789abcdefULL
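
/*
 * MAXFAULTS() below is the largest number of leaf-device faults we can
 * inject while still guaranteeing self-healing data: killing an m-way
 * mirror of raidz vdevs with p parity devices requires at least
 * m * (p + 1) failures, so anything up to m * (p + 1) - 1 is survivable.
 */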
201 #define MAXFAULTS() \
202 (MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)
206 	ZTEST_IO_WRITE_PATTERN,
207 	ZTEST_IO_WRITE_ZEROES,

214 typedef struct ztest_block_tag {

224 typedef struct bufwad {

231  * XXX -- fix zfs range locks to be generic so we can use them here.

253 #define ZTEST_RANGE_LOCKS 64
254 #define ZTEST_OBJECT_LOCKS 64
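
/*
 * These constants size the hash-bucket arrays of ztest's own reader/writer
 * locks (rll_t): ztest_object_lock() and ztest_range_lock() pick a bucket by
 * hashing the object number and offset into zd_object_lock[] and
 * zd_range_lock[].
 */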
257 * Object descriptor. Used as a template for object lookup/create/remove.
259 typedef struct ztest_od {
262 	dmu_object_type_t od_type;
263 	dmu_object_type_t od_crtype;
264 	uint64_t od_blocksize;
265 	uint64_t od_crblocksize;
268 	char od_name[MAXNAMELEN];
274 typedef struct ztest_ds {
275 	ztest_shared_ds_t *zd_shared;
277 	rwlock_t zd_zilog_lock;
279 	ztest_od_t *zd_od;		/* debugging aid */
280 	char zd_name[MAXNAMELEN];
281 	mutex_t zd_dirobj_lock;
282 	rll_t zd_object_lock[ZTEST_OBJECT_LOCKS];
283 	rll_t zd_range_lock[ZTEST_RANGE_LOCKS];
287 * Per-iteration state.
289 typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);

291 typedef struct ztest_info {
292 	ztest_func_t	*zi_func;	/* test function */
293 	uint64_t	zi_iters;	/* iterations per execution */
294 	uint64_t	*zi_interval;	/* execute every <interval> seconds */
} ztest_info_t;

297 typedef struct ztest_shared_callstate {
298 	uint64_t	zc_count;	/* per-pass count */
299 	uint64_t	zc_time;	/* per-pass time */
300 	uint64_t	zc_next;	/* next time to call this function */
301 } ztest_shared_callstate_t;

303 static ztest_shared_callstate_t *ztest_shared_callstate;
304 #define ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])
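
/*
 * One ztest_shared_callstate_t exists for each entry in ztest_info[]; they
 * live in the shared mmap-ed file so per-function call counts and timings
 * survive the exec/SIGKILL cycle described at the top of this file.
 */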
307 * Note: these aren't static because we want dladdr() to work.
309 ztest_func_t ztest_dmu_read_write;
310 ztest_func_t ztest_dmu_write_parallel;
311 ztest_func_t ztest_dmu_object_alloc_free;
312 ztest_func_t ztest_dmu_commit_callbacks;
313 ztest_func_t ztest_zap;
314 ztest_func_t ztest_zap_parallel;
315 ztest_func_t ztest_zil_commit;
316 ztest_func_t ztest_zil_remount;
317 ztest_func_t ztest_dmu_read_write_zcopy;
318 ztest_func_t ztest_dmu_objset_create_destroy;
319 ztest_func_t ztest_dmu_prealloc;
320 ztest_func_t ztest_fzap;
321 ztest_func_t ztest_dmu_snapshot_create_destroy;
322 ztest_func_t ztest_dsl_prop_get_set;
323 ztest_func_t ztest_spa_prop_get_set;
324 ztest_func_t ztest_spa_create_destroy;
325 ztest_func_t ztest_fault_inject;
326 ztest_func_t ztest_ddt_repair;
327 ztest_func_t ztest_dmu_snapshot_hold;
328 ztest_func_t ztest_spa_rename;
329 ztest_func_t ztest_scrub;
330 ztest_func_t ztest_dsl_dataset_promote_busy;
331 ztest_func_t ztest_vdev_attach_detach;
332 ztest_func_t ztest_vdev_LUN_growth;
333 ztest_func_t ztest_vdev_add_remove;
334 ztest_func_t ztest_vdev_aux_add_remove;
335 ztest_func_t ztest_split_pool;
336 ztest_func_t ztest_reguid;
337 ztest_func_t ztest_spa_upgrade;
339 uint64_t zopt_always = 0ULL * NANOSEC;		/* all the time */
340 uint64_t zopt_incessant = 1ULL * NANOSEC / 10;	/* every 1/10 second */
341 uint64_t zopt_often = 1ULL * NANOSEC;		/* every second */
342 uint64_t zopt_sometimes = 10ULL * NANOSEC;	/* every 10 seconds */
343 uint64_t zopt_rarely = 60ULL * NANOSEC;		/* every 60 seconds */
345 ztest_info_t ztest_info[] = {
346 	{ ztest_dmu_read_write,			1,	&zopt_always	},
347 	{ ztest_dmu_write_parallel,		10,	&zopt_always	},
348 	{ ztest_dmu_object_alloc_free,		1,	&zopt_always	},
349 	{ ztest_dmu_commit_callbacks,		1,	&zopt_always	},
350 	{ ztest_zap,				30,	&zopt_always	},
351 	{ ztest_zap_parallel,			100,	&zopt_always	},
352 	{ ztest_split_pool,			1,	&zopt_always	},
353 	{ ztest_zil_commit,			1,	&zopt_incessant	},
354 	{ ztest_zil_remount,			1,	&zopt_sometimes	},
355 	{ ztest_dmu_read_write_zcopy,		1,	&zopt_often	},
356 	{ ztest_dmu_objset_create_destroy,	1,	&zopt_often	},
357 	{ ztest_dsl_prop_get_set,		1,	&zopt_often	},
358 	{ ztest_spa_prop_get_set,		1,	&zopt_sometimes	},
360 	{ ztest_dmu_prealloc,			1,	&zopt_sometimes	},
362 	{ ztest_fzap,				1,	&zopt_sometimes	},
363 	{ ztest_dmu_snapshot_create_destroy,	1,	&zopt_sometimes	},
364 	{ ztest_spa_create_destroy,		1,	&zopt_sometimes	},
365 	{ ztest_fault_inject,			1,	&zopt_sometimes	},
366 	{ ztest_ddt_repair,			1,	&zopt_sometimes	},
367 	{ ztest_dmu_snapshot_hold,		1,	&zopt_sometimes	},
368 	{ ztest_reguid,				1,	&zopt_rarely	},
369 	{ ztest_spa_rename,			1,	&zopt_rarely	},
370 	{ ztest_scrub,				1,	&zopt_rarely	},
371 	{ ztest_spa_upgrade,			1,	&zopt_rarely	},
372 	{ ztest_dsl_dataset_promote_busy,	1,	&zopt_rarely	},
373 	{ ztest_vdev_attach_detach,		1,	&zopt_sometimes	},
374 	{ ztest_vdev_LUN_growth,		1,	&zopt_rarely	},
375 	{ ztest_vdev_add_remove,		1,
376 	    &ztest_opts.zo_vdevtime		},
377 	{ ztest_vdev_aux_add_remove,		1,
378 	    &ztest_opts.zo_vdevtime		},
};
381 #define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t))
384 * The following struct is used to hold a list of uncalled commit callbacks.
385 * The callbacks are ordered by txg number.
387 typedef struct ztest_cb_list {
388 	mutex_t zcl_callbacks_lock;
389 	list_t zcl_callbacks;
} ztest_cb_list_t;
393 * Stuff we need to share writably between parent and child.
395 typedef struct ztest_shared {
396 	boolean_t zs_do_init;
397 	hrtime_t zs_proc_start;
398 	hrtime_t zs_proc_stop;
399 	hrtime_t zs_thread_start;
400 	hrtime_t zs_thread_stop;
401 	hrtime_t zs_thread_kill;
402 	uint64_t zs_enospc_count;
403 	uint64_t zs_vdev_next_leaf;
404 	uint64_t zs_vdev_aux;
409 	uint64_t zs_metaslab_sz;
410 	uint64_t zs_metaslab_df_alloc_threshold;
414 #define ID_PARALLEL -1ULL
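
/*
 * Leaf vdev files are named "<dir>/<pool>.<leaf#>a" and aux (spare/l2cache)
 * files "<dir>/<pool>.<aux>.<#>"; see make_vdev_file() below.
 */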
416 static char ztest_dev_template[] = "%s/%s.%llua";
417 static char ztest_aux_template[] = "%s/%s.%s.%llu";
418 ztest_shared_t *ztest_shared;
420 static spa_t *ztest_spa = NULL;
421 static ztest_ds_t *ztest_ds;

423 static mutex_t ztest_vdev_lock;
426 * The ztest_name_lock protects the pool and dataset namespace used by
427 * the individual tests. To modify the namespace, consumers must grab
428 * this lock as writer. Grabbing the lock as reader will ensure that the
429 * namespace does not change while the lock is held.
431 static rwlock_t ztest_name_lock;

433 static boolean_t ztest_dump_core = B_TRUE;
434 static boolean_t ztest_exiting;

436 /* Global commit callback list */
437 static ztest_cb_list_t zcl;
440 	ZTEST_META_DNODE = 0,

445 static void usage(boolean_t) __NORETURN;
448 * These libumem hooks provide a reasonable set of defaults for the allocator's
449 * debugging facilities.
454 return ("default,verbose"); /* $UMEM_DEBUG setting */
458 _umem_logging_init(void)
460 return ("fail,contents"); /* $UMEM_LOGGING setting */
463 #define FATAL_MSG_SZ 1024
468 fatal(int do_perror, char *message, ...)
471 	int save_errno = errno;
472 	char buf[FATAL_MSG_SZ];

474 	(void) fflush(stdout);

476 	va_start(args, message);
477 	(void) sprintf(buf, "ztest: ");
479 	(void) vsprintf(buf + strlen(buf), message, args);

482 		(void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
483 		    ": %s", strerror(save_errno));

485 	(void) fprintf(stderr, "%s\n", buf);
486 	fatal_msg = buf;		/* to ease debugging */
493 str2shift(const char *buf)
495 	const char *ends = "BKMGTPEZ";

500 	for (i = 0; i < strlen(ends); i++) {
501 		if (toupper(buf[0]) == ends[i])
504 	if (i == strlen(ends)) {
505 		(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
509 	if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
512 	(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
518 nicenumtoull(const char *buf)
523 	val = strtoull(buf, &end, 0);
525 		(void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
527 	} else if (end[0] == '.') {
528 		double fval = strtod(buf, &end);
529 		fval *= pow(2, str2shift(end));
530 		if (fval > UINT64_MAX) {
531 			(void) fprintf(stderr, "ztest: value too large: %s\n",
535 		val = (uint64_t)fval;
537 		int shift = str2shift(end);
538 		if (shift >= 64 || (val << shift) >> shift != val) {
539 			(void) fprintf(stderr, "ztest: value too large: %s\n",
549 usage(boolean_t requested
)
551 const ztest_shared_opts_t
*zo
= &ztest_opts_defaults
;
553 char nice_vdev_size
[10];
554 char nice_gang_bang
[10];
555 FILE *fp
= requested
? stdout
: stderr
;
557 nicenum(zo
->zo_vdev_size
, nice_vdev_size
);
558 nicenum(zo
->zo_metaslab_gang_bang
, nice_gang_bang
);
560 (void) fprintf(fp
, "Usage: %s\n"
561 "\t[-v vdevs (default: %llu)]\n"
562 "\t[-s size_of_each_vdev (default: %s)]\n"
563 "\t[-a alignment_shift (default: %d)] use 0 for random\n"
564 "\t[-m mirror_copies (default: %d)]\n"
565 "\t[-r raidz_disks (default: %d)]\n"
566 "\t[-R raidz_parity (default: %d)]\n"
567 "\t[-d datasets (default: %d)]\n"
568 "\t[-t threads (default: %d)]\n"
569 "\t[-g gang_block_threshold (default: %s)]\n"
570 "\t[-i init_count (default: %d)] initialize pool i times\n"
571 "\t[-k kill_percentage (default: %llu%%)]\n"
572 "\t[-p pool_name (default: %s)]\n"
573 "\t[-f dir (default: %s)] file directory for vdev files\n"
574 "\t[-V] verbose (use multiple times for ever more blather)\n"
575 "\t[-E] use existing pool instead of creating new one\n"
576 "\t[-T time (default: %llu sec)] total run time\n"
577 "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
578 "\t[-P passtime (default: %llu sec)] time per pass\n"
579 "\t[-B alt_ztest (default: <none>)] alternate ztest path\n"
580 "\t[-h] (print help)\n"
583 (u_longlong_t
)zo
->zo_vdevs
, /* -v */
584 nice_vdev_size
, /* -s */
585 zo
->zo_ashift
, /* -a */
586 zo
->zo_mirrors
, /* -m */
587 zo
->zo_raidz
, /* -r */
588 zo
->zo_raidz_parity
, /* -R */
589 zo
->zo_datasets
, /* -d */
590 zo
->zo_threads
, /* -t */
591 nice_gang_bang
, /* -g */
592 zo
->zo_init
, /* -i */
593 (u_longlong_t
)zo
->zo_killrate
, /* -k */
594 zo
->zo_pool
, /* -p */
596 (u_longlong_t
)zo
->zo_time
, /* -T */
597 (u_longlong_t
)zo
->zo_maxloops
, /* -F */
598 (u_longlong_t
)zo
->zo_passtime
);
599 exit(requested
? 0 : 1);
603 process_options(int argc
, char **argv
)
606 ztest_shared_opts_t
*zo
= &ztest_opts
;
610 char altdir
[MAXNAMELEN
] = { 0 };
612 bcopy(&ztest_opts_defaults
, zo
, sizeof (*zo
));
614 while ((opt
= getopt(argc
, argv
,
615 "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:")) != EOF
) {
632 value
= nicenumtoull(optarg
);
636 zo
->zo_vdevs
= value
;
639 zo
->zo_vdev_size
= MAX(SPA_MINDEVSIZE
, value
);
642 zo
->zo_ashift
= value
;
645 zo
->zo_mirrors
= value
;
648 zo
->zo_raidz
= MAX(1, value
);
651 zo
->zo_raidz_parity
= MIN(MAX(value
, 1), 3);
654 zo
->zo_datasets
= MAX(1, value
);
657 zo
->zo_threads
= MAX(1, value
);
660 zo
->zo_metaslab_gang_bang
= MAX(SPA_MINBLOCKSIZE
<< 1,
667 zo
->zo_killrate
= value
;
670 (void) strlcpy(zo
->zo_pool
, optarg
,
671 sizeof (zo
->zo_pool
));
674 path
= realpath(optarg
, NULL
);
676 (void) fprintf(stderr
, "error: %s: %s\n",
677 optarg
, strerror(errno
));
680 (void) strlcpy(zo
->zo_dir
, path
,
681 sizeof (zo
->zo_dir
));
694 zo
->zo_passtime
= MAX(1, value
);
697 zo
->zo_maxloops
= MAX(1, value
);
700 (void) strlcpy(altdir
, optarg
, sizeof (altdir
));
712 zo
->zo_raidz_parity
= MIN(zo
->zo_raidz_parity
, zo
->zo_raidz
- 1);
715 (zo
->zo_vdevs
> 0 ? zo
->zo_time
* NANOSEC
/ zo
->zo_vdevs
:
718 if (strlen(altdir
) > 0) {
726 cmd
= umem_alloc(MAXPATHLEN
, UMEM_NOFAIL
);
727 realaltdir
= umem_alloc(MAXPATHLEN
, UMEM_NOFAIL
);
729 VERIFY(NULL
!= realpath(getexecname(), cmd
));
730 if (0 != access(altdir
, F_OK
)) {
731 ztest_dump_core
= B_FALSE
;
732 fatal(B_TRUE
, "invalid alternate ztest path: %s",
735 VERIFY(NULL
!= realpath(altdir
, realaltdir
));
738 * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest".
739 * We want to extract <isa> to determine if we should use
740 * 32 or 64 bit binaries.
742 bin
= strstr(cmd
, "/usr/bin/");
743 ztest
= strstr(bin
, "/ztest");
745 isalen
= ztest
- isa
;
746 (void) snprintf(zo
->zo_alt_ztest
, sizeof (zo
->zo_alt_ztest
),
747 "%s/usr/bin/%.*s/ztest", realaltdir
, isalen
, isa
);
748 (void) snprintf(zo
->zo_alt_libpath
, sizeof (zo
->zo_alt_libpath
),
749 "%s/usr/lib/%.*s", realaltdir
, isalen
, isa
);
751 if (0 != access(zo
->zo_alt_ztest
, X_OK
)) {
752 ztest_dump_core
= B_FALSE
;
753 fatal(B_TRUE
, "invalid alternate ztest: %s",
755 } else if (0 != access(zo
->zo_alt_libpath
, X_OK
)) {
756 ztest_dump_core
= B_FALSE
;
757 fatal(B_TRUE
, "invalid alternate lib directory %s",
761 umem_free(cmd
, MAXPATHLEN
);
762 umem_free(realaltdir
, MAXPATHLEN
);
767 ztest_kill(ztest_shared_t *zs)
769 	zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
770 	zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));

773 	 * Before we kill off ztest, make sure that the config is updated.
774 	 * See comment above spa_config_sync().
776 	mutex_enter(&spa_namespace_lock);
777 	spa_config_sync(ztest_spa, B_FALSE, B_FALSE);
778 	mutex_exit(&spa_namespace_lock);

780 	zfs_dbgmsg_print(FTAG);
781 	(void) kill(getpid(), SIGKILL);

785 ztest_random(uint64_t range)
789 	ASSERT3S(ztest_fd_rand, >=, 0);

794 	if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r))
795 		fatal(1, "short read from /dev/urandom");

802 ztest_record_enospc(const char *s)
804 	ztest_shared->zs_enospc_count++;

808 ztest_get_ashift(void)
810 	if (ztest_opts.zo_ashift == 0)
811 		return (SPA_MINBLOCKSHIFT + ztest_random(3));
812 	return (ztest_opts.zo_ashift);
816 make_vdev_file(char *path
, char *aux
, char *pool
, size_t size
, uint64_t ashift
)
818 char pathbuf
[MAXPATHLEN
];
823 ashift
= ztest_get_ashift();
829 vdev
= ztest_shared
->zs_vdev_aux
;
830 (void) snprintf(path
, sizeof (pathbuf
),
831 ztest_aux_template
, ztest_opts
.zo_dir
,
832 pool
== NULL
? ztest_opts
.zo_pool
: pool
,
835 vdev
= ztest_shared
->zs_vdev_next_leaf
++;
836 (void) snprintf(path
, sizeof (pathbuf
),
837 ztest_dev_template
, ztest_opts
.zo_dir
,
838 pool
== NULL
? ztest_opts
.zo_pool
: pool
, vdev
);
843 int fd
= open(path
, O_RDWR
| O_CREAT
| O_TRUNC
, 0666);
845 fatal(1, "can't open %s", path
);
846 if (ftruncate(fd
, size
) != 0)
847 fatal(1, "can't ftruncate %s", path
);
851 VERIFY(nvlist_alloc(&file
, NV_UNIQUE_NAME
, 0) == 0);
852 VERIFY(nvlist_add_string(file
, ZPOOL_CONFIG_TYPE
, VDEV_TYPE_FILE
) == 0);
853 VERIFY(nvlist_add_string(file
, ZPOOL_CONFIG_PATH
, path
) == 0);
854 VERIFY(nvlist_add_uint64(file
, ZPOOL_CONFIG_ASHIFT
, ashift
) == 0);
860 make_vdev_raidz(char *path
, char *aux
, char *pool
, size_t size
,
861 uint64_t ashift
, int r
)
863 nvlist_t
*raidz
, **child
;
867 return (make_vdev_file(path
, aux
, pool
, size
, ashift
));
868 child
= umem_alloc(r
* sizeof (nvlist_t
*), UMEM_NOFAIL
);
870 for (c
= 0; c
< r
; c
++)
871 child
[c
] = make_vdev_file(path
, aux
, pool
, size
, ashift
);
873 VERIFY(nvlist_alloc(&raidz
, NV_UNIQUE_NAME
, 0) == 0);
874 VERIFY(nvlist_add_string(raidz
, ZPOOL_CONFIG_TYPE
,
875 VDEV_TYPE_RAIDZ
) == 0);
876 VERIFY(nvlist_add_uint64(raidz
, ZPOOL_CONFIG_NPARITY
,
877 ztest_opts
.zo_raidz_parity
) == 0);
878 VERIFY(nvlist_add_nvlist_array(raidz
, ZPOOL_CONFIG_CHILDREN
,
881 for (c
= 0; c
< r
; c
++)
882 nvlist_free(child
[c
]);
884 umem_free(child
, r
* sizeof (nvlist_t
*));
890 make_vdev_mirror(char *path
, char *aux
, char *pool
, size_t size
,
891 uint64_t ashift
, int r
, int m
)
893 nvlist_t
*mirror
, **child
;
897 return (make_vdev_raidz(path
, aux
, pool
, size
, ashift
, r
));
899 child
= umem_alloc(m
* sizeof (nvlist_t
*), UMEM_NOFAIL
);
901 for (c
= 0; c
< m
; c
++)
902 child
[c
] = make_vdev_raidz(path
, aux
, pool
, size
, ashift
, r
);
904 VERIFY(nvlist_alloc(&mirror
, NV_UNIQUE_NAME
, 0) == 0);
905 VERIFY(nvlist_add_string(mirror
, ZPOOL_CONFIG_TYPE
,
906 VDEV_TYPE_MIRROR
) == 0);
907 VERIFY(nvlist_add_nvlist_array(mirror
, ZPOOL_CONFIG_CHILDREN
,
910 for (c
= 0; c
< m
; c
++)
911 nvlist_free(child
[c
]);
913 umem_free(child
, m
* sizeof (nvlist_t
*));
919 make_vdev_root(char *path
, char *aux
, char *pool
, size_t size
, uint64_t ashift
,
920 int log
, int r
, int m
, int t
)
922 nvlist_t
*root
, **child
;
927 child
= umem_alloc(t
* sizeof (nvlist_t
*), UMEM_NOFAIL
);
929 for (c
= 0; c
< t
; c
++) {
930 child
[c
] = make_vdev_mirror(path
, aux
, pool
, size
, ashift
,
932 VERIFY(nvlist_add_uint64(child
[c
], ZPOOL_CONFIG_IS_LOG
,
936 VERIFY(nvlist_alloc(&root
, NV_UNIQUE_NAME
, 0) == 0);
937 VERIFY(nvlist_add_string(root
, ZPOOL_CONFIG_TYPE
, VDEV_TYPE_ROOT
) == 0);
938 VERIFY(nvlist_add_nvlist_array(root
, aux
? aux
: ZPOOL_CONFIG_CHILDREN
,
941 for (c
= 0; c
< t
; c
++)
942 nvlist_free(child
[c
]);
944 umem_free(child
, t
* sizeof (nvlist_t
*));
950 * Find a random spa version. Returns a random spa version in the
951 * range [initial_version, SPA_VERSION_FEATURES].
954 ztest_random_spa_version(uint64_t initial_version
)
956 uint64_t version
= initial_version
;
958 if (version
<= SPA_VERSION_BEFORE_FEATURES
) {
960 ztest_random(SPA_VERSION_BEFORE_FEATURES
- version
+ 1);
963 if (version
> SPA_VERSION_BEFORE_FEATURES
)
964 version
= SPA_VERSION_FEATURES
;
966 ASSERT(SPA_VERSION_IS_SUPPORTED(version
));
971 ztest_random_blocksize(void)
973 return (1 << (SPA_MINBLOCKSHIFT
+
974 ztest_random(SPA_MAXBLOCKSHIFT
- SPA_MINBLOCKSHIFT
+ 1)));
978 ztest_random_ibshift(void)
980 return (DN_MIN_INDBLKSHIFT
+
981 ztest_random(DN_MAX_INDBLKSHIFT
- DN_MIN_INDBLKSHIFT
+ 1));
985 ztest_random_vdev_top(spa_t
*spa
, boolean_t log_ok
)
988 vdev_t
*rvd
= spa
->spa_root_vdev
;
991 ASSERT(spa_config_held(spa
, SCL_ALL
, RW_READER
) != 0);
994 top
= ztest_random(rvd
->vdev_children
);
995 tvd
= rvd
->vdev_child
[top
];
996 } while (tvd
->vdev_ishole
|| (tvd
->vdev_islog
&& !log_ok
) ||
997 tvd
->vdev_mg
== NULL
|| tvd
->vdev_mg
->mg_class
== NULL
);
1003 ztest_random_dsl_prop(zfs_prop_t prop
)
1008 value
= zfs_prop_random_value(prop
, ztest_random(-1ULL));
1009 } while (prop
== ZFS_PROP_CHECKSUM
&& value
== ZIO_CHECKSUM_OFF
);
1015 ztest_dsl_prop_set_uint64(char *osname
, zfs_prop_t prop
, uint64_t value
,
1018 const char *propname
= zfs_prop_to_name(prop
);
1019 const char *valname
;
1020 char setpoint
[MAXPATHLEN
];
1024 error
= dsl_prop_set_int(osname
, propname
,
1025 (inherit
? ZPROP_SRC_NONE
: ZPROP_SRC_LOCAL
), value
);
1027 if (error
== ENOSPC
) {
1028 ztest_record_enospc(FTAG
);
1033 VERIFY0(dsl_prop_get_integer(osname
, propname
, &curval
, setpoint
));
1035 if (ztest_opts
.zo_verbose
>= 6) {
1036 VERIFY(zfs_prop_index_to_string(prop
, curval
, &valname
) == 0);
1037 (void) printf("%s %s = %s at '%s'\n",
1038 osname
, propname
, valname
, setpoint
);
1045 ztest_spa_prop_set_uint64(zpool_prop_t prop
, uint64_t value
)
1047 spa_t
*spa
= ztest_spa
;
1048 nvlist_t
*props
= NULL
;
1051 VERIFY(nvlist_alloc(&props
, NV_UNIQUE_NAME
, 0) == 0);
1052 VERIFY(nvlist_add_uint64(props
, zpool_prop_to_name(prop
), value
) == 0);
1054 error
= spa_prop_set(spa
, props
);
1058 if (error
== ENOSPC
) {
1059 ztest_record_enospc(FTAG
);
1068 ztest_rll_init(rll_t
*rll
)
1070 rll
->rll_writer
= NULL
;
1071 rll
->rll_readers
= 0;
1072 VERIFY(_mutex_init(&rll
->rll_lock
, USYNC_THREAD
, NULL
) == 0);
1073 VERIFY(cond_init(&rll
->rll_cv
, USYNC_THREAD
, NULL
) == 0);
1077 ztest_rll_destroy(rll_t
*rll
)
1079 ASSERT(rll
->rll_writer
== NULL
);
1080 ASSERT(rll
->rll_readers
== 0);
1081 VERIFY(_mutex_destroy(&rll
->rll_lock
) == 0);
1082 VERIFY(cond_destroy(&rll
->rll_cv
) == 0);
1086 ztest_rll_lock(rll_t
*rll
, rl_type_t type
)
1088 VERIFY(mutex_lock(&rll
->rll_lock
) == 0);
1090 if (type
== RL_READER
) {
1091 while (rll
->rll_writer
!= NULL
)
1092 (void) cond_wait(&rll
->rll_cv
, &rll
->rll_lock
);
1095 while (rll
->rll_writer
!= NULL
|| rll
->rll_readers
)
1096 (void) cond_wait(&rll
->rll_cv
, &rll
->rll_lock
);
1097 rll
->rll_writer
= curthread
;
1100 VERIFY(mutex_unlock(&rll
->rll_lock
) == 0);
1104 ztest_rll_unlock(rll_t
*rll
)
1106 VERIFY(mutex_lock(&rll
->rll_lock
) == 0);
1108 if (rll
->rll_writer
) {
1109 ASSERT(rll
->rll_readers
== 0);
1110 rll
->rll_writer
= NULL
;
1112 ASSERT(rll
->rll_readers
!= 0);
1113 ASSERT(rll
->rll_writer
== NULL
);
1117 if (rll
->rll_writer
== NULL
&& rll
->rll_readers
== 0)
1118 VERIFY(cond_broadcast(&rll
->rll_cv
) == 0);
1120 VERIFY(mutex_unlock(&rll
->rll_lock
) == 0);
1124 ztest_object_lock(ztest_ds_t
*zd
, uint64_t object
, rl_type_t type
)
1126 rll_t
*rll
= &zd
->zd_object_lock
[object
& (ZTEST_OBJECT_LOCKS
- 1)];
1128 ztest_rll_lock(rll
, type
);
1132 ztest_object_unlock(ztest_ds_t
*zd
, uint64_t object
)
1134 rll_t
*rll
= &zd
->zd_object_lock
[object
& (ZTEST_OBJECT_LOCKS
- 1)];
1136 ztest_rll_unlock(rll
);
1140 ztest_range_lock(ztest_ds_t
*zd
, uint64_t object
, uint64_t offset
,
1141 uint64_t size
, rl_type_t type
)
1143 uint64_t hash
= object
^ (offset
% (ZTEST_RANGE_LOCKS
+ 1));
1144 rll_t
*rll
= &zd
->zd_range_lock
[hash
& (ZTEST_RANGE_LOCKS
- 1)];
1147 rl
= umem_alloc(sizeof (*rl
), UMEM_NOFAIL
);
1148 rl
->rl_object
= object
;
1149 rl
->rl_offset
= offset
;
1153 ztest_rll_lock(rll
, type
);
1159 ztest_range_unlock(rl_t
*rl
)
1161 rll_t
*rll
= rl
->rl_lock
;
1163 ztest_rll_unlock(rll
);
1165 umem_free(rl
, sizeof (*rl
));
1169 ztest_zd_init(ztest_ds_t
*zd
, ztest_shared_ds_t
*szd
, objset_t
*os
)
1172 zd
->zd_zilog
= dmu_objset_zil(os
);
1173 zd
->zd_shared
= szd
;
1174 dmu_objset_name(os
, zd
->zd_name
);
1176 if (zd
->zd_shared
!= NULL
)
1177 zd
->zd_shared
->zd_seq
= 0;
1179 VERIFY(rwlock_init(&zd
->zd_zilog_lock
, USYNC_THREAD
, NULL
) == 0);
1180 VERIFY(_mutex_init(&zd
->zd_dirobj_lock
, USYNC_THREAD
, NULL
) == 0);
1182 for (int l
= 0; l
< ZTEST_OBJECT_LOCKS
; l
++)
1183 ztest_rll_init(&zd
->zd_object_lock
[l
]);
1185 for (int l
= 0; l
< ZTEST_RANGE_LOCKS
; l
++)
1186 ztest_rll_init(&zd
->zd_range_lock
[l
]);
1190 ztest_zd_fini(ztest_ds_t
*zd
)
1192 VERIFY(_mutex_destroy(&zd
->zd_dirobj_lock
) == 0);
1194 for (int l
= 0; l
< ZTEST_OBJECT_LOCKS
; l
++)
1195 ztest_rll_destroy(&zd
->zd_object_lock
[l
]);
1197 for (int l
= 0; l
< ZTEST_RANGE_LOCKS
; l
++)
1198 ztest_rll_destroy(&zd
->zd_range_lock
[l
]);
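
/*
 * TXG_MIGHTWAIT picks TXG_NOWAIT about one time in ten so that both the
 * blocking and non-blocking paths of dmu_tx_assign() get exercised.
 */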
1201 #define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
1204 ztest_tx_assign(dmu_tx_t
*tx
, uint64_t txg_how
, const char *tag
)
1210 * Attempt to assign tx to some transaction group.
1212 error
= dmu_tx_assign(tx
, txg_how
);
1214 if (error
== ERESTART
) {
1215 ASSERT(txg_how
== TXG_NOWAIT
);
1218 ASSERT3U(error
, ==, ENOSPC
);
1219 ztest_record_enospc(tag
);
1224 txg
= dmu_tx_get_txg(tx
);
1230 ztest_pattern_set(void *buf
, uint64_t size
, uint64_t value
)
1233 uint64_t *ip_end
= (uint64_t *)((uintptr_t)buf
+ (uintptr_t)size
);
1240 ztest_pattern_match(void *buf
, uint64_t size
, uint64_t value
)
1243 uint64_t *ip_end
= (uint64_t *)((uintptr_t)buf
+ (uintptr_t)size
);
1247 diff
|= (value
- *ip
++);
1253 ztest_bt_generate(ztest_block_tag_t
*bt
, objset_t
*os
, uint64_t object
,
1254 uint64_t offset
, uint64_t gen
, uint64_t txg
, uint64_t crtxg
)
1256 bt
->bt_magic
= BT_MAGIC
;
1257 bt
->bt_objset
= dmu_objset_id(os
);
1258 bt
->bt_object
= object
;
1259 bt
->bt_offset
= offset
;
1262 bt
->bt_crtxg
= crtxg
;
1266 ztest_bt_verify(ztest_block_tag_t
*bt
, objset_t
*os
, uint64_t object
,
1267 uint64_t offset
, uint64_t gen
, uint64_t txg
, uint64_t crtxg
)
1269 ASSERT3U(bt
->bt_magic
, ==, BT_MAGIC
);
1270 ASSERT3U(bt
->bt_objset
, ==, dmu_objset_id(os
));
1271 ASSERT3U(bt
->bt_object
, ==, object
);
1272 ASSERT3U(bt
->bt_offset
, ==, offset
);
1273 ASSERT3U(bt
->bt_gen
, <=, gen
);
1274 ASSERT3U(bt
->bt_txg
, <=, txg
);
1275 ASSERT3U(bt
->bt_crtxg
, ==, crtxg
);
1278 static ztest_block_tag_t
*
1279 ztest_bt_bonus(dmu_buf_t
*db
)
1281 dmu_object_info_t doi
;
1282 ztest_block_tag_t
*bt
;
1284 dmu_object_info_from_db(db
, &doi
);
1285 ASSERT3U(doi
.doi_bonus_size
, <=, db
->db_size
);
1286 ASSERT3U(doi
.doi_bonus_size
, >=, sizeof (*bt
));
1287 bt
= (void *)((char *)db
->db_data
+ doi
.doi_bonus_size
- sizeof (*bt
));
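
/*
 * ZIL logging ops: ztest reuses otherwise-unused lr_create_t fields (mode,
 * uid, gid, rdev, crtime[1]) to carry the object type, blocksize, indirect
 * block shift, bonus type and bonus length through the log record.
 */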
1296 #define lrz_type lr_mode
1297 #define lrz_blocksize lr_uid
1298 #define lrz_ibshift lr_gid
1299 #define lrz_bonustype lr_rdev
1300 #define lrz_bonuslen lr_crtime[1]
1303 ztest_log_create(ztest_ds_t
*zd
, dmu_tx_t
*tx
, lr_create_t
*lr
)
1305 char *name
= (void *)(lr
+ 1); /* name follows lr */
1306 size_t namesize
= strlen(name
) + 1;
1309 if (zil_replaying(zd
->zd_zilog
, tx
))
1312 itx
= zil_itx_create(TX_CREATE
, sizeof (*lr
) + namesize
);
1313 bcopy(&lr
->lr_common
+ 1, &itx
->itx_lr
+ 1,
1314 sizeof (*lr
) + namesize
- sizeof (lr_t
));
1316 zil_itx_assign(zd
->zd_zilog
, itx
, tx
);
1320 ztest_log_remove(ztest_ds_t
*zd
, dmu_tx_t
*tx
, lr_remove_t
*lr
, uint64_t object
)
1322 char *name
= (void *)(lr
+ 1); /* name follows lr */
1323 size_t namesize
= strlen(name
) + 1;
1326 if (zil_replaying(zd
->zd_zilog
, tx
))
1329 itx
= zil_itx_create(TX_REMOVE
, sizeof (*lr
) + namesize
);
1330 bcopy(&lr
->lr_common
+ 1, &itx
->itx_lr
+ 1,
1331 sizeof (*lr
) + namesize
- sizeof (lr_t
));
1333 itx
->itx_oid
= object
;
1334 zil_itx_assign(zd
->zd_zilog
, itx
, tx
);
1338 ztest_log_write(ztest_ds_t
*zd
, dmu_tx_t
*tx
, lr_write_t
*lr
)
1341 itx_wr_state_t write_state
= ztest_random(WR_NUM_STATES
);
1343 if (zil_replaying(zd
->zd_zilog
, tx
))
1346 if (lr
->lr_length
> ZIL_MAX_LOG_DATA
)
1347 write_state
= WR_INDIRECT
;
1349 itx
= zil_itx_create(TX_WRITE
,
1350 sizeof (*lr
) + (write_state
== WR_COPIED
? lr
->lr_length
: 0));
1352 if (write_state
== WR_COPIED
&&
1353 dmu_read(zd
->zd_os
, lr
->lr_foid
, lr
->lr_offset
, lr
->lr_length
,
1354 ((lr_write_t
*)&itx
->itx_lr
) + 1, DMU_READ_NO_PREFETCH
) != 0) {
1355 zil_itx_destroy(itx
);
1356 itx
= zil_itx_create(TX_WRITE
, sizeof (*lr
));
1357 write_state
= WR_NEED_COPY
;
1359 itx
->itx_private
= zd
;
1360 itx
->itx_wr_state
= write_state
;
1361 itx
->itx_sync
= (ztest_random(8) == 0);
1362 itx
->itx_sod
+= (write_state
== WR_NEED_COPY
? lr
->lr_length
: 0);
1364 bcopy(&lr
->lr_common
+ 1, &itx
->itx_lr
+ 1,
1365 sizeof (*lr
) - sizeof (lr_t
));
1367 zil_itx_assign(zd
->zd_zilog
, itx
, tx
);
1371 ztest_log_truncate(ztest_ds_t
*zd
, dmu_tx_t
*tx
, lr_truncate_t
*lr
)
1375 if (zil_replaying(zd
->zd_zilog
, tx
))
1378 itx
= zil_itx_create(TX_TRUNCATE
, sizeof (*lr
));
1379 bcopy(&lr
->lr_common
+ 1, &itx
->itx_lr
+ 1,
1380 sizeof (*lr
) - sizeof (lr_t
));
1382 itx
->itx_sync
= B_FALSE
;
1383 zil_itx_assign(zd
->zd_zilog
, itx
, tx
);
1387 ztest_log_setattr(ztest_ds_t
*zd
, dmu_tx_t
*tx
, lr_setattr_t
*lr
)
1391 if (zil_replaying(zd
->zd_zilog
, tx
))
1394 itx
= zil_itx_create(TX_SETATTR
, sizeof (*lr
));
1395 bcopy(&lr
->lr_common
+ 1, &itx
->itx_lr
+ 1,
1396 sizeof (*lr
) - sizeof (lr_t
));
1398 itx
->itx_sync
= B_FALSE
;
1399 zil_itx_assign(zd
->zd_zilog
, itx
, tx
);
1406 ztest_replay_create(ztest_ds_t
*zd
, lr_create_t
*lr
, boolean_t byteswap
)
1408 char *name
= (void *)(lr
+ 1); /* name follows lr */
1409 objset_t
*os
= zd
->zd_os
;
1410 ztest_block_tag_t
*bbt
;
1417 byteswap_uint64_array(lr
, sizeof (*lr
));
1419 ASSERT(lr
->lr_doid
== ZTEST_DIROBJ
);
1420 ASSERT(name
[0] != '\0');
1422 tx
= dmu_tx_create(os
);
1424 dmu_tx_hold_zap(tx
, lr
->lr_doid
, B_TRUE
, name
);
1426 if (lr
->lrz_type
== DMU_OT_ZAP_OTHER
) {
1427 dmu_tx_hold_zap(tx
, DMU_NEW_OBJECT
, B_TRUE
, NULL
);
1429 dmu_tx_hold_bonus(tx
, DMU_NEW_OBJECT
);
1432 txg
= ztest_tx_assign(tx
, TXG_WAIT
, FTAG
);
1436 ASSERT(dmu_objset_zil(os
)->zl_replay
== !!lr
->lr_foid
);
1438 if (lr
->lrz_type
== DMU_OT_ZAP_OTHER
) {
1439 if (lr
->lr_foid
== 0) {
1440 lr
->lr_foid
= zap_create(os
,
1441 lr
->lrz_type
, lr
->lrz_bonustype
,
1442 lr
->lrz_bonuslen
, tx
);
1444 error
= zap_create_claim(os
, lr
->lr_foid
,
1445 lr
->lrz_type
, lr
->lrz_bonustype
,
1446 lr
->lrz_bonuslen
, tx
);
1449 if (lr
->lr_foid
== 0) {
1450 lr
->lr_foid
= dmu_object_alloc(os
,
1451 lr
->lrz_type
, 0, lr
->lrz_bonustype
,
1452 lr
->lrz_bonuslen
, tx
);
1454 error
= dmu_object_claim(os
, lr
->lr_foid
,
1455 lr
->lrz_type
, 0, lr
->lrz_bonustype
,
1456 lr
->lrz_bonuslen
, tx
);
1461 ASSERT3U(error
, ==, EEXIST
);
1462 ASSERT(zd
->zd_zilog
->zl_replay
);
1467 ASSERT(lr
->lr_foid
!= 0);
1469 if (lr
->lrz_type
!= DMU_OT_ZAP_OTHER
)
1470 VERIFY3U(0, ==, dmu_object_set_blocksize(os
, lr
->lr_foid
,
1471 lr
->lrz_blocksize
, lr
->lrz_ibshift
, tx
));
1473 VERIFY3U(0, ==, dmu_bonus_hold(os
, lr
->lr_foid
, FTAG
, &db
));
1474 bbt
= ztest_bt_bonus(db
);
1475 dmu_buf_will_dirty(db
, tx
);
1476 ztest_bt_generate(bbt
, os
, lr
->lr_foid
, -1ULL, lr
->lr_gen
, txg
, txg
);
1477 dmu_buf_rele(db
, FTAG
);
1479 VERIFY3U(0, ==, zap_add(os
, lr
->lr_doid
, name
, sizeof (uint64_t), 1,
1482 (void) ztest_log_create(zd
, tx
, lr
);
1490 ztest_replay_remove(ztest_ds_t
*zd
, lr_remove_t
*lr
, boolean_t byteswap
)
1492 char *name
= (void *)(lr
+ 1); /* name follows lr */
1493 objset_t
*os
= zd
->zd_os
;
1494 dmu_object_info_t doi
;
1496 uint64_t object
, txg
;
1499 byteswap_uint64_array(lr
, sizeof (*lr
));
1501 ASSERT(lr
->lr_doid
== ZTEST_DIROBJ
);
1502 ASSERT(name
[0] != '\0');
1505 zap_lookup(os
, lr
->lr_doid
, name
, sizeof (object
), 1, &object
));
1506 ASSERT(object
!= 0);
1508 ztest_object_lock(zd
, object
, RL_WRITER
);
1510 VERIFY3U(0, ==, dmu_object_info(os
, object
, &doi
));
1512 tx
= dmu_tx_create(os
);
1514 dmu_tx_hold_zap(tx
, lr
->lr_doid
, B_FALSE
, name
);
1515 dmu_tx_hold_free(tx
, object
, 0, DMU_OBJECT_END
);
1517 txg
= ztest_tx_assign(tx
, TXG_WAIT
, FTAG
);
1519 ztest_object_unlock(zd
, object
);
1523 if (doi
.doi_type
== DMU_OT_ZAP_OTHER
) {
1524 VERIFY3U(0, ==, zap_destroy(os
, object
, tx
));
1526 VERIFY3U(0, ==, dmu_object_free(os
, object
, tx
));
1529 VERIFY3U(0, ==, zap_remove(os
, lr
->lr_doid
, name
, tx
));
1531 (void) ztest_log_remove(zd
, tx
, lr
, object
);
1535 ztest_object_unlock(zd
, object
);
1541 ztest_replay_write(ztest_ds_t
*zd
, lr_write_t
*lr
, boolean_t byteswap
)
1543 objset_t
*os
= zd
->zd_os
;
1544 void *data
= lr
+ 1; /* data follows lr */
1545 uint64_t offset
, length
;
1546 ztest_block_tag_t
*bt
= data
;
1547 ztest_block_tag_t
*bbt
;
1548 uint64_t gen
, txg
, lrtxg
, crtxg
;
1549 dmu_object_info_t doi
;
1552 arc_buf_t
*abuf
= NULL
;
1556 byteswap_uint64_array(lr
, sizeof (*lr
));
1558 offset
= lr
->lr_offset
;
1559 length
= lr
->lr_length
;
1561 /* If it's a dmu_sync() block, write the whole block */
1562 if (lr
->lr_common
.lrc_reclen
== sizeof (lr_write_t
)) {
1563 uint64_t blocksize
= BP_GET_LSIZE(&lr
->lr_blkptr
);
1564 if (length
< blocksize
) {
1565 offset
-= offset
% blocksize
;
1570 if (bt
->bt_magic
== BSWAP_64(BT_MAGIC
))
1571 byteswap_uint64_array(bt
, sizeof (*bt
));
1573 if (bt
->bt_magic
!= BT_MAGIC
)
1576 ztest_object_lock(zd
, lr
->lr_foid
, RL_READER
);
1577 rl
= ztest_range_lock(zd
, lr
->lr_foid
, offset
, length
, RL_WRITER
);
1579 VERIFY3U(0, ==, dmu_bonus_hold(os
, lr
->lr_foid
, FTAG
, &db
));
1581 dmu_object_info_from_db(db
, &doi
);
1583 bbt
= ztest_bt_bonus(db
);
1584 ASSERT3U(bbt
->bt_magic
, ==, BT_MAGIC
);
1586 crtxg
= bbt
->bt_crtxg
;
1587 lrtxg
= lr
->lr_common
.lrc_txg
;
1589 tx
= dmu_tx_create(os
);
1591 dmu_tx_hold_write(tx
, lr
->lr_foid
, offset
, length
);
1593 if (ztest_random(8) == 0 && length
== doi
.doi_data_block_size
&&
1594 P2PHASE(offset
, length
) == 0)
1595 abuf
= dmu_request_arcbuf(db
, length
);
1597 txg
= ztest_tx_assign(tx
, TXG_WAIT
, FTAG
);
1600 dmu_return_arcbuf(abuf
);
1601 dmu_buf_rele(db
, FTAG
);
1602 ztest_range_unlock(rl
);
1603 ztest_object_unlock(zd
, lr
->lr_foid
);
1609 * Usually, verify the old data before writing new data --
1610 * but not always, because we also want to verify correct
1611 * behavior when the data was not recently read into cache.
1613 ASSERT(offset
% doi
.doi_data_block_size
== 0);
1614 if (ztest_random(4) != 0) {
1615 int prefetch
= ztest_random(2) ?
1616 DMU_READ_PREFETCH
: DMU_READ_NO_PREFETCH
;
1617 ztest_block_tag_t rbt
;
1619 VERIFY(dmu_read(os
, lr
->lr_foid
, offset
,
1620 sizeof (rbt
), &rbt
, prefetch
) == 0);
1621 if (rbt
.bt_magic
== BT_MAGIC
) {
1622 ztest_bt_verify(&rbt
, os
, lr
->lr_foid
,
1623 offset
, gen
, txg
, crtxg
);
1628 * Writes can appear to be newer than the bonus buffer because
1629 * the ztest_get_data() callback does a dmu_read() of the
1630 * open-context data, which may be different than the data
1631 * as it was when the write was generated.
1633 if (zd
->zd_zilog
->zl_replay
) {
1634 ztest_bt_verify(bt
, os
, lr
->lr_foid
, offset
,
1635 MAX(gen
, bt
->bt_gen
), MAX(txg
, lrtxg
),
1640 * Set the bt's gen/txg to the bonus buffer's gen/txg
1641 * so that all of the usual ASSERTs will work.
1643 ztest_bt_generate(bt
, os
, lr
->lr_foid
, offset
, gen
, txg
, crtxg
);
1647 dmu_write(os
, lr
->lr_foid
, offset
, length
, data
, tx
);
1649 bcopy(data
, abuf
->b_data
, length
);
1650 dmu_assign_arcbuf(db
, offset
, abuf
, tx
);
1653 (void) ztest_log_write(zd
, tx
, lr
);
1655 dmu_buf_rele(db
, FTAG
);
1659 ztest_range_unlock(rl
);
1660 ztest_object_unlock(zd
, lr
->lr_foid
);
1666 ztest_replay_truncate(ztest_ds_t
*zd
, lr_truncate_t
*lr
, boolean_t byteswap
)
1668 objset_t
*os
= zd
->zd_os
;
1674 byteswap_uint64_array(lr
, sizeof (*lr
));
1676 ztest_object_lock(zd
, lr
->lr_foid
, RL_READER
);
1677 rl
= ztest_range_lock(zd
, lr
->lr_foid
, lr
->lr_offset
, lr
->lr_length
,
1680 tx
= dmu_tx_create(os
);
1682 dmu_tx_hold_free(tx
, lr
->lr_foid
, lr
->lr_offset
, lr
->lr_length
);
1684 txg
= ztest_tx_assign(tx
, TXG_WAIT
, FTAG
);
1686 ztest_range_unlock(rl
);
1687 ztest_object_unlock(zd
, lr
->lr_foid
);
1691 VERIFY(dmu_free_range(os
, lr
->lr_foid
, lr
->lr_offset
,
1692 lr
->lr_length
, tx
) == 0);
1694 (void) ztest_log_truncate(zd
, tx
, lr
);
1698 ztest_range_unlock(rl
);
1699 ztest_object_unlock(zd
, lr
->lr_foid
);
1705 ztest_replay_setattr(ztest_ds_t
*zd
, lr_setattr_t
*lr
, boolean_t byteswap
)
1707 objset_t
*os
= zd
->zd_os
;
1710 ztest_block_tag_t
*bbt
;
1711 uint64_t txg
, lrtxg
, crtxg
;
1714 byteswap_uint64_array(lr
, sizeof (*lr
));
1716 ztest_object_lock(zd
, lr
->lr_foid
, RL_WRITER
);
1718 VERIFY3U(0, ==, dmu_bonus_hold(os
, lr
->lr_foid
, FTAG
, &db
));
1720 tx
= dmu_tx_create(os
);
1721 dmu_tx_hold_bonus(tx
, lr
->lr_foid
);
1723 txg
= ztest_tx_assign(tx
, TXG_WAIT
, FTAG
);
1725 dmu_buf_rele(db
, FTAG
);
1726 ztest_object_unlock(zd
, lr
->lr_foid
);
1730 bbt
= ztest_bt_bonus(db
);
1731 ASSERT3U(bbt
->bt_magic
, ==, BT_MAGIC
);
1732 crtxg
= bbt
->bt_crtxg
;
1733 lrtxg
= lr
->lr_common
.lrc_txg
;
1735 if (zd
->zd_zilog
->zl_replay
) {
1736 ASSERT(lr
->lr_size
!= 0);
1737 ASSERT(lr
->lr_mode
!= 0);
1741 * Randomly change the size and increment the generation.
1743 lr
->lr_size
= (ztest_random(db
->db_size
/ sizeof (*bbt
)) + 1) *
1745 lr
->lr_mode
= bbt
->bt_gen
+ 1;
1750 * Verify that the current bonus buffer is not newer than our txg.
1752 ztest_bt_verify(bbt
, os
, lr
->lr_foid
, -1ULL, lr
->lr_mode
,
1753 MAX(txg
, lrtxg
), crtxg
);
1755 dmu_buf_will_dirty(db
, tx
);
1757 ASSERT3U(lr
->lr_size
, >=, sizeof (*bbt
));
1758 ASSERT3U(lr
->lr_size
, <=, db
->db_size
);
1759 VERIFY0(dmu_set_bonus(db
, lr
->lr_size
, tx
));
1760 bbt
= ztest_bt_bonus(db
);
1762 ztest_bt_generate(bbt
, os
, lr
->lr_foid
, -1ULL, lr
->lr_mode
, txg
, crtxg
);
1764 dmu_buf_rele(db
, FTAG
);
1766 (void) ztest_log_setattr(zd
, tx
, lr
);
1770 ztest_object_unlock(zd
, lr
->lr_foid
);
1775 zil_replay_func_t
*ztest_replay_vector
[TX_MAX_TYPE
] = {
1776 NULL
, /* 0 no such transaction type */
1777 ztest_replay_create
, /* TX_CREATE */
1778 NULL
, /* TX_MKDIR */
1779 NULL
, /* TX_MKXATTR */
1780 NULL
, /* TX_SYMLINK */
1781 ztest_replay_remove
, /* TX_REMOVE */
1782 NULL
, /* TX_RMDIR */
1784 NULL
, /* TX_RENAME */
1785 ztest_replay_write
, /* TX_WRITE */
1786 ztest_replay_truncate
, /* TX_TRUNCATE */
1787 ztest_replay_setattr
, /* TX_SETATTR */
1789 NULL
, /* TX_CREATE_ACL */
1790 NULL
, /* TX_CREATE_ATTR */
1791 NULL
, /* TX_CREATE_ACL_ATTR */
1792 NULL
, /* TX_MKDIR_ACL */
1793 NULL
, /* TX_MKDIR_ATTR */
1794 NULL
, /* TX_MKDIR_ACL_ATTR */
1795 NULL
, /* TX_WRITE2 */
1799 * ZIL get_data callbacks
1803 ztest_get_done(zgd_t
*zgd
, int error
)
1805 ztest_ds_t
*zd
= zgd
->zgd_private
;
1806 uint64_t object
= zgd
->zgd_rl
->rl_object
;
1809 dmu_buf_rele(zgd
->zgd_db
, zgd
);
1811 ztest_range_unlock(zgd
->zgd_rl
);
1812 ztest_object_unlock(zd
, object
);
1814 if (error
== 0 && zgd
->zgd_bp
)
1815 zil_add_block(zgd
->zgd_zilog
, zgd
->zgd_bp
);
1817 umem_free(zgd
, sizeof (*zgd
));
1821 ztest_get_data(void *arg
, lr_write_t
*lr
, char *buf
, zio_t
*zio
)
1823 ztest_ds_t
*zd
= arg
;
1824 objset_t
*os
= zd
->zd_os
;
1825 uint64_t object
= lr
->lr_foid
;
1826 uint64_t offset
= lr
->lr_offset
;
1827 uint64_t size
= lr
->lr_length
;
1828 blkptr_t
*bp
= &lr
->lr_blkptr
;
1829 uint64_t txg
= lr
->lr_common
.lrc_txg
;
1831 dmu_object_info_t doi
;
1836 ztest_object_lock(zd
, object
, RL_READER
);
1837 error
= dmu_bonus_hold(os
, object
, FTAG
, &db
);
1839 ztest_object_unlock(zd
, object
);
1843 crtxg
= ztest_bt_bonus(db
)->bt_crtxg
;
1845 if (crtxg
== 0 || crtxg
> txg
) {
1846 dmu_buf_rele(db
, FTAG
);
1847 ztest_object_unlock(zd
, object
);
1851 dmu_object_info_from_db(db
, &doi
);
1852 dmu_buf_rele(db
, FTAG
);
1855 zgd
= umem_zalloc(sizeof (*zgd
), UMEM_NOFAIL
);
1856 zgd
->zgd_zilog
= zd
->zd_zilog
;
1857 zgd
->zgd_private
= zd
;
1859 if (buf
!= NULL
) { /* immediate write */
1860 zgd
->zgd_rl
= ztest_range_lock(zd
, object
, offset
, size
,
1863 error
= dmu_read(os
, object
, offset
, size
, buf
,
1864 DMU_READ_NO_PREFETCH
);
1867 size
= doi
.doi_data_block_size
;
1869 offset
= P2ALIGN(offset
, size
);
1871 ASSERT(offset
< size
);
1875 zgd
->zgd_rl
= ztest_range_lock(zd
, object
, offset
, size
,
1878 error
= dmu_buf_hold(os
, object
, offset
, zgd
, &db
,
1879 DMU_READ_NO_PREFETCH
);
1882 blkptr_t
*obp
= dmu_buf_get_blkptr(db
);
1884 ASSERT(BP_IS_HOLE(bp
));
1891 ASSERT(db
->db_offset
== offset
);
1892 ASSERT(db
->db_size
== size
);
1894 error
= dmu_sync(zio
, lr
->lr_common
.lrc_txg
,
1895 ztest_get_done
, zgd
);
1902 ztest_get_done(zgd
, error
);
1908 ztest_lr_alloc(size_t lrsize
, char *name
)
1911 size_t namesize
= name
? strlen(name
) + 1 : 0;
1913 lr
= umem_zalloc(lrsize
+ namesize
, UMEM_NOFAIL
);
1916 bcopy(name
, lr
+ lrsize
, namesize
);
1922 ztest_lr_free(void *lr
, size_t lrsize
, char *name
)
1924 size_t namesize
= name
? strlen(name
) + 1 : 0;
1926 umem_free(lr
, lrsize
+ namesize
);
1930 * Lookup a bunch of objects. Returns the number of objects not found.
1933 ztest_lookup(ztest_ds_t
*zd
, ztest_od_t
*od
, int count
)
1938 ASSERT(_mutex_held(&zd
->zd_dirobj_lock
));
1940 for (int i
= 0; i
< count
; i
++, od
++) {
1942 error
= zap_lookup(zd
->zd_os
, od
->od_dir
, od
->od_name
,
1943 sizeof (uint64_t), 1, &od
->od_object
);
1945 ASSERT(error
== ENOENT
);
1946 ASSERT(od
->od_object
== 0);
1950 ztest_block_tag_t
*bbt
;
1951 dmu_object_info_t doi
;
1953 ASSERT(od
->od_object
!= 0);
1954 ASSERT(missing
== 0); /* there should be no gaps */
1956 ztest_object_lock(zd
, od
->od_object
, RL_READER
);
1957 VERIFY3U(0, ==, dmu_bonus_hold(zd
->zd_os
,
1958 od
->od_object
, FTAG
, &db
));
1959 dmu_object_info_from_db(db
, &doi
);
1960 bbt
= ztest_bt_bonus(db
);
1961 ASSERT3U(bbt
->bt_magic
, ==, BT_MAGIC
);
1962 od
->od_type
= doi
.doi_type
;
1963 od
->od_blocksize
= doi
.doi_data_block_size
;
1964 od
->od_gen
= bbt
->bt_gen
;
1965 dmu_buf_rele(db
, FTAG
);
1966 ztest_object_unlock(zd
, od
->od_object
);
1974 ztest_create(ztest_ds_t
*zd
, ztest_od_t
*od
, int count
)
1978 ASSERT(_mutex_held(&zd
->zd_dirobj_lock
));
1980 for (int i
= 0; i
< count
; i
++, od
++) {
1987 lr_create_t
*lr
= ztest_lr_alloc(sizeof (*lr
), od
->od_name
);
1989 lr
->lr_doid
= od
->od_dir
;
1990 lr
->lr_foid
= 0; /* 0 to allocate, > 0 to claim */
1991 lr
->lrz_type
= od
->od_crtype
;
1992 lr
->lrz_blocksize
= od
->od_crblocksize
;
1993 lr
->lrz_ibshift
= ztest_random_ibshift();
1994 lr
->lrz_bonustype
= DMU_OT_UINT64_OTHER
;
1995 lr
->lrz_bonuslen
= dmu_bonus_max();
1996 lr
->lr_gen
= od
->od_crgen
;
1997 lr
->lr_crtime
[0] = time(NULL
);
1999 if (ztest_replay_create(zd
, lr
, B_FALSE
) != 0) {
2000 ASSERT(missing
== 0);
2004 od
->od_object
= lr
->lr_foid
;
2005 od
->od_type
= od
->od_crtype
;
2006 od
->od_blocksize
= od
->od_crblocksize
;
2007 od
->od_gen
= od
->od_crgen
;
2008 ASSERT(od
->od_object
!= 0);
2011 ztest_lr_free(lr
, sizeof (*lr
), od
->od_name
);
2018 ztest_remove(ztest_ds_t
*zd
, ztest_od_t
*od
, int count
)
2023 ASSERT(_mutex_held(&zd
->zd_dirobj_lock
));
2027 for (int i
= count
- 1; i
>= 0; i
--, od
--) {
2034 * No object was found.
2036 if (od
->od_object
== 0)
2039 lr_remove_t
*lr
= ztest_lr_alloc(sizeof (*lr
), od
->od_name
);
2041 lr
->lr_doid
= od
->od_dir
;
2043 if ((error
= ztest_replay_remove(zd
, lr
, B_FALSE
)) != 0) {
2044 ASSERT3U(error
, ==, ENOSPC
);
2049 ztest_lr_free(lr
, sizeof (*lr
), od
->od_name
);
2056 ztest_write(ztest_ds_t
*zd
, uint64_t object
, uint64_t offset
, uint64_t size
,
2062 lr
= ztest_lr_alloc(sizeof (*lr
) + size
, NULL
);
2064 lr
->lr_foid
= object
;
2065 lr
->lr_offset
= offset
;
2066 lr
->lr_length
= size
;
2068 BP_ZERO(&lr
->lr_blkptr
);
2070 bcopy(data
, lr
+ 1, size
);
2072 error
= ztest_replay_write(zd
, lr
, B_FALSE
);
2074 ztest_lr_free(lr
, sizeof (*lr
) + size
, NULL
);
2080 ztest_truncate(ztest_ds_t
*zd
, uint64_t object
, uint64_t offset
, uint64_t size
)
2085 lr
= ztest_lr_alloc(sizeof (*lr
), NULL
);
2087 lr
->lr_foid
= object
;
2088 lr
->lr_offset
= offset
;
2089 lr
->lr_length
= size
;
2091 error
= ztest_replay_truncate(zd
, lr
, B_FALSE
);
2093 ztest_lr_free(lr
, sizeof (*lr
), NULL
);
2099 ztest_setattr(ztest_ds_t
*zd
, uint64_t object
)
2104 lr
= ztest_lr_alloc(sizeof (*lr
), NULL
);
2106 lr
->lr_foid
= object
;
2110 error
= ztest_replay_setattr(zd
, lr
, B_FALSE
);
2112 ztest_lr_free(lr
, sizeof (*lr
), NULL
);
2118 ztest_prealloc(ztest_ds_t
*zd
, uint64_t object
, uint64_t offset
, uint64_t size
)
2120 objset_t
*os
= zd
->zd_os
;
2125 txg_wait_synced(dmu_objset_pool(os
), 0);
2127 ztest_object_lock(zd
, object
, RL_READER
);
2128 rl
= ztest_range_lock(zd
, object
, offset
, size
, RL_WRITER
);
2130 tx
= dmu_tx_create(os
);
2132 dmu_tx_hold_write(tx
, object
, offset
, size
);
2134 txg
= ztest_tx_assign(tx
, TXG_WAIT
, FTAG
);
2137 dmu_prealloc(os
, object
, offset
, size
, tx
);
2139 txg_wait_synced(dmu_objset_pool(os
), txg
);
2141 (void) dmu_free_long_range(os
, object
, offset
, size
);
2144 ztest_range_unlock(rl
);
2145 ztest_object_unlock(zd
, object
);
2149 ztest_io(ztest_ds_t
*zd
, uint64_t object
, uint64_t offset
)
2152 ztest_block_tag_t wbt
;
2153 dmu_object_info_t doi
;
2154 enum ztest_io_type io_type
;
2158 VERIFY(dmu_object_info(zd
->zd_os
, object
, &doi
) == 0);
2159 blocksize
= doi
.doi_data_block_size
;
2160 data
= umem_alloc(blocksize
, UMEM_NOFAIL
);
2163 * Pick an i/o type at random, biased toward writing block tags.
2165 io_type
= ztest_random(ZTEST_IO_TYPES
);
2166 if (ztest_random(2) == 0)
2167 io_type
= ZTEST_IO_WRITE_TAG
;
2169 (void) rw_rdlock(&zd
->zd_zilog_lock
);
2173 case ZTEST_IO_WRITE_TAG
:
2174 ztest_bt_generate(&wbt
, zd
->zd_os
, object
, offset
, 0, 0, 0);
2175 (void) ztest_write(zd
, object
, offset
, sizeof (wbt
), &wbt
);
2178 case ZTEST_IO_WRITE_PATTERN
:
2179 (void) memset(data
, 'a' + (object
+ offset
) % 5, blocksize
);
2180 if (ztest_random(2) == 0) {
2182 * Induce fletcher2 collisions to ensure that
2183 * zio_ddt_collision() detects and resolves them
2184 * when using fletcher2-verify for deduplication.
2186 ((uint64_t *)data
)[0] ^= 1ULL << 63;
2187 ((uint64_t *)data
)[4] ^= 1ULL << 63;
2189 (void) ztest_write(zd
, object
, offset
, blocksize
, data
);
2192 case ZTEST_IO_WRITE_ZEROES
:
2193 bzero(data
, blocksize
);
2194 (void) ztest_write(zd
, object
, offset
, blocksize
, data
);
2197 case ZTEST_IO_TRUNCATE
:
2198 (void) ztest_truncate(zd
, object
, offset
, blocksize
);
2201 case ZTEST_IO_SETATTR
:
2202 (void) ztest_setattr(zd
, object
);
2205 case ZTEST_IO_REWRITE
:
2206 (void) rw_rdlock(&ztest_name_lock
);
2207 err
= ztest_dsl_prop_set_uint64(zd
->zd_name
,
2208 ZFS_PROP_CHECKSUM
, spa_dedup_checksum(ztest_spa
),
2210 VERIFY(err
== 0 || err
== ENOSPC
);
2211 err
= ztest_dsl_prop_set_uint64(zd
->zd_name
,
2212 ZFS_PROP_COMPRESSION
,
2213 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION
),
2215 VERIFY(err
== 0 || err
== ENOSPC
);
2216 (void) rw_unlock(&ztest_name_lock
);
2218 VERIFY0(dmu_read(zd
->zd_os
, object
, offset
, blocksize
, data
,
2219 DMU_READ_NO_PREFETCH
));
2221 (void) ztest_write(zd
, object
, offset
, blocksize
, data
);
2225 (void) rw_unlock(&zd
->zd_zilog_lock
);
2227 umem_free(data
, blocksize
);
2231 * Initialize an object description template.
2234 ztest_od_init(ztest_od_t
*od
, uint64_t id
, char *tag
, uint64_t index
,
2235 dmu_object_type_t type
, uint64_t blocksize
, uint64_t gen
)
2237 od
->od_dir
= ZTEST_DIROBJ
;
2240 od
->od_crtype
= type
;
2241 od
->od_crblocksize
= blocksize
? blocksize
: ztest_random_blocksize();
2244 od
->od_type
= DMU_OT_NONE
;
2245 od
->od_blocksize
= 0;
2248 (void) snprintf(od
->od_name
, sizeof (od
->od_name
), "%s(%lld)[%llu]",
2249 tag
, (int64_t)id
, index
);
2253 * Lookup or create the objects for a test using the od template.
2254 * If the objects do not all exist, or if 'remove' is specified,
2255 * remove any existing objects and create new ones. Otherwise,
2256 * use the existing objects.
2259 ztest_object_init(ztest_ds_t
*zd
, ztest_od_t
*od
, size_t size
, boolean_t remove
)
2261 int count
= size
/ sizeof (*od
);
2264 VERIFY(mutex_lock(&zd
->zd_dirobj_lock
) == 0);
2265 if ((ztest_lookup(zd
, od
, count
) != 0 || remove
) &&
2266 (ztest_remove(zd
, od
, count
) != 0 ||
2267 ztest_create(zd
, od
, count
) != 0))
2270 VERIFY(mutex_unlock(&zd
->zd_dirobj_lock
) == 0);
2277 ztest_zil_commit(ztest_ds_t
*zd
, uint64_t id
)
2279 zilog_t
*zilog
= zd
->zd_zilog
;
2281 (void) rw_rdlock(&zd
->zd_zilog_lock
);
2283 zil_commit(zilog
, ztest_random(ZTEST_OBJECTS
));
2286 * Remember the committed values in zd, which is in parent/child
2287 * shared memory. If we die, the next iteration of ztest_run()
2288 * will verify that the log really does contain this record.
2290 mutex_enter(&zilog
->zl_lock
);
2291 ASSERT(zd
->zd_shared
!= NULL
);
2292 ASSERT3U(zd
->zd_shared
->zd_seq
, <=, zilog
->zl_commit_lr_seq
);
2293 zd
->zd_shared
->zd_seq
= zilog
->zl_commit_lr_seq
;
2294 mutex_exit(&zilog
->zl_lock
);
2296 (void) rw_unlock(&zd
->zd_zilog_lock
);
2300 * This function is designed to simulate the operations that occur during a
2301 * mount/unmount operation. We hold the dataset across these operations in an
2302 * attempt to expose any implicit assumptions about ZIL management.
2306 ztest_zil_remount(ztest_ds_t
*zd
, uint64_t id
)
2308 objset_t
*os
= zd
->zd_os
;
2311 * We grab the zd_dirobj_lock to ensure that no other thread is
2312 * updating the zil (i.e. adding in-memory log records) and the
2313 * zd_zilog_lock to block any I/O.
2315 VERIFY0(mutex_lock(&zd
->zd_dirobj_lock
));
2316 (void) rw_wrlock(&zd
->zd_zilog_lock
);
2318 /* zfsvfs_teardown() */
2319 zil_close(zd
->zd_zilog
);
2321 /* zfsvfs_setup() */
2322 VERIFY(zil_open(os
, ztest_get_data
) == zd
->zd_zilog
);
2323 zil_replay(os
, zd
, ztest_replay_vector
);
2325 (void) rw_unlock(&zd
->zd_zilog_lock
);
2326 VERIFY(mutex_unlock(&zd
->zd_dirobj_lock
) == 0);
2330 * Verify that we can't destroy an active pool, create an existing pool,
2331 * or create a pool with a bad vdev spec.
2335 ztest_spa_create_destroy(ztest_ds_t
*zd
, uint64_t id
)
2337 ztest_shared_opts_t
*zo
= &ztest_opts
;
2342 * Attempt to create using a bad file.
2344 nvroot
= make_vdev_root("/dev/bogus", NULL
, NULL
, 0, 0, 0, 0, 0, 1);
2345 VERIFY3U(ENOENT
, ==,
2346 spa_create("ztest_bad_file", nvroot
, NULL
, NULL
));
2347 nvlist_free(nvroot
);
2350 * Attempt to create using a bad mirror.
2352 nvroot
= make_vdev_root("/dev/bogus", NULL
, NULL
, 0, 0, 0, 0, 2, 1);
2353 VERIFY3U(ENOENT
, ==,
2354 spa_create("ztest_bad_mirror", nvroot
, NULL
, NULL
));
2355 nvlist_free(nvroot
);
2358 * Attempt to create an existing pool. It shouldn't matter
2359 * what's in the nvroot; we should fail with EEXIST.
2361 (void) rw_rdlock(&ztest_name_lock
);
2362 nvroot
= make_vdev_root("/dev/bogus", NULL
, NULL
, 0, 0, 0, 0, 0, 1);
2363 VERIFY3U(EEXIST
, ==, spa_create(zo
->zo_pool
, nvroot
, NULL
, NULL
));
2364 nvlist_free(nvroot
);
2365 VERIFY3U(0, ==, spa_open(zo
->zo_pool
, &spa
, FTAG
));
2366 VERIFY3U(EBUSY
, ==, spa_destroy(zo
->zo_pool
));
2367 spa_close(spa
, FTAG
);
2369 (void) rw_unlock(&ztest_name_lock
);
2374 ztest_spa_upgrade(ztest_ds_t
*zd
, uint64_t id
)
2377 uint64_t initial_version
= SPA_VERSION_INITIAL
;
2378 uint64_t version
, newversion
;
2379 nvlist_t
*nvroot
, *props
;
2382 VERIFY0(mutex_lock(&ztest_vdev_lock
));
2383 name
= kmem_asprintf("%s_upgrade", ztest_opts
.zo_pool
);
2386 * Clean up from previous runs.
2388 (void) spa_destroy(name
);
2390 nvroot
= make_vdev_root(NULL
, NULL
, name
, ztest_opts
.zo_vdev_size
, 0,
2391 0, ztest_opts
.zo_raidz
, ztest_opts
.zo_mirrors
, 1);
2394 * If we're configuring a RAIDZ device then make sure that the
2395 * initial version is capable of supporting that feature.
2397 switch (ztest_opts
.zo_raidz_parity
) {
2400 initial_version
= SPA_VERSION_INITIAL
;
2403 initial_version
= SPA_VERSION_RAIDZ2
;
2406 initial_version
= SPA_VERSION_RAIDZ3
;
2411 * Create a pool with a spa version that can be upgraded. Pick
2412 * a value between initial_version and SPA_VERSION_BEFORE_FEATURES.
2415 version
= ztest_random_spa_version(initial_version
);
2416 } while (version
> SPA_VERSION_BEFORE_FEATURES
);
2418 props
= fnvlist_alloc();
2419 fnvlist_add_uint64(props
,
2420 zpool_prop_to_name(ZPOOL_PROP_VERSION
), version
);
2421 VERIFY0(spa_create(name
, nvroot
, props
, NULL
));
2422 fnvlist_free(nvroot
);
2423 fnvlist_free(props
);
2425 VERIFY0(spa_open(name
, &spa
, FTAG
));
2426 VERIFY3U(spa_version(spa
), ==, version
);
2427 newversion
= ztest_random_spa_version(version
+ 1);
2429 if (ztest_opts
.zo_verbose
>= 4) {
2430 (void) printf("upgrading spa version from %llu to %llu\n",
2431 (u_longlong_t
)version
, (u_longlong_t
)newversion
);
2434 spa_upgrade(spa
, newversion
);
2435 VERIFY3U(spa_version(spa
), >, version
);
2436 VERIFY3U(spa_version(spa
), ==, fnvlist_lookup_uint64(spa
->spa_config
,
2437 zpool_prop_to_name(ZPOOL_PROP_VERSION
)));
2438 spa_close(spa
, FTAG
);
2441 VERIFY0(mutex_unlock(&ztest_vdev_lock
));
2445 vdev_lookup_by_path(vdev_t
*vd
, const char *path
)
2449 if (vd
->vdev_path
!= NULL
&& strcmp(path
, vd
->vdev_path
) == 0)
2452 for (int c
= 0; c
< vd
->vdev_children
; c
++)
2453 if ((mvd
= vdev_lookup_by_path(vd
->vdev_child
[c
], path
)) !=
2461 * Find the first available hole which can be used as a top-level.
2464 find_vdev_hole(spa_t
*spa
)
2466 vdev_t
*rvd
= spa
->spa_root_vdev
;
2469 ASSERT(spa_config_held(spa
, SCL_VDEV
, RW_READER
) == SCL_VDEV
);
2471 for (c
= 0; c
< rvd
->vdev_children
; c
++) {
2472 vdev_t
*cvd
= rvd
->vdev_child
[c
];
2474 if (cvd
->vdev_ishole
)
/*
 * Verify that vdev_add() works as expected.
 */
void
ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
{
        ztest_shared_t *zs = ztest_shared;
        spa_t *spa = ztest_spa;
        uint64_t leaves;
        uint64_t guid;
        nvlist_t *nvroot;
        int error;

        VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
        leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz;

        spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

        ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves;

        /*
         * If we have slogs then remove them 1/4 of the time.
         */
        if (spa_has_slogs(spa) && ztest_random(4) == 0) {
                /*
                 * Grab the guid from the head of the log class rotor.
                 */
                guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid;

                spa_config_exit(spa, SCL_VDEV, FTAG);

                /*
                 * We have to grab the zs_name_lock as writer to
                 * prevent a race between removing a slog (dmu_objset_find)
                 * and destroying a dataset. Removing the slog will
                 * grab a reference on the dataset which may cause
                 * dmu_objset_destroy() to fail with EBUSY thus
                 * leaving the dataset in an inconsistent state.
                 */
                VERIFY(rw_wrlock(&ztest_name_lock) == 0);
                error = spa_vdev_remove(spa, guid, B_FALSE);
                VERIFY(rw_unlock(&ztest_name_lock) == 0);

                if (error && error != EEXIST)
                        fatal(0, "spa_vdev_remove() = %d", error);
        } else {
                spa_config_exit(spa, SCL_VDEV, FTAG);

                /*
                 * Make 1/4 of the devices be log devices.
                 */
                nvroot = make_vdev_root(NULL, NULL, NULL,
                    ztest_opts.zo_vdev_size, 0,
                    ztest_random(4) == 0, ztest_opts.zo_raidz,
                    zs->zs_mirrors, 1);

                error = spa_vdev_add(spa, nvroot);
                nvlist_free(nvroot);

                if (error == ENOSPC)
                        ztest_record_enospc("spa_vdev_add");
                else if (error != 0)
                        fatal(0, "spa_vdev_add() = %d", error);
        }

        VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
}
/*
 * Verify that adding/removing aux devices (l2arc, hot spare) works as
 * expected.
 */
void
ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
{
        ztest_shared_t *zs = ztest_shared;
        spa_t *spa = ztest_spa;
        vdev_t *rvd = spa->spa_root_vdev;
        spa_aux_vdev_t *sav;
        char *aux;
        uint64_t guid = 0;
        int error;

        if (ztest_random(2) == 0) {
                sav = &spa->spa_spares;
                aux = ZPOOL_CONFIG_SPARES;
        } else {
                sav = &spa->spa_l2cache;
                aux = ZPOOL_CONFIG_L2CACHE;
        }

        VERIFY(mutex_lock(&ztest_vdev_lock) == 0);

        spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

        if (sav->sav_count != 0 && ztest_random(4) == 0) {
                /*
                 * Pick a random device to remove.
                 */
                guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid;
        } else {
                /*
                 * Find an unused device we can add.
                 */
                zs->zs_vdev_aux = 0;
                for (;;) {
                        char path[MAXPATHLEN];
                        int c;

                        (void) snprintf(path, sizeof (path),
                            ztest_aux_template,
                            ztest_opts.zo_dir, ztest_opts.zo_pool, aux,
                            zs->zs_vdev_aux);
                        for (c = 0; c < sav->sav_count; c++)
                                if (strcmp(sav->sav_vdevs[c]->vdev_path,
                                    path) == 0)
                                        break;
                        if (c == sav->sav_count &&
                            vdev_lookup_by_path(rvd, path) == NULL)
                                break;
                        zs->zs_vdev_aux++;
                }
        }

        spa_config_exit(spa, SCL_VDEV, FTAG);

        if (guid == 0) {
                /*
                 * Add a new device.
                 */
                nvlist_t *nvroot = make_vdev_root(NULL, aux, NULL,
                    (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1);
                error = spa_vdev_add(spa, nvroot);
                if (error != 0)
                        fatal(0, "spa_vdev_add(%p) = %d", nvroot, error);
                nvlist_free(nvroot);
        } else {
                /*
                 * Remove an existing device.  Sometimes, dirty its
                 * vdev state first to make sure we handle removal
                 * of devices that have pending state changes.
                 */
                if (ztest_random(2) == 0)
                        (void) vdev_online(spa, guid, 0, NULL);

                error = spa_vdev_remove(spa, guid, B_FALSE);
                if (error != 0 && error != EBUSY)
                        fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
        }

        VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
}
/*
 * split a pool if it has mirror tlvdevs
 */
void
ztest_split_pool(ztest_ds_t *zd, uint64_t id)
{
        ztest_shared_t *zs = ztest_shared;
        spa_t *spa = ztest_spa;
        vdev_t *rvd = spa->spa_root_vdev;
        nvlist_t *tree, **child, *config, *split, **schild;
        uint_t c, children, schildren = 0, lastlogid = 0;
        int error = 0;

        VERIFY(mutex_lock(&ztest_vdev_lock) == 0);

        /* ensure we have a useable config; mirrors of raidz aren't supported */
        if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) {
                VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
                return;
        }

        /* clean up the old pool, if any */
        (void) spa_destroy("splitp");

        spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

        /* generate a config from the existing config */
        mutex_enter(&spa->spa_props_lock);
        VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE,
            &tree) == 0);
        mutex_exit(&spa->spa_props_lock);

        VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
            &children) == 0);

        schild = malloc(rvd->vdev_children * sizeof (nvlist_t *));
        for (c = 0; c < children; c++) {
                vdev_t *tvd = rvd->vdev_child[c];
                nvlist_t **mchild;
                uint_t mchildren;

                if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) {
                        VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME,
                            0) == 0);
                        VERIFY(nvlist_add_string(schild[schildren],
                            ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0);
                        VERIFY(nvlist_add_uint64(schild[schildren],
                            ZPOOL_CONFIG_IS_HOLE, 1) == 0);
                        if (lastlogid == 0)
                                lastlogid = schildren;
                        ++schildren;
                        continue;
                }

                lastlogid = 0;
                VERIFY(nvlist_lookup_nvlist_array(child[c],
                    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
                VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0);
        }

        /* OK, create a config that can be used to split */
        VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0);
        VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE,
            VDEV_TYPE_ROOT) == 0);
        VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild,
            lastlogid != 0 ? lastlogid : schildren) == 0);

        VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0);
        VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0);

        for (c = 0; c < schildren; c++)
                nvlist_free(schild[c]);
        free(schild);
        nvlist_free(split);

        spa_config_exit(spa, SCL_VDEV, FTAG);

        (void) rw_wrlock(&ztest_name_lock);
        error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
        (void) rw_unlock(&ztest_name_lock);

        nvlist_free(config);

        if (error == 0 && ztest_opts.zo_verbose >= 3) {
                (void) printf("successful split - results:\n");
                mutex_enter(&spa_namespace_lock);
                show_pool_stats(spa);
                show_pool_stats(spa_lookup("splitp"));
                mutex_exit(&spa_namespace_lock);
        }

        VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
}
2729 * Verify that we can attach and detach devices.
2733 ztest_vdev_attach_detach(ztest_ds_t
*zd
, uint64_t id
)
2735 ztest_shared_t
*zs
= ztest_shared
;
2736 spa_t
*spa
= ztest_spa
;
2737 spa_aux_vdev_t
*sav
= &spa
->spa_spares
;
2738 vdev_t
*rvd
= spa
->spa_root_vdev
;
2739 vdev_t
*oldvd
, *newvd
, *pvd
;
2743 uint64_t ashift
= ztest_get_ashift();
2744 uint64_t oldguid
, pguid
;
2745 uint64_t oldsize
, newsize
;
2746 char oldpath
[MAXPATHLEN
], newpath
[MAXPATHLEN
];
2748 int oldvd_has_siblings
= B_FALSE
;
2749 int newvd_is_spare
= B_FALSE
;
2751 int error
, expected_error
;
2753 VERIFY(mutex_lock(&ztest_vdev_lock
) == 0);
2754 leaves
= MAX(zs
->zs_mirrors
, 1) * ztest_opts
.zo_raidz
;
2756 spa_config_enter(spa
, SCL_VDEV
, FTAG
, RW_READER
);
2759 * Decide whether to do an attach or a replace.
2761 replacing
= ztest_random(2);
2764 * Pick a random top-level vdev.
2766 top
= ztest_random_vdev_top(spa
, B_TRUE
);
2769 * Pick a random leaf within it.
2771 leaf
= ztest_random(leaves
);
2776 oldvd
= rvd
->vdev_child
[top
];
2777 if (zs
->zs_mirrors
>= 1) {
2778 ASSERT(oldvd
->vdev_ops
== &vdev_mirror_ops
);
2779 ASSERT(oldvd
->vdev_children
>= zs
->zs_mirrors
);
2780 oldvd
= oldvd
->vdev_child
[leaf
/ ztest_opts
.zo_raidz
];
2782 if (ztest_opts
.zo_raidz
> 1) {
2783 ASSERT(oldvd
->vdev_ops
== &vdev_raidz_ops
);
2784 ASSERT(oldvd
->vdev_children
== ztest_opts
.zo_raidz
);
2785 oldvd
= oldvd
->vdev_child
[leaf
% ztest_opts
.zo_raidz
];
2789 * If we're already doing an attach or replace, oldvd may be a
2790 * mirror vdev -- in which case, pick a random child.
2792 while (oldvd
->vdev_children
!= 0) {
2793 oldvd_has_siblings
= B_TRUE
;
2794 ASSERT(oldvd
->vdev_children
>= 2);
2795 oldvd
= oldvd
->vdev_child
[ztest_random(oldvd
->vdev_children
)];
2798 oldguid
= oldvd
->vdev_guid
;
2799 oldsize
= vdev_get_min_asize(oldvd
);
2800 oldvd_is_log
= oldvd
->vdev_top
->vdev_islog
;
2801 (void) strcpy(oldpath
, oldvd
->vdev_path
);
2802 pvd
= oldvd
->vdev_parent
;
2803 pguid
= pvd
->vdev_guid
;
2806 * If oldvd has siblings, then half of the time, detach it.
2808 if (oldvd_has_siblings
&& ztest_random(2) == 0) {
2809 spa_config_exit(spa
, SCL_VDEV
, FTAG
);
2810 error
= spa_vdev_detach(spa
, oldguid
, pguid
, B_FALSE
);
2811 if (error
!= 0 && error
!= ENODEV
&& error
!= EBUSY
&&
2813 fatal(0, "detach (%s) returned %d", oldpath
, error
);
2814 VERIFY(mutex_unlock(&ztest_vdev_lock
) == 0);
2819 * For the new vdev, choose with equal probability between the two
2820 * standard paths (ending in either 'a' or 'b') or a random hot spare.
2822 if (sav
->sav_count
!= 0 && ztest_random(3) == 0) {
2823 newvd
= sav
->sav_vdevs
[ztest_random(sav
->sav_count
)];
2824 newvd_is_spare
= B_TRUE
;
2825 (void) strcpy(newpath
, newvd
->vdev_path
);
2827 (void) snprintf(newpath
, sizeof (newpath
), ztest_dev_template
,
2828 ztest_opts
.zo_dir
, ztest_opts
.zo_pool
,
2829 top
* leaves
+ leaf
);
2830 if (ztest_random(2) == 0)
2831 newpath
[strlen(newpath
) - 1] = 'b';
2832 newvd
= vdev_lookup_by_path(rvd
, newpath
);
2836 newsize
= vdev_get_min_asize(newvd
);
2839 * Make newsize a little bigger or smaller than oldsize.
2840 * If it's smaller, the attach should fail.
2841 * If it's larger, and we're doing a replace,
2842 * we should get dynamic LUN growth when we're done.
2844 newsize
= 10 * oldsize
/ (9 + ztest_random(3));
2848 * If pvd is not a mirror or root, the attach should fail with ENOTSUP,
2849 * unless it's a replace; in that case any non-replacing parent is OK.
2851 * If newvd is already part of the pool, it should fail with EBUSY.
2853 * If newvd is too small, it should fail with EOVERFLOW.
2855 if (pvd
->vdev_ops
!= &vdev_mirror_ops
&&
2856 pvd
->vdev_ops
!= &vdev_root_ops
&& (!replacing
||
2857 pvd
->vdev_ops
== &vdev_replacing_ops
||
2858 pvd
->vdev_ops
== &vdev_spare_ops
))
2859 expected_error
= ENOTSUP
;
2860 else if (newvd_is_spare
&& (!replacing
|| oldvd_is_log
))
2861 expected_error
= ENOTSUP
;
2862 else if (newvd
== oldvd
)
2863 expected_error
= replacing
? 0 : EBUSY
;
2864 else if (vdev_lookup_by_path(rvd
, newpath
) != NULL
)
2865 expected_error
= EBUSY
;
2866 else if (newsize
< oldsize
)
2867 expected_error
= EOVERFLOW
;
2868 else if (ashift
> oldvd
->vdev_top
->vdev_ashift
)
2869 expected_error
= EDOM
;
2873 spa_config_exit(spa
, SCL_VDEV
, FTAG
);
2876 * Build the nvlist describing newpath.
2878 root
= make_vdev_root(newpath
, NULL
, NULL
, newvd
== NULL
? newsize
: 0,
2879 ashift
, 0, 0, 0, 1);
2881 error
= spa_vdev_attach(spa
, oldguid
, root
, replacing
);
2886 * If our parent was the replacing vdev, but the replace completed,
2887 * then instead of failing with ENOTSUP we may either succeed,
2888 * fail with ENODEV, or fail with EOVERFLOW.
2890 if (expected_error
== ENOTSUP
&&
2891 (error
== 0 || error
== ENODEV
|| error
== EOVERFLOW
))
2892 expected_error
= error
;
2895 * If someone grew the LUN, the replacement may be too small.
2897 if (error
== EOVERFLOW
|| error
== EBUSY
)
2898 expected_error
= error
;
2900 /* XXX workaround 6690467 */
2901 if (error
!= expected_error
&& expected_error
!= EBUSY
) {
2902 fatal(0, "attach (%s %llu, %s %llu, %d) "
2903 "returned %d, expected %d",
2904 oldpath
, oldsize
, newpath
,
2905 newsize
, replacing
, error
, expected_error
);
2908 VERIFY(mutex_unlock(&ztest_vdev_lock
) == 0);
/*
 * Callback function which expands the physical size of the vdev.
 */
vdev_t *
grow_vdev(vdev_t *vd, void *arg)
{
        spa_t *spa = vd->vdev_spa;
        size_t *newsize = arg;
        size_t fsize;
        int fd;

        ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
        ASSERT(vd->vdev_ops->vdev_op_leaf);

        if ((fd = open(vd->vdev_path, O_RDWR)) == -1)
                return (vd);

        fsize = lseek(fd, 0, SEEK_END);
        (void) ftruncate(fd, *newsize);

        if (ztest_opts.zo_verbose >= 6) {
                (void) printf("%s grew from %lu to %lu bytes\n",
                    vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize);
        }
        (void) close(fd);
        return (NULL);
}

/*
 * Callback function which expands a given vdev by calling vdev_online().
 */
vdev_t *
online_vdev(vdev_t *vd, void *arg)
{
        spa_t *spa = vd->vdev_spa;
        vdev_t *tvd = vd->vdev_top;
        uint64_t guid = vd->vdev_guid;
        uint64_t generation = spa->spa_config_generation + 1;
        vdev_state_t newstate = VDEV_STATE_UNKNOWN;
        int error;

        ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
        ASSERT(vd->vdev_ops->vdev_op_leaf);

        /* Calling vdev_online will initialize the new metaslabs */
        spa_config_exit(spa, SCL_STATE, spa);
        error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate);
        spa_config_enter(spa, SCL_STATE, spa, RW_READER);

        /*
         * If vdev_online returned an error or the underlying vdev_open
         * failed then we abort the expand. The only way to know that
         * vdev_open fails is by checking the returned newstate.
         */
        if (error || newstate != VDEV_STATE_HEALTHY) {
                if (ztest_opts.zo_verbose >= 5) {
                        (void) printf("Unable to expand vdev, state %llu, "
                            "error %d\n", (u_longlong_t)newstate, error);
                }
                return (vd);
        }
        ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY);

        /*
         * Since we dropped the lock we need to ensure that we're
         * still talking to the original vdev. It's possible this
         * vdev may have been detached/replaced while we were
         * trying to online it.
         */
        if (generation != spa->spa_config_generation) {
                if (ztest_opts.zo_verbose >= 5) {
                        (void) printf("vdev configuration has changed, "
                            "guid %llu, state %llu, expected gen %llu, "
                            "got gen %llu\n",
                            (u_longlong_t)guid,
                            (u_longlong_t)tvd->vdev_state,
                            (u_longlong_t)generation,
                            (u_longlong_t)spa->spa_config_generation);
                }
                return (vd);
        }
        return (NULL);
}

/*
 * Traverse the vdev tree calling the supplied function.
 * We continue to walk the tree until we either have walked all
 * children or we receive a non-NULL return from the callback.
 * If a NULL callback is passed, then we just return back the first
 * leaf vdev we encounter.
 */
vdev_t *
vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg)
{
        if (vd->vdev_ops->vdev_op_leaf) {
                if (func == NULL)
                        return (vd);
                else
                        return (func(vd, arg));
        }

        for (uint_t c = 0; c < vd->vdev_children; c++) {
                vdev_t *cvd = vd->vdev_child[c];
                if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL)
                        return (cvd);
        }
        return (NULL);
}
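/*
 * For example, the LUN growth test below walks a top-level vdev twice:
 * once with grow_vdev to expand each leaf's backing file, and once with
 * online_vdev to bring the expanded leaves online and create the new
 * metaslabs.
 */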
/*
 * Verify that dynamic LUN growth works as expected.
 */
void
ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id)
{
        spa_t *spa = ztest_spa;
        vdev_t *vd, *tvd;
        metaslab_class_t *mc;
        metaslab_group_t *mg;
        size_t psize, newsize;
        uint64_t top;
        uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;

        VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
        spa_config_enter(spa, SCL_STATE, spa, RW_READER);

        top = ztest_random_vdev_top(spa, B_TRUE);

        tvd = spa->spa_root_vdev->vdev_child[top];
        mg = tvd->vdev_mg;
        mc = mg->mg_class;
        old_ms_count = tvd->vdev_ms_count;
        old_class_space = metaslab_class_get_space(mc);

        /*
         * Determine the size of the first leaf vdev associated with
         * our top-level device.
         */
        vd = vdev_walk_tree(tvd, NULL, NULL);
        ASSERT3P(vd, !=, NULL);
        ASSERT(vd->vdev_ops->vdev_op_leaf);

        psize = vd->vdev_psize;

        /*
         * We only try to expand the vdev if it's healthy, less than 4x its
         * original size, and it has a valid psize.
         */
        if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
            psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) {
                spa_config_exit(spa, SCL_STATE, spa);
                VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
                return;
        }
        newsize = psize + psize / 8;
        ASSERT3U(newsize, >, psize);

        if (ztest_opts.zo_verbose >= 6) {
                (void) printf("Expanding LUN %s from %lu to %lu\n",
                    vd->vdev_path, (ulong_t)psize, (ulong_t)newsize);
        }

        /*
         * Growing the vdev is a two step process:
         *      1). expand the physical size (i.e. relabel)
         *      2). online the vdev to create the new metaslabs
         */
        if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL ||
            vdev_walk_tree(tvd, online_vdev, NULL) != NULL ||
            tvd->vdev_state != VDEV_STATE_HEALTHY) {
                if (ztest_opts.zo_verbose >= 5) {
                        (void) printf("Could not expand LUN because "
                            "the vdev configuration changed.\n");
                }
                spa_config_exit(spa, SCL_STATE, spa);
                VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
                return;
        }

        spa_config_exit(spa, SCL_STATE, spa);

        /*
         * Expanding the LUN will update the config asynchronously,
         * thus we must wait for the async thread to complete any
         * pending tasks before proceeding.
         */
        for (;;) {
                boolean_t done;

                mutex_enter(&spa->spa_async_lock);
                done = (spa->spa_async_thread == NULL &&
                    !spa->spa_async_tasks);
                mutex_exit(&spa->spa_async_lock);
                if (done)
                        break;
                txg_wait_synced(spa_get_dsl(spa), 0);
                (void) poll(NULL, 0, 100);
        }

        spa_config_enter(spa, SCL_STATE, spa, RW_READER);

        tvd = spa->spa_root_vdev->vdev_child[top];
        new_ms_count = tvd->vdev_ms_count;
        new_class_space = metaslab_class_get_space(mc);

        if (tvd->vdev_mg != mg || mg->mg_class != mc) {
                if (ztest_opts.zo_verbose >= 5) {
                        (void) printf("Could not verify LUN expansion due to "
                            "intervening vdev offline or remove.\n");
                }
                spa_config_exit(spa, SCL_STATE, spa);
                VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
                return;
        }

        /*
         * Make sure we were able to grow the vdev.
         */
        if (new_ms_count <= old_ms_count)
                fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n",
                    old_ms_count, new_ms_count);

        /*
         * Make sure we were able to grow the pool.
         */
        if (new_class_space <= old_class_space)
                fatal(0, "LUN expansion failed: class_space %llu <= %llu\n",
                    old_class_space, new_class_space);

        if (ztest_opts.zo_verbose >= 5) {
                char oldnumbuf[6], newnumbuf[6];

                nicenum(old_class_space, oldnumbuf);
                nicenum(new_class_space, newnumbuf);
                (void) printf("%s grew from %s to %s\n",
                    spa->spa_name, oldnumbuf, newnumbuf);
        }

        spa_config_exit(spa, SCL_STATE, spa);
        VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
}
/*
 * Verify that dmu_objset_{create,destroy,open,close} work as expected.
 */
static void
ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
        /*
         * Create the objects common to all ztest datasets.
         */
        VERIFY(zap_create_claim(os, ZTEST_DIROBJ,
            DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0);
}

static int
ztest_dataset_create(char *dsname)
{
        uint64_t zilset = ztest_random(100);
        int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0,
            ztest_objset_create_cb, NULL);

        if (err || zilset < 80)
                return (err);

        if (ztest_opts.zo_verbose >= 6)
                (void) printf("Setting dataset %s to sync always\n", dsname);
        return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC,
            ZFS_SYNC_ALWAYS, B_FALSE));
}
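/*
 * Note: given the zilset threshold above, roughly one in five newly created
 * datasets is set to sync=always, which exercises the synchronous ZIL path
 * alongside the default sync behavior.
 */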
static int
ztest_objset_destroy_cb(const char *name, void *arg)
{
        objset_t *os;
        dmu_object_info_t doi;
        int error;

        /*
         * Verify that the dataset contains a directory object.
         */
        VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, FTAG, &os));
        error = dmu_object_info(os, ZTEST_DIROBJ, &doi);
        if (error != ENOENT) {
                /* We could have crashed in the middle of destroying it */
                ASSERT0(error);
                ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER);
                ASSERT3S(doi.doi_physical_blocks_512, >=, 0);
        }
        dmu_objset_disown(os, FTAG);

        /*
         * Destroy the dataset.
         */
        if (strchr(name, '@') != NULL) {
                VERIFY0(dsl_destroy_snapshot(name, B_FALSE));
        } else {
                VERIFY0(dsl_destroy_head(name));
        }
        return (0);
}

static boolean_t
ztest_snapshot_create(char *osname, uint64_t id)
{
        char snapname[MAXNAMELEN];
        int error;

        (void) snprintf(snapname, sizeof (snapname), "%llu",
            (u_longlong_t)id);

        error = dmu_objset_snapshot_one(osname, snapname);
        if (error == ENOSPC) {
                ztest_record_enospc(FTAG);
                return (B_FALSE);
        }
        if (error != 0 && error != EEXIST) {
                fatal(0, "ztest_snapshot_create(%s@%s) = %d", osname,
                    snapname, error);
        }
        return (B_TRUE);
}

static boolean_t
ztest_snapshot_destroy(char *osname, uint64_t id)
{
        char snapname[MAXNAMELEN];
        int error;

        (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname,
            (u_longlong_t)id);

        error = dsl_destroy_snapshot(snapname, B_FALSE);
        if (error != 0 && error != ENOENT)
                fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error);
        return (B_TRUE);
}
3253 ztest_dmu_objset_create_destroy(ztest_ds_t
*zd
, uint64_t id
)
3259 char name
[MAXNAMELEN
];
3262 (void) rw_rdlock(&ztest_name_lock
);
3264 (void) snprintf(name
, MAXNAMELEN
, "%s/temp_%llu",
3265 ztest_opts
.zo_pool
, (u_longlong_t
)id
);
3268 * If this dataset exists from a previous run, process its replay log
3269 * half of the time. If we don't replay it, then dmu_objset_destroy()
3270 * (invoked from ztest_objset_destroy_cb()) should just throw it away.
3272 if (ztest_random(2) == 0 &&
3273 dmu_objset_own(name
, DMU_OST_OTHER
, B_FALSE
, FTAG
, &os
) == 0) {
3274 ztest_zd_init(&zdtmp
, NULL
, os
);
3275 zil_replay(os
, &zdtmp
, ztest_replay_vector
);
3276 ztest_zd_fini(&zdtmp
);
3277 dmu_objset_disown(os
, FTAG
);
3281 * There may be an old instance of the dataset we're about to
3282 * create lying around from a previous run. If so, destroy it
3283 * and all of its snapshots.
3285 (void) dmu_objset_find(name
, ztest_objset_destroy_cb
, NULL
,
3286 DS_FIND_CHILDREN
| DS_FIND_SNAPSHOTS
);
3289 * Verify that the destroyed dataset is no longer in the namespace.
3291 VERIFY3U(ENOENT
, ==, dmu_objset_own(name
, DMU_OST_OTHER
, B_TRUE
,
3295 * Verify that we can create a new dataset.
3297 error
= ztest_dataset_create(name
);
3299 if (error
== ENOSPC
) {
3300 ztest_record_enospc(FTAG
);
3301 (void) rw_unlock(&ztest_name_lock
);
3304 fatal(0, "dmu_objset_create(%s) = %d", name
, error
);
3307 VERIFY0(dmu_objset_own(name
, DMU_OST_OTHER
, B_FALSE
, FTAG
, &os
));
3309 ztest_zd_init(&zdtmp
, NULL
, os
);
3312 * Open the intent log for it.
3314 zilog
= zil_open(os
, ztest_get_data
);
3317 * Put some objects in there, do a little I/O to them,
3318 * and randomly take a couple of snapshots along the way.
3320 iters
= ztest_random(5);
3321 for (int i
= 0; i
< iters
; i
++) {
3322 ztest_dmu_object_alloc_free(&zdtmp
, id
);
3323 if (ztest_random(iters
) == 0)
3324 (void) ztest_snapshot_create(name
, i
);
3328 * Verify that we cannot create an existing dataset.
3330 VERIFY3U(EEXIST
, ==,
3331 dmu_objset_create(name
, DMU_OST_OTHER
, 0, NULL
, NULL
));
3334 * Verify that we can hold an objset that is also owned.
3336 VERIFY3U(0, ==, dmu_objset_hold(name
, FTAG
, &os2
));
3337 dmu_objset_rele(os2
, FTAG
);
3340 * Verify that we cannot own an objset that is already owned.
3343 dmu_objset_own(name
, DMU_OST_OTHER
, B_FALSE
, FTAG
, &os2
));
3346 dmu_objset_disown(os
, FTAG
);
3347 ztest_zd_fini(&zdtmp
);
3349 (void) rw_unlock(&ztest_name_lock
);
/*
 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
 */
void
ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
{
        (void) rw_rdlock(&ztest_name_lock);
        (void) ztest_snapshot_destroy(zd->zd_name, id);
        (void) ztest_snapshot_create(zd->zd_name, id);
        (void) rw_unlock(&ztest_name_lock);
}
3365 * Cleanup non-standard snapshots and clones.
3368 ztest_dsl_dataset_cleanup(char *osname
, uint64_t id
)
3370 char snap1name
[MAXNAMELEN
];
3371 char clone1name
[MAXNAMELEN
];
3372 char snap2name
[MAXNAMELEN
];
3373 char clone2name
[MAXNAMELEN
];
3374 char snap3name
[MAXNAMELEN
];
3377 (void) snprintf(snap1name
, MAXNAMELEN
, "%s@s1_%llu", osname
, id
);
3378 (void) snprintf(clone1name
, MAXNAMELEN
, "%s/c1_%llu", osname
, id
);
3379 (void) snprintf(snap2name
, MAXNAMELEN
, "%s@s2_%llu", clone1name
, id
);
3380 (void) snprintf(clone2name
, MAXNAMELEN
, "%s/c2_%llu", osname
, id
);
3381 (void) snprintf(snap3name
, MAXNAMELEN
, "%s@s3_%llu", clone1name
, id
);
3383 error
= dsl_destroy_head(clone2name
);
3384 if (error
&& error
!= ENOENT
)
3385 fatal(0, "dsl_destroy_head(%s) = %d", clone2name
, error
);
3386 error
= dsl_destroy_snapshot(snap3name
, B_FALSE
);
3387 if (error
&& error
!= ENOENT
)
3388 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap3name
, error
);
3389 error
= dsl_destroy_snapshot(snap2name
, B_FALSE
);
3390 if (error
&& error
!= ENOENT
)
3391 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap2name
, error
);
3392 error
= dsl_destroy_head(clone1name
);
3393 if (error
&& error
!= ENOENT
)
3394 fatal(0, "dsl_destroy_head(%s) = %d", clone1name
, error
);
3395 error
= dsl_destroy_snapshot(snap1name
, B_FALSE
);
3396 if (error
&& error
!= ENOENT
)
3397 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap1name
, error
);
3401 * Verify dsl_dataset_promote handles EBUSY
3404 ztest_dsl_dataset_promote_busy(ztest_ds_t
*zd
, uint64_t id
)
3407 char snap1name
[MAXNAMELEN
];
3408 char clone1name
[MAXNAMELEN
];
3409 char snap2name
[MAXNAMELEN
];
3410 char clone2name
[MAXNAMELEN
];
3411 char snap3name
[MAXNAMELEN
];
3412 char *osname
= zd
->zd_name
;
3415 (void) rw_rdlock(&ztest_name_lock
);
3417 ztest_dsl_dataset_cleanup(osname
, id
);
3419 (void) snprintf(snap1name
, MAXNAMELEN
, "%s@s1_%llu", osname
, id
);
3420 (void) snprintf(clone1name
, MAXNAMELEN
, "%s/c1_%llu", osname
, id
);
3421 (void) snprintf(snap2name
, MAXNAMELEN
, "%s@s2_%llu", clone1name
, id
);
3422 (void) snprintf(clone2name
, MAXNAMELEN
, "%s/c2_%llu", osname
, id
);
3423 (void) snprintf(snap3name
, MAXNAMELEN
, "%s@s3_%llu", clone1name
, id
);
3425 error
= dmu_objset_snapshot_one(osname
, strchr(snap1name
, '@') + 1);
3426 if (error
&& error
!= EEXIST
) {
3427 if (error
== ENOSPC
) {
3428 ztest_record_enospc(FTAG
);
3431 fatal(0, "dmu_take_snapshot(%s) = %d", snap1name
, error
);
3434 error
= dmu_objset_clone(clone1name
, snap1name
);
3436 if (error
== ENOSPC
) {
3437 ztest_record_enospc(FTAG
);
3440 fatal(0, "dmu_objset_create(%s) = %d", clone1name
, error
);
3443 error
= dmu_objset_snapshot_one(clone1name
, strchr(snap2name
, '@') + 1);
3444 if (error
&& error
!= EEXIST
) {
3445 if (error
== ENOSPC
) {
3446 ztest_record_enospc(FTAG
);
3449 fatal(0, "dmu_open_snapshot(%s) = %d", snap2name
, error
);
3452 error
= dmu_objset_snapshot_one(clone1name
, strchr(snap3name
, '@') + 1);
3453 if (error
&& error
!= EEXIST
) {
3454 if (error
== ENOSPC
) {
3455 ztest_record_enospc(FTAG
);
3458 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name
, error
);
3461 error
= dmu_objset_clone(clone2name
, snap3name
);
3463 if (error
== ENOSPC
) {
3464 ztest_record_enospc(FTAG
);
3467 fatal(0, "dmu_objset_create(%s) = %d", clone2name
, error
);
3470 error
= dmu_objset_own(snap2name
, DMU_OST_ANY
, B_TRUE
, FTAG
, &os
);
3472 fatal(0, "dmu_objset_own(%s) = %d", snap2name
, error
);
3473 error
= dsl_dataset_promote(clone2name
, NULL
);
3474 if (error
== ENOSPC
) {
3475 dmu_objset_disown(os
, FTAG
);
3476 ztest_record_enospc(FTAG
);
3480 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name
,
3482 dmu_objset_disown(os
, FTAG
);
3485 ztest_dsl_dataset_cleanup(osname
, id
);
3487 (void) rw_unlock(&ztest_name_lock
);
3491 * Verify that dmu_object_{alloc,free} work as expected.
3494 ztest_dmu_object_alloc_free(ztest_ds_t
*zd
, uint64_t id
)
3497 int batchsize
= sizeof (od
) / sizeof (od
[0]);
3499 for (int b
= 0; b
< batchsize
; b
++)
3500 ztest_od_init(&od
[b
], id
, FTAG
, b
, DMU_OT_UINT64_OTHER
, 0, 0);
3503 * Destroy the previous batch of objects, create a new batch,
3504 * and do some I/O on the new objects.
3506 if (ztest_object_init(zd
, od
, sizeof (od
), B_TRUE
) != 0)
3509 while (ztest_random(4 * batchsize
) != 0)
3510 ztest_io(zd
, od
[ztest_random(batchsize
)].od_object
,
3511 ztest_random(ZTEST_RANGE_LOCKS
) << SPA_MAXBLOCKSHIFT
);
3515 * Verify that dmu_{read,write} work as expected.
3518 ztest_dmu_read_write(ztest_ds_t
*zd
, uint64_t id
)
3520 objset_t
*os
= zd
->zd_os
;
3523 int i
, freeit
, error
;
3525 bufwad_t
*packbuf
, *bigbuf
, *pack
, *bigH
, *bigT
;
3526 uint64_t packobj
, packoff
, packsize
, bigobj
, bigoff
, bigsize
;
3527 uint64_t chunksize
= (1000 + ztest_random(1000)) * sizeof (uint64_t);
3528 uint64_t regions
= 997;
3529 uint64_t stride
= 123456789ULL;
3530 uint64_t width
= 40;
3531 int free_percent
= 5;
3534 * This test uses two objects, packobj and bigobj, that are always
3535 * updated together (i.e. in the same tx) so that their contents are
3536 * in sync and can be compared. Their contents relate to each other
3537 * in a simple way: packobj is a dense array of 'bufwad' structures,
3538 * while bigobj is a sparse array of the same bufwads. Specifically,
3539 * for any index n, there are three bufwads that should be identical:
3541 * packobj, at offset n * sizeof (bufwad_t)
3542 * bigobj, at the head of the nth chunk
3543 * bigobj, at the tail of the nth chunk
3545 * The chunk size is arbitrary. It doesn't have to be a power of two,
3546 * and it doesn't have any relation to the object blocksize.
3547 * The only requirement is that it can hold at least two bufwads.
3549 * Normally, we write the bufwad to each of these locations.
3550 * However, free_percent of the time we instead write zeroes to
3551 * packobj and perform a dmu_free_range() on bigobj. By comparing
3552 * bigobj to packobj, we can verify that the DMU is correctly
3553 * tracking which parts of an object are allocated and free,
3554 * and that the contents of the allocated blocks are correct.
3558 * Read the directory info. If it's the first time, set things up.
3560 ztest_od_init(&od
[0], id
, FTAG
, 0, DMU_OT_UINT64_OTHER
, 0, chunksize
);
3561 ztest_od_init(&od
[1], id
, FTAG
, 1, DMU_OT_UINT64_OTHER
, 0, chunksize
);
3563 if (ztest_object_init(zd
, od
, sizeof (od
), B_FALSE
) != 0)
3566 bigobj
= od
[0].od_object
;
3567 packobj
= od
[1].od_object
;
3568 chunksize
= od
[0].od_gen
;
3569 ASSERT(chunksize
== od
[1].od_gen
);
3572 * Prefetch a random chunk of the big object.
3573 * Our aim here is to get some async reads in flight
3574 * for blocks that we may free below; the DMU should
3575 * handle this race correctly.
3577 n
= ztest_random(regions
) * stride
+ ztest_random(width
);
3578 s
= 1 + ztest_random(2 * width
- 1);
3579 dmu_prefetch(os
, bigobj
, n
* chunksize
, s
* chunksize
);
3582 * Pick a random index and compute the offsets into packobj and bigobj.
3584 n
= ztest_random(regions
) * stride
+ ztest_random(width
);
3585 s
= 1 + ztest_random(width
- 1);
3587 packoff
= n
* sizeof (bufwad_t
);
3588 packsize
= s
* sizeof (bufwad_t
);
3590 bigoff
= n
* chunksize
;
3591 bigsize
= s
* chunksize
;
3593 packbuf
= umem_alloc(packsize
, UMEM_NOFAIL
);
3594 bigbuf
= umem_alloc(bigsize
, UMEM_NOFAIL
);
3597 * free_percent of the time, free a range of bigobj rather than
3600 freeit
= (ztest_random(100) < free_percent
);
3603 * Read the current contents of our objects.
3605 error
= dmu_read(os
, packobj
, packoff
, packsize
, packbuf
,
3608 error
= dmu_read(os
, bigobj
, bigoff
, bigsize
, bigbuf
,
3613 * Get a tx for the mods to both packobj and bigobj.
3615 tx
= dmu_tx_create(os
);
3617 dmu_tx_hold_write(tx
, packobj
, packoff
, packsize
);
3620 dmu_tx_hold_free(tx
, bigobj
, bigoff
, bigsize
);
3622 dmu_tx_hold_write(tx
, bigobj
, bigoff
, bigsize
);
3624 /* This accounts for setting the checksum/compression. */
3625 dmu_tx_hold_bonus(tx
, bigobj
);
3627 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
3629 umem_free(packbuf
, packsize
);
3630 umem_free(bigbuf
, bigsize
);
3634 enum zio_checksum cksum
;
3636 cksum
= (enum zio_checksum
)
3637 ztest_random_dsl_prop(ZFS_PROP_CHECKSUM
);
3638 } while (cksum
>= ZIO_CHECKSUM_LEGACY_FUNCTIONS
);
3639 dmu_object_set_checksum(os
, bigobj
, cksum
, tx
);
3641 enum zio_compress comp
;
3643 comp
= (enum zio_compress
)
3644 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION
);
3645 } while (comp
>= ZIO_COMPRESS_LEGACY_FUNCTIONS
);
3646 dmu_object_set_compress(os
, bigobj
, comp
, tx
);
3649 * For each index from n to n + s, verify that the existing bufwad
3650 * in packobj matches the bufwads at the head and tail of the
3651 * corresponding chunk in bigobj. Then update all three bufwads
3652 * with the new values we want to write out.
3654 for (i
= 0; i
< s
; i
++) {
3656 pack
= (bufwad_t
*)((char *)packbuf
+ i
* sizeof (bufwad_t
));
3658 bigH
= (bufwad_t
*)((char *)bigbuf
+ i
* chunksize
);
3660 bigT
= (bufwad_t
*)((char *)bigH
+ chunksize
) - 1;
3662 ASSERT((uintptr_t)bigH
- (uintptr_t)bigbuf
< bigsize
);
3663 ASSERT((uintptr_t)bigT
- (uintptr_t)bigbuf
< bigsize
);
3665 if (pack
->bw_txg
> txg
)
3666 fatal(0, "future leak: got %llx, open txg is %llx",
3669 if (pack
->bw_data
!= 0 && pack
->bw_index
!= n
+ i
)
3670 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3671 pack
->bw_index
, n
, i
);
3673 if (bcmp(pack
, bigH
, sizeof (bufwad_t
)) != 0)
3674 fatal(0, "pack/bigH mismatch in %p/%p", pack
, bigH
);
3676 if (bcmp(pack
, bigT
, sizeof (bufwad_t
)) != 0)
3677 fatal(0, "pack/bigT mismatch in %p/%p", pack
, bigT
);
3680 bzero(pack
, sizeof (bufwad_t
));
3682 pack
->bw_index
= n
+ i
;
3684 pack
->bw_data
= 1 + ztest_random(-2ULL);
3691 * We've verified all the old bufwads, and made new ones.
3692 * Now write them out.
3694 dmu_write(os
, packobj
, packoff
, packsize
, packbuf
, tx
);
3697 if (ztest_opts
.zo_verbose
>= 7) {
3698 (void) printf("freeing offset %llx size %llx"
3700 (u_longlong_t
)bigoff
,
3701 (u_longlong_t
)bigsize
,
3704 VERIFY(0 == dmu_free_range(os
, bigobj
, bigoff
, bigsize
, tx
));
3706 if (ztest_opts
.zo_verbose
>= 7) {
3707 (void) printf("writing offset %llx size %llx"
3709 (u_longlong_t
)bigoff
,
3710 (u_longlong_t
)bigsize
,
3713 dmu_write(os
, bigobj
, bigoff
, bigsize
, bigbuf
, tx
);
3719 * Sanity check the stuff we just wrote.
3722 void *packcheck
= umem_alloc(packsize
, UMEM_NOFAIL
);
3723 void *bigcheck
= umem_alloc(bigsize
, UMEM_NOFAIL
);
3725 VERIFY(0 == dmu_read(os
, packobj
, packoff
,
3726 packsize
, packcheck
, DMU_READ_PREFETCH
));
3727 VERIFY(0 == dmu_read(os
, bigobj
, bigoff
,
3728 bigsize
, bigcheck
, DMU_READ_PREFETCH
));
3730 ASSERT(bcmp(packbuf
, packcheck
, packsize
) == 0);
3731 ASSERT(bcmp(bigbuf
, bigcheck
, bigsize
) == 0);
3733 umem_free(packcheck
, packsize
);
3734 umem_free(bigcheck
, bigsize
);
3737 umem_free(packbuf
, packsize
);
3738 umem_free(bigbuf
, bigsize
);
3742 compare_and_update_pbbufs(uint64_t s
, bufwad_t
*packbuf
, bufwad_t
*bigbuf
,
3743 uint64_t bigsize
, uint64_t n
, uint64_t chunksize
, uint64_t txg
)
3751 * For each index from n to n + s, verify that the existing bufwad
3752 * in packobj matches the bufwads at the head and tail of the
3753 * corresponding chunk in bigobj. Then update all three bufwads
3754 * with the new values we want to write out.
3756 for (i
= 0; i
< s
; i
++) {
3758 pack
= (bufwad_t
*)((char *)packbuf
+ i
* sizeof (bufwad_t
));
3760 bigH
= (bufwad_t
*)((char *)bigbuf
+ i
* chunksize
);
3762 bigT
= (bufwad_t
*)((char *)bigH
+ chunksize
) - 1;
3764 ASSERT((uintptr_t)bigH
- (uintptr_t)bigbuf
< bigsize
);
3765 ASSERT((uintptr_t)bigT
- (uintptr_t)bigbuf
< bigsize
);
3767 if (pack
->bw_txg
> txg
)
3768 fatal(0, "future leak: got %llx, open txg is %llx",
3771 if (pack
->bw_data
!= 0 && pack
->bw_index
!= n
+ i
)
3772 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3773 pack
->bw_index
, n
, i
);
3775 if (bcmp(pack
, bigH
, sizeof (bufwad_t
)) != 0)
3776 fatal(0, "pack/bigH mismatch in %p/%p", pack
, bigH
);
3778 if (bcmp(pack
, bigT
, sizeof (bufwad_t
)) != 0)
3779 fatal(0, "pack/bigT mismatch in %p/%p", pack
, bigT
);
3781 pack
->bw_index
= n
+ i
;
3783 pack
->bw_data
= 1 + ztest_random(-2ULL);
3791 ztest_dmu_read_write_zcopy(ztest_ds_t
*zd
, uint64_t id
)
3793 objset_t
*os
= zd
->zd_os
;
3799 bufwad_t
*packbuf
, *bigbuf
;
3800 uint64_t packobj
, packoff
, packsize
, bigobj
, bigoff
, bigsize
;
3801 uint64_t blocksize
= ztest_random_blocksize();
3802 uint64_t chunksize
= blocksize
;
3803 uint64_t regions
= 997;
3804 uint64_t stride
= 123456789ULL;
3806 dmu_buf_t
*bonus_db
;
3807 arc_buf_t
**bigbuf_arcbufs
;
3808 dmu_object_info_t doi
;
3811 * This test uses two objects, packobj and bigobj, that are always
3812 * updated together (i.e. in the same tx) so that their contents are
3813 * in sync and can be compared. Their contents relate to each other
3814 * in a simple way: packobj is a dense array of 'bufwad' structures,
3815 * while bigobj is a sparse array of the same bufwads. Specifically,
3816 * for any index n, there are three bufwads that should be identical:
3818 * packobj, at offset n * sizeof (bufwad_t)
3819 * bigobj, at the head of the nth chunk
3820 * bigobj, at the tail of the nth chunk
3822 * The chunk size is set equal to bigobj block size so that
3823 * dmu_assign_arcbuf() can be tested for object updates.
3827 * Read the directory info. If it's the first time, set things up.
3829 ztest_od_init(&od
[0], id
, FTAG
, 0, DMU_OT_UINT64_OTHER
, blocksize
, 0);
3830 ztest_od_init(&od
[1], id
, FTAG
, 1, DMU_OT_UINT64_OTHER
, 0, chunksize
);
3832 if (ztest_object_init(zd
, od
, sizeof (od
), B_FALSE
) != 0)
3835 bigobj
= od
[0].od_object
;
3836 packobj
= od
[1].od_object
;
3837 blocksize
= od
[0].od_blocksize
;
3838 chunksize
= blocksize
;
3839 ASSERT(chunksize
== od
[1].od_gen
);
3841 VERIFY(dmu_object_info(os
, bigobj
, &doi
) == 0);
3842 VERIFY(ISP2(doi
.doi_data_block_size
));
3843 VERIFY(chunksize
== doi
.doi_data_block_size
);
3844 VERIFY(chunksize
>= 2 * sizeof (bufwad_t
));
3847 * Pick a random index and compute the offsets into packobj and bigobj.
3849 n
= ztest_random(regions
) * stride
+ ztest_random(width
);
3850 s
= 1 + ztest_random(width
- 1);
3852 packoff
= n
* sizeof (bufwad_t
);
3853 packsize
= s
* sizeof (bufwad_t
);
3855 bigoff
= n
* chunksize
;
3856 bigsize
= s
* chunksize
;
3858 packbuf
= umem_zalloc(packsize
, UMEM_NOFAIL
);
3859 bigbuf
= umem_zalloc(bigsize
, UMEM_NOFAIL
);
3861 VERIFY3U(0, ==, dmu_bonus_hold(os
, bigobj
, FTAG
, &bonus_db
));
3863 bigbuf_arcbufs
= umem_zalloc(2 * s
* sizeof (arc_buf_t
*), UMEM_NOFAIL
);
3866 * Iteration 0 test zcopy for DB_UNCACHED dbufs.
3867 * Iteration 1 test zcopy to already referenced dbufs.
3868 * Iteration 2 test zcopy to dirty dbuf in the same txg.
3869 * Iteration 3 test zcopy to dbuf dirty in previous txg.
3870 * Iteration 4 test zcopy when dbuf is no longer dirty.
3871 * Iteration 5 test zcopy when it can't be done.
3872 * Iteration 6 one more zcopy write.
3874 for (i
= 0; i
< 7; i
++) {
3879 * In iteration 5 (i == 5) use arcbufs
3880 * that don't match bigobj blksz to test
3881 * dmu_assign_arcbuf() when it can't directly
3882 * assign an arcbuf to a dbuf.
3884 for (j
= 0; j
< s
; j
++) {
3887 dmu_request_arcbuf(bonus_db
, chunksize
);
3889 bigbuf_arcbufs
[2 * j
] =
3890 dmu_request_arcbuf(bonus_db
, chunksize
/ 2);
3891 bigbuf_arcbufs
[2 * j
+ 1] =
3892 dmu_request_arcbuf(bonus_db
, chunksize
/ 2);
3897 * Get a tx for the mods to both packobj and bigobj.
3899 tx
= dmu_tx_create(os
);
3901 dmu_tx_hold_write(tx
, packobj
, packoff
, packsize
);
3902 dmu_tx_hold_write(tx
, bigobj
, bigoff
, bigsize
);
3904 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
3906 umem_free(packbuf
, packsize
);
3907 umem_free(bigbuf
, bigsize
);
3908 for (j
= 0; j
< s
; j
++) {
3910 dmu_return_arcbuf(bigbuf_arcbufs
[j
]);
3913 bigbuf_arcbufs
[2 * j
]);
3915 bigbuf_arcbufs
[2 * j
+ 1]);
3918 umem_free(bigbuf_arcbufs
, 2 * s
* sizeof (arc_buf_t
*));
3919 dmu_buf_rele(bonus_db
, FTAG
);
3924 * 50% of the time don't read objects in the 1st iteration to
3925 * test dmu_assign_arcbuf() for the case when there're no
3926 * existing dbufs for the specified offsets.
3928 if (i
!= 0 || ztest_random(2) != 0) {
3929 error
= dmu_read(os
, packobj
, packoff
,
3930 packsize
, packbuf
, DMU_READ_PREFETCH
);
3932 error
= dmu_read(os
, bigobj
, bigoff
, bigsize
,
3933 bigbuf
, DMU_READ_PREFETCH
);
3936 compare_and_update_pbbufs(s
, packbuf
, bigbuf
, bigsize
,
3940 * We've verified all the old bufwads, and made new ones.
3941 * Now write them out.
3943 dmu_write(os
, packobj
, packoff
, packsize
, packbuf
, tx
);
3944 if (ztest_opts
.zo_verbose
>= 7) {
3945 (void) printf("writing offset %llx size %llx"
3947 (u_longlong_t
)bigoff
,
3948 (u_longlong_t
)bigsize
,
3951 for (off
= bigoff
, j
= 0; j
< s
; j
++, off
+= chunksize
) {
3954 bcopy((caddr_t
)bigbuf
+ (off
- bigoff
),
3955 bigbuf_arcbufs
[j
]->b_data
, chunksize
);
3957 bcopy((caddr_t
)bigbuf
+ (off
- bigoff
),
3958 bigbuf_arcbufs
[2 * j
]->b_data
,
3960 bcopy((caddr_t
)bigbuf
+ (off
- bigoff
) +
3962 bigbuf_arcbufs
[2 * j
+ 1]->b_data
,
3967 VERIFY(dmu_buf_hold(os
, bigobj
, off
,
3968 FTAG
, &dbt
, DMU_READ_NO_PREFETCH
) == 0);
3971 dmu_assign_arcbuf(bonus_db
, off
,
3972 bigbuf_arcbufs
[j
], tx
);
3974 dmu_assign_arcbuf(bonus_db
, off
,
3975 bigbuf_arcbufs
[2 * j
], tx
);
3976 dmu_assign_arcbuf(bonus_db
,
3977 off
+ chunksize
/ 2,
3978 bigbuf_arcbufs
[2 * j
+ 1], tx
);
3981 dmu_buf_rele(dbt
, FTAG
);
3987 * Sanity check the stuff we just wrote.
3990 void *packcheck
= umem_alloc(packsize
, UMEM_NOFAIL
);
3991 void *bigcheck
= umem_alloc(bigsize
, UMEM_NOFAIL
);
3993 VERIFY(0 == dmu_read(os
, packobj
, packoff
,
3994 packsize
, packcheck
, DMU_READ_PREFETCH
));
3995 VERIFY(0 == dmu_read(os
, bigobj
, bigoff
,
3996 bigsize
, bigcheck
, DMU_READ_PREFETCH
));
3998 ASSERT(bcmp(packbuf
, packcheck
, packsize
) == 0);
3999 ASSERT(bcmp(bigbuf
, bigcheck
, bigsize
) == 0);
4001 umem_free(packcheck
, packsize
);
4002 umem_free(bigcheck
, bigsize
);
4005 txg_wait_open(dmu_objset_pool(os
), 0);
4006 } else if (i
== 3) {
4007 txg_wait_synced(dmu_objset_pool(os
), 0);
4011 dmu_buf_rele(bonus_db
, FTAG
);
4012 umem_free(packbuf
, packsize
);
4013 umem_free(bigbuf
, bigsize
);
4014 umem_free(bigbuf_arcbufs
, 2 * s
* sizeof (arc_buf_t
*));
void
ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
{
        ztest_od_t od[1];
        uint64_t offset = (1ULL << (ztest_random(20) + 43)) +
            (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);

        /*
         * Have multiple threads write to large offsets in an object
         * to verify that parallel writes to an object -- even to the
         * same blocks within the object -- doesn't cause any trouble.
         */
        ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);

        if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
                return;

        while (ztest_random(10) != 0)
                ztest_io(zd, od[0].od_object, offset);
}
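/*
 * Note: the offset formula above places every write at least 8 TB
 * (1ULL << 43 bytes) into the object, so this also exercises writes to
 * very large, sparse object offsets.
 */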
4040 ztest_dmu_prealloc(ztest_ds_t
*zd
, uint64_t id
)
4043 uint64_t offset
= (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT
)) +
4044 (ztest_random(ZTEST_RANGE_LOCKS
) << SPA_MAXBLOCKSHIFT
);
4045 uint64_t count
= ztest_random(20) + 1;
4046 uint64_t blocksize
= ztest_random_blocksize();
4049 ztest_od_init(&od
[0], id
, FTAG
, 0, DMU_OT_UINT64_OTHER
, blocksize
, 0);
4051 if (ztest_object_init(zd
, od
, sizeof (od
), !ztest_random(2)) != 0)
4054 if (ztest_truncate(zd
, od
[0].od_object
, offset
, count
* blocksize
) != 0)
4057 ztest_prealloc(zd
, od
[0].od_object
, offset
, count
* blocksize
);
4059 data
= umem_zalloc(blocksize
, UMEM_NOFAIL
);
4061 while (ztest_random(count
) != 0) {
4062 uint64_t randoff
= offset
+ (ztest_random(count
) * blocksize
);
4063 if (ztest_write(zd
, od
[0].od_object
, randoff
, blocksize
,
4066 while (ztest_random(4) != 0)
4067 ztest_io(zd
, od
[0].od_object
, randoff
);
4070 umem_free(data
, blocksize
);
/*
 * Verify that zap_{create,destroy,add,remove,update} work as expected.
 */
#define ZTEST_ZAP_MIN_INTS      1
#define ZTEST_ZAP_MAX_INTS      4
#define ZTEST_ZAP_MAX_PROPS     1000
4081 ztest_zap(ztest_ds_t
*zd
, uint64_t id
)
4083 objset_t
*os
= zd
->zd_os
;
4086 uint64_t txg
, last_txg
;
4087 uint64_t value
[ZTEST_ZAP_MAX_INTS
];
4088 uint64_t zl_ints
, zl_intsize
, prop
;
4091 char propname
[100], txgname
[100];
4093 char *hc
[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
4095 ztest_od_init(&od
[0], id
, FTAG
, 0, DMU_OT_ZAP_OTHER
, 0, 0);
4097 if (ztest_object_init(zd
, od
, sizeof (od
), !ztest_random(2)) != 0)
4100 object
= od
[0].od_object
;
4103 * Generate a known hash collision, and verify that
4104 * we can lookup and remove both entries.
4106 tx
= dmu_tx_create(os
);
4107 dmu_tx_hold_zap(tx
, object
, B_TRUE
, NULL
);
4108 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
4111 for (i
= 0; i
< 2; i
++) {
4113 VERIFY3U(0, ==, zap_add(os
, object
, hc
[i
], sizeof (uint64_t),
4116 for (i
= 0; i
< 2; i
++) {
4117 VERIFY3U(EEXIST
, ==, zap_add(os
, object
, hc
[i
],
4118 sizeof (uint64_t), 1, &value
[i
], tx
));
4120 zap_length(os
, object
, hc
[i
], &zl_intsize
, &zl_ints
));
4121 ASSERT3U(zl_intsize
, ==, sizeof (uint64_t));
4122 ASSERT3U(zl_ints
, ==, 1);
4124 for (i
= 0; i
< 2; i
++) {
4125 VERIFY3U(0, ==, zap_remove(os
, object
, hc
[i
], tx
));
4130 * Generate a buch of random entries.
4132 ints
= MAX(ZTEST_ZAP_MIN_INTS
, object
% ZTEST_ZAP_MAX_INTS
);
4134 prop
= ztest_random(ZTEST_ZAP_MAX_PROPS
);
4135 (void) sprintf(propname
, "prop_%llu", (u_longlong_t
)prop
);
4136 (void) sprintf(txgname
, "txg_%llu", (u_longlong_t
)prop
);
4137 bzero(value
, sizeof (value
));
4141 * If these zap entries already exist, validate their contents.
4143 error
= zap_length(os
, object
, txgname
, &zl_intsize
, &zl_ints
);
4145 ASSERT3U(zl_intsize
, ==, sizeof (uint64_t));
4146 ASSERT3U(zl_ints
, ==, 1);
4148 VERIFY(zap_lookup(os
, object
, txgname
, zl_intsize
,
4149 zl_ints
, &last_txg
) == 0);
4151 VERIFY(zap_length(os
, object
, propname
, &zl_intsize
,
4154 ASSERT3U(zl_intsize
, ==, sizeof (uint64_t));
4155 ASSERT3U(zl_ints
, ==, ints
);
4157 VERIFY(zap_lookup(os
, object
, propname
, zl_intsize
,
4158 zl_ints
, value
) == 0);
4160 for (i
= 0; i
< ints
; i
++) {
4161 ASSERT3U(value
[i
], ==, last_txg
+ object
+ i
);
4164 ASSERT3U(error
, ==, ENOENT
);
4168 * Atomically update two entries in our zap object.
4169 * The first is named txg_%llu, and contains the txg
4170 * in which the property was last updated. The second
4171 * is named prop_%llu, and the nth element of its value
4172 * should be txg + object + n.
4174 tx
= dmu_tx_create(os
);
4175 dmu_tx_hold_zap(tx
, object
, B_TRUE
, NULL
);
4176 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
4181 fatal(0, "zap future leak: old %llu new %llu", last_txg
, txg
);
4183 for (i
= 0; i
< ints
; i
++)
4184 value
[i
] = txg
+ object
+ i
;
4186 VERIFY3U(0, ==, zap_update(os
, object
, txgname
, sizeof (uint64_t),
4188 VERIFY3U(0, ==, zap_update(os
, object
, propname
, sizeof (uint64_t),
4194 * Remove a random pair of entries.
4196 prop
= ztest_random(ZTEST_ZAP_MAX_PROPS
);
4197 (void) sprintf(propname
, "prop_%llu", (u_longlong_t
)prop
);
4198 (void) sprintf(txgname
, "txg_%llu", (u_longlong_t
)prop
);
4200 error
= zap_length(os
, object
, txgname
, &zl_intsize
, &zl_ints
);
4202 if (error
== ENOENT
)
4207 tx
= dmu_tx_create(os
);
4208 dmu_tx_hold_zap(tx
, object
, B_TRUE
, NULL
);
4209 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
4212 VERIFY3U(0, ==, zap_remove(os
, object
, txgname
, tx
));
4213 VERIFY3U(0, ==, zap_remove(os
, object
, propname
, tx
));
4218 * Testcase to test the upgrading of a microzap to fatzap.
4221 ztest_fzap(ztest_ds_t
*zd
, uint64_t id
)
4223 objset_t
*os
= zd
->zd_os
;
4225 uint64_t object
, txg
;
4227 ztest_od_init(&od
[0], id
, FTAG
, 0, DMU_OT_ZAP_OTHER
, 0, 0);
4229 if (ztest_object_init(zd
, od
, sizeof (od
), !ztest_random(2)) != 0)
4232 object
= od
[0].od_object
;
4235 * Add entries to this ZAP and make sure it spills over
4236 * and gets upgraded to a fatzap. Also, since we are adding
4237 * 2050 entries we should see ptrtbl growth and leaf-block split.
4239 for (int i
= 0; i
< 2050; i
++) {
4240 char name
[MAXNAMELEN
];
4245 (void) snprintf(name
, sizeof (name
), "fzap-%llu-%llu",
4248 tx
= dmu_tx_create(os
);
4249 dmu_tx_hold_zap(tx
, object
, B_TRUE
, name
);
4250 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
4253 error
= zap_add(os
, object
, name
, sizeof (uint64_t), 1,
4255 ASSERT(error
== 0 || error
== EEXIST
);
4262 ztest_zap_parallel(ztest_ds_t
*zd
, uint64_t id
)
4264 objset_t
*os
= zd
->zd_os
;
4266 uint64_t txg
, object
, count
, wsize
, wc
, zl_wsize
, zl_wc
;
4268 int i
, namelen
, error
;
4269 int micro
= ztest_random(2);
4270 char name
[20], string_value
[20];
4273 ztest_od_init(&od
[0], ID_PARALLEL
, FTAG
, micro
, DMU_OT_ZAP_OTHER
, 0, 0);
4275 if (ztest_object_init(zd
, od
, sizeof (od
), B_FALSE
) != 0)
4278 object
= od
[0].od_object
;
4281 * Generate a random name of the form 'xxx.....' where each
4282 * x is a random printable character and the dots are dots.
4283 * There are 94 such characters, and the name length goes from
4284 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names.
4286 namelen
= ztest_random(sizeof (name
) - 5) + 5 + 1;
4288 for (i
= 0; i
< 3; i
++)
4289 name
[i
] = '!' + ztest_random('~' - '!' + 1);
4290 for (; i
< namelen
- 1; i
++)
4294 if ((namelen
& 1) || micro
) {
4295 wsize
= sizeof (txg
);
4301 data
= string_value
;
4305 VERIFY0(zap_count(os
, object
, &count
));
4306 ASSERT(count
!= -1ULL);
4309 * Select an operation: length, lookup, add, update, remove.
4311 i
= ztest_random(5);
4314 tx
= dmu_tx_create(os
);
4315 dmu_tx_hold_zap(tx
, object
, B_TRUE
, NULL
);
4316 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
4319 bcopy(name
, string_value
, namelen
);
4323 bzero(string_value
, namelen
);
4329 error
= zap_length(os
, object
, name
, &zl_wsize
, &zl_wc
);
4331 ASSERT3U(wsize
, ==, zl_wsize
);
4332 ASSERT3U(wc
, ==, zl_wc
);
4334 ASSERT3U(error
, ==, ENOENT
);
4339 error
= zap_lookup(os
, object
, name
, wsize
, wc
, data
);
4341 if (data
== string_value
&&
4342 bcmp(name
, data
, namelen
) != 0)
4343 fatal(0, "name '%s' != val '%s' len %d",
4344 name
, data
, namelen
);
4346 ASSERT3U(error
, ==, ENOENT
);
4351 error
= zap_add(os
, object
, name
, wsize
, wc
, data
, tx
);
4352 ASSERT(error
== 0 || error
== EEXIST
);
4356 VERIFY(zap_update(os
, object
, name
, wsize
, wc
, data
, tx
) == 0);
4360 error
= zap_remove(os
, object
, name
, tx
);
4361 ASSERT(error
== 0 || error
== ENOENT
);
4370 * Commit callback data.
4372 typedef struct ztest_cb_data
{
4373 list_node_t zcd_node
;
4375 int zcd_expected_err
;
4376 boolean_t zcd_added
;
4377 boolean_t zcd_called
;
4381 /* This is the actual commit callback function */
4383 ztest_commit_callback(void *arg
, int error
)
4385 ztest_cb_data_t
*data
= arg
;
4386 uint64_t synced_txg
;
4388 VERIFY(data
!= NULL
);
4389 VERIFY3S(data
->zcd_expected_err
, ==, error
);
4390 VERIFY(!data
->zcd_called
);
4392 synced_txg
= spa_last_synced_txg(data
->zcd_spa
);
4393 if (data
->zcd_txg
> synced_txg
)
4394 fatal(0, "commit callback of txg %" PRIu64
" called prematurely"
4395 ", last synced txg = %" PRIu64
"\n", data
->zcd_txg
,
4398 data
->zcd_called
= B_TRUE
;
4400 if (error
== ECANCELED
) {
4401 ASSERT0(data
->zcd_txg
);
4402 ASSERT(!data
->zcd_added
);
4405 * The private callback data should be destroyed here, but
4406 * since we are going to check the zcd_called field after
4407 * dmu_tx_abort(), we will destroy it there.
4412 /* Was this callback added to the global callback list? */
4413 if (!data
->zcd_added
)
4416 ASSERT3U(data
->zcd_txg
, !=, 0);
4418 /* Remove our callback from the list */
4419 (void) mutex_lock(&zcl
.zcl_callbacks_lock
);
4420 list_remove(&zcl
.zcl_callbacks
, data
);
4421 (void) mutex_unlock(&zcl
.zcl_callbacks_lock
);
4424 umem_free(data
, sizeof (ztest_cb_data_t
));
4427 /* Allocate and initialize callback data structure */
4428 static ztest_cb_data_t
*
4429 ztest_create_cb_data(objset_t
*os
, uint64_t txg
)
4431 ztest_cb_data_t
*cb_data
;
4433 cb_data
= umem_zalloc(sizeof (ztest_cb_data_t
), UMEM_NOFAIL
);
4435 cb_data
->zcd_txg
= txg
;
4436 cb_data
->zcd_spa
= dmu_objset_spa(os
);
4442 * If a number of txgs equal to this threshold have been created after a commit
4443 * callback has been registered but not called, then we assume there is an
4444 * implementation bug.
4446 #define ZTEST_COMMIT_CALLBACK_THRESH (TXG_CONCURRENT_STATES + 2)
4449 * Commit callback test.
4452 ztest_dmu_commit_callbacks(ztest_ds_t
*zd
, uint64_t id
)
4454 objset_t
*os
= zd
->zd_os
;
4457 ztest_cb_data_t
*cb_data
[3], *tmp_cb
;
4458 uint64_t old_txg
, txg
;
4461 ztest_od_init(&od
[0], id
, FTAG
, 0, DMU_OT_UINT64_OTHER
, 0, 0);
4463 if (ztest_object_init(zd
, od
, sizeof (od
), B_FALSE
) != 0)
4466 tx
= dmu_tx_create(os
);
4468 cb_data
[0] = ztest_create_cb_data(os
, 0);
4469 dmu_tx_callback_register(tx
, ztest_commit_callback
, cb_data
[0]);
4471 dmu_tx_hold_write(tx
, od
[0].od_object
, 0, sizeof (uint64_t));
4473 /* Every once in a while, abort the transaction on purpose */
4474 if (ztest_random(100) == 0)
4478 error
= dmu_tx_assign(tx
, TXG_NOWAIT
);
4480 txg
= error
? 0 : dmu_tx_get_txg(tx
);
4482 cb_data
[0]->zcd_txg
= txg
;
4483 cb_data
[1] = ztest_create_cb_data(os
, txg
);
4484 dmu_tx_callback_register(tx
, ztest_commit_callback
, cb_data
[1]);
4488 * It's not a strict requirement to call the registered
4489 * callbacks from inside dmu_tx_abort(), but that's what
4490 * it's supposed to happen in the current implementation
4491 * so we will check for that.
4493 for (i
= 0; i
< 2; i
++) {
4494 cb_data
[i
]->zcd_expected_err
= ECANCELED
;
4495 VERIFY(!cb_data
[i
]->zcd_called
);
4500 for (i
= 0; i
< 2; i
++) {
4501 VERIFY(cb_data
[i
]->zcd_called
);
4502 umem_free(cb_data
[i
], sizeof (ztest_cb_data_t
));
4508 cb_data
[2] = ztest_create_cb_data(os
, txg
);
4509 dmu_tx_callback_register(tx
, ztest_commit_callback
, cb_data
[2]);
4512 * Read existing data to make sure there isn't a future leak.
4514 VERIFY(0 == dmu_read(os
, od
[0].od_object
, 0, sizeof (uint64_t),
4515 &old_txg
, DMU_READ_PREFETCH
));
4518 fatal(0, "future leak: got %" PRIu64
", open txg is %" PRIu64
,
4521 dmu_write(os
, od
[0].od_object
, 0, sizeof (uint64_t), &txg
, tx
);
4523 (void) mutex_lock(&zcl
.zcl_callbacks_lock
);
4526 * Since commit callbacks don't have any ordering requirement and since
4527 * it is theoretically possible for a commit callback to be called
4528 * after an arbitrary amount of time has elapsed since its txg has been
4529 * synced, it is difficult to reliably determine whether a commit
4530 * callback hasn't been called due to high load or due to a flawed
4533 * In practice, we will assume that if after a certain number of txgs a
4534 * commit callback hasn't been called, then most likely there's an
4535 * implementation bug..
4537 tmp_cb
= list_head(&zcl
.zcl_callbacks
);
4538 if (tmp_cb
!= NULL
&&
4539 (txg
- ZTEST_COMMIT_CALLBACK_THRESH
) > tmp_cb
->zcd_txg
) {
4540 fatal(0, "Commit callback threshold exceeded, oldest txg: %"
4541 PRIu64
", open txg: %" PRIu64
"\n", tmp_cb
->zcd_txg
, txg
);
4545 * Let's find the place to insert our callbacks.
4547 * Even though the list is ordered by txg, it is possible for the
4548 * insertion point to not be the end because our txg may already be
4549 * quiescing at this point and other callbacks in the open txg
4550 * (from other objsets) may have sneaked in.
4552 tmp_cb
= list_tail(&zcl
.zcl_callbacks
);
4553 while (tmp_cb
!= NULL
&& tmp_cb
->zcd_txg
> txg
)
4554 tmp_cb
= list_prev(&zcl
.zcl_callbacks
, tmp_cb
);
4556 /* Add the 3 callbacks to the list */
4557 for (i
= 0; i
< 3; i
++) {
4559 list_insert_head(&zcl
.zcl_callbacks
, cb_data
[i
]);
4561 list_insert_after(&zcl
.zcl_callbacks
, tmp_cb
,
4564 cb_data
[i
]->zcd_added
= B_TRUE
;
4565 VERIFY(!cb_data
[i
]->zcd_called
);
4567 tmp_cb
= cb_data
[i
];
4570 (void) mutex_unlock(&zcl
.zcl_callbacks_lock
);
void
ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id)
{
        zfs_prop_t proplist[] = {
                ZFS_PROP_CHECKSUM,
                ZFS_PROP_COMPRESSION,
                ZFS_PROP_COPIES,
                ZFS_PROP_DEDUP
        };

        (void) rw_rdlock(&ztest_name_lock);

        for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
                (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
                    ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));

        (void) rw_unlock(&ztest_name_lock);
}

void
ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
{
        nvlist_t *props = NULL;

        (void) rw_rdlock(&ztest_name_lock);

        (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO,
            ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));

        VERIFY0(spa_prop_get(ztest_spa, &props));

        if (ztest_opts.zo_verbose >= 6)
                dump_nvlist(props, 4);

        nvlist_free(props);

        (void) rw_unlock(&ztest_name_lock);
}
4617 user_release_one(const char *snapname
, const char *holdname
)
4619 nvlist_t
*snaps
, *holds
;
4622 snaps
= fnvlist_alloc();
4623 holds
= fnvlist_alloc();
4624 fnvlist_add_boolean(holds
, holdname
);
4625 fnvlist_add_nvlist(snaps
, snapname
, holds
);
4626 fnvlist_free(holds
);
4627 error
= dsl_dataset_user_release(snaps
, NULL
);
4628 fnvlist_free(snaps
);
/*
 * Test snapshot hold/release and deferred destroy.
 */
void
ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
{
	objset_t *os = zd->zd_os;
	objset_t *origin;
	char snapname[100];
	char fullname[100];
	char clonename[100];
	char tag[100];
	char osname[MAXNAMELEN];
	nvlist_t *holds;
	int error;

	(void) rw_rdlock(&ztest_name_lock);

	dmu_objset_name(os, osname);

	(void) snprintf(snapname, sizeof (snapname), "sh1_%llu", id);
	(void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname);
	(void) snprintf(clonename, sizeof (clonename),
	    "%s/ch1_%llu", osname, id);
	(void) snprintf(tag, sizeof (tag), "tag_%llu", id);
	/*
	 * Clean up from any previous run.
	 */
	error = dsl_destroy_head(clonename);
	if (error != ENOENT)
		ASSERT0(error);
	error = user_release_one(fullname, tag);
	if (error != ESRCH && error != ENOENT)
		ASSERT0(error);
	error = dsl_destroy_snapshot(fullname, B_FALSE);
	if (error != ENOENT)
		ASSERT0(error);
	/*
	 * Create snapshot, clone it, mark snap for deferred destroy,
	 * destroy clone, verify snap was also destroyed.
	 */
	error = dmu_objset_snapshot_one(osname, snapname);
	if (error) {
		if (error == ENOSPC) {
			ztest_record_enospc("dmu_objset_snapshot");
			goto out;
		}
		fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
	}

	error = dmu_objset_clone(clonename, fullname);
	if (error) {
		if (error == ENOSPC) {
			ztest_record_enospc("dmu_objset_clone");
			goto out;
		}
		fatal(0, "dmu_objset_clone(%s) = %d", clonename, error);
	}

	error = dsl_destroy_snapshot(fullname, B_TRUE);
	if (error) {
		fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
		    fullname, error);
	}

	error = dsl_destroy_head(clonename);
	if (error)
		fatal(0, "dsl_destroy_head(%s) = %d", clonename, error);

	error = dmu_objset_hold(fullname, FTAG, &origin);
	if (error != ENOENT)
		fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);
	/*
	 * Create snapshot, add temporary hold, verify that we can't
	 * destroy a held snapshot, mark for deferred destroy,
	 * release hold, verify snapshot was destroyed.
	 */
	error = dmu_objset_snapshot_one(osname, snapname);
	if (error) {
		if (error == ENOSPC) {
			ztest_record_enospc("dmu_objset_snapshot");
			goto out;
		}
		fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
	}

	holds = fnvlist_alloc();
	fnvlist_add_string(holds, fullname, tag);
	error = dsl_dataset_user_hold(holds, 0, NULL);
	fnvlist_free(holds);

	if (error == ENOSPC) {
		ztest_record_enospc("dsl_dataset_user_hold");
		goto out;
	} else if (error) {
		fatal(0, "dsl_dataset_user_hold(%s, %s) = %u",
		    fullname, tag, error);
	}

	error = dsl_destroy_snapshot(fullname, B_FALSE);
	if (error != EBUSY) {
		fatal(0, "dsl_destroy_snapshot(%s, B_FALSE) = %d",
		    fullname, error);
	}

	error = dsl_destroy_snapshot(fullname, B_TRUE);
	if (error) {
		fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
		    fullname, error);
	}

	error = user_release_one(fullname, tag);
	if (error)
		fatal(0, "user_release_one(%s, %s) = %d", fullname, tag, error);

	VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT);

out:
	(void) rw_unlock(&ztest_name_lock);
}
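/*
 * Informally, the sequence exercised above corresponds to the CLI flow
 * (shown only as an aside):
 *
 *	zfs snapshot <fs>@sh1_<id>
 *	zfs hold tag_<id> <fs>@sh1_<id>
 *	zfs destroy <fs>@sh1_<id>		(fails with EBUSY while held)
 *	zfs destroy -d <fs>@sh1_<id>		(defers the destroy)
 *	zfs release tag_<id> <fs>@sh1_<id>	(deferred destroy completes)
 */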
/*
 * Inject random faults into the on-disk data.
 */
void
ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
{
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = ztest_spa;
	uint64_t bad = 0x1990c0ffeedecade;
	char path0[MAXPATHLEN];
	char pathrand[MAXPATHLEN];
	int bshift = SPA_MAXBLOCKSHIFT + 2;	/* don't scrog all labels */
	boolean_t islog = B_FALSE;

	VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
	maxfaults = MAXFAULTS();
	leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
	mirror_save = zs->zs_mirrors;
	VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);

	ASSERT(leaves >= 1);

	/*
	 * Grab the name lock as reader. There are some operations
	 * which don't like to have their vdevs changed while
	 * they are in progress (i.e. spa_change_guid). Those
	 * operations will have grabbed the name lock as writer.
	 */
	(void) rw_rdlock(&ztest_name_lock);

	/*
	 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
	 */
	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	if (ztest_random(2) == 0) {
		/*
		 * Inject errors on a normal data device or slog device.
		 */
		top = ztest_random_vdev_top(spa, B_TRUE);
		leaf = ztest_random(leaves) + zs->zs_splits;

		/*
		 * Generate paths to the first leaf in this top-level vdev,
		 * and to the random leaf we selected.  We'll induce transient
		 * write failures and random online/offline activity on leaf 0,
		 * and we'll write random garbage to the randomly chosen leaf.
		 */
		(void) snprintf(path0, sizeof (path0), ztest_dev_template,
		    ztest_opts.zo_dir, ztest_opts.zo_pool,
		    top * leaves + zs->zs_splits);
		(void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template,
		    ztest_opts.zo_dir, ztest_opts.zo_pool,
		    top * leaves + leaf);

		vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
		if (vd0 != NULL && vd0->vdev_top->vdev_islog)
			islog = B_TRUE;

		/*
		 * If the top-level vdev needs to be resilvered
		 * then we only allow faults on the device that is
		 * resilvering.
		 */
		if (vd0 != NULL && maxfaults != 1 &&
		    (!vdev_resilver_needed(vd0->vdev_top, NULL, NULL) ||
		    vd0->vdev_resilver_txg != 0)) {
			/*
			 * Make vd0 explicitly claim to be unreadable,
			 * or unwriteable, or reach behind its back
			 * and close the underlying fd.  We can do this if
			 * maxfaults == 0 because we'll fail and reexecute,
			 * and we can do it if maxfaults >= 2 because we'll
			 * have enough redundancy.  If maxfaults == 1, the
			 * combination of this with injection of random data
			 * corruption below exceeds the pool's fault tolerance.
			 */
			vdev_file_t *vf = vd0->vdev_tsd;

			if (vf != NULL && ztest_random(3) == 0) {
				(void) close(vf->vf_vnode->v_fd);
				vf->vf_vnode->v_fd = -1;
			} else if (ztest_random(2) == 0) {
				vd0->vdev_cant_read = B_TRUE;
			} else {
				vd0->vdev_cant_write = B_TRUE;
			}
			guid0 = vd0->vdev_guid;
		}
	} else {
		/*
		 * Inject errors on an l2cache device.
		 */
		spa_aux_vdev_t *sav = &spa->spa_l2cache;

		if (sav->sav_count == 0) {
			spa_config_exit(spa, SCL_STATE, FTAG);
			(void) rw_unlock(&ztest_name_lock);
			return;
		}
		vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
		guid0 = vd0->vdev_guid;
		(void) strcpy(path0, vd0->vdev_path);
		(void) strcpy(pathrand, vd0->vdev_path);

		maxfaults = INT_MAX;	/* no limit on cache devices */
	}

	spa_config_exit(spa, SCL_STATE, FTAG);
	(void) rw_unlock(&ztest_name_lock);
	/*
	 * If we can tolerate two or more faults, or we're dealing
	 * with a slog, randomly online/offline vd0.
	 */
	if ((maxfaults >= 2 || islog) && guid0 != 0) {
		if (ztest_random(10) < 6) {
			int flags = (ztest_random(2) == 0 ?
			    ZFS_OFFLINE_TEMPORARY : 0);

			/*
			 * We have to grab the zs_name_lock as writer to
			 * prevent a race between offlining a slog and
			 * destroying a dataset. Offlining the slog will
			 * grab a reference on the dataset which may cause
			 * dmu_objset_destroy() to fail with EBUSY thus
			 * leaving the dataset in an inconsistent state.
			 */
			if (islog)
				(void) rw_wrlock(&ztest_name_lock);

			VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);

			if (islog)
				(void) rw_unlock(&ztest_name_lock);
		} else {
			/*
			 * Ideally we would like to be able to randomly
			 * call vdev_[on|off]line without holding locks
			 * to force unpredictable failures but the side
			 * effects of vdev_[on|off]line prevent us from
			 * doing so. We grab the ztest_vdev_lock here to
			 * prevent a race between injection testing and
			 * vdev addition/removal.
			 */
			VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
			(void) vdev_online(spa, guid0, 0, NULL);
			VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
		}
	}

	/*
	 * We have at least single-fault tolerance, so inject data corruption.
	 */
	fd = open(pathrand, O_RDWR);

	if (fd == -1)	/* we hit a gap in the device namespace */
		return;

	fsize = lseek(fd, 0, SEEK_END);

	while (--iters != 0) {
		offset = ztest_random(fsize / (leaves << bshift)) *
		    (leaves << bshift) + (leaf << bshift) +
		    (ztest_random(1ULL << (bshift - 1)) & -8ULL);

		if (offset >= fsize)
			continue;

		VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
		if (mirror_save != zs->zs_mirrors) {
			VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
			(void) close(fd);
			return;
		}

		if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
			fatal(1, "can't inject bad word at 0x%llx in %s",
			    offset, pathrand);

		VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);

		if (ztest_opts.zo_verbose >= 7)
			(void) printf("injected bad word into %s,"
			    " offset 0x%llx\n", pathrand, (u_longlong_t)offset);
	}

	(void) close(fd);
}
/*
 * Verify that DDT repair works as expected.
 */
void
ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
{
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = ztest_spa;
	objset_t *os = zd->zd_os;
	ztest_od_t od[1];
	uint64_t object, blocksize, txg, pattern, psize;
	enum zio_checksum checksum = spa_dedup_checksum(spa);
	int copies = 2 * ZIO_DEDUPDITTO_MIN;

	blocksize = ztest_random_blocksize();
	blocksize = MIN(blocksize, 2048);	/* because we write so many */

	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);

	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
		return;

	/*
	 * Take the name lock as writer to prevent anyone else from changing
	 * the pool and dataset properties we need to maintain during this test.
	 */
	(void) rw_wrlock(&ztest_name_lock);

	if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
	    B_FALSE) != 0 ||
	    ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
	    B_FALSE) != 0) {
		(void) rw_unlock(&ztest_name_lock);
		return;
	}
	object = od[0].od_object;
	blocksize = od[0].od_blocksize;
	pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os);

	ASSERT(object != 0);

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, object, 0, copies * blocksize);
	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		(void) rw_unlock(&ztest_name_lock);
		return;
	}

	/*
	 * Write all the copies of our block.
	 */
	for (int i = 0; i < copies; i++) {
		uint64_t offset = i * blocksize;
		int error = dmu_buf_hold(os, object, offset, FTAG, &db,
		    DMU_READ_NO_PREFETCH);
		if (error != 0) {
			fatal(B_FALSE, "dmu_buf_hold(%p, %llu, %llu) = %u",
			    os, (long long)object, (long long) offset, error);
		}
		ASSERT(db->db_offset == offset);
		ASSERT(db->db_size == blocksize);
		ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) ||
		    ztest_pattern_match(db->db_data, db->db_size, 0ULL));
		dmu_buf_will_fill(db, tx);
		ztest_pattern_set(db->db_data, db->db_size, pattern);
		dmu_buf_rele(db, FTAG);
	}

	dmu_tx_commit(tx);
	txg_wait_synced(spa_get_dsl(spa), txg);

	/*
	 * Find out what block we got.
	 */
	VERIFY0(dmu_buf_hold(os, object, 0, FTAG, &db,
	    DMU_READ_NO_PREFETCH));
	blk = *((dmu_buf_impl_t *)db)->db_blkptr;
	dmu_buf_rele(db, FTAG);

	/*
	 * Damage the block.  Dedup-ditto will save us when we read it later.
	 */
	psize = BP_GET_PSIZE(&blk);
	buf = zio_buf_alloc(psize);
	ztest_pattern_set(buf, psize, ~pattern);

	(void) zio_wait(zio_rewrite(NULL, spa, 0, &blk,
	    buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL));

	zio_buf_free(buf, psize);

	(void) rw_unlock(&ztest_name_lock);
}
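/*
 * Why the damaged block is expected to heal: ztest runs the pool with
 * dedupditto enabled (spa_dedup_ditto is set to 2 * ZIO_DEDUPDITTO_MIN in
 * ztest_run() below), and the loop above writes `copies` identical blocks,
 * so their shared DDT entry crosses the ditto threshold and the DDT keeps an
 * extra physical copy.  The ZIO_FLAG_INDUCE_DAMAGE rewrite clobbers only one
 * copy, so a later read that fails checksum can be repaired from a surviving
 * ditto copy.
 */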
void
ztest_scrub(ztest_ds_t *zd, uint64_t id)
{
	spa_t *spa = ztest_spa;

	(void) spa_scan(spa, POOL_SCAN_SCRUB);
	(void) poll(NULL, 0, 100); /* wait a moment, then force a restart */
	(void) spa_scan(spa, POOL_SCAN_SCRUB);
}
/*
 * Change the guid for the pool.
 */
void
ztest_reguid(ztest_ds_t *zd, uint64_t id)
{
	spa_t *spa = ztest_spa;
	uint64_t orig, load;
	int error;

	orig = spa_guid(spa);
	load = spa_load_guid(spa);

	(void) rw_wrlock(&ztest_name_lock);
	error = spa_change_guid(spa);
	(void) rw_unlock(&ztest_name_lock);

	if (error != 0)
		return;

	if (ztest_opts.zo_verbose >= 4) {
		(void) printf("Changed guid old %llu -> %llu\n",
		    (u_longlong_t)orig, (u_longlong_t)spa_guid(spa));
	}

	VERIFY3U(orig, !=, spa_guid(spa));
	VERIFY3U(load, ==, spa_load_guid(spa));
}
/*
 * Rename the pool to a different name and then rename it back.
 */
void
ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
{
	char *oldname, *newname;
	spa_t *spa;

	(void) rw_wrlock(&ztest_name_lock);

	oldname = ztest_opts.zo_pool;
	newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
	(void) strcpy(newname, oldname);
	(void) strcat(newname, "_tmp");

	VERIFY3U(0, ==, spa_rename(oldname, newname));

	/*
	 * Try to open it under the old name, which shouldn't exist
	 */
	VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));

	/*
	 * Open it under the new name and make sure it's still the same spa_t.
	 */
	VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));

	ASSERT(spa == ztest_spa);
	spa_close(spa, FTAG);

	/*
	 * Rename it back to the original
	 */
	VERIFY3U(0, ==, spa_rename(newname, oldname));

	/*
	 * Make sure it can still be opened
	 */
	VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));

	ASSERT(spa == ztest_spa);
	spa_close(spa, FTAG);

	umem_free(newname, strlen(newname) + 1);

	(void) rw_unlock(&ztest_name_lock);
}
/*
 * Verify pool integrity by running zdb.
 */
static void
ztest_run_zdb(char *pool)
{
	char zdb[MAXPATHLEN + MAXNAMELEN + 20];

	(void) realpath(getexecname(), zdb);

	/* zdb lives in /usr/sbin, while ztest lives in /usr/bin */
	bin = strstr(zdb, "/usr/bin/");
	ztest = strstr(bin, "/ztest");
	isalen = ztest - isa;
	(void) sprintf(bin,
	    "/usr/sbin%.*s/zdb -bcc%s%s -d -U %s %s",
	    isalen,
	    isa,
	    ztest_opts.zo_verbose >= 3 ? "s" : "",
	    ztest_opts.zo_verbose >= 4 ? "v" : "",
	    spa_config_path,
	    pool);

	if (ztest_opts.zo_verbose >= 5)
		(void) printf("Executing %s\n", strstr(zdb, "zdb "));

	fp = popen(zdb, "r");

	while (fgets(zbuf, sizeof (zbuf), fp) != NULL)
		if (ztest_opts.zo_verbose >= 3)
			(void) printf("%s", zbuf);

	status = pclose(fp);

	if (status == 0)
		return;

	ztest_dump_core = 0;
	if (WIFEXITED(status))
		fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status));
	else
		fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status));
}
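/*
 * With the flags assembled above, the spawned command looks roughly like
 *
 *	/usr/sbin/zdb -bcc -d -U <zo_dir>/zpool.cache <pool>
 *
 * with "s" appended to the flag group when zo_verbose >= 3 and "v" when
 * zo_verbose >= 4 (i.e. -bccsv at the highest verbosity used here).
 */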
static void
ztest_walk_pool_directory(char *header)
{
	spa_t *spa = NULL;

	if (ztest_opts.zo_verbose >= 6)
		(void) printf("%s\n", header);

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL)
		if (ztest_opts.zo_verbose >= 6)
			(void) printf("\t%s\n", spa_name(spa));
	mutex_exit(&spa_namespace_lock);
}
static void
ztest_spa_import_export(char *oldname, char *newname)
{
	nvlist_t *config, *newconfig;
	uint64_t pool_guid;
	spa_t *spa;
	int error;

	if (ztest_opts.zo_verbose >= 4) {
		(void) printf("import/export: old = %s, new = %s\n",
		    oldname, newname);
	}

	/*
	 * Clean up from previous runs.
	 */
	(void) spa_destroy(newname);

	/*
	 * Get the pool's configuration and guid.
	 */
	VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));

	/*
	 * Kick off a scrub to tickle scrub/export races.
	 */
	if (ztest_random(2) == 0)
		(void) spa_scan(spa, POOL_SCAN_SCRUB);

	pool_guid = spa_guid(spa);
	spa_close(spa, FTAG);

	ztest_walk_pool_directory("pools before export");

	VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE));

	ztest_walk_pool_directory("pools after export");

	newconfig = spa_tryimport(config);
	ASSERT(newconfig != NULL);
	nvlist_free(newconfig);

	/*
	 * Import it under the new name.
	 */
	error = spa_import(newname, config, NULL, 0);
	if (error != 0) {
		dump_nvlist(config, 0);
		fatal(B_FALSE, "couldn't import pool %s as %s: error %u",
		    oldname, newname, error);
	}

	ztest_walk_pool_directory("pools after import");

	/*
	 * Try to import it again -- should fail with EEXIST.
	 */
	VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0));

	/*
	 * Try to import it under a different name -- should fail with EEXIST.
	 */
	VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0));

	/*
	 * Verify that the pool is no longer visible under the old name.
	 */
	VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));

	/*
	 * Verify that we can open and close the pool using the new name.
	 */
	VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
	ASSERT(pool_guid == spa_guid(spa));
	spa_close(spa, FTAG);

	nvlist_free(config);
}
static void
ztest_resume(spa_t *spa)
{
	if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6)
		(void) printf("resuming from suspended state\n");
	spa_vdev_state_enter(spa, SCL_NONE);
	vdev_clear(spa, NULL);
	(void) spa_vdev_state_exit(spa, NULL, 0);
	(void) zio_resume(spa);
}
static void *
ztest_resume_thread(void *arg)
{
	spa_t *spa = arg;

	while (!ztest_exiting) {
		if (spa_suspended(spa))
			ztest_resume(spa);
		(void) poll(NULL, 0, 100);
	}
	return (NULL);
}
static void *
ztest_deadman_thread(void *arg)
{
	ztest_shared_t *zs = arg;
	spa_t *spa = ztest_spa;
	hrtime_t delta, total = 0;

	for (;;) {
		delta = zs->zs_thread_stop - zs->zs_thread_start +
		    MSEC2NSEC(zfs_deadman_synctime_ms);

		(void) poll(NULL, 0, (int)NSEC2MSEC(delta));

		/*
		 * If the pool is suspended then fail immediately. Otherwise,
		 * check to see if the pool is making any progress. If
		 * vdev_deadman() discovers that there hasn't been any recent
		 * I/Os then it will end up aborting the tests.
		 */
		if (spa_suspended(spa) || spa->spa_root_vdev == NULL) {
			fatal(0, "aborting test after %llu seconds because "
			    "pool has transitioned to a suspended state.",
			    zfs_deadman_synctime_ms / 1000);
		}
		vdev_deadman(spa->spa_root_vdev);

		total += zfs_deadman_synctime_ms / 1000;
		(void) printf("ztest has been running for %lld seconds\n",
		    total);
	}
}
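/*
 * For scale: main() below sets zfs_deadman_synctime_ms to 300000, so each
 * pass of the deadman loop sleeps for the worker's expected run window plus
 * five minutes before checking whether the pool has wedged.
 */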
static void
ztest_execute(int test, ztest_info_t *zi, uint64_t id)
{
	ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets];
	ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test);
	hrtime_t functime = gethrtime();

	for (int i = 0; i < zi->zi_iters; i++)
		zi->zi_func(zd, id);

	functime = gethrtime() - functime;

	atomic_add_64(&zc->zc_count, 1);
	atomic_add_64(&zc->zc_time, functime);

	if (ztest_opts.zo_verbose >= 4) {
		Dl_info dli;
		(void) dladdr((void *)zi->zi_func, &dli);
		(void) printf("%6.2f sec in %s\n",
		    (double)functime / NANOSEC, dli.dli_sname);
	}
}
static void *
ztest_thread(void *arg)
{
	int rand;
	uint64_t id = (uintptr_t)arg;
	ztest_shared_t *zs = ztest_shared;
	uint64_t call_next;
	hrtime_t now;
	ztest_info_t *zi;
	ztest_shared_callstate_t *zc;

	while ((now = gethrtime()) < zs->zs_thread_stop) {
		/*
		 * See if it's time to force a crash.
		 */
		if (now > zs->zs_thread_kill)
			ztest_kill(zs);

		/*
		 * If we're getting ENOSPC with some regularity, stop.
		 */
		if (zs->zs_enospc_count > 10)
			break;

		/*
		 * Pick a random function to execute.
		 */
		rand = ztest_random(ZTEST_FUNCS);
		zi = &ztest_info[rand];
		zc = ZTEST_GET_SHARED_CALLSTATE(rand);
		call_next = zc->zc_next;

		if (now >= call_next &&
		    atomic_cas_64(&zc->zc_next, call_next, call_next +
		    ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) {
			ztest_execute(rand, zi, id);
		}
	}

	return (NULL);
}
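/*
 * The atomic_cas_64() in ztest_thread() is what rate-limits each test
 * function: zc_next holds the earliest time the function may run again, and
 * only the thread that wins the compare-and-swap advances it (by a random
 * amount averaging zi_interval[0]) and calls ztest_execute().  Threads that
 * lose the race simply pick another random function on the next iteration.
 */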
static void
ztest_dataset_name(char *dsname, char *pool, int d)
{
	(void) snprintf(dsname, MAXNAMELEN, "%s/ds_%d", pool, d);
}

static void
ztest_dataset_destroy(int d)
{
	char name[MAXNAMELEN];

	ztest_dataset_name(name, ztest_opts.zo_pool, d);

	if (ztest_opts.zo_verbose >= 3)
		(void) printf("Destroying %s to free up space\n", name);

	/*
	 * Cleanup any non-standard clones and snapshots.  In general,
	 * ztest thread t operates on dataset (t % zopt_datasets),
	 * so there may be more than one thing to clean up.
	 */
	for (int t = d; t < ztest_opts.zo_threads;
	    t += ztest_opts.zo_datasets) {
		ztest_dsl_dataset_cleanup(name, t);
	}

	(void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
	    DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
}
static void
ztest_dataset_dirobj_verify(ztest_ds_t *zd)
{
	uint64_t usedobjs, dirobjs, scratch;

	/*
	 * ZTEST_DIROBJ is the object directory for the entire dataset.
	 * Therefore, the number of objects in use should equal the
	 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
	 * If not, we have an object leak.
	 *
	 * Note that we can only check this in ztest_dataset_open(),
	 * when the open-context and syncing-context values agree.
	 * That's because zap_count() returns the open-context value,
	 * while dmu_objset_space() returns the rootbp fill count.
	 */
	VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs));
	dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch);
	ASSERT3U(dirobjs + 1, ==, usedobjs);
}
static int
ztest_dataset_open(int d)
{
	ztest_ds_t *zd = &ztest_ds[d];
	uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq;
	objset_t *os;
	zilog_t *zilog;
	char name[MAXNAMELEN];
	int error;

	ztest_dataset_name(name, ztest_opts.zo_pool, d);

	(void) rw_rdlock(&ztest_name_lock);

	error = ztest_dataset_create(name);
	if (error == ENOSPC) {
		(void) rw_unlock(&ztest_name_lock);
		ztest_record_enospc(FTAG);
		return (error);
	}
	ASSERT(error == 0 || error == EEXIST);

	VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, zd, &os));
	(void) rw_unlock(&ztest_name_lock);

	ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);

	zilog = zd->zd_zilog;

	if (zilog->zl_header->zh_claim_lr_seq != 0 &&
	    zilog->zl_header->zh_claim_lr_seq < committed_seq)
		fatal(0, "missing log records: claimed %llu < committed %llu",
		    zilog->zl_header->zh_claim_lr_seq, committed_seq);

	ztest_dataset_dirobj_verify(zd);

	zil_replay(os, zd, ztest_replay_vector);

	ztest_dataset_dirobj_verify(zd);

	if (ztest_opts.zo_verbose >= 6)
		(void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
		    zd->zd_name,
		    (u_longlong_t)zilog->zl_parse_blk_count,
		    (u_longlong_t)zilog->zl_parse_lr_count,
		    (u_longlong_t)zilog->zl_replaying_seq);

	zilog = zil_open(os, ztest_get_data);

	if (zilog->zl_replaying_seq != 0 &&
	    zilog->zl_replaying_seq < committed_seq)
		fatal(0, "missing log records: replayed %llu < committed %llu",
		    zilog->zl_replaying_seq, committed_seq);

	return (0);
}
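/*
 * The two "missing log records" checks in ztest_dataset_open() are what
 * catch ZIL regressions across the parent's kill/restart cycle: zd_seq in
 * the shared (mmap-ed) state records the highest commit sequence the
 * previous process claimed to make durable, so both the claimed and the
 * replayed sequence numbers must have caught up to it after replay.
 */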
static void
ztest_dataset_close(int d)
{
	ztest_ds_t *zd = &ztest_ds[d];

	zil_close(zd->zd_zilog);
	dmu_objset_disown(zd->zd_os, zd);
}
/*
 * Kick off threads to run tests on all datasets in parallel.
 */
static void
ztest_run(ztest_shared_t *zs)
{
	thread_t *tid;
	spa_t *spa;
	objset_t *os;
	thread_t resume_tid;
	int error;

	ztest_exiting = B_FALSE;

	/*
	 * Initialize parent/child shared state.
	 */
	VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
	VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);

	zs->zs_thread_start = gethrtime();
	zs->zs_thread_stop =
	    zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC;
	zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
	zs->zs_thread_kill = zs->zs_thread_stop;
	if (ztest_random(100) < ztest_opts.zo_killrate) {
		zs->zs_thread_kill -=
		    ztest_random(ztest_opts.zo_passtime * NANOSEC);
	}

	(void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL);

	list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
	    offsetof(ztest_cb_data_t, zcd_node));

	kernel_init(FREAD | FWRITE);
	VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
	spa->spa_debug = B_TRUE;
	metaslab_preload_limit = ztest_random(20) + 1;

	VERIFY0(dmu_objset_own(ztest_opts.zo_pool,
	    DMU_OST_ANY, B_TRUE, FTAG, &os));
	zs->zs_guid = dmu_objset_fsid_guid(os);
	dmu_objset_disown(os, FTAG);

	spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN;

	/*
	 * We don't expect the pool to suspend unless maxfaults == 0,
	 * in which case ztest_fault_inject() temporarily takes away
	 * the only valid replica.
	 */
	if (MAXFAULTS() == 0)
		spa->spa_failmode = ZIO_FAILURE_MODE_WAIT;
	else
		spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;

	/*
	 * Create a thread to periodically resume suspended I/O.
	 */
	VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND,
	    &resume_tid) == 0);

	/*
	 * Create a deadman thread to abort() if we hang.
	 */
	VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND,
	    NULL) == 0);

	/*
	 * Verify that we can safely inquire about any object,
	 * whether it's allocated or not.  To make it interesting,
	 * we probe a 5-wide window around each power of two.
	 * This hits all edge cases, including zero and the max.
	 */
	for (int t = 0; t < 64; t++) {
		for (int d = -5; d <= 5; d++) {
			error = dmu_object_info(spa->spa_meta_objset,
			    (1ULL << t) + d, NULL);
			ASSERT(error == 0 || error == ENOENT ||
			    error == EINVAL);
		}
	}

	/*
	 * If we got any ENOSPC errors on the previous run, destroy something.
	 */
	if (zs->zs_enospc_count != 0) {
		int d = ztest_random(ztest_opts.zo_datasets);
		ztest_dataset_destroy(d);
	}
	zs->zs_enospc_count = 0;

	tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t),
	    UMEM_NOFAIL);

	if (ztest_opts.zo_verbose >= 4)
		(void) printf("starting main threads...\n");

	/*
	 * Kick off all the tests that run in parallel.
	 */
	for (int t = 0; t < ztest_opts.zo_threads; t++) {
		if (t < ztest_opts.zo_datasets &&
		    ztest_dataset_open(t) != 0)
			return;
		VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t,
		    THR_BOUND, &tid[t]) == 0);
	}

	/*
	 * Wait for all of the tests to complete.  We go in reverse order
	 * so we don't close datasets while threads are still using them.
	 */
	for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) {
		VERIFY(thr_join(tid[t], NULL, NULL) == 0);
		if (t < ztest_opts.zo_datasets)
			ztest_dataset_close(t);
	}

	txg_wait_synced(spa_get_dsl(spa), 0);

	zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
	zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
	zfs_dbgmsg_print(FTAG);

	umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t));

	/* Kill the resume thread */
	ztest_exiting = B_TRUE;
	VERIFY(thr_join(resume_tid, NULL, NULL) == 0);

	/*
	 * Right before closing the pool, kick off a bunch of async I/O;
	 * spa_close() should wait for it to complete.
	 */
	for (uint64_t object = 1; object < 50; object++)
		dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20);

	spa_close(spa, FTAG);

	/*
	 * Verify that we can loop over all pools.
	 */
	mutex_enter(&spa_namespace_lock);
	for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
		if (ztest_opts.zo_verbose > 3)
			(void) printf("spa_next: found %s\n", spa_name(spa));
	mutex_exit(&spa_namespace_lock);

	/*
	 * Verify that we can export the pool and reimport it under a
	 * different name.
	 */
	if (ztest_random(2) == 0) {
		char name[MAXNAMELEN];
		(void) snprintf(name, MAXNAMELEN, "%s_import",
		    ztest_opts.zo_pool);
		ztest_spa_import_export(ztest_opts.zo_pool, name);
		ztest_spa_import_export(name, ztest_opts.zo_pool);
	}

	list_destroy(&zcl.zcl_callbacks);

	(void) _mutex_destroy(&zcl.zcl_callbacks_lock);

	(void) rwlock_destroy(&ztest_name_lock);
	(void) _mutex_destroy(&ztest_vdev_lock);
}
static void
ztest_freeze(void)
{
	ztest_ds_t *zd = &ztest_ds[0];
	spa_t *spa;
	int numloops = 0;

	if (ztest_opts.zo_verbose >= 3)
		(void) printf("testing spa_freeze()...\n");

	kernel_init(FREAD | FWRITE);
	VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
	VERIFY3U(0, ==, ztest_dataset_open(0));
	spa->spa_debug = B_TRUE;

	/*
	 * Force the first log block to be transactionally allocated.
	 * We have to do this before we freeze the pool -- otherwise
	 * the log chain won't be anchored.
	 */
	while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
		ztest_dmu_object_alloc_free(zd, 0);
		zil_commit(zd->zd_zilog, 0);
	}

	txg_wait_synced(spa_get_dsl(spa), 0);

	/*
	 * Freeze the pool.  This stops spa_sync() from doing anything,
	 * so that the only way to record changes from now on is the ZIL.
	 */
	spa_freeze(spa);

	/*
	 * Run tests that generate log records but don't alter the pool config
	 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc).
	 * We do a txg_wait_synced() after each iteration to force the txg
	 * to increase well beyond the last synced value in the uberblock.
	 * The ZIL should be OK with that.
	 */
	while (ztest_random(10) != 0 &&
	    numloops++ < ztest_opts.zo_maxloops) {
		ztest_dmu_write_parallel(zd, 0);
		ztest_dmu_object_alloc_free(zd, 0);
		txg_wait_synced(spa_get_dsl(spa), 0);
	}

	/*
	 * Commit all of the changes we just generated.
	 */
	zil_commit(zd->zd_zilog, 0);
	txg_wait_synced(spa_get_dsl(spa), 0);

	/*
	 * Close our dataset and close the pool.
	 */
	ztest_dataset_close(0);
	spa_close(spa, FTAG);

	/*
	 * Open and close the pool and dataset to induce log replay.
	 */
	kernel_init(FREAD | FWRITE);
	VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
	ASSERT(spa_freeze_txg(spa) == UINT64_MAX);
	VERIFY3U(0, ==, ztest_dataset_open(0));
	ztest_dataset_close(0);

	spa->spa_debug = B_TRUE;
	txg_wait_synced(spa_get_dsl(spa), 0);
	ztest_reguid(NULL, 0);

	spa_close(spa, FTAG);
}
static void
print_time(hrtime_t t, char *timebuf)
{
	hrtime_t s = t / NANOSEC;
	hrtime_t m = s / 60;
	hrtime_t h = m / 60;
	hrtime_t d = h / 24;

	s -= m * 60;
	m -= h * 60;
	h -= d * 24;

	if (d)
		(void) sprintf(timebuf,
		    "%llud%02lluh%02llum%02llus", d, h, m, s);
	else if (h)
		(void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s);
	else if (m)
		(void) sprintf(timebuf, "%llum%02llus", m, s);
	else
		(void) sprintf(timebuf, "%llus", s);
}
static nvlist_t *
make_random_props()
{
	nvlist_t *props;

	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
	if (ztest_random(2) == 0)
		return (props);
	VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0);

	return (props);
}
/*
 * Create a storage pool with the given name and initial vdev size.
 * Then test spa_freeze() functionality.
 */
static void
ztest_init(ztest_shared_t *zs)
{
	spa_t *spa;
	nvlist_t *nvroot, *props;

	VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
	VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);

	kernel_init(FREAD | FWRITE);

	/*
	 * Create the storage pool.
	 */
	(void) spa_destroy(ztest_opts.zo_pool);
	ztest_shared->zs_vdev_next_leaf = 0;
	zs->zs_mirrors = ztest_opts.zo_mirrors;
	nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
	    0, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
	props = make_random_props();
	for (int i = 0; i < SPA_FEATURES; i++) {
		(void) snprintf(buf, sizeof (buf), "feature@%s",
		    spa_feature_table[i].fi_uname);
		VERIFY3U(0, ==, nvlist_add_uint64(props, buf, 0));
	}
	VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props, NULL));
	nvlist_free(nvroot);

	VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
	zs->zs_metaslab_sz =
	    1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;

	spa_close(spa, FTAG);

	ztest_run_zdb(ztest_opts.zo_pool);

	ztest_freeze();

	ztest_run_zdb(ztest_opts.zo_pool);

	(void) rwlock_destroy(&ztest_name_lock);
	(void) _mutex_destroy(&ztest_vdev_lock);
}
	static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX";

	ztest_fd_data = mkstemp(ztest_name_data);
	ASSERT3S(ztest_fd_data, >=, 0);
	(void) unlink(ztest_name_data);
static int
shared_data_size(ztest_shared_hdr_t *hdr)
{
	int size;

	size = hdr->zh_hdr_size;
	size += hdr->zh_opts_size;
	size += hdr->zh_size;
	size += hdr->zh_stats_size * hdr->zh_stats_count;
	size += hdr->zh_ds_size * hdr->zh_ds_count;

	return (size);
}
	ztest_shared_hdr_t *hdr;

	hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
	    PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
	ASSERT(hdr != MAP_FAILED);

	VERIFY3U(0, ==, ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t)));

	hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
	hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
	hdr->zh_size = sizeof (ztest_shared_t);
	hdr->zh_stats_size = sizeof (ztest_shared_callstate_t);
	hdr->zh_stats_count = ZTEST_FUNCS;
	hdr->zh_ds_size = sizeof (ztest_shared_ds_t);
	hdr->zh_ds_count = ztest_opts.zo_datasets;

	size = shared_data_size(hdr);
	VERIFY3U(0, ==, ftruncate(ztest_fd_data, size));

	(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
	ztest_shared_hdr_t *hdr;

	hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
	    PROT_READ, MAP_SHARED, ztest_fd_data, 0);
	ASSERT(hdr != MAP_FAILED);

	size = shared_data_size(hdr);

	(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
	hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
	    PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
	ASSERT(hdr != MAP_FAILED);
	buf = (uint8_t *)hdr;

	offset = hdr->zh_hdr_size;
	ztest_shared_opts = (void *)&buf[offset];
	offset += hdr->zh_opts_size;
	ztest_shared = (void *)&buf[offset];
	offset += hdr->zh_size;
	ztest_shared_callstate = (void *)&buf[offset];
	offset += hdr->zh_stats_size * hdr->zh_stats_count;
	ztest_shared_ds = (void *)&buf[offset];
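	/*
	 * Carved-up layout of the shared mmap()ed file, in file order (the
	 * sizes come from the header fields initialized above):
	 *
	 *	ztest_shared_hdr_t				(zh_hdr_size)
	 *	ztest_shared_opts_t				(zh_opts_size)
	 *	ztest_shared_t					(zh_size)
	 *	ztest_shared_callstate_t[zh_stats_count]	one per function
	 *	ztest_shared_ds_t[zh_ds_count]			one per dataset
	 */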
static boolean_t
exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
{
	pid_t pid;
	int status;
	char *cmdbuf = NULL;

	if (cmd == NULL) {
		cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
		(void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN);
		cmd = cmdbuf;
	}

	pid = fork();

	if (pid == -1)
		fatal(1, "fork failed");

	if (pid == 0) {	/* child */
		char *emptyargv[2] = { cmd, NULL };
		char fd_data_str[12];

		struct rlimit rl = { 1024, 1024 };
		(void) setrlimit(RLIMIT_NOFILE, &rl);

		(void) close(ztest_fd_rand);
		VERIFY3U(11, >=,
		    snprintf(fd_data_str, 12, "%d", ztest_fd_data));
		VERIFY0(setenv("ZTEST_FD_DATA", fd_data_str, 1));

		(void) enable_extended_FILE_stdio(-1, -1);
		if (libpath != NULL)
			VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1));
		(void) execv(cmd, emptyargv);
		ztest_dump_core = B_FALSE;
		fatal(B_TRUE, "exec failed: %s", cmd);
	}

	if (cmdbuf != NULL) {
		umem_free(cmdbuf, MAXPATHLEN);
		cmd = NULL;
	}

	while (waitpid(pid, &status, 0) != pid)
		continue;
	if (statusp != NULL)
		*statusp = status;

	if (WIFEXITED(status)) {
		if (WEXITSTATUS(status) != 0) {
			(void) fprintf(stderr, "child exited with code %d\n",
			    WEXITSTATUS(status));
			exit(2);
		}
		return (B_FALSE);
	} else if (WIFSIGNALED(status)) {
		if (!ignorekill || WTERMSIG(status) != SIGKILL) {
			(void) fprintf(stderr, "child died with signal %d\n",
			    WTERMSIG(status));
			exit(3);
		}
		return (B_TRUE);
	} else {
		(void) fprintf(stderr, "something strange happened to child\n");
		exit(4);
	}
}
static void
ztest_run_init(void)
{
	ztest_shared_t *zs = ztest_shared;

	ASSERT(ztest_opts.zo_init != 0);

	/*
	 * Blow away any existing copy of zpool.cache
	 */
	(void) remove(spa_config_path);

	/*
	 * Create and initialize our storage pool.
	 */
	for (int i = 1; i <= ztest_opts.zo_init; i++) {
		bzero(zs, sizeof (ztest_shared_t));
		if (ztest_opts.zo_verbose >= 3 &&
		    ztest_opts.zo_init != 1) {
			(void) printf("ztest_init(), pass %d\n", i);
		}
		ztest_init(zs);
	}
}
int
main(int argc, char **argv)
{
	int kills = 0;
	int iters = 0;
	int older = 0;
	int newer = 0;
	ztest_shared_t *zs;
	ztest_info_t *zi;
	ztest_shared_callstate_t *zc;
	spa_t *spa;
	char *cmd;
	boolean_t hasalt;
	char *fd_data_str = getenv("ZTEST_FD_DATA");

	(void) setvbuf(stdout, NULL, _IOLBF, 0);

	dprintf_setup(&argc, argv);
	zfs_deadman_synctime_ms = 300000;

	ztest_fd_rand = open("/dev/urandom", O_RDONLY);
	ASSERT3S(ztest_fd_rand, >=, 0);

	if (!fd_data_str) {
		process_options(argc, argv);

		bcopy(&ztest_opts, ztest_shared_opts,
		    sizeof (*ztest_shared_opts));
	} else {
		ztest_fd_data = atoi(fd_data_str);
		bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts));
	}
	ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);

	/* Override location of zpool.cache */
	VERIFY3U(asprintf((char **)&spa_config_path, "%s/zpool.cache",
	    ztest_opts.zo_dir), !=, -1);

	ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
	    UMEM_NOFAIL);
	zs = ztest_shared;

	if (fd_data_str) {
		metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang;
		metaslab_df_alloc_threshold =
		    zs->zs_metaslab_df_alloc_threshold;

		if (zs->zs_do_init)
			ztest_run_init();
		else
			ztest_run(zs);
		exit(0);
	}

	hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0);

	if (ztest_opts.zo_verbose >= 1) {
		(void) printf("%llu vdevs, %d datasets, %d threads,"
		    " %llu seconds...\n",
		    (u_longlong_t)ztest_opts.zo_vdevs,
		    ztest_opts.zo_datasets,
		    ztest_opts.zo_threads,
		    (u_longlong_t)ztest_opts.zo_time);
	}

	cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
	(void) strlcpy(cmd, getexecname(), MAXNAMELEN);

	zs->zs_do_init = B_TRUE;
	if (strlen(ztest_opts.zo_alt_ztest) != 0) {
		if (ztest_opts.zo_verbose >= 1) {
			(void) printf("Executing older ztest for "
			    "initialization: %s\n", ztest_opts.zo_alt_ztest);
		}
		VERIFY(!exec_child(ztest_opts.zo_alt_ztest,
		    ztest_opts.zo_alt_libpath, B_FALSE, NULL));
	} else {
		VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL));
	}
	zs->zs_do_init = B_FALSE;

	zs->zs_proc_start = gethrtime();
	zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC;

	for (int f = 0; f < ZTEST_FUNCS; f++) {
		zi = &ztest_info[f];
		zc = ZTEST_GET_SHARED_CALLSTATE(f);
		if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
			zc->zc_next = UINT64_MAX;
		else
			zc->zc_next = zs->zs_proc_start +
			    ztest_random(2 * zi->zi_interval[0] + 1);
	}

	/*
	 * Run the tests in a loop.  These tests include fault injection
	 * to verify that self-healing data works, and forced crashes
	 * to verify that we never lose on-disk consistency.
	 */
	while (gethrtime() < zs->zs_proc_stop) {
		int status;
		boolean_t killed;

		/*
		 * Initialize the workload counters for each function.
		 */
		for (int f = 0; f < ZTEST_FUNCS; f++) {
			zc = ZTEST_GET_SHARED_CALLSTATE(f);
			zc->zc_count = 0;
			zc->zc_time = 0;
		}

		/* Set the allocation switch size */
		zs->zs_metaslab_df_alloc_threshold =
		    ztest_random(zs->zs_metaslab_sz / 4) + 1;

		if (!hasalt || ztest_random(2) == 0) {
			if (hasalt && ztest_opts.zo_verbose >= 1) {
				(void) printf("Executing newer ztest: %s\n",
				    cmd);
			}
			newer++;
			killed = exec_child(cmd, NULL, B_TRUE, &status);
		} else {
			if (hasalt && ztest_opts.zo_verbose >= 1) {
				(void) printf("Executing older ztest: %s\n",
				    ztest_opts.zo_alt_ztest);
			}
			older++;
			killed = exec_child(ztest_opts.zo_alt_ztest,
			    ztest_opts.zo_alt_libpath, B_TRUE, &status);
		}

		if (killed)
			kills++;
		iters++;

		if (ztest_opts.zo_verbose >= 1) {
			hrtime_t now = gethrtime();

			now = MIN(now, zs->zs_proc_stop);
			print_time(zs->zs_proc_stop - now, timebuf);
			nicenum(zs->zs_space, numbuf);

			(void) printf("Pass %3d, %8s, %3llu ENOSPC, "
			    "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
			    iters,
			    WIFEXITED(status) ? "Complete" : "SIGKILL",
			    (u_longlong_t)zs->zs_enospc_count,
			    100.0 * zs->zs_alloc / zs->zs_space,
			    numbuf,
			    100.0 * (now - zs->zs_proc_start) /
			    (ztest_opts.zo_time * NANOSEC), timebuf);
		}

		if (ztest_opts.zo_verbose >= 2) {
			(void) printf("\nWorkload summary:\n\n");
			(void) printf("%7s %9s %s\n",
			    "Calls", "Time", "Function");
			(void) printf("%7s %9s %s\n",
			    "-----", "----", "--------");
			for (int f = 0; f < ZTEST_FUNCS; f++) {
				Dl_info dli;

				zi = &ztest_info[f];
				zc = ZTEST_GET_SHARED_CALLSTATE(f);
				print_time(zc->zc_time, timebuf);
				(void) dladdr((void *)zi->zi_func, &dli);
				(void) printf("%7llu %9s %s\n",
				    (u_longlong_t)zc->zc_count, timebuf,
				    dli.dli_sname);
			}
			(void) printf("\n");
		}

		/*
		 * It's possible that we killed a child during a rename test,
		 * in which case we'll have a 'ztest_tmp' pool lying around
		 * instead of 'ztest'.  Do a blind rename in case this happened.
		 */
		if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) {
			spa_close(spa, FTAG);
		} else {
			char tmpname[MAXNAMELEN];

			kernel_init(FREAD | FWRITE);
			(void) snprintf(tmpname, sizeof (tmpname), "%s_tmp",
			    ztest_opts.zo_pool);
			(void) spa_rename(tmpname, ztest_opts.zo_pool);
		}

		ztest_run_zdb(ztest_opts.zo_pool);
	}

	if (ztest_opts.zo_verbose >= 1) {
		if (hasalt) {
			(void) printf("%d runs of older ztest: %s\n", older,
			    ztest_opts.zo_alt_ztest);
			(void) printf("%d runs of newer ztest: %s\n", newer,
			    cmd);
		}
		(void) printf("%d killed, %d completed, %.0f%% kill rate\n",
		    kills, iters - kills, (100.0 * kills) / MAX(1, iters));
	}

	umem_free(cmd, MAXNAMELEN);

	return (0);
}