/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 */

#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
    const zbookmark_phys_t *);

static scan_cb_t dsl_scan_scrub_cb;
static void dsl_scan_cancel_sync(void *, dmu_tx_t *);
static void dsl_scan_sync_state(dsl_scan_t *, dmu_tx_t *tx);

int zfs_top_maxinflight = 32;	/* maximum I/Os per top-level */
int zfs_resilver_delay = 2;	/* number of ticks to delay resilver */
int zfs_scrub_delay = 4;	/* number of ticks to delay scrub */
int zfs_scan_idle = 50;		/* idle window in clock ticks */

int zfs_scan_min_time_ms = 1000; /* min millisecs to scrub per txg */
int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */
int zfs_resilver_min_time_ms = 3000; /* min millisecs to resilver per txg */
boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
int dsl_scan_delay_completion = B_FALSE; /* set to delay scan completion */
/* max number of blocks to free in a single TXG */
uint64_t zfs_free_max_blocks = UINT64_MAX;

#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)

extern int zfs_txg_timeout;

/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
	NULL,
	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
};
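
/*
 * Set up the in-core scan state when the pool is loaded: read any
 * on-disk scan state from the MOS and, if an interrupted or old-style
 * scan is found, arrange for it to be restarted at the given txg.
 */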
int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
	int err;
	dsl_scan_t *scn;
	spa_t *spa = dp->dp_spa;
	uint64_t f;

	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
	scn->scn_dp = dp;

	/*
	 * It's possible that we're resuming a scan after a reboot so
	 * make sure that the scan_async_destroying flag is initialized
	 * appropriately.
	 */
	ASSERT(!scn->scn_async_destroying);
	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
	    SPA_FEATURE_ASYNC_DESTROY);

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    "scrub_func", sizeof (uint64_t), 1, &f);
	if (err == 0) {
		/*
		 * There was an old-style scrub in progress.  Restart a
		 * new-style scrub from the beginning.
		 */
		scn->scn_restart_txg = txg;
		zfs_dbgmsg("old-style scrub was in progress; "
		    "restarting new-style scrub in txg %llu",
		    scn->scn_restart_txg);

		/*
		 * Load the queue obj from the old location so that it
		 * can be freed by dsl_scan_done().
		 */
		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    "scrub_queue", sizeof (uint64_t), 1,
		    &scn->scn_phys.scn_queue_obj);
	} else {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys);
		if (err == ENOENT)
			return (0);
		else if (err)
			return (err);

		if (scn->scn_phys.scn_state == DSS_SCANNING &&
		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool, and the pool was accessed by old
			 * software.  Restart from the beginning, since
			 * the old software may have changed the pool in
			 * the meantime.
			 */
			scn->scn_restart_txg = txg;
			zfs_dbgmsg("new-style scrub was modified "
			    "by old software; restarting in txg %llu",
			    scn->scn_restart_txg);
		}
	}

	spa_scan_stat_init(spa);
	return (0);
}

void
dsl_scan_fini(dsl_pool_t *dp)
{
	if (dp->dp_scan) {
		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
		dp->dp_scan = NULL;
	}
}
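
/*
 * The check/sync pair below implements the sync task that starts a new
 * scan; it is dispatched from dsl_scan() at the bottom of this file.
 */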
/* ARGSUSED */
static int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (scn->scn_phys.scn_state == DSS_SCANNING)
		return (SET_ERROR(EBUSY));

	return (0);
}

static void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dmu_object_type_t ot = 0;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(scn->scn_phys.scn_state != DSS_SCANNING);
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
	bzero(&scn->scn_phys, sizeof (scn->scn_phys));
	scn->scn_phys.scn_func = *funcp;
	scn->scn_phys.scn_state = DSS_SCANNING;
	scn->scn_phys.scn_min_txg = 0;
	scn->scn_phys.scn_max_txg = tx->tx_txg;
	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
	scn->scn_phys.scn_start_time = gethrestime_sec();
	scn->scn_phys.scn_errors = 0;
	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
	scn->scn_restart_txg = 0;
	scn->scn_done_txg = 0;
	spa_scan_stat_init(spa);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

		/* rewrite all disk labels */
		vdev_config_dirty(spa->spa_root_vdev);

		if (vdev_resilver_needed(spa->spa_root_vdev,
		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
			spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_START);
		} else {
			spa_event_notify(spa, NULL, ESC_ZFS_SCRUB_START);
		}

		spa->spa_scrub_started = B_TRUE;
		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;
	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
	}
	bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

	dsl_scan_sync_state(scn, tx);

	spa_history_log_internal(spa, "scan setup", tx,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, scn->scn_phys.scn_min_txg, scn->scn_phys.scn_max_txg);
}
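
/*
 * Tear down scan state at the end of a scan.  "complete" distinguishes
 * a scan that finished normally from one that was canceled or is being
 * restarted.
 */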
/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	static const char *old_names[] = {
		"scrub_bookmark",
		"scrub_ddt_bookmark",
		"scrub_ddt_class_max",
		"scrub_queue",
		"scrub_min_txg",
		"scrub_max_txg",
		"scrub_func",
		"scrub_errors",
		NULL
	};

	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int i;

	/* Remove any remnants of an old-style scrub. */
	for (i = 0; old_names[i]; i++) {
		(void) zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
	}

	if (scn->scn_phys.scn_queue_obj != 0) {
		VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, tx));
		scn->scn_phys.scn_queue_obj = 0;
	}

	/*
	 * If we were "restarted" from a stopped state, don't bother
	 * with anything else.
	 */
	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (complete)
		scn->scn_phys.scn_state = DSS_FINISHED;
	else
		scn->scn_phys.scn_state = DSS_CANCELED;

	spa_history_log_internal(spa, "scan done", tx,
	    "complete=%u", complete);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
		spa->spa_scrub_started = B_FALSE;
		spa->spa_scrub_active = B_FALSE;

		/*
		 * If the scrub/resilver completed, update all DTLs to
		 * reflect this.  Whether it succeeded or not, vacate
		 * all temporary scrub DTLs.
		 */
		vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
		    complete ? scn->scn_phys.scn_max_txg : 0, B_TRUE);
		if (complete) {
			spa_event_notify(spa, NULL, scn->scn_phys.scn_min_txg ?
			    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
		}
		spa_errlog_rotate(spa);

		/*
		 * We may have finished replacing a device.
		 * Let the async thread assess this and handle the detach.
		 */
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
	}

	scn->scn_phys.scn_end_time = gethrestime_sec();
}

/* ARGSUSED */
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return (SET_ERROR(ENOENT));
	return (0);
}

/* ARGSUSED */
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	dsl_scan_done(scn, B_FALSE, tx);
	dsl_scan_sync_state(scn, tx);
}

int
dsl_scan_cancel(dsl_pool_t *dp)
{
	return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
	    dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}

static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx);
static void dsl_scan_visitdnode(dsl_scan_t *, dsl_dataset_t *ds,
    dmu_objset_type_t ostype,
    dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);

void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
	zio_free(dp->dp_spa, txg, bp);
}

void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
	ASSERT(dsl_pool_sync_context(dp));
	zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
}

static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
	uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
	if (ds->ds_is_snapshot)
		return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
	return (smt);
}

static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
{
	VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
	    &scn->scn_phys, tx));
}

extern int zfs_vdev_async_write_active_min_dirty_percent;
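
/*
 * Decide whether the scan should yield for this txg.  If so, record the
 * bookmark at which to resume and set scn_pausing.
 */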
static boolean_t
dsl_scan_check_pause(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
	/* we never skip user/group accounting objects */
	if (zb && (int64_t)zb->zb_object < 0)
		return (B_FALSE);

	if (scn->scn_pausing)
		return (B_TRUE); /* we're already pausing */

	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 blocks. */
	if (zb && zb->zb_level != 0)
		return (B_FALSE);

	/*
	 * We pause if:
	 *  - we have scanned for the maximum time: an entire txg
	 *    timeout (default 5 sec)
	 *  or
	 *  - we have scanned for at least the minimum time (default 1 sec
	 *    for scrub, 3 sec for resilver), and either we have sufficient
	 *    dirty data that we are starting to write more quickly
	 *    (default 30%), or someone is explicitly waiting for this txg
	 *    to complete.
	 *  or
	 *  - the spa is shutting down because this pool is being exported
	 *    or the machine is rebooting.
	 */
	int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scan_min_time_ms;
	uint64_t elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
	if (elapsed_nanosecs / NANOSEC >= zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > mintime &&
	    (txg_sync_waiting(scn->scn_dp) ||
	    dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa)) {
		if (zb) {
			dprintf("pausing at bookmark %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			scn->scn_phys.scn_bookmark = *zb;
		}
		dprintf("pausing at DDT bookmark %llx/%llx/%llx/%llx\n",
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
		scn->scn_pausing = B_TRUE;
		return (B_TRUE);
	}
	return (B_FALSE);
}

typedef struct zil_scan_arg {
	dsl_pool_t	*zsa_dp;
	zil_header_t	*zsa_zh;
} zil_scan_arg_t;

/* ARGSUSED */
static int
dsl_scan_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zil_scan_arg_t *zsa = arg;
	dsl_pool_t *dp = zsa->zsa_dp;
	dsl_scan_t *scn = dp->dp_scan;
	zil_header_t *zh = zsa->zsa_zh;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return (0);

	/*
	 * One block ("stubby") can be allocated a long time ago; we
	 * want to visit that one because it has been allocated
	 * (on-disk) even if it hasn't been claimed (even though for
	 * scrub there's nothing to do to it).
	 */
	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
		return (0);

	SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	return (0);
}

/* ARGSUSED */
static int
dsl_scan_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_scan_arg_t *zsa = arg;
		dsl_pool_t *dp = zsa->zsa_dp;
		dsl_scan_t *scn = dp->dp_scan;
		zil_header_t *zh = zsa->zsa_zh;
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp) ||
		    bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
			return (0);

		/*
		 * birth can be < claim_txg if this record's txg is
		 * already txg sync'ed (but this log block contains
		 * other records that are not synced)
		 */
		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
		    lr->lr_foid, ZB_ZIL_LEVEL,
		    lr->lr_offset / BP_GET_LSIZE(bp));

		VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	}
	return (0);
}

static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_scan_arg_t zsa = { dp, zh };
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed (or, in read-only mode, blocks that *would* be claimed).
	 */
	if (claim_txg == 0 && spa_writeable(dp->dp_spa))
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);

	(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
	    claim_txg);

	zil_free(zilog);
}
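
/*
 * Issue a speculative arc_read() so that an indirect or dnode block is
 * likely to be cached by the time the traversal descends into it.
 */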
/* ARGSUSED */
static void
dsl_scan_prefetch(dsl_scan_t *scn, arc_buf_t *buf, blkptr_t *bp,
    uint64_t objset, uint64_t object, uint64_t blkid)
{
	zbookmark_phys_t czb;
	arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

	if (zfs_no_scrub_prefetch)
		return;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_min_txg ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE))
		return;

	SET_BOOKMARK(&czb, objset, object, BP_GET_LEVEL(bp), blkid);

	(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa, bp,
	    NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD, &flags, &czb);
}

static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	/*
	 * We never skip over user/group accounting objects (obj<0)
	 */
	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
	    (int64_t)zb->zb_object >= 0) {
		/*
		 * If we already visited this bp & everything below (in
		 * a prior txg sync), don't bother doing it again.
		 */
		if (zbookmark_is_before(dnp, zb, &scn->scn_phys.scn_bookmark))
			return (B_TRUE);

		/*
		 * If we found the block we're trying to resume from, or
		 * we went past it to a different object, zero it out to
		 * indicate that it's OK to start checking for pausing
		 * again.
		 */
		if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
		    zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
			dprintf("resuming at %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
		}
	}
	return (B_FALSE);
}

/*
 * Return nonzero on i/o error, in which case the caller aborts its scan
 * of this subtree.
 */
static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, const blkptr_t *bp,
    const zbookmark_phys_t *zb, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
	int err;

	if (BP_GET_LEVEL(bp) > 0) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			dsl_scan_prefetch(scn, buf, cbp, zb->zb_objset,
			    zb->zb_object, zb->zb_blkid * epb + i);
		}
		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			zbookmark_phys_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			dsl_scan_visitbp(cbp, &czb, dnp,
			    ds, scn, ostype, tx);
		}
		(void) arc_buf_remove_ref(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		dnode_phys_t *cdnp;
		int i, j;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cdnp = buf->b_data; i < epb; i++, cdnp++) {
			for (j = 0; j < cdnp->dn_nblkptr; j++) {
				blkptr_t *cbp = &cdnp->dn_blkptr[j];
				dsl_scan_prefetch(scn, buf, cbp,
				    zb->zb_objset, zb->zb_blkid * epb + i, j);
			}
		}
		for (i = 0, cdnp = buf->b_data; i < epb; i++, cdnp++) {
			dsl_scan_visitdnode(scn, ds, ostype,
			    cdnp, zb->zb_blkid * epb + i, tx);
		}

		(void) arc_buf_remove_ref(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}

		osp = buf->b_data;

		dsl_scan_visitdnode(scn, ds, osp->os_type,
		    &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			/*
			 * We also always visit user/group accounting
			 * objects, and never skip them, even if we are
			 * pausing.  This is necessary so that the space
			 * deltas from this txg get integrated.
			 */
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_groupused_dnode,
			    DMU_GROUPUSED_OBJECT, tx);
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_userused_dnode,
			    DMU_USERUSED_OBJECT, tx);
		}
		(void) arc_buf_remove_ref(buf, &buf);
	}

	return (0);
}

static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
    dmu_objset_type_t ostype, dnode_phys_t *dnp,
    uint64_t object, dmu_tx_t *tx)
{
	int j;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		zbookmark_phys_t czb;

		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    dnp->dn_nlevels - 1, j);
		dsl_scan_visitbp(&dnp->dn_blkptr[j],
		    &czb, dnp, ds, scn, ostype, tx);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		zbookmark_phys_t czb;
		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    0, DMU_SPILL_BLKID);
		dsl_scan_visitbp(&dnp->dn_spill,
		    &czb, dnp, ds, scn, ostype, tx);
	}
}

/*
 * The arguments are in this order because mdb can only print the
 * first 5; we want them to be useful.
 */
static void
dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	arc_buf_t *buf = NULL;
	blkptr_t bp_toread = *bp;

	/* ASSERT(pbuf == NULL || arc_released(pbuf)); */

	if (dsl_scan_check_pause(scn, zb))
		return;

	if (dsl_scan_check_resume(scn, dnp, zb))
		return;

	if (BP_IS_HOLE(bp))
		return;

	scn->scn_visited_this_txg++;

	dprintf_bp(bp,
	    "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p",
	    ds, ds ? ds->ds_object : 0,
	    zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
	    bp);

	if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return;

	if (dsl_scan_recurse(scn, ds, ostype, dnp, &bp_toread, zb, tx) != 0)
		return;

	/*
	 * If dsl_scan_ddt() has already visited this block, it will have
	 * already done any translations or scrubbing, so don't call the
	 * callback again.
	 */
	if (ddt_class_contains(dp->dp_spa,
	    scn->scn_phys.scn_ddt_class_max, bp)) {
		ASSERT(buf == NULL);
		return;
	}

	/*
	 * If this block is from the future (after cur_max_txg), then we
	 * are doing this on behalf of a deleted snapshot, and we will
	 * revisit the future block on the next pass of this dataset.
	 * Don't scan it now unless we need to because something
	 * under it was modified.
	 */
	if (BP_PHYSICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_max_txg) {
		scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
	}
}

static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_tx_t *tx)
{
	zbookmark_phys_t zb;

	SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	dsl_scan_visitbp(bp, &zb, NULL,
	    ds, scn, DMU_OST_NONE, tx);

	dprintf_ds(ds, "finished scan%s", "");
}
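
/*
 * The three hooks below keep the scan's bookmark and work queue
 * consistent when datasets are destroyed, snapshotted, or clone-swapped
 * while a scan is in progress.
 */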
void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
		if (ds->ds_is_snapshot) {
			/* Note, scn_cur_{min,max}_txg stays the same. */
			scn->scn_phys.scn_bookmark.zb_objset =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset zb_objset to %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
			scn->scn_phys.scn_flags |= DSF_VISIT_DS_AGAIN;
		} else {
			SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
			    ZB_DESTROYED_OBJSET, 0, 0, 0);
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset bookmark to -1,0,0,0",
			    (u_longlong_t)ds->ds_object);
		}
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
		ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		if (ds->ds_is_snapshot) {
			/*
			 * We keep the same mintxg; it could be >
			 * ds_creation_txg if the previous snapshot was
			 * deleted too.
			 */
			VERIFY(zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    dsl_dataset_phys(ds)->ds_next_snap_obj,
			    mintxg, tx) == 0);
			zfs_dbgmsg("destroying ds %llu; in queue; "
			    "replacing with %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
		} else {
			zfs_dbgmsg("destroying ds %llu; in queue; removing",
			    (u_longlong_t)ds->ds_object);
		}
	} else {
		zfs_dbgmsg("destroying ds %llu; ignoring",
		    (u_longlong_t)ds->ds_object);
	}

	/*
	 * dsl_scan_sync() should be called after this, and should sync
	 * out our changed state, but just to be safe, do it here.
	 */
	dsl_scan_sync_state(scn, tx);
}

void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);

	if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset =
		    dsl_dataset_phys(ds)->ds_prev_snap_obj;
		zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
		zfs_dbgmsg("snapshotting ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	}
	dsl_scan_sync_state(scn, tx);
}

void
dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds1->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_phys.scn_bookmark.zb_objset == ds1->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset = ds2->ds_object;
		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds1->ds_object,
		    (u_longlong_t)ds2->ds_object);
	} else if (scn->scn_phys.scn_bookmark.zb_objset == ds2->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset = ds1->ds_object;
		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds2->ds_object,
		    (u_longlong_t)ds1->ds_object);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds1->ds_object, &mintxg) == 0) {
		int err;

		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
		err = zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg, tx);
		VERIFY(err == 0 || err == EEXIST);
		if (err == EEXIST) {
			/* Both were there to begin with */
			VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    ds1->ds_object, mintxg, tx));
		}
		zfs_dbgmsg("clone_swap ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds1->ds_object,
		    (u_longlong_t)ds2->ds_object);
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg) == 0) {
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
		VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg, tx));
		zfs_dbgmsg("clone_swap ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds2->ds_object,
		    (u_longlong_t)ds1->ds_object);
	}

	dsl_scan_sync_state(scn, tx);
}

struct enqueue_clones_arg {
	dmu_tx_t *tx;
	uint64_t originobj;
};

/* ARGSUSED */
static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	struct enqueue_clones_arg *eca = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != eca->originobj)
		return (0);

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != eca->originobj) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);

		dsl_dataset_rele(ds, FTAG);
		if (err)
			return (err);
		ds = prev;
	}
	VERIFY(zap_add_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg, eca->tx) == 0);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}
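
/*
 * Scan a single dataset: traverse its ZIL (for a scrub/resilver of a
 * head dataset), visit its root block pointer, and then enqueue its
 * next snapshot and any clones for later passes.
 */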
static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	dsl_dataset_t *ds;
	objset_t *os;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	if (dmu_objset_from_ds(ds, &os))
		goto out;

	/*
	 * Only the ZIL in the head (non-snapshot) is valid.  Even though
	 * snapshots can have ZIL block pointers (which may be the same
	 * BP as in the head), they must be ignored.  So we traverse the
	 * ZIL here, rather than in scan_recurse(), because the regular
	 * snapshot block-sharing rules don't apply to it.
	 */
	if (DSL_SCAN_IS_SCRUB_RESILVER(scn) && !ds->ds_is_snapshot)
		dsl_scan_zil(dp, &os->os_zil_header);

	/*
	 * Iterate over the bps in this ds.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);

	char *dsname = kmem_alloc(ZFS_MAXNAMELEN, KM_SLEEP);
	dsl_dataset_name(ds, dsname);
	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
	    "pausing=%u",
	    (longlong_t)dsobj, dsname,
	    (longlong_t)scn->scn_phys.scn_cur_min_txg,
	    (longlong_t)scn->scn_phys.scn_cur_max_txg,
	    (int)scn->scn_pausing);
	kmem_free(dsname, ZFS_MAXNAMELEN);

	if (scn->scn_pausing)
		goto out;

	/*
	 * We've finished this pass over this dataset.
	 */

	/*
	 * If we did not completely visit this dataset, do another pass.
	 */
	if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
		zfs_dbgmsg("incomplete pass; visiting again");
		scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object,
		    scn->scn_phys.scn_cur_max_txg, tx) == 0);
		goto out;
	}

	/*
	 * Add descendent datasets to work queue.
	 */
	if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj,
		    dsl_dataset_phys(ds)->ds_next_snap_obj,
		    dsl_dataset_phys(ds)->ds_creation_txg, tx) == 0);
	}
	if (dsl_dataset_phys(ds)->ds_num_children > 1) {
		boolean_t usenext = B_FALSE;
		if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
			uint64_t count;
			/*
			 * A bug in a previous version of the code could
			 * cause upgrade_clones_cb() to not set
			 * ds_next_snap_obj when it should, leading to a
			 * missing entry.  Therefore we can only use the
			 * next_clones_obj when its count is correct.
			 */
			int err = zap_count(dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
			if (err == 0 &&
			    count == dsl_dataset_phys(ds)->ds_num_children - 1)
				usenext = B_TRUE;
		}

		if (usenext) {
			VERIFY0(zap_join_key(dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj,
			    scn->scn_phys.scn_queue_obj,
			    dsl_dataset_phys(ds)->ds_creation_txg, tx));
		} else {
			struct enqueue_clones_arg eca;
			eca.tx = tx;
			eca.originobj = ds->ds_object;

			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_clones_cb, &eca, DS_FIND_CHILDREN));
		}
	}

out:
	dsl_dataset_rele(ds, FTAG);
}

/* ARGSUSED */
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		/*
		 * If this is a clone, we don't need to worry about it for now.
		 */
		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			dsl_dataset_rele(prev, FTAG);
			return (0);
		}
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
	}

	VERIFY(zap_add_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, dsl_dataset_phys(ds)->ds_prev_snap_txg, tx) == 0);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

/*
 * Scrub/dedup interaction.
 *
 * If there are N references to a deduped block, we don't want to scrub it
 * N times -- ideally, we should scrub it exactly once.
 *
 * We leverage the fact that the dde's replication class (enum ddt_class)
 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
 *
 * To prevent excess scrubbing, the scrub begins by walking the DDT
 * to find all blocks with refcnt > 1, and scrubs each of these once.
 * Since there are two replication classes which contain blocks with
 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
 *
 * There would be nothing more to say if a block's refcnt couldn't change
 * during a scrub, but of course it can so we must account for changes
 * in a block's replication class.
 *
 * Here's an example of what can occur:
 *
 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
 * when visited during the top-down scrub phase, it will be scrubbed twice.
 * This negates our scrub optimization, but is otherwise harmless.
 *
 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
 * on each visit during the top-down scrub phase, it will never be scrubbed.
 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
 * reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
 * while a scrub is in progress, it scrubs the block right then.
 */
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
	ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
	ddt_entry_t dde = { 0 };
	int error;
	uint64_t n = 0;

	while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
		ddt_t *ddt;

		if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
			break;
		dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
		    (longlong_t)ddb->ddb_class,
		    (longlong_t)ddb->ddb_type,
		    (longlong_t)ddb->ddb_checksum,
		    (longlong_t)ddb->ddb_cursor);

		/* There should be no pending changes to the dedup table */
		ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
		ASSERT(avl_first(&ddt->ddt_tree) == NULL);

		dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
		n++;

		if (dsl_scan_check_pause(scn, NULL))
			break;
	}

	zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; pausing=%u",
	    (longlong_t)n, (int)scn->scn_phys.scn_ddt_class_max,
	    (int)scn->scn_pausing);

	ASSERT(error == 0 || error == ENOENT);
	ASSERT(error != ENOENT ||
	    ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}

/* ARGSUSED */
void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
	const ddt_key_t *ddk = &dde->dde_key;
	ddt_phys_t *ddp = dde->dde_phys;
	blkptr_t bp;
	zbookmark_phys_t zb = { 0 };

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (ddp->ddp_phys_birth == 0 ||
		    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
			continue;
		ddt_bp_create(checksum, ddk, ddp, &bp);

		scn->scn_visited_this_txg++;
		scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
	}
}
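
/*
 * Perform one pass of the traversal: finish the DDT phase if it is
 * still in progress, then work through the MOS and the
 * zap-object-as-queue of datasets until we run out of work or pause.
 */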
static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	zap_cursor_t zc;
	zap_attribute_t za;

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_ddt(scn, tx);
		if (scn->scn_pausing)
			return;
	}

	if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
		/* First do the MOS & ORIGIN */

		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_visit_rootbp(scn, NULL,
		    &dp->dp_meta_rootbp, tx);
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
		if (scn->scn_pausing)
			return;

		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_cb, tx, DS_FIND_CHILDREN));
		} else {
			dsl_scan_visitds(scn,
			    dp->dp_origin_snap->ds_object, tx);
		}
		ASSERT(!scn->scn_pausing);
	} else if (scn->scn_phys.scn_bookmark.zb_objset !=
	    ZB_DESTROYED_OBJSET) {
		/*
		 * If we were paused, continue from here.  Note if the
		 * ds we were paused on was deleted, the zb_objset may
		 * be -1, so we will skip this and find a new objset
		 * below.
		 */
		dsl_scan_visitds(scn, scn->scn_phys.scn_bookmark.zb_objset, tx);
		if (scn->scn_pausing)
			return;
	}

	/*
	 * In case we were paused right at the end of the ds, zero the
	 * bookmark so we don't think that we're still trying to resume.
	 */
	bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t));

	/* keep pulling things out of the zap-object-as-queue */
	while (zap_cursor_init(&zc, dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj),
	    zap_cursor_retrieve(&zc, &za) == 0) {
		dsl_dataset_t *ds;
		uint64_t dsobj;

		dsobj = strtonum(za.za_name, NULL);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, dsobj, tx));

		/* Set up min/max txg */
		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		if (za.za_first_integer != 0) {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    za.za_first_integer);
		} else {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg);
		}
		scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
		dsl_dataset_rele(ds, FTAG);

		dsl_scan_visitds(scn, dsobj, tx);
		zap_cursor_fini(&zc);
		if (scn->scn_pausing)
			return;
	}
	zap_cursor_fini(&zc);
}

static boolean_t
dsl_scan_free_should_pause(dsl_scan_t *scn)
{
	uint64_t elapsed_nanosecs;

	if (zfs_recover)
		return (B_FALSE);

	if (scn->scn_visited_this_txg >= zfs_free_max_blocks)
		return (B_TRUE);

	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > zfs_free_min_time_ms &&
	    txg_sync_waiting(scn->scn_dp)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}

static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg;

	if (!scn->scn_is_bptree ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
		if (dsl_scan_free_should_pause(scn))
			return (SET_ERROR(ERESTART));
	}

	zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
	    dmu_tx_get_txg(tx), bp, 0));
	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
	    -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
	scn->scn_visited_this_txg++;
	return (0);
}

boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	uint64_t used = 0, comp, uncomp;

	if (spa->spa_load_state != SPA_LOAD_NONE)
		return (B_FALSE);
	if (spa_shutting_down(spa))
		return (B_FALSE);
	if (scn->scn_phys.scn_state == DSS_SCANNING ||
	    (scn->scn_async_destroying && !scn->scn_async_stalled))
		return (B_TRUE);

	if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
		    &used, &comp, &uncomp);
	}
	return (used != 0);
}
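
/*
 * Called from spa_sync() once per txg.  Processes pending frees from
 * async destroys first, then advances an in-progress scan for as long
 * as dsl_scan_check_pause() allows.
 */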
void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	int err = 0;

	/*
	 * Check for scn_restart_txg before checking spa_load_state, so
	 * that we can restart an old-style scan while the pool is being
	 * imported (see dsl_scan_init).
	 */
	if (scn->scn_restart_txg != 0 &&
	    scn->scn_restart_txg <= tx->tx_txg) {
		pool_scan_func_t func = POOL_SCAN_SCRUB;
		dsl_scan_done(scn, B_FALSE, tx);
		if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
			func = POOL_SCAN_RESILVER;
		zfs_dbgmsg("restarting scan func=%u txg=%llu",
		    func, tx->tx_txg);
		dsl_scan_setup_sync(&func, tx);
	}

	/*
	 * If the scan is inactive due to a stalled async destroy, try again.
	 */
	if ((!scn->scn_async_stalled && !dsl_scan_active(scn)) ||
	    spa_sync_pass(dp->dp_spa) > 1)
		return;

	scn->scn_visited_this_txg = 0;
	scn->scn_pausing = B_FALSE;
	scn->scn_sync_start_time = gethrtime();
	spa->spa_scrub_active = B_TRUE;

	/*
	 * First process the async destroys.  If we pause, don't do
	 * any scrubbing or resilvering.  This ensures that there are no
	 * async destroys while we are scanning, so the scan code doesn't
	 * have to worry about traversing it.  It is also faster to free the
	 * blocks than to scrub them.
	 */
	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		scn->scn_is_bptree = B_FALSE;
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bpobj_iterate(&dp->dp_free_bpobj,
		    dsl_scan_free_block_cb, scn, tx);
		VERIFY3U(0, ==, zio_wait(scn->scn_zio_root));

		if (err != 0 && err != ERESTART)
			zfs_panic_recover("error %u from bpobj_iterate()", err);
	}

	if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
		ASSERT(scn->scn_async_destroying);
		scn->scn_is_bptree = B_TRUE;
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bptree_iterate(dp->dp_meta_objset,
		    dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
		VERIFY0(zio_wait(scn->scn_zio_root));

		if (err == EIO || err == ECKSUM) {
			err = 0;
		} else if (err != 0 && err != ERESTART) {
			zfs_panic_recover("error %u from "
			    "traverse_dataset_destroyed()", err);
		}

		if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
			/* finished; deactivate async destroy feature */
			spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
			ASSERT(!spa_feature_is_active(spa,
			    SPA_FEATURE_ASYNC_DESTROY));
			VERIFY0(zap_remove(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, tx));
			VERIFY0(bptree_free(dp->dp_meta_objset,
			    dp->dp_bptree_obj, tx));
			dp->dp_bptree_obj = 0;
			scn->scn_async_destroying = B_FALSE;
			scn->scn_async_stalled = B_FALSE;
		} else {
			/*
			 * If we didn't make progress, mark the async
			 * destroy as stalled, so that we will not initiate
			 * a spa_sync() on its behalf.  Note that we only
			 * check this if we are not finished, because if the
			 * bptree had no blocks for us to visit, we can
			 * finish without "making progress".
			 */
			scn->scn_async_stalled =
			    (scn->scn_visited_this_txg == 0);
		}
	}
	if (scn->scn_visited_this_txg) {
		zfs_dbgmsg("freed %llu blocks in %llums from "
		    "free_bpobj/bptree txg %llu; err=%u",
		    (longlong_t)scn->scn_visited_this_txg,
		    (longlong_t)
		    NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
		    (longlong_t)tx->tx_txg, err);
		scn->scn_visited_this_txg = 0;

		/*
		 * Write out changes to the DDT that may be required as a
		 * result of the blocks freed.  This ensures that the DDT
		 * is clean when a scrub/resilver runs.
		 */
		ddt_sync(spa, tx->tx_txg);
	}
	if (err != 0)
		return;
	if (!scn->scn_async_destroying && zfs_free_leak_on_eio &&
	    (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
		/*
		 * We have finished background destroying, but there is still
		 * some space left in the dp_free_dir. Transfer this leaked
		 * space to the dp_leak_dir.
		 */
		if (dp->dp_leak_dir == NULL) {
			rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
			(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
			    LEAK_DIR_NAME, tx);
			VERIFY0(dsl_pool_open_special_dir(dp,
			    LEAK_DIR_NAME, &dp->dp_leak_dir));
			rrw_exit(&dp->dp_config_rwlock, FTAG);
		}
		dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
		    dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
	}
	if (!scn->scn_async_destroying) {
		/* finished; verify that space accounting went to zero */
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
	}

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_done_txg == tx->tx_txg) {
		ASSERT(!scn->scn_pausing);
		/* finished with scan. */
		zfs_dbgmsg("txg %llu scan complete", tx->tx_txg);
		dsl_scan_done(scn, B_TRUE, tx);
		ASSERT3U(spa->spa_scrub_inflight, ==, 0);
		dsl_scan_sync_state(scn, tx);
		return;
	}

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		zfs_dbgmsg("doing scan sync txg %llu; "
		    "ddt bm=%llu/%llu/%llu/%llx",
		    (longlong_t)tx->tx_txg,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
		ASSERT(scn->scn_phys.scn_bookmark.zb_objset == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_object == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_level == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_blkid == 0);
	} else {
		zfs_dbgmsg("doing scan sync txg %llu; bm=%llu/%llu/%llu/%llu",
		    (longlong_t)tx->tx_txg,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_objset,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_object,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_level,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_blkid);
	}

	scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
	    NULL, ZIO_FLAG_CANFAIL);
	dsl_pool_config_enter(dp, FTAG);
	dsl_scan_visit(scn, tx);
	dsl_pool_config_exit(dp, FTAG);
	(void) zio_wait(scn->scn_zio_root);
	scn->scn_zio_root = NULL;

	zfs_dbgmsg("visited %llu blocks in %llums",
	    (longlong_t)scn->scn_visited_this_txg,
	    (longlong_t)NSEC2MSEC(gethrtime() - scn->scn_sync_start_time));

	if (!scn->scn_pausing) {
		scn->scn_done_txg = tx->tx_txg + 1;
		zfs_dbgmsg("txg %llu traversal complete, waiting till txg %llu",
		    tx->tx_txg, scn->scn_done_txg);
	}

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
	}

	dsl_scan_sync_state(scn, tx);
}

/*
 * This will start a new scan, or restart an existing one.
 */
void
dsl_resilver_restart(dsl_pool_t *dp, uint64_t txg)
{
	if (txg == 0) {
		dmu_tx_t *tx;
		tx = dmu_tx_create_dd(dp->dp_mos_dir);
		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

		txg = dmu_tx_get_txg(tx);
		dp->dp_scan->scn_restart_txg = txg;
		dmu_tx_commit(tx);
	} else {
		dp->dp_scan->scn_restart_txg = txg;
	}
	zfs_dbgmsg("restarting resilver txg=%llu", txg);
}

boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
	return (dp->dp_scan->scn_phys.scn_state == DSS_SCANNING &&
	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}

/*
 * scrub consumers
 */

static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
	int i;

	/*
	 * If we resume after a reboot, zab will be NULL; don't record
	 * incomplete stats in that case.
	 */
	if (zab == NULL)
		return;

	for (i = 0; i < 4; i++) {
		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
		if (t & DMU_OT_NEWTYPE)
			t = DMU_OT_OTHER;
		zfs_blkstat_t *zb = &zab->zab_type[l][t];
		int equal;

		zb->zb_count++;
		zb->zb_asize += BP_GET_ASIZE(bp);
		zb->zb_lsize += BP_GET_LSIZE(bp);
		zb->zb_psize += BP_GET_PSIZE(bp);
		zb->zb_gangs += BP_COUNT_GANG(bp);

		switch (BP_GET_NDVAS(bp)) {
		case 2:
			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1]))
				zb->zb_ditto_2_of_2_samevdev++;
			break;
		case 3:
			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1])) +
			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2])) +
			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2]));
			if (equal == 1)
				zb->zb_ditto_2_of_3_samevdev++;
			else if (equal == 3)
				zb->zb_ditto_3_of_3_samevdev++;
			break;
		}
	}
}

static void
dsl_scan_scrub_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	zio_data_buf_free(zio->io_data, zio->io_size);

	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_inflight--;
	cv_broadcast(&spa->spa_scrub_io_cv);

	if (zio->io_error && (zio->io_error != ECKSUM ||
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
		spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors++;
	}
	mutex_exit(&spa->spa_scrub_lock);
}
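
/*
 * Per-block callback for scrub and resilver: record progress, decide
 * whether the block needs I/O (for resilver, only if a DTL says it is
 * missing), throttle against recent user I/O, and issue the read.
 */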
static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	dsl_scan_t *scn = dp->dp_scan;
	size_t size = BP_GET_PSIZE(bp);
	spa_t *spa = dp->dp_spa;
	uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
	boolean_t needs_io;
	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
	int scan_delay = 0;

	if (phys_birth <= scn->scn_phys.scn_min_txg ||
	    phys_birth >= scn->scn_phys.scn_max_txg)
		return (0);

	count_block(dp->dp_blkstats, bp);

	if (BP_IS_EMBEDDED(bp))
		return (0);

	ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
	if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
		zio_flags |= ZIO_FLAG_SCRUB;
		needs_io = B_TRUE;
		scan_delay = zfs_scrub_delay;
	} else {
		ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
		zio_flags |= ZIO_FLAG_RESILVER;
		needs_io = B_FALSE;
		scan_delay = zfs_resilver_delay;
	}

	/* If it's an intent log block, failure is expected. */
	if (zb->zb_level == ZB_ZIL_LEVEL)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
		vdev_t *vd = vdev_lookup_top(spa,
		    DVA_GET_VDEV(&bp->blk_dva[d]));

		/*
		 * Keep track of how much data we've examined so that
		 * zpool(1M) status can make useful progress reports.
		 */
		scn->scn_phys.scn_examined += DVA_GET_ASIZE(&bp->blk_dva[d]);
		spa->spa_scan_pass_exam += DVA_GET_ASIZE(&bp->blk_dva[d]);

		/* if it's a resilver, this may not be in the target range */
		if (!needs_io) {
			if (DVA_GET_GANG(&bp->blk_dva[d])) {
				/*
				 * Gang members may be spread across multiple
				 * vdevs, so the best estimate we have is the
				 * scrub range, which has already been checked.
				 * XXX -- it would be better to change our
				 * allocation policy to ensure that all
				 * gang members reside on the same vdev.
				 */
				needs_io = B_TRUE;
			} else {
				needs_io = vdev_dtl_contains(vd, DTL_PARTIAL,
				    phys_birth, 1);
			}
		}
	}

	if (needs_io && !zfs_no_scrub_io) {
		vdev_t *rvd = spa->spa_root_vdev;
		uint64_t maxinflight = rvd->vdev_children * zfs_top_maxinflight;
		void *data = zio_data_buf_alloc(size);

		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight >= maxinflight)
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		spa->spa_scrub_inflight++;
		mutex_exit(&spa->spa_scrub_lock);

		/*
		 * If we're seeing recent (zfs_scan_idle) "important" I/Os
		 * then throttle our workload to limit the impact of a scan.
		 */
		if (ddi_get_lbolt64() - spa->spa_last_io <= zfs_scan_idle)
			delay(scan_delay);

		zio_nowait(zio_read(NULL, spa, bp, data, size,
		    dsl_scan_scrub_done, NULL, ZIO_PRIORITY_SCRUB,
		    zio_flags, zb));
	}

	/* do not relocate this block */
	return (0);
}

int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
	spa_t *spa = dp->dp_spa;

	/*
	 * Purge all vdev caches and probe all devices.  We do this here
	 * rather than in sync context because this requires a writer lock
	 * on the spa_config lock, which we can't do from sync context.  The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_vdev_state_enter(spa, SCL_NONE);
	spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(spa->spa_root_vdev);
	spa->spa_scrub_reopen = B_FALSE;
	(void) spa_vdev_state_exit(spa, NULL, 0);

	return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_NONE));
}