/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_impl.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/callb.h>
#include <sys/zfeature.h>

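/*
 * Upper bound on the number of prefetch reads the prefetch thread may have
 * outstanding ahead of the traversal (see traverse_prefetcher()).
 */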
int zfs_pd_blks_max = 100;

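/* State shared between the traversal and the data-prefetch thread. */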
typedef struct prefetch_data {
        kmutex_t pd_mtx;
        kcondvar_t pd_cv;
        int pd_blks_max;
        int pd_blks_fetched;
        int pd_flags;
        boolean_t pd_cancel;
        boolean_t pd_exited;
} prefetch_data_t;

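/* Per-traversal state, passed down through traverse_visitbp(). */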
typedef struct traverse_data {
        spa_t *td_spa;
        uint64_t td_objset;
        blkptr_t *td_rootbp;
        uint64_t td_min_txg;
        zbookmark_phys_t *td_resume;
        int td_flags;
        prefetch_data_t *td_pfd;
        boolean_t td_paused;
        uint64_t td_hole_birth_enabled_txg;
        blkptr_cb_t *td_func;
        void *td_arg;
} traverse_data_t;

static int traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object);
static void prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *,
    uint64_t objset, uint64_t object);

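/*
 * zil_parse() callback for log blocks: hand each stable ZIL block to
 * td_func with a ZIL bookmark.
 */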
static int
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
        traverse_data_t *td = arg;
        zbookmark_phys_t zb;

        if (BP_IS_HOLE(bp))
                return (0);

        if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(td->td_spa))
                return (0);

        SET_BOOKMARK(&zb, td->td_objset, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
            bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

        (void) td->td_func(td->td_spa, zilog, bp, &zb, NULL, td->td_arg);

        return (0);
}

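/*
 * zil_parse() callback for log records: for TX_WRITE records, visit the
 * block pointer embedded in the record.
 */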
static int
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
        traverse_data_t *td = arg;

        if (lrc->lrc_txtype == TX_WRITE) {
                lr_write_t *lr = (lr_write_t *)lrc;
                blkptr_t *bp = &lr->lr_blkptr;
                zbookmark_phys_t zb;

                if (BP_IS_HOLE(bp))
                        return (0);

                if (claim_txg == 0 || bp->blk_birth < claim_txg)
                        return (0);

                SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
                    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

                (void) td->td_func(td->td_spa, zilog, bp, &zb, NULL,
                    td->td_arg);
        }
        return (0);
}

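/*
 * Visit the blocks referenced by this objset's intent log by walking it
 * with zil_parse().
 */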
static void
traverse_zil(traverse_data_t *td, zil_header_t *zh)
{
        uint64_t claim_txg = zh->zh_claim_txg;
        zilog_t *zilog;

        /*
         * We only want to visit blocks that have been claimed but not yet
         * replayed; plus, in read-only mode, blocks that are already stable.
         */
        if (claim_txg == 0 && spa_writeable(td->td_spa))
                return;

        zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);

        (void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
            claim_txg);

        zil_free(zilog);
}

typedef enum resume_skip {
        RESUME_SKIP_ALL,
        RESUME_SKIP_NONE,
        RESUME_SKIP_CHILDREN
} resume_skip_t;

/*
 * Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and
 * the block indicated by zb does not need to be visited at all. Returns
 * RESUME_SKIP_CHILDREN if we are resuming a post traversal and we reach the
 * resume point. This indicates that this block should be visited but not its
 * children (since they must have been visited in a previous traversal).
 * Otherwise returns RESUME_SKIP_NONE.
 */
static resume_skip_t
resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
        if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume)) {
                /*
                 * If we already visited this bp & everything below,
                 * don't bother doing it again.
                 */
                if (zbookmark_is_before(dnp, zb, td->td_resume))
                        return (RESUME_SKIP_ALL);

                /*
                 * If we found the block we're trying to resume from, zero
                 * the bookmark out to indicate that we have resumed.
                 */
                if (bcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
                        bzero(td->td_resume, sizeof (*zb));
                        if (td->td_flags & TRAVERSE_POST)
                                return (RESUME_SKIP_CHILDREN);
                }
        }
        return (RESUME_SKIP_NONE);
}

static void
traverse_prefetch_metadata(traverse_data_t *td,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
        arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

        if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
                return;
        /*
         * If we are in the process of resuming, don't prefetch, because
         * some children will not be needed (and in fact may have already
         * been freed).
         */
        if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume))
                return;
        if (BP_IS_HOLE(bp) || bp->blk_birth <= td->td_min_txg)
                return;
        if (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE)
                return;

        (void) arc_read(NULL, td->td_spa, bp, NULL, NULL,
            ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
}

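/*
 * Decide whether the prefetch thread should issue a read for this bp;
 * holes, embedded bps and intent log blocks are never prefetched.
 */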
static boolean_t
prefetch_needed(prefetch_data_t *pfd, const blkptr_t *bp)
{
        ASSERT(pfd->pd_flags & TRAVERSE_PREFETCH_DATA);
        if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) ||
            BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG)
                return (B_FALSE);
        return (B_TRUE);
}

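/*
 * Visit bp itself and then recurse into whatever it points to: child
 * indirect blocks, the dnodes in a dnode block, or the special dnodes of
 * an objset.  When data prefetching is enabled, throttle against the
 * prefetch thread via td_pfd.
 */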
static int
traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
        zbookmark_phys_t czb;
        int err = 0;
        arc_buf_t *buf = NULL;
        prefetch_data_t *pd = td->td_pfd;
        boolean_t hard = td->td_flags & TRAVERSE_HARD;

        switch (resume_skip_check(td, dnp, zb)) {
        case RESUME_SKIP_ALL:
                return (0);
        case RESUME_SKIP_CHILDREN:
                goto post;
        case RESUME_SKIP_NONE:
                break;
        default:
                ASSERT(0);
        }

        if (bp->blk_birth == 0) {
                /*
                 * Since this block has a birth time of 0 it must be a
                 * hole created before the SPA_FEATURE_HOLE_BIRTH
                 * feature was enabled.  If SPA_FEATURE_HOLE_BIRTH
                 * was enabled before the min_txg for this traversal we
                 * know the hole must have been created before the
                 * min_txg for this traversal, so we can skip it. If
                 * SPA_FEATURE_HOLE_BIRTH was enabled after the min_txg
                 * for this traversal we cannot tell if the hole was
                 * created before or after the min_txg for this
                 * traversal, so we cannot skip it.
                 */
                if (td->td_hole_birth_enabled_txg < td->td_min_txg)
                        return (0);
        } else if (bp->blk_birth <= td->td_min_txg) {
                return (0);
        }

        if (pd != NULL && !pd->pd_exited && prefetch_needed(pd, bp)) {
                mutex_enter(&pd->pd_mtx);
                ASSERT(pd->pd_blks_fetched >= 0);
                while (pd->pd_blks_fetched == 0 && !pd->pd_exited)
                        cv_wait(&pd->pd_cv, &pd->pd_mtx);
                pd->pd_blks_fetched--;
                cv_broadcast(&pd->pd_cv);
                mutex_exit(&pd->pd_mtx);
        }

        if (BP_IS_HOLE(bp)) {
                err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
                if (err != 0)
                        goto post;
                return (0);
        }

        if (td->td_flags & TRAVERSE_PRE) {
                err = td->td_func(td->td_spa, NULL, bp, zb, dnp,
                    td->td_arg);
                if (err == TRAVERSE_VISIT_NO_CHILDREN)
                        return (0);
                if (err != 0)
                        goto post;
        }

        if (BP_GET_LEVEL(bp) > 0) {
                arc_flags_t flags = ARC_FLAG_WAIT;
                int i;
                blkptr_t *cbp;
                int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

                err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
                if (err != 0)
                        goto post;
                cbp = buf->b_data;

                for (i = 0; i < epb; i++) {
                        SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
                            zb->zb_level - 1,
                            zb->zb_blkid * epb + i);
                        traverse_prefetch_metadata(td, &cbp[i], &czb);
                }

                /* recursively visitbp() blocks below this */
                for (i = 0; i < epb; i++) {
                        SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
                            zb->zb_level - 1,
                            zb->zb_blkid * epb + i);
                        err = traverse_visitbp(td, dnp, &cbp[i], &czb);
                        if (err != 0)
                                break;
                }
        } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
                arc_flags_t flags = ARC_FLAG_WAIT;
                int i;
                int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

                err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
                if (err != 0)
                        goto post;
                dnp = buf->b_data;

                for (i = 0; i < epb; i++) {
                        prefetch_dnode_metadata(td, &dnp[i], zb->zb_objset,
                            zb->zb_blkid * epb + i);
                }

                /* recursively visitbp() blocks below this */
                for (i = 0; i < epb; i++) {
                        err = traverse_dnode(td, &dnp[i], zb->zb_objset,
                            zb->zb_blkid * epb + i);
                        if (err != 0)
                                break;
                }
        } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
                arc_flags_t flags = ARC_FLAG_WAIT;
                objset_phys_t *osp;

                err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
                if (err != 0)
                        goto post;

                osp = buf->b_data;
                dnp = &osp->os_meta_dnode;
                prefetch_dnode_metadata(td, dnp, zb->zb_objset,
                    DMU_META_DNODE_OBJECT);
                if (arc_buf_size(buf) >= sizeof (objset_phys_t)) {
                        prefetch_dnode_metadata(td, &osp->os_groupused_dnode,
                            zb->zb_objset, DMU_GROUPUSED_OBJECT);
                        prefetch_dnode_metadata(td, &osp->os_userused_dnode,
                            zb->zb_objset, DMU_USERUSED_OBJECT);
                }

                err = traverse_dnode(td, dnp, zb->zb_objset,
                    DMU_META_DNODE_OBJECT);
                if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
                        dnp = &osp->os_groupused_dnode;
                        err = traverse_dnode(td, dnp, zb->zb_objset,
                            DMU_GROUPUSED_OBJECT);
                }
                if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
                        dnp = &osp->os_userused_dnode;
                        err = traverse_dnode(td, dnp, zb->zb_objset,
                            DMU_USERUSED_OBJECT);
                }
        }

        if (buf)
                (void) arc_buf_remove_ref(buf, &buf);

post:
        if (err == 0 && (td->td_flags & TRAVERSE_POST))
                err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);

        if (hard && (err == EIO || err == ECKSUM)) {
                /*
                 * Ignore this disk error as requested by the HARD flag,
                 * and continue traversal.
                 */
                err = 0;
        }

        /*
         * If we are stopping here, set td_resume.
         */
        if (td->td_resume != NULL && err != 0 && !td->td_paused) {
                td->td_resume->zb_objset = zb->zb_objset;
                td->td_resume->zb_object = zb->zb_object;
                td->td_resume->zb_level = 0;
                /*
                 * If we have stopped on an indirect block (e.g. due to
                 * i/o error), we have not visited anything below it.
                 * Set the bookmark to the first level-0 block that we need
                 * to visit.  This way, the resuming code does not need to
                 * deal with resuming from indirect blocks.
                 */
                td->td_resume->zb_blkid = zb->zb_blkid <<
                    (zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT));
                td->td_paused = B_TRUE;
        }

        return (err);
}

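/*
 * Issue metadata prefetches for every block pointer in a dnode, including
 * its spill block if one is present.
 */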
static void
prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
        int j;
        zbookmark_phys_t czb;

        for (j = 0; j < dnp->dn_nblkptr; j++) {
                SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
                traverse_prefetch_metadata(td, &dnp->dn_blkptr[j], &czb);
        }

        if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
                SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
                traverse_prefetch_metadata(td, &dnp->dn_spill, &czb);
        }
}

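/*
 * Visit every block pointer in a dnode (and its spill block, if any),
 * stopping at the first error.
 */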
static int
traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
        int j, err = 0;
        zbookmark_phys_t czb;

        for (j = 0; j < dnp->dn_nblkptr; j++) {
                SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
                err = traverse_visitbp(td, dnp, &dnp->dn_blkptr[j], &czb);
                if (err != 0)
                        break;
        }

        if (err == 0 && dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
                SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
                err = traverse_visitbp(td, dnp, &dnp->dn_spill, &czb);
        }
        return (err);
}

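/*
 * blkptr_cb_t used by the prefetch thread: issue a speculative arc_read()
 * for each block, keeping at most pd_blks_max reads outstanding.
 */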
static int
traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
        prefetch_data_t *pfd = arg;
        arc_flags_t aflags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

        ASSERT(pfd->pd_blks_fetched >= 0);
        if (pfd->pd_cancel)
                return (SET_ERROR(EINTR));

        if (!prefetch_needed(pfd, bp))
                return (0);

        mutex_enter(&pfd->pd_mtx);
        while (!pfd->pd_cancel && pfd->pd_blks_fetched >= pfd->pd_blks_max)
                cv_wait(&pfd->pd_cv, &pfd->pd_mtx);
        pfd->pd_blks_fetched++;
        cv_broadcast(&pfd->pd_cv);
        mutex_exit(&pfd->pd_mtx);

        (void) arc_read(NULL, spa, bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
            ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &aflags, zb);

        return (0);
}

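/*
 * Prefetch thread body: re-run the traversal with traverse_prefetcher as
 * the callback, then signal pd_exited so the main traversal can finish.
 */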
static void
traverse_prefetch_thread(void *arg)
{
        traverse_data_t *td_main = arg;
        traverse_data_t td = *td_main;
        zbookmark_phys_t czb;

        td.td_func = traverse_prefetcher;
        td.td_arg = td_main->td_pfd;
        td.td_pfd = NULL;

        SET_BOOKMARK(&czb, td.td_objset,
            ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
        (void) traverse_visitbp(&td, NULL, td.td_rootbp, &czb);

        mutex_enter(&td_main->td_pfd->pd_mtx);
        td_main->td_pfd->pd_exited = B_TRUE;
        cv_broadcast(&td_main->td_pfd->pd_cv);
        mutex_exit(&td_main->td_pfd->pd_mtx);
}

/*
 * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 * in syncing context).
 */
static int
traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
        traverse_data_t td;
        prefetch_data_t pd = { 0 };
        zbookmark_phys_t czb;
        int err;

        ASSERT(ds == NULL || objset == ds->ds_object);
        ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));

        /*
         * The data prefetching mechanism (the prefetch thread) is incompatible
         * with resuming from a bookmark.
         */
        ASSERT(resume == NULL || !(flags & TRAVERSE_PREFETCH_DATA));

        td.td_spa = spa;
        td.td_objset = objset;
        td.td_rootbp = rootbp;
        td.td_min_txg = txg_start;
        td.td_resume = resume;
        td.td_func = func;
        td.td_arg = arg;
        td.td_pfd = &pd;
        td.td_flags = flags;
        td.td_paused = B_FALSE;

        if (spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
                VERIFY(spa_feature_enabled_txg(spa,
                    SPA_FEATURE_HOLE_BIRTH, &td.td_hole_birth_enabled_txg));
        } else {
                td.td_hole_birth_enabled_txg = 0;
        }

        pd.pd_blks_max = zfs_pd_blks_max;
        pd.pd_flags = flags;
        mutex_init(&pd.pd_mtx, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&pd.pd_cv, NULL, CV_DEFAULT, NULL);

        /* See comment on ZIL traversal in dsl_scan_visitds. */
        if (ds != NULL && !ds->ds_is_snapshot && !BP_IS_HOLE(rootbp)) {
                arc_flags_t flags = ARC_FLAG_WAIT;
                objset_phys_t *osp;
                arc_buf_t *buf;

                err = arc_read(NULL, td.td_spa, rootbp,
                    arc_getbuf_func, &buf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, NULL);
                if (err != 0)
                        return (err);

                osp = buf->b_data;
                traverse_zil(&td, &osp->os_zil_header);
                (void) arc_buf_remove_ref(buf, &buf);
        }

        if (!(flags & TRAVERSE_PREFETCH_DATA) ||
            0 == taskq_dispatch(system_taskq, traverse_prefetch_thread,
            &td, TQ_NOQUEUE))
                pd.pd_exited = B_TRUE;

        SET_BOOKMARK(&czb, td.td_objset,
            ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
        err = traverse_visitbp(&td, NULL, rootbp, &czb);

        mutex_enter(&pd.pd_mtx);
        pd.pd_cancel = B_TRUE;
        cv_broadcast(&pd.pd_cv);
        while (!pd.pd_exited)
                cv_wait(&pd.pd_cv, &pd.pd_mtx);
        mutex_exit(&pd.pd_mtx);

        mutex_destroy(&pd.pd_mtx);
        cv_destroy(&pd.pd_cv);

        return (err);
}

/*
 * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 * in syncing context).
 */
int
traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
        return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds, ds->ds_object,
            &dsl_dataset_phys(ds)->ds_bp, txg_start, NULL, flags, func, arg));
}

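/*
 * Hypothetical usage sketch (not part of this file): given an already-held
 * dsl_dataset_t *ds, count its level-0 blocks with a pre-order traversal.
 * The callback name count_cb is made up; the signature is the blkptr_cb_t
 * shape used throughout this file.
 *
 *	static int
 *	count_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
 *	    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
 *	{
 *		uint64_t *count = arg;
 *
 *		if (!BP_IS_HOLE(bp) && BP_GET_LEVEL(bp) == 0)
 *			(*count)++;
 *		return (0);
 *	}
 *
 *	uint64_t count = 0;
 *	(void) traverse_dataset(ds, 0,
 *	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, count_cb, &count);
 */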
int
traverse_dataset_destroyed(spa_t *spa, blkptr_t *blkptr,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
        return (traverse_impl(spa, NULL, ZB_DESTROYED_OBJSET,
            blkptr, txg_start, resume, flags, func, arg));
}

/*
 * NB: pool must not be changing on-disk (eg, from zdb or sync context).
 */
int
traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
        int err;
        uint64_t obj;
        dsl_pool_t *dp = spa_get_dsl(spa);
        objset_t *mos = dp->dp_meta_objset;
        boolean_t hard = (flags & TRAVERSE_HARD);

        /* visit the MOS */
        err = traverse_impl(spa, NULL, 0, spa_get_rootblkptr(spa),
            txg_start, NULL, flags, func, arg);
        if (err != 0)
                return (err);

        /* visit each dataset */
        for (obj = 1; err == 0;
            err = dmu_object_next(mos, &obj, FALSE, txg_start)) {
                dmu_object_info_t doi;

                err = dmu_object_info(mos, obj, &doi);
                if (err != 0) {
                        if (hard)
                                continue;
                        break;
                }

                if (doi.doi_bonus_type == DMU_OT_DSL_DATASET) {
                        dsl_dataset_t *ds;
                        uint64_t txg = txg_start;

                        dsl_pool_config_enter(dp, FTAG);
                        err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
                        dsl_pool_config_exit(dp, FTAG);
                        if (err != 0) {
                                if (hard)
                                        continue;
                                break;
                        }
                        if (dsl_dataset_phys(ds)->ds_prev_snap_txg > txg)
                                txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
                        err = traverse_dataset(ds, txg, flags, func, arg);
                        dsl_dataset_rele(ds, FTAG);