4757 ZFS embedded-data block pointers ("zero block compression")
unleashed.git: usr/src/uts/common/fs/zfs/dmu_traverse.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_impl.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/callb.h>
#include <sys/zfeature.h>
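
/*
 * Maximum number of blocks the prefetch thread may have issued and not yet
 * consumed by the main traversal (see traverse_prefetcher() below).
 */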
int zfs_pd_blks_max = 100;
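
/*
 * State shared between the main traversal and the prefetch thread.  The
 * prefetch thread increments pd_blks_fetched for every block it issues; the
 * traversal decrements it as it consumes them, and each side blocks on
 * pd_cv when its limit is reached.
 */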
typedef struct prefetch_data {
	kmutex_t pd_mtx;
	kcondvar_t pd_cv;
	int pd_blks_max;
	int pd_blks_fetched;
	int pd_flags;
	boolean_t pd_cancel;
	boolean_t pd_exited;
} prefetch_data_t;
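
/*
 * Per-traversal state handed down the recursion in traverse_visitbp().
 */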
typedef struct traverse_data {
	spa_t *td_spa;
	uint64_t td_objset;
	blkptr_t *td_rootbp;
	uint64_t td_min_txg;
	zbookmark_t *td_resume;
	int td_flags;
	prefetch_data_t *td_pfd;
	blkptr_cb_t *td_func;
	void *td_arg;
} traverse_data_t;

static int traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object);
static void prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *,
    uint64_t objset, uint64_t object);
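
/*
 * Callbacks handed to zil_parse() by traverse_zil() below: one per intent-log
 * block and one per log record, forwarding qualifying blocks to the caller's
 * td_func with a ZIL bookmark.
 */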
static int
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	traverse_data_t *td = arg;
	zbookmark_t zb;

	if (BP_IS_HOLE(bp))
		return (0);

	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(td->td_spa))
		return (0);

	SET_BOOKMARK(&zb, td->td_objset, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL, td->td_arg);

	return (0);
}

static int
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	traverse_data_t *td = arg;

	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_t zb;

		if (BP_IS_HOLE(bp))
			return (0);

		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
		    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

		(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL,
		    td->td_arg);
	}
	return (0);
}

static void
traverse_zil(traverse_data_t *td, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed; plus, in read-only mode, blocks that are already stable.
	 */
	if (claim_txg == 0 && spa_writeable(td->td_spa))
		return;

	zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);

	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
	    claim_txg);

	zil_free(zilog);
}

typedef enum resume_skip {
	RESUME_SKIP_ALL,
	RESUME_SKIP_NONE,
	RESUME_SKIP_CHILDREN
} resume_skip_t;

/*
 * Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and
 * the block indicated by zb does not need to be visited at all. Returns
 * RESUME_SKIP_CHILDREN if we are resuming a post traversal and we reach the
 * resume point. This indicates that this block should be visited but not its
 * children (since they must have been visited in a previous traversal).
 * Otherwise returns RESUME_SKIP_NONE.
 */
static resume_skip_t
resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp,
    const zbookmark_t *zb)
{
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume)) {
		/*
		 * If we already visited this bp & everything below,
		 * don't bother doing it again.
		 */
		if (zbookmark_is_before(dnp, zb, td->td_resume))
			return (RESUME_SKIP_ALL);

		/*
		 * If we found the block we're trying to resume from, zero
		 * the bookmark out to indicate that we have resumed.
		 */
		ASSERT3U(zb->zb_object, <=, td->td_resume->zb_object);
		if (bcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
			bzero(td->td_resume, sizeof (*zb));
			if (td->td_flags & TRAVERSE_POST)
				return (RESUME_SKIP_CHILDREN);
		}
	}
	return (RESUME_SKIP_NONE);
}

static void
traverse_pause(traverse_data_t *td, const zbookmark_t *zb)
{
	ASSERT(td->td_resume != NULL);
	ASSERT0(zb->zb_level);
	bcopy(zb, td->td_resume, sizeof (*td->td_resume));
}

static void
traverse_prefetch_metadata(traverse_data_t *td,
    const blkptr_t *bp, const zbookmark_t *zb)
{
	uint32_t flags = ARC_NOWAIT | ARC_PREFETCH;

	if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
		return;
	/*
	 * If we are in the process of resuming, don't prefetch, because
	 * some children will not be needed (and in fact may have already
	 * been freed).
	 */
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume))
		return;
	if (BP_IS_HOLE(bp) || bp->blk_birth <= td->td_min_txg)
		return;
	if (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE)
		return;

	(void) arc_read(NULL, td->td_spa, bp, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
}
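
/*
 * Visit the block pointed to by bp and, depth-first, everything below it:
 * indirect blocks, dnode blocks, and objset blocks are read and recursed
 * into.  The caller's td_func is invoked before descending (TRAVERSE_PRE)
 * and/or after (TRAVERSE_POST); errors either abort the walk or, with
 * TRAVERSE_HARD, are remembered in lasterr while the walk continues.
 */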
static int
traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
    const blkptr_t *bp, const zbookmark_t *zb)
{
	zbookmark_t czb;
	int err = 0, lasterr = 0;
	arc_buf_t *buf = NULL;
	prefetch_data_t *pd = td->td_pfd;
	boolean_t hard = td->td_flags & TRAVERSE_HARD;
	boolean_t pause = B_FALSE;

	switch (resume_skip_check(td, dnp, zb)) {
	case RESUME_SKIP_ALL:
		return (0);
	case RESUME_SKIP_CHILDREN:
		goto post;
	case RESUME_SKIP_NONE:
		break;
	default:
		ASSERT(0);
	}

	if (bp->blk_birth == 0) {
		if (spa_feature_is_active(td->td_spa, SPA_FEATURE_HOLE_BIRTH)) {
			/*
			 * Since this block has a birth time of 0 it must be a
			 * hole created before the SPA_FEATURE_HOLE_BIRTH
			 * feature was enabled.  If SPA_FEATURE_HOLE_BIRTH
			 * was enabled before the min_txg for this traversal
			 * we know the hole must have been created before the
			 * min_txg for this traversal, so we can skip it.  If
			 * SPA_FEATURE_HOLE_BIRTH was enabled after the
			 * min_txg for this traversal we cannot tell if the
			 * hole was created before or after the min_txg for
			 * this traversal, so we cannot skip it.
			 */
			uint64_t hole_birth_enabled_txg;
			VERIFY(spa_feature_enabled_txg(td->td_spa,
			    SPA_FEATURE_HOLE_BIRTH, &hole_birth_enabled_txg));
			if (hole_birth_enabled_txg < td->td_min_txg)
				return (0);
		}
	} else if (bp->blk_birth <= td->td_min_txg) {
		return (0);
	}

	if (BP_IS_HOLE(bp)) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
		return (err);
	}

	if (pd && !pd->pd_exited &&
	    ((pd->pd_flags & TRAVERSE_PREFETCH_DATA) ||
	    BP_GET_TYPE(bp) == DMU_OT_DNODE || BP_GET_LEVEL(bp) > 0)) {
		mutex_enter(&pd->pd_mtx);
		ASSERT(pd->pd_blks_fetched >= 0);
		while (pd->pd_blks_fetched == 0 && !pd->pd_exited)
			cv_wait(&pd->pd_cv, &pd->pd_mtx);
		pd->pd_blks_fetched--;
		cv_broadcast(&pd->pd_cv);
		mutex_exit(&pd->pd_mtx);
	}

	if (td->td_flags & TRAVERSE_PRE) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err == ERESTART)
			pause = B_TRUE; /* handle pausing at a common point */
		if (err != 0)
			goto post;
	}

	if (BP_GET_LEVEL(bp) > 0) {
		uint32_t flags = ARC_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			return (err);
		cbp = buf->b_data;

		for (i = 0; i < epb; i++) {
			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			traverse_prefetch_metadata(td, &cbp[i], &czb);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i++) {
			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			err = traverse_visitbp(td, dnp, &cbp[i], &czb);
			if (err != 0) {
				if (!hard)
					break;
				lasterr = err;
			}
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		uint32_t flags = ARC_WAIT;
		int i;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			return (err);
		dnp = buf->b_data;

		for (i = 0; i < epb; i++) {
			prefetch_dnode_metadata(td, &dnp[i], zb->zb_objset,
			    zb->zb_blkid * epb + i);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i++) {
			err = traverse_dnode(td, &dnp[i], zb->zb_objset,
			    zb->zb_blkid * epb + i);
			if (err != 0) {
				if (!hard)
					break;
				lasterr = err;
			}
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		uint32_t flags = ARC_WAIT;
		objset_phys_t *osp;
		dnode_phys_t *dnp;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			return (err);

		osp = buf->b_data;
		dnp = &osp->os_meta_dnode;
		prefetch_dnode_metadata(td, dnp, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		if (arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			prefetch_dnode_metadata(td, &osp->os_groupused_dnode,
			    zb->zb_objset, DMU_GROUPUSED_OBJECT);
			prefetch_dnode_metadata(td, &osp->os_userused_dnode,
			    zb->zb_objset, DMU_USERUSED_OBJECT);
		}

		err = traverse_dnode(td, dnp, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		if (err && hard) {
			lasterr = err;
			err = 0;
		}
		if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			dnp = &osp->os_groupused_dnode;
			err = traverse_dnode(td, dnp, zb->zb_objset,
			    DMU_GROUPUSED_OBJECT);
		}
		if (err && hard) {
			lasterr = err;
			err = 0;
		}
		if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			dnp = &osp->os_userused_dnode;
			err = traverse_dnode(td, dnp, zb->zb_objset,
			    DMU_USERUSED_OBJECT);
		}
	}

	if (buf)
		(void) arc_buf_remove_ref(buf, &buf);

post:
	if (err == 0 && (td->td_flags & TRAVERSE_POST)) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
		if (err == ERESTART)
			pause = B_TRUE;
	}

	if (pause && td->td_resume != NULL) {
		ASSERT3U(err, ==, ERESTART);
		ASSERT(!hard);
		traverse_pause(td, zb);
	}

	return (err != 0 ? err : lasterr);
}
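
/*
 * Issue prefetches for every block pointer in a dnode (including its spill
 * block, if present) without descending into them.
 */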
static void
prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j;
	zbookmark_t czb;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		traverse_prefetch_metadata(td, &dnp->dn_blkptr[j], &czb);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		traverse_prefetch_metadata(td, &dnp->dn_spill, &czb);
	}
}
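
/*
 * Visit each of a dnode's top-level block pointers (and its spill block, if
 * any) via traverse_visitbp(), honoring TRAVERSE_HARD error accumulation.
 */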
static int
traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j, err = 0, lasterr = 0;
	zbookmark_t czb;
	boolean_t hard = (td->td_flags & TRAVERSE_HARD);

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		err = traverse_visitbp(td, dnp, &dnp->dn_blkptr[j], &czb);
		if (err != 0) {
			if (!hard)
				break;
			lasterr = err;
		}
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		err = traverse_visitbp(td, dnp, &dnp->dn_spill, &czb);
		if (err != 0) {
			if (!hard)
				return (err);
			lasterr = err;
		}
	}
	return (err != 0 ? err : lasterr);
}
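
/*
 * blkptr_cb_t used by the prefetch thread: issues a speculative, asynchronous
 * arc_read() for each interesting block, throttled so that no more than
 * pd_blks_max blocks are outstanding ahead of the main traversal.
 */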
/* ARGSUSED */
static int
traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	prefetch_data_t *pfd = arg;
	uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;

	ASSERT(pfd->pd_blks_fetched >= 0);
	if (pfd->pd_cancel)
		return (SET_ERROR(EINTR));

	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) ||
	    !((pfd->pd_flags & TRAVERSE_PREFETCH_DATA) ||
	    BP_GET_TYPE(bp) == DMU_OT_DNODE || BP_GET_LEVEL(bp) > 0) ||
	    BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG)
		return (0);

	mutex_enter(&pfd->pd_mtx);
	while (!pfd->pd_cancel && pfd->pd_blks_fetched >= pfd->pd_blks_max)
		cv_wait(&pfd->pd_cv, &pfd->pd_mtx);
	pfd->pd_blks_fetched++;
	cv_broadcast(&pfd->pd_cv);
	mutex_exit(&pfd->pd_mtx);

	(void) arc_read(NULL, spa, bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &aflags, zb);

	return (0);
}
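
/*
 * Body of the prefetch taskq thread: walk the same tree as the main
 * traversal, but with traverse_prefetcher() as the callback, and set
 * pd_exited when done so traverse_impl() can reap us.
 */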
static void
traverse_prefetch_thread(void *arg)
{
	traverse_data_t *td_main = arg;
	traverse_data_t td = *td_main;
	zbookmark_t czb;

	td.td_func = traverse_prefetcher;
	td.td_arg = td_main->td_pfd;
	td.td_pfd = NULL;

	SET_BOOKMARK(&czb, td.td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	(void) traverse_visitbp(&td, NULL, td.td_rootbp, &czb);

	mutex_enter(&td_main->td_pfd->pd_mtx);
	td_main->td_pfd->pd_exited = B_TRUE;
	cv_broadcast(&td_main->td_pfd->pd_cv);
	mutex_exit(&td_main->td_pfd->pd_mtx);
}

/*
 * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 * in syncing context).
 */
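/*
 * Common implementation: optionally visit the ZIL of a non-snapshot dataset,
 * optionally dispatch the data prefetch thread, then walk everything
 * reachable from rootbp starting at the root bookmark.
 */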
static int
traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
    uint64_t txg_start, zbookmark_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	traverse_data_t td;
	prefetch_data_t pd = { 0 };
	zbookmark_t czb;
	int err;

	ASSERT(ds == NULL || objset == ds->ds_object);
	ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));

	/*
	 * The data prefetching mechanism (the prefetch thread) is incompatible
	 * with resuming from a bookmark.
	 */
	ASSERT(resume == NULL || !(flags & TRAVERSE_PREFETCH_DATA));

	td.td_spa = spa;
	td.td_objset = objset;
	td.td_rootbp = rootbp;
	td.td_min_txg = txg_start;
	td.td_resume = resume;
	td.td_func = func;
	td.td_arg = arg;
	td.td_pfd = &pd;
	td.td_flags = flags;

	pd.pd_blks_max = zfs_pd_blks_max;
	pd.pd_flags = flags;
	mutex_init(&pd.pd_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pd.pd_cv, NULL, CV_DEFAULT, NULL);

	/* See comment on ZIL traversal in dsl_scan_visitds. */
	if (ds != NULL && !dsl_dataset_is_snapshot(ds) && !BP_IS_HOLE(rootbp)) {
		uint32_t flags = ARC_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;

		err = arc_read(NULL, td.td_spa, rootbp,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, NULL);
		if (err != 0)
			return (err);

		osp = buf->b_data;
		traverse_zil(&td, &osp->os_zil_header);
		(void) arc_buf_remove_ref(buf, &buf);
	}

	if (!(flags & TRAVERSE_PREFETCH_DATA) ||
	    0 == taskq_dispatch(system_taskq, traverse_prefetch_thread,
	    &td, TQ_NOQUEUE))
		pd.pd_exited = B_TRUE;

	SET_BOOKMARK(&czb, td.td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	err = traverse_visitbp(&td, NULL, rootbp, &czb);

	mutex_enter(&pd.pd_mtx);
	pd.pd_cancel = B_TRUE;
	cv_broadcast(&pd.pd_cv);
	while (!pd.pd_exited)
		cv_wait(&pd.pd_cv, &pd.pd_mtx);
	mutex_exit(&pd.pd_mtx);

	mutex_destroy(&pd.pd_mtx);
	cv_destroy(&pd.pd_cv);

	return (err);
}

/*
 * NB: dataset must not be changing on-disk (eg, is a snapshot or we are
 * in syncing context).
 */
int
traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds, ds->ds_object,
	    &ds->ds_phys->ds_bp, txg_start, NULL, flags, func, arg));
}
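
/*
 * Illustration (not part of the original file): a caller supplies a
 * blkptr_cb_t and receives one callback per block visited.  A minimal
 * sketch, assuming a hypothetical count_blocks_cb() and counter:
 *
 *	static int
 *	count_blocks_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
 *	    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
 *	{
 *		uint64_t *count = arg;
 *
 *		if (bp != NULL && !BP_IS_HOLE(bp))
 *			(*count)++;
 *		return (0);
 *	}
 *
 *	uint64_t count = 0;
 *	int err = traverse_dataset(ds, 0, TRAVERSE_PRE,
 *	    count_blocks_cb, &count);
 */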

int
traverse_dataset_destroyed(spa_t *spa, blkptr_t *blkptr,
    uint64_t txg_start, zbookmark_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(spa, NULL, ZB_DESTROYED_OBJSET,
	    blkptr, txg_start, resume, flags, func, arg));
}
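
/*
 * Walk the entire pool: first the MOS, then every DSL dataset it contains,
 * starting each dataset at its previous-snapshot txg so that blocks shared
 * with an earlier snapshot are not visited again.
 */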
/*
 * NB: pool must not be changing on-disk (eg, from zdb or sync context).
 */
int
traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	int err, lasterr = 0;
	uint64_t obj;
	dsl_pool_t *dp = spa_get_dsl(spa);
	objset_t *mos = dp->dp_meta_objset;
	boolean_t hard = (flags & TRAVERSE_HARD);

	/* visit the MOS */
	err = traverse_impl(spa, NULL, 0, spa_get_rootblkptr(spa),
	    txg_start, NULL, flags, func, arg);
	if (err != 0)
		return (err);

	/* visit each dataset */
	for (obj = 1; err == 0 || (err != ESRCH && hard);
	    err = dmu_object_next(mos, &obj, FALSE, txg_start)) {
		dmu_object_info_t doi;

		err = dmu_object_info(mos, obj, &doi);
		if (err != 0) {
			if (!hard)
				return (err);
			lasterr = err;
			continue;
		}

		if (doi.doi_bonus_type == DMU_OT_DSL_DATASET) {
			dsl_dataset_t *ds;
			uint64_t txg = txg_start;

			dsl_pool_config_enter(dp, FTAG);
			err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
			dsl_pool_config_exit(dp, FTAG);
			if (err != 0) {
				if (!hard)
					return (err);
				lasterr = err;
				continue;
			}
			if (ds->ds_phys->ds_prev_snap_txg > txg)
				txg = ds->ds_phys->ds_prev_snap_txg;
			err = traverse_dataset(ds, txg, flags, func, arg);
			dsl_dataset_rele(ds, FTAG);
			if (err != 0) {
				if (!hard)
					return (err);
				lasterr = err;
			}
		}
	}

	if (err == ESRCH)
		err = 0;
	return (err != 0 ? err : lasterr);
}