7606 dmu_objset_find_dp() takes a long time while importing pool
usr/src/uts/common/fs/zfs/dmu_objset.c (unleashed.git)
blob b71a43f7b5514111027a917ab2cc03ac94e134a7
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
26 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
28 * Copyright (c) 2015, STRATO AG, Inc. All rights reserved.
29 * Copyright (c) 2014 Integros [integros.com]
32 /* Portions Copyright 2010 Robert Milkowski */
34 #include <sys/cred.h>
35 #include <sys/zfs_context.h>
36 #include <sys/dmu_objset.h>
37 #include <sys/dsl_dir.h>
38 #include <sys/dsl_dataset.h>
39 #include <sys/dsl_prop.h>
40 #include <sys/dsl_pool.h>
41 #include <sys/dsl_synctask.h>
42 #include <sys/dsl_deleg.h>
43 #include <sys/dnode.h>
44 #include <sys/dbuf.h>
45 #include <sys/zvol.h>
46 #include <sys/dmu_tx.h>
47 #include <sys/zap.h>
48 #include <sys/zil.h>
49 #include <sys/dmu_impl.h>
50 #include <sys/zfs_ioctl.h>
51 #include <sys/sa.h>
52 #include <sys/zfs_onexit.h>
53 #include <sys/dsl_destroy.h>
54 #include <sys/vdev.h>
57 * Needed to close a window in dnode_move() that allows the objset to be freed
58 * before it can be safely accessed.
60 krwlock_t os_lock;
63  * Tunable to override the maximum number of threads for the parallelization
64 * of dmu_objset_find_dp, needed to speed up the import of pools with many
65 * datasets.
66 * Default is 4 times the number of leaf vdevs.
68 int dmu_find_threads = 0;
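/*
 * Editor's note: when dmu_find_threads is left at 0, dmu_objset_find_dp()
 * below sizes its taskq at vdev_count_leaves(spa) * 4; e.g. a pool with
 * 10 leaf vdevs gets 40 worker threads.  Setting the tunable to 1
 * effectively serializes the traversal again.
 */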
71 * Backfill lower metadnode objects after this many have been freed.
72 * Backfilling negatively impacts object creation rates, so only do it
73 * if there are enough holes to fill.
75 int dmu_rescan_dnode_threshold = 131072;
77 static void dmu_objset_find_dp_cb(void *arg);
79 void
80 dmu_objset_init(void)
82 rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
85 void
86 dmu_objset_fini(void)
88 rw_destroy(&os_lock);
91 spa_t *
92 dmu_objset_spa(objset_t *os)
94 return (os->os_spa);
97 zilog_t *
98 dmu_objset_zil(objset_t *os)
100 return (os->os_zil);
103 dsl_pool_t *
104 dmu_objset_pool(objset_t *os)
106 dsl_dataset_t *ds;
108 if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
109 return (ds->ds_dir->dd_pool);
110 else
111 return (spa_get_dsl(os->os_spa));
114 dsl_dataset_t *
115 dmu_objset_ds(objset_t *os)
117 return (os->os_dsl_dataset);
120 dmu_objset_type_t
121 dmu_objset_type(objset_t *os)
123 return (os->os_phys->os_type);
126 void
127 dmu_objset_name(objset_t *os, char *buf)
129 dsl_dataset_name(os->os_dsl_dataset, buf);
132 uint64_t
133 dmu_objset_id(objset_t *os)
135 dsl_dataset_t *ds = os->os_dsl_dataset;
137 return (ds ? ds->ds_object : 0);
140 zfs_sync_type_t
141 dmu_objset_syncprop(objset_t *os)
143 return (os->os_sync);
146 zfs_logbias_op_t
147 dmu_objset_logbias(objset_t *os)
149 return (os->os_logbias);
152 static void
153 checksum_changed_cb(void *arg, uint64_t newval)
155 objset_t *os = arg;
158 * Inheritance should have been done by now.
160 ASSERT(newval != ZIO_CHECKSUM_INHERIT);
162 os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
165 static void
166 compression_changed_cb(void *arg, uint64_t newval)
168 objset_t *os = arg;
171 * Inheritance and range checking should have been done by now.
173 ASSERT(newval != ZIO_COMPRESS_INHERIT);
175 os->os_compress = zio_compress_select(os->os_spa, newval,
176 ZIO_COMPRESS_ON);
179 static void
180 copies_changed_cb(void *arg, uint64_t newval)
182 objset_t *os = arg;
185 * Inheritance and range checking should have been done by now.
187 ASSERT(newval > 0);
188 ASSERT(newval <= spa_max_replication(os->os_spa));
190 os->os_copies = newval;
193 static void
194 dedup_changed_cb(void *arg, uint64_t newval)
196 objset_t *os = arg;
197 spa_t *spa = os->os_spa;
198 enum zio_checksum checksum;
201 * Inheritance should have been done by now.
203 ASSERT(newval != ZIO_CHECKSUM_INHERIT);
205 checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);
207 os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
208 os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
211 static void
212 primary_cache_changed_cb(void *arg, uint64_t newval)
214 objset_t *os = arg;
217 * Inheritance and range checking should have been done by now.
219 ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
220 newval == ZFS_CACHE_METADATA);
222 os->os_primary_cache = newval;
225 static void
226 secondary_cache_changed_cb(void *arg, uint64_t newval)
228 objset_t *os = arg;
231 * Inheritance and range checking should have been done by now.
233 ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
234 newval == ZFS_CACHE_METADATA);
236 os->os_secondary_cache = newval;
239 static void
240 sync_changed_cb(void *arg, uint64_t newval)
242 objset_t *os = arg;
245 * Inheritance and range checking should have been done by now.
247 ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
248 newval == ZFS_SYNC_DISABLED);
250 os->os_sync = newval;
251 if (os->os_zil)
252 zil_set_sync(os->os_zil, newval);
255 static void
256 redundant_metadata_changed_cb(void *arg, uint64_t newval)
258 objset_t *os = arg;
261 * Inheritance and range checking should have been done by now.
263 ASSERT(newval == ZFS_REDUNDANT_METADATA_ALL ||
264 newval == ZFS_REDUNDANT_METADATA_MOST);
266 os->os_redundant_metadata = newval;
269 static void
270 logbias_changed_cb(void *arg, uint64_t newval)
272 objset_t *os = arg;
274 ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
275 newval == ZFS_LOGBIAS_THROUGHPUT);
276 os->os_logbias = newval;
277 if (os->os_zil)
278 zil_set_logbias(os->os_zil, newval);
281 static void
282 recordsize_changed_cb(void *arg, uint64_t newval)
284 objset_t *os = arg;
286 os->os_recordsize = newval;
289 void
290 dmu_objset_byteswap(void *buf, size_t size)
292 objset_phys_t *osp = buf;
294 ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
295 dnode_byteswap(&osp->os_meta_dnode);
296 byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
297 osp->os_type = BSWAP_64(osp->os_type);
298 osp->os_flags = BSWAP_64(osp->os_flags);
299 if (size == sizeof (objset_phys_t)) {
300 dnode_byteswap(&osp->os_userused_dnode);
301 dnode_byteswap(&osp->os_groupused_dnode);
306 dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
307 objset_t **osp)
309 objset_t *os;
310 int i, err;
312 ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));
314 os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
315 os->os_dsl_dataset = ds;
316 os->os_spa = spa;
317 os->os_rootbp = bp;
318 if (!BP_IS_HOLE(os->os_rootbp)) {
319 arc_flags_t aflags = ARC_FLAG_WAIT;
320 zbookmark_phys_t zb;
321 SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
322 ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
324 if (DMU_OS_IS_L2CACHEABLE(os))
325 aflags |= ARC_FLAG_L2CACHE;
327 dprintf_bp(os->os_rootbp, "reading %s", "");
328 err = arc_read(NULL, spa, os->os_rootbp,
329 arc_getbuf_func, &os->os_phys_buf,
330 ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
331 if (err != 0) {
332 kmem_free(os, sizeof (objset_t));
333 /* convert checksum errors into IO errors */
334 if (err == ECKSUM)
335 err = SET_ERROR(EIO);
336 return (err);
339 /* Increase the blocksize if we are permitted. */
340 if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
341 arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
342 arc_buf_t *buf = arc_alloc_buf(spa, &os->os_phys_buf,
343 ARC_BUFC_METADATA, sizeof (objset_phys_t));
344 bzero(buf->b_data, sizeof (objset_phys_t));
345 bcopy(os->os_phys_buf->b_data, buf->b_data,
346 arc_buf_size(os->os_phys_buf));
347 arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
348 os->os_phys_buf = buf;
351 os->os_phys = os->os_phys_buf->b_data;
352 os->os_flags = os->os_phys->os_flags;
353 } else {
354 int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
355 sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
356 os->os_phys_buf = arc_alloc_buf(spa, &os->os_phys_buf,
357 ARC_BUFC_METADATA, size);
358 os->os_phys = os->os_phys_buf->b_data;
359 bzero(os->os_phys, size);
363 * Note: the changed_cb will be called once before the register
364 * func returns, thus changing the checksum/compression from the
365 * default (fletcher2/off). Snapshots don't need to know about
366 * checksum/compression/copies.
368 if (ds != NULL) {
369 boolean_t needlock = B_FALSE;
372 * Note: it's valid to open the objset if the dataset is
373 * long-held, in which case the pool_config lock will not
374 * be held.
376 if (!dsl_pool_config_held(dmu_objset_pool(os))) {
377 needlock = B_TRUE;
378 dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
380 err = dsl_prop_register(ds,
381 zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
382 primary_cache_changed_cb, os);
383 if (err == 0) {
384 err = dsl_prop_register(ds,
385 zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
386 secondary_cache_changed_cb, os);
388 if (!ds->ds_is_snapshot) {
389 if (err == 0) {
390 err = dsl_prop_register(ds,
391 zfs_prop_to_name(ZFS_PROP_CHECKSUM),
392 checksum_changed_cb, os);
394 if (err == 0) {
395 err = dsl_prop_register(ds,
396 zfs_prop_to_name(ZFS_PROP_COMPRESSION),
397 compression_changed_cb, os);
399 if (err == 0) {
400 err = dsl_prop_register(ds,
401 zfs_prop_to_name(ZFS_PROP_COPIES),
402 copies_changed_cb, os);
404 if (err == 0) {
405 err = dsl_prop_register(ds,
406 zfs_prop_to_name(ZFS_PROP_DEDUP),
407 dedup_changed_cb, os);
409 if (err == 0) {
410 err = dsl_prop_register(ds,
411 zfs_prop_to_name(ZFS_PROP_LOGBIAS),
412 logbias_changed_cb, os);
414 if (err == 0) {
415 err = dsl_prop_register(ds,
416 zfs_prop_to_name(ZFS_PROP_SYNC),
417 sync_changed_cb, os);
419 if (err == 0) {
420 err = dsl_prop_register(ds,
421 zfs_prop_to_name(
422 ZFS_PROP_REDUNDANT_METADATA),
423 redundant_metadata_changed_cb, os);
425 if (err == 0) {
426 err = dsl_prop_register(ds,
427 zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
428 recordsize_changed_cb, os);
431 if (needlock)
432 dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
433 if (err != 0) {
434 arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
435 kmem_free(os, sizeof (objset_t));
436 return (err);
438 } else {
439 /* It's the meta-objset. */
440 os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
441 os->os_compress = ZIO_COMPRESS_ON;
442 os->os_copies = spa_max_replication(spa);
443 os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
444 os->os_dedup_verify = B_FALSE;
445 os->os_logbias = ZFS_LOGBIAS_LATENCY;
446 os->os_sync = ZFS_SYNC_STANDARD;
447 os->os_primary_cache = ZFS_CACHE_ALL;
448 os->os_secondary_cache = ZFS_CACHE_ALL;
451 if (ds == NULL || !ds->ds_is_snapshot)
452 os->os_zil_header = os->os_phys->os_zil_header;
453 os->os_zil = zil_alloc(os, &os->os_zil_header);
455 for (i = 0; i < TXG_SIZE; i++) {
456 list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
457 offsetof(dnode_t, dn_dirty_link[i]));
458 list_create(&os->os_free_dnodes[i], sizeof (dnode_t),
459 offsetof(dnode_t, dn_dirty_link[i]));
461 list_create(&os->os_dnodes, sizeof (dnode_t),
462 offsetof(dnode_t, dn_link));
463 list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
464 offsetof(dmu_buf_impl_t, db_link));
466 mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
467 mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
468 mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);
470 dnode_special_open(os, &os->os_phys->os_meta_dnode,
471 DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
472 if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
473 dnode_special_open(os, &os->os_phys->os_userused_dnode,
474 DMU_USERUSED_OBJECT, &os->os_userused_dnode);
475 dnode_special_open(os, &os->os_phys->os_groupused_dnode,
476 DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode);
479 *osp = os;
480 return (0);
484 dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
486 int err = 0;
489 * We shouldn't be doing anything with dsl_dataset_t's unless the
490 * pool_config lock is held, or the dataset is long-held.
492 ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool) ||
493 dsl_dataset_long_held(ds));
495 mutex_enter(&ds->ds_opening_lock);
496 if (ds->ds_objset == NULL) {
497 objset_t *os;
498 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
499 err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
500 ds, dsl_dataset_get_blkptr(ds), &os);
501 rrw_exit(&ds->ds_bp_rwlock, FTAG);
503 if (err == 0) {
504 mutex_enter(&ds->ds_lock);
505 ASSERT(ds->ds_objset == NULL);
506 ds->ds_objset = os;
507 mutex_exit(&ds->ds_lock);
510 *osp = ds->ds_objset;
511 mutex_exit(&ds->ds_opening_lock);
512 return (err);
516 * Holds the pool while the objset is held. Therefore only one objset
517 * can be held at a time.
520 dmu_objset_hold(const char *name, void *tag, objset_t **osp)
522 dsl_pool_t *dp;
523 dsl_dataset_t *ds;
524 int err;
526 err = dsl_pool_hold(name, tag, &dp);
527 if (err != 0)
528 return (err);
529 err = dsl_dataset_hold(dp, name, tag, &ds);
530 if (err != 0) {
531 dsl_pool_rele(dp, tag);
532 return (err);
535 err = dmu_objset_from_ds(ds, osp);
536 if (err != 0) {
537 dsl_dataset_rele(ds, tag);
538 dsl_pool_rele(dp, tag);
541 return (err);
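/*
 * Illustrative sketch (editor's addition, not upstream code): the canonical
 * pairing for the function above.  dmu_objset_hold() takes both the pool
 * hold and the dataset hold, so a single dmu_objset_rele() drops both.
 * The dataset name is hypothetical.
 */
#if 0
static void
example_inspect(void)
{
        objset_t *os;

        if (dmu_objset_hold("tank/fs", FTAG, &os) == 0) {
                /* ... read-only inspection of os ... */
                dmu_objset_rele(os, FTAG);
        }
}
#endif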
544 static int
545 dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type,
546 boolean_t readonly, void *tag, objset_t **osp)
548 int err;
550 err = dmu_objset_from_ds(ds, osp);
551 if (err != 0) {
552 dsl_dataset_disown(ds, tag);
553 } else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
554 dsl_dataset_disown(ds, tag);
555 return (SET_ERROR(EINVAL));
556 } else if (!readonly && dsl_dataset_is_snapshot(ds)) {
557 dsl_dataset_disown(ds, tag);
558 return (SET_ERROR(EROFS));
560 return (err);
564 * dsl_pool must not be held when this is called.
565 * Upon successful return, there will be a longhold on the dataset,
566 * and the dsl_pool will not be held.
569 dmu_objset_own(const char *name, dmu_objset_type_t type,
570 boolean_t readonly, void *tag, objset_t **osp)
572 dsl_pool_t *dp;
573 dsl_dataset_t *ds;
574 int err;
576 err = dsl_pool_hold(name, FTAG, &dp);
577 if (err != 0)
578 return (err);
579 err = dsl_dataset_own(dp, name, tag, &ds);
580 if (err != 0) {
581 dsl_pool_rele(dp, FTAG);
582 return (err);
584 err = dmu_objset_own_impl(ds, type, readonly, tag, osp);
585 dsl_pool_rele(dp, FTAG);
587 return (err);
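/*
 * Illustrative sketch (editor's addition): owning is the long-lived
 * counterpart of holding.  The type and readonly checks come from
 * dmu_objset_own_impl() above: asking for the wrong type fails with
 * EINVAL, and asking for write access to a snapshot fails with EROFS.
 * The dataset name is hypothetical.
 */
#if 0
static void
example_own(void)
{
        objset_t *os;

        if (dmu_objset_own("tank/fs", DMU_OST_ZFS, B_FALSE,
            FTAG, &os) == 0) {
                /* ... long-running read-write use; no pool hold is kept ... */
                dmu_objset_disown(os, FTAG);
        }
}
#endif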
591 dmu_objset_own_obj(dsl_pool_t *dp, uint64_t obj, dmu_objset_type_t type,
592 boolean_t readonly, void *tag, objset_t **osp)
594 dsl_dataset_t *ds;
595 int err;
597 err = dsl_dataset_own_obj(dp, obj, tag, &ds);
598 if (err != 0)
599 return (err);
601 return (dmu_objset_own_impl(ds, type, readonly, tag, osp));
604 void
605 dmu_objset_rele(objset_t *os, void *tag)
607 dsl_pool_t *dp = dmu_objset_pool(os);
608 dsl_dataset_rele(os->os_dsl_dataset, tag);
609 dsl_pool_rele(dp, tag);
613 * When we are called, os MUST refer to an objset associated with a dataset
614 * that is owned by 'tag'; that is, is held and long held by 'tag' and ds_owner
615 * == tag. We will then release and reacquire ownership of the dataset while
616  * holding the pool config_rwlock so that no intervening namespace or
617  * ownership changes can occur.
619 * This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to
620 * release the hold on its dataset and acquire a new one on the dataset of the
621 * same name so that it can be partially torn down and reconstructed.
623 void
624 dmu_objset_refresh_ownership(objset_t *os, void *tag)
626 dsl_pool_t *dp;
627 dsl_dataset_t *ds, *newds;
628 char name[ZFS_MAX_DATASET_NAME_LEN];
630 ds = os->os_dsl_dataset;
631 VERIFY3P(ds, !=, NULL);
632 VERIFY3P(ds->ds_owner, ==, tag);
633 VERIFY(dsl_dataset_long_held(ds));
635 dsl_dataset_name(ds, name);
636 dp = dmu_objset_pool(os);
637 dsl_pool_config_enter(dp, FTAG);
638 dmu_objset_disown(os, tag);
639 VERIFY0(dsl_dataset_own(dp, name, tag, &newds));
640 VERIFY3P(newds, ==, os->os_dsl_dataset);
641 dsl_pool_config_exit(dp, FTAG);
644 void
645 dmu_objset_disown(objset_t *os, void *tag)
647 dsl_dataset_disown(os->os_dsl_dataset, tag);
650 void
651 dmu_objset_evict_dbufs(objset_t *os)
653 dnode_t dn_marker;
654 dnode_t *dn;
656 mutex_enter(&os->os_lock);
657 dn = list_head(&os->os_dnodes);
658 while (dn != NULL) {
660 * Skip dnodes without holds. We have to do this dance
661 * because dnode_add_ref() only works if there is already a
662 * hold. If the dnode has no holds, then it has no dbufs.
664 if (dnode_add_ref(dn, FTAG)) {
665 list_insert_after(&os->os_dnodes, dn, &dn_marker);
666 mutex_exit(&os->os_lock);
668 dnode_evict_dbufs(dn);
669 dnode_rele(dn, FTAG);
671 mutex_enter(&os->os_lock);
672 dn = list_next(&os->os_dnodes, &dn_marker);
673 list_remove(&os->os_dnodes, &dn_marker);
674 } else {
675 dn = list_next(&os->os_dnodes, dn);
678 mutex_exit(&os->os_lock);
680 if (DMU_USERUSED_DNODE(os) != NULL) {
681 dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os));
682 dnode_evict_dbufs(DMU_USERUSED_DNODE(os));
684 dnode_evict_dbufs(DMU_META_DNODE(os));
688  * Objset eviction processing is split into two pieces.
689 * The first marks the objset as evicting, evicts any dbufs that
690 * have a refcount of zero, and then queues up the objset for the
691 * second phase of eviction. Once os->os_dnodes has been cleared by
692 * dnode_buf_pageout()->dnode_destroy(), the second phase is executed.
693 * The second phase closes the special dnodes, dequeues the objset from
694 * the list of those undergoing eviction, and finally frees the objset.
696 * NOTE: Due to asynchronous eviction processing (invocation of
697 * dnode_buf_pageout()), it is possible for the meta dnode for the
698 * objset to have no holds even though os->os_dnodes is not empty.
700 void
701 dmu_objset_evict(objset_t *os)
703 dsl_dataset_t *ds = os->os_dsl_dataset;
705 for (int t = 0; t < TXG_SIZE; t++)
706 ASSERT(!dmu_objset_is_dirty(os, t));
708 if (ds)
709 dsl_prop_unregister_all(ds, os);
711 if (os->os_sa)
712 sa_tear_down(os);
714 dmu_objset_evict_dbufs(os);
716 mutex_enter(&os->os_lock);
717 spa_evicting_os_register(os->os_spa, os);
718 if (list_is_empty(&os->os_dnodes)) {
719 mutex_exit(&os->os_lock);
720 dmu_objset_evict_done(os);
721 } else {
722 mutex_exit(&os->os_lock);
726 void
727 dmu_objset_evict_done(objset_t *os)
729 ASSERT3P(list_head(&os->os_dnodes), ==, NULL);
731 dnode_special_close(&os->os_meta_dnode);
732 if (DMU_USERUSED_DNODE(os)) {
733 dnode_special_close(&os->os_userused_dnode);
734 dnode_special_close(&os->os_groupused_dnode);
736 zil_free(os->os_zil);
738 arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
741 * This is a barrier to prevent the objset from going away in
742 * dnode_move() until we can safely ensure that the objset is still in
743 * use. We consider the objset valid before the barrier and invalid
744 * after the barrier.
746 rw_enter(&os_lock, RW_READER);
747 rw_exit(&os_lock);
749 mutex_destroy(&os->os_lock);
750 mutex_destroy(&os->os_obj_lock);
751 mutex_destroy(&os->os_user_ptr_lock);
752 spa_evicting_os_deregister(os->os_spa, os);
753 kmem_free(os, sizeof (objset_t));
756 timestruc_t
757 dmu_objset_snap_cmtime(objset_t *os)
759 return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
762 /* called from dsl for meta-objset */
763 objset_t *
764 dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
765 dmu_objset_type_t type, dmu_tx_t *tx)
767 objset_t *os;
768 dnode_t *mdn;
770 ASSERT(dmu_tx_is_syncing(tx));
772 if (ds != NULL)
773 VERIFY0(dmu_objset_from_ds(ds, &os));
774 else
775 VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os));
777 mdn = DMU_META_DNODE(os);
779 dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
780 DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);
783 * We don't want to have to increase the meta-dnode's nlevels
784  * later, because then we could end up doing it in quiescing context while
785 * we are also accessing it in open context.
787 * This precaution is not necessary for the MOS (ds == NULL),
788 * because the MOS is only updated in syncing context.
789 * This is most fortunate: the MOS is the only objset that
790 * needs to be synced multiple times as spa_sync() iterates
791 * to convergence, so minimizing its dn_nlevels matters.
793 if (ds != NULL) {
794 int levels = 1;
797 * Determine the number of levels necessary for the meta-dnode
798 * to contain DN_MAX_OBJECT dnodes. Note that in order to
799 * ensure that we do not overflow 64 bits, there has to be
800 * a nlevels that gives us a number of blocks > DN_MAX_OBJECT
801 * but < 2^64. Therefore,
802 * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT) (10) must be
803 * less than (64 - log2(DN_MAX_OBJECT)) (16).
805 while ((uint64_t)mdn->dn_nblkptr <<
806 (mdn->dn_datablkshift - DNODE_SHIFT +
807 (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
808 DN_MAX_OBJECT)
809 levels++;
811 mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
812 mdn->dn_nlevels = levels;
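        /*
         * Worked example (editor's addition, assuming the usual constants:
         * DNODE_BLOCK_SHIFT = 14, DNODE_SHIFT = 9, DN_MAX_INDBLKSHIFT = 17,
         * SPA_BLKPTRSHIFT = 7, dn_nblkptr = 3, DN_MAX_OBJECT = 2^48):
         * each 16K meta-dnode block holds 2^(14 - 9) = 32 dnodes, and each
         * 128K indirect block holds 2^(17 - 7) = 1024 blkptrs, so the loop
         * compares 3 * 32 * 1024^(levels - 1) against 2^48.  Five levels
         * reach only ~2^46.6, six reach ~2^56.6, so it settles on 6.
         */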
815 ASSERT(type != DMU_OST_NONE);
816 ASSERT(type != DMU_OST_ANY);
817 ASSERT(type < DMU_OST_NUMTYPES);
818 os->os_phys->os_type = type;
819 if (dmu_objset_userused_enabled(os)) {
820 os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
821 os->os_flags = os->os_phys->os_flags;
824 dsl_dataset_dirty(ds, tx);
826 return (os);
829 typedef struct dmu_objset_create_arg {
830 const char *doca_name;
831 cred_t *doca_cred;
832 void (*doca_userfunc)(objset_t *os, void *arg,
833 cred_t *cr, dmu_tx_t *tx);
834 void *doca_userarg;
835 dmu_objset_type_t doca_type;
836 uint64_t doca_flags;
837 } dmu_objset_create_arg_t;
839 /*ARGSUSED*/
840 static int
841 dmu_objset_create_check(void *arg, dmu_tx_t *tx)
843 dmu_objset_create_arg_t *doca = arg;
844 dsl_pool_t *dp = dmu_tx_pool(tx);
845 dsl_dir_t *pdd;
846 const char *tail;
847 int error;
849 if (strchr(doca->doca_name, '@') != NULL)
850 return (SET_ERROR(EINVAL));
852 if (strlen(doca->doca_name) >= ZFS_MAX_DATASET_NAME_LEN)
853 return (SET_ERROR(ENAMETOOLONG));
855 error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail);
856 if (error != 0)
857 return (error);
858 if (tail == NULL) {
859 dsl_dir_rele(pdd, FTAG);
860 return (SET_ERROR(EEXIST));
862 error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
863 doca->doca_cred);
864 dsl_dir_rele(pdd, FTAG);
866 return (error);
869 static void
870 dmu_objset_create_sync(void *arg, dmu_tx_t *tx)
872 dmu_objset_create_arg_t *doca = arg;
873 dsl_pool_t *dp = dmu_tx_pool(tx);
874 dsl_dir_t *pdd;
875 const char *tail;
876 dsl_dataset_t *ds;
877 uint64_t obj;
878 blkptr_t *bp;
879 objset_t *os;
881 VERIFY0(dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail));
883 obj = dsl_dataset_create_sync(pdd, tail, NULL, doca->doca_flags,
884 doca->doca_cred, tx);
886 VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
887 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
888 bp = dsl_dataset_get_blkptr(ds);
889 os = dmu_objset_create_impl(pdd->dd_pool->dp_spa,
890 ds, bp, doca->doca_type, tx);
891 rrw_exit(&ds->ds_bp_rwlock, FTAG);
893 if (doca->doca_userfunc != NULL) {
894 doca->doca_userfunc(os, doca->doca_userarg,
895 doca->doca_cred, tx);
898 spa_history_log_internal_ds(ds, "create", tx, "");
899 dsl_dataset_rele(ds, FTAG);
900 dsl_dir_rele(pdd, FTAG);
904 dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
905 void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
907 dmu_objset_create_arg_t doca;
909 doca.doca_name = name;
910 doca.doca_cred = CRED();
911 doca.doca_flags = flags;
912 doca.doca_userfunc = func;
913 doca.doca_userarg = arg;
914 doca.doca_type = type;
916 return (dsl_sync_task(name,
917 dmu_objset_create_check, dmu_objset_create_sync, &doca,
918 5, ZFS_SPACE_CHECK_NORMAL));
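/*
 * Illustrative sketch (editor's addition): creating a plain objset with no
 * initialization callback; dmu_objset_create_sync() above simply skips the
 * user function when it is NULL.  The dataset name is hypothetical.
 */
#if 0
static int
example_create(void)
{
        return (dmu_objset_create("tank/newfs", DMU_OST_ZFS, 0, NULL, NULL));
}
#endif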
921 typedef struct dmu_objset_clone_arg {
922 const char *doca_clone;
923 const char *doca_origin;
924 cred_t *doca_cred;
925 } dmu_objset_clone_arg_t;
927 /*ARGSUSED*/
928 static int
929 dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
931 dmu_objset_clone_arg_t *doca = arg;
932 dsl_dir_t *pdd;
933 const char *tail;
934 int error;
935 dsl_dataset_t *origin;
936 dsl_pool_t *dp = dmu_tx_pool(tx);
938 if (strchr(doca->doca_clone, '@') != NULL)
939 return (SET_ERROR(EINVAL));
941 if (strlen(doca->doca_clone) >= ZFS_MAX_DATASET_NAME_LEN)
942 return (SET_ERROR(ENAMETOOLONG));
944 error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail);
945 if (error != 0)
946 return (error);
947 if (tail == NULL) {
948 dsl_dir_rele(pdd, FTAG);
949 return (SET_ERROR(EEXIST));
952 error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
953 doca->doca_cred);
954 if (error != 0) {
955 dsl_dir_rele(pdd, FTAG);
956 return (SET_ERROR(EDQUOT));
958 dsl_dir_rele(pdd, FTAG);
960 error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin);
961 if (error != 0)
962 return (error);
964 /* You can only clone snapshots, not the head datasets. */
965 if (!origin->ds_is_snapshot) {
966 dsl_dataset_rele(origin, FTAG);
967 return (SET_ERROR(EINVAL));
969 dsl_dataset_rele(origin, FTAG);
971 return (0);
974 static void
975 dmu_objset_clone_sync(void *arg, dmu_tx_t *tx)
977 dmu_objset_clone_arg_t *doca = arg;
978 dsl_pool_t *dp = dmu_tx_pool(tx);
979 dsl_dir_t *pdd;
980 const char *tail;
981 dsl_dataset_t *origin, *ds;
982 uint64_t obj;
983 char namebuf[ZFS_MAX_DATASET_NAME_LEN];
985 VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail));
986 VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin));
988 obj = dsl_dataset_create_sync(pdd, tail, origin, 0,
989 doca->doca_cred, tx);
991 VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
992 dsl_dataset_name(origin, namebuf);
993 spa_history_log_internal_ds(ds, "clone", tx,
994 "origin=%s (%llu)", namebuf, origin->ds_object);
995 dsl_dataset_rele(ds, FTAG);
996 dsl_dataset_rele(origin, FTAG);
997 dsl_dir_rele(pdd, FTAG);
1001 dmu_objset_clone(const char *clone, const char *origin)
1003 dmu_objset_clone_arg_t doca;
1005 doca.doca_clone = clone;
1006 doca.doca_origin = origin;
1007 doca.doca_cred = CRED();
1009 return (dsl_sync_task(clone,
1010 dmu_objset_clone_check, dmu_objset_clone_sync, &doca,
1011 5, ZFS_SPACE_CHECK_NORMAL));
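/*
 * Illustrative sketch (editor's addition): only snapshots can serve as clone
 * origins, per dmu_objset_clone_check() above; a head dataset as origin
 * fails with EINVAL.  Names are hypothetical.
 */
#if 0
static int
example_clone(void)
{
        return (dmu_objset_clone("tank/clone", "tank/fs@snap"));
}
#endif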
1015 dmu_objset_snapshot_one(const char *fsname, const char *snapname)
1017 int err;
1018 char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
1019 nvlist_t *snaps = fnvlist_alloc();
1021 fnvlist_add_boolean(snaps, longsnap);
1022 strfree(longsnap);
1023 err = dsl_dataset_snapshot(snaps, NULL, NULL);
1024 fnvlist_free(snaps);
1025 return (err);
1028 static void
1029 dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx)
1031 dnode_t *dn;
1033 while (dn = list_head(list)) {
1034 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
1035 ASSERT(dn->dn_dbuf->db_data_pending);
1037 * Initialize dn_zio outside dnode_sync() because the
1038  * meta-dnode needs to set it outside dnode_sync().
1040 dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
1041 ASSERT(dn->dn_zio);
1043 ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
1044 list_remove(list, dn);
1046 if (newlist) {
1047 (void) dnode_add_ref(dn, newlist);
1048 list_insert_tail(newlist, dn);
1051 dnode_sync(dn, tx);
1055 /* ARGSUSED */
1056 static void
1057 dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
1059 blkptr_t *bp = zio->io_bp;
1060 objset_t *os = arg;
1061 dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
1063 ASSERT(!BP_IS_EMBEDDED(bp));
1064 ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET);
1065 ASSERT0(BP_GET_LEVEL(bp));
1068 * Update rootbp fill count: it should be the number of objects
1069 * allocated in the object set (not counting the "special"
1070 * objects that are stored in the objset_phys_t -- the meta
1071 * dnode and user/group accounting objects).
1073 bp->blk_fill = 0;
1074 for (int i = 0; i < dnp->dn_nblkptr; i++)
1075 bp->blk_fill += BP_GET_FILL(&dnp->dn_blkptr[i]);
1076 if (os->os_dsl_dataset != NULL)
1077 rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_WRITER, FTAG);
1078 *os->os_rootbp = *bp;
1079 if (os->os_dsl_dataset != NULL)
1080 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
1083 /* ARGSUSED */
1084 static void
1085 dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
1087 blkptr_t *bp = zio->io_bp;
1088 blkptr_t *bp_orig = &zio->io_bp_orig;
1089 objset_t *os = arg;
1091 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
1092 ASSERT(BP_EQUAL(bp, bp_orig));
1093 } else {
1094 dsl_dataset_t *ds = os->os_dsl_dataset;
1095 dmu_tx_t *tx = os->os_synctx;
1097 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
1098 dsl_dataset_block_born(ds, bp, tx);
1100 kmem_free(bp, sizeof (*bp));
1103 /* called from dsl */
1104 void
1105 dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
1107 int txgoff;
1108 zbookmark_phys_t zb;
1109 zio_prop_t zp;
1110 zio_t *zio;
1111 list_t *list;
1112 list_t *newlist = NULL;
1113 dbuf_dirty_record_t *dr;
1114 blkptr_t *blkptr_copy = kmem_alloc(sizeof (*os->os_rootbp), KM_SLEEP);
1115 *blkptr_copy = *os->os_rootbp;
1117 dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);
1119 ASSERT(dmu_tx_is_syncing(tx));
1120 /* XXX the write_done callback should really give us the tx... */
1121 os->os_synctx = tx;
1123 if (os->os_dsl_dataset == NULL) {
1125 * This is the MOS. If we have upgraded,
1126 * spa_max_replication() could change, so reset
1127 * os_copies here.
1129 os->os_copies = spa_max_replication(os->os_spa);
1133 * Create the root block IO
1135 SET_BOOKMARK(&zb, os->os_dsl_dataset ?
1136 os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
1137 ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
1138 arc_release(os->os_phys_buf, &os->os_phys_buf);
1140 dmu_write_policy(os, NULL, 0, 0, ZIO_COMPRESS_INHERIT, &zp);
1142 zio = arc_write(pio, os->os_spa, tx->tx_txg,
1143 blkptr_copy, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
1144 &zp, dmu_objset_write_ready, NULL, NULL, dmu_objset_write_done,
1145 os, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
1148 * Sync special dnodes - the parent IO for the sync is the root block
1150 DMU_META_DNODE(os)->dn_zio = zio;
1151 dnode_sync(DMU_META_DNODE(os), tx);
1153 os->os_phys->os_flags = os->os_flags;
1155 if (DMU_USERUSED_DNODE(os) &&
1156 DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
1157 DMU_USERUSED_DNODE(os)->dn_zio = zio;
1158 dnode_sync(DMU_USERUSED_DNODE(os), tx);
1159 DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
1160 dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
1163 txgoff = tx->tx_txg & TXG_MASK;
1165 if (dmu_objset_userused_enabled(os)) {
1166 newlist = &os->os_synced_dnodes;
1168 * We must create the list here because it uses the
1169 * dn_dirty_link[] of this txg.
1171 list_create(newlist, sizeof (dnode_t),
1172 offsetof(dnode_t, dn_dirty_link[txgoff]));
1175 dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], newlist, tx);
1176 dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx);
1178 list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
1179 while (dr = list_head(list)) {
1180 ASSERT0(dr->dr_dbuf->db_level);
1181 list_remove(list, dr);
1182 if (dr->dr_zio)
1183 zio_nowait(dr->dr_zio);
1186 /* Enable dnode backfill if enough objects have been freed. */
1187 if (os->os_freed_dnodes >= dmu_rescan_dnode_threshold) {
1188 os->os_rescan_dnodes = B_TRUE;
1189 os->os_freed_dnodes = 0;
1193 * Free intent log blocks up to this tx.
1195 zil_sync(os->os_zil, tx);
1196 os->os_phys->os_zil_header = os->os_zil_header;
1197 zio_nowait(zio);
1200 boolean_t
1201 dmu_objset_is_dirty(objset_t *os, uint64_t txg)
1203 return (!list_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]) ||
1204 !list_is_empty(&os->os_free_dnodes[txg & TXG_MASK]));
1207 static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];
1209 void
1210 dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
1212 used_cbs[ost] = cb;
1215 boolean_t
1216 dmu_objset_userused_enabled(objset_t *os)
1218 return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
1219 used_cbs[os->os_phys->os_type] != NULL &&
1220 DMU_USERUSED_DNODE(os) != NULL);
1223 typedef struct userquota_node {
1224 uint64_t uqn_id;
1225 int64_t uqn_delta;
1226 avl_node_t uqn_node;
1227 } userquota_node_t;
1229 typedef struct userquota_cache {
1230 avl_tree_t uqc_user_deltas;
1231 avl_tree_t uqc_group_deltas;
1232 } userquota_cache_t;
1234 static int
1235 userquota_compare(const void *l, const void *r)
1237 const userquota_node_t *luqn = l;
1238 const userquota_node_t *ruqn = r;
1240 if (luqn->uqn_id < ruqn->uqn_id)
1241 return (-1);
1242 if (luqn->uqn_id > ruqn->uqn_id)
1243 return (1);
1244 return (0);
1247 static void
1248 do_userquota_cacheflush(objset_t *os, userquota_cache_t *cache, dmu_tx_t *tx)
1250 void *cookie;
1251 userquota_node_t *uqn;
1253 ASSERT(dmu_tx_is_syncing(tx));
1255 cookie = NULL;
1256 while ((uqn = avl_destroy_nodes(&cache->uqc_user_deltas,
1257 &cookie)) != NULL) {
1258 VERIFY0(zap_increment_int(os, DMU_USERUSED_OBJECT,
1259 uqn->uqn_id, uqn->uqn_delta, tx));
1260 kmem_free(uqn, sizeof (*uqn));
1262 avl_destroy(&cache->uqc_user_deltas);
1264 cookie = NULL;
1265 while ((uqn = avl_destroy_nodes(&cache->uqc_group_deltas,
1266 &cookie)) != NULL) {
1267 VERIFY0(zap_increment_int(os, DMU_GROUPUSED_OBJECT,
1268 uqn->uqn_id, uqn->uqn_delta, tx));
1269 kmem_free(uqn, sizeof (*uqn));
1271 avl_destroy(&cache->uqc_group_deltas);
1274 static void
1275 userquota_update_cache(avl_tree_t *avl, uint64_t id, int64_t delta)
1277 userquota_node_t search = { .uqn_id = id };
1278 avl_index_t idx;
1280 userquota_node_t *uqn = avl_find(avl, &search, &idx);
1281 if (uqn == NULL) {
1282 uqn = kmem_zalloc(sizeof (*uqn), KM_SLEEP);
1283 uqn->uqn_id = id;
1284 avl_insert(avl, uqn, idx);
1286 uqn->uqn_delta += delta;
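/*
 * Editor's note: the AVL cache coalesces per-id deltas so that each uid/gid
 * results in at most one ZAP update per sync.  E.g. three updates for uid
 * 1000 of +512, +1024 and -512 leave a single node with uqn_delta = +1024,
 * flushed later by one zap_increment_int() call in
 * do_userquota_cacheflush().
 */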
1289 static void
1290 do_userquota_update(userquota_cache_t *cache, uint64_t used, uint64_t flags,
1291 uint64_t user, uint64_t group, boolean_t subtract)
1293 if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
1294 int64_t delta = DNODE_SIZE + used;
1295 if (subtract)
1296 delta = -delta;
1298 userquota_update_cache(&cache->uqc_user_deltas, user, delta);
1299 userquota_update_cache(&cache->uqc_group_deltas, group, delta);
1303 void
1304 dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
1306 dnode_t *dn;
1307 list_t *list = &os->os_synced_dnodes;
1308 userquota_cache_t cache = { 0 };
1310 ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os));
1312 avl_create(&cache.uqc_user_deltas, userquota_compare,
1313 sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
1314 avl_create(&cache.uqc_group_deltas, userquota_compare,
1315 sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
1317 while (dn = list_head(list)) {
1318 int flags;
1319 ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
1320 ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
1321 dn->dn_phys->dn_flags &
1322 DNODE_FLAG_USERUSED_ACCOUNTED);
1324 /* Allocate the user/groupused objects if necessary. */
1325 if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
1326 VERIFY0(zap_create_claim(os,
1327 DMU_USERUSED_OBJECT,
1328 DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
1329 VERIFY0(zap_create_claim(os,
1330 DMU_GROUPUSED_OBJECT,
1331 DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
1334 flags = dn->dn_id_flags;
1335 ASSERT(flags);
1336 if (flags & DN_ID_OLD_EXIST) {
1337 do_userquota_update(&cache,
1338 dn->dn_oldused, dn->dn_oldflags,
1339 dn->dn_olduid, dn->dn_oldgid, B_TRUE);
1341 if (flags & DN_ID_NEW_EXIST) {
1342 do_userquota_update(&cache,
1343 DN_USED_BYTES(dn->dn_phys),
1344 dn->dn_phys->dn_flags, dn->dn_newuid,
1345 dn->dn_newgid, B_FALSE);
1348 mutex_enter(&dn->dn_mtx);
1349 dn->dn_oldused = 0;
1350 dn->dn_oldflags = 0;
1351 if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
1352 dn->dn_olduid = dn->dn_newuid;
1353 dn->dn_oldgid = dn->dn_newgid;
1354 dn->dn_id_flags |= DN_ID_OLD_EXIST;
1355 if (dn->dn_bonuslen == 0)
1356 dn->dn_id_flags |= DN_ID_CHKED_SPILL;
1357 else
1358 dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1360 dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
1361 mutex_exit(&dn->dn_mtx);
1363 list_remove(list, dn);
1364 dnode_rele(dn, list);
1366 do_userquota_cacheflush(os, &cache, tx);
1370  * Returns a pointer to the data from which to find the uid/gid.
1372  * If a dirty record for the transaction group that is syncing can't
1373  * be found, then NULL is returned.  In the NULL case it is assumed
1374  * the uid/gid aren't changing.
1376 static void *
1377 dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
1379 dbuf_dirty_record_t *dr, **drp;
1380 void *data;
1382 if (db->db_dirtycnt == 0)
1383 return (db->db.db_data); /* Nothing is changing */
1385 for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
1386 if (dr->dr_txg == tx->tx_txg)
1387 break;
1389 if (dr == NULL) {
1390 data = NULL;
1391 } else {
1392 dnode_t *dn;
1394 DB_DNODE_ENTER(dr->dr_dbuf);
1395 dn = DB_DNODE(dr->dr_dbuf);
1397 if (dn->dn_bonuslen == 0 &&
1398 dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
1399 data = dr->dt.dl.dr_data->b_data;
1400 else
1401 data = dr->dt.dl.dr_data;
1403 DB_DNODE_EXIT(dr->dr_dbuf);
1406 return (data);
1409 void
1410 dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
1412 objset_t *os = dn->dn_objset;
1413 void *data = NULL;
1414 dmu_buf_impl_t *db = NULL;
1415 uint64_t *user = NULL;
1416 uint64_t *group = NULL;
1417 int flags = dn->dn_id_flags;
1418 int error;
1419 boolean_t have_spill = B_FALSE;
1421 if (!dmu_objset_userused_enabled(dn->dn_objset))
1422 return;
1424 if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
1425 DN_ID_CHKED_SPILL)))
1426 return;
1428 if (before && dn->dn_bonuslen != 0)
1429 data = DN_BONUS(dn->dn_phys);
1430 else if (!before && dn->dn_bonuslen != 0) {
1431 if (dn->dn_bonus) {
1432 db = dn->dn_bonus;
1433 mutex_enter(&db->db_mtx);
1434 data = dmu_objset_userquota_find_data(db, tx);
1435 } else {
1436 data = DN_BONUS(dn->dn_phys);
1438 } else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
1439 int rf = 0;
1441 if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
1442 rf |= DB_RF_HAVESTRUCT;
1443 error = dmu_spill_hold_by_dnode(dn,
1444 rf | DB_RF_MUST_SUCCEED,
1445 FTAG, (dmu_buf_t **)&db);
1446 ASSERT(error == 0);
1447 mutex_enter(&db->db_mtx);
1448 data = (before) ? db->db.db_data :
1449 dmu_objset_userquota_find_data(db, tx);
1450 have_spill = B_TRUE;
1451 } else {
1452 mutex_enter(&dn->dn_mtx);
1453 dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1454 mutex_exit(&dn->dn_mtx);
1455 return;
1458 if (before) {
1459 ASSERT(data);
1460 user = &dn->dn_olduid;
1461 group = &dn->dn_oldgid;
1462 } else if (data) {
1463 user = &dn->dn_newuid;
1464 group = &dn->dn_newgid;
1468  * Must always call the callback in case the object
1469  * type has changed and the new type isn't an object type to track.
1471 error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data,
1472 user, group);
1475 * Preserve existing uid/gid when the callback can't determine
1476 * what the new uid/gid are and the callback returned EEXIST.
1477 * The EEXIST error tells us to just use the existing uid/gid.
1478 * If we don't know what the old values are then just assign
1479 * them to 0, since that is a new file being created.
1481 if (!before && data == NULL && error == EEXIST) {
1482 if (flags & DN_ID_OLD_EXIST) {
1483 dn->dn_newuid = dn->dn_olduid;
1484 dn->dn_newgid = dn->dn_oldgid;
1485 } else {
1486 dn->dn_newuid = 0;
1487 dn->dn_newgid = 0;
1489 error = 0;
1492 if (db)
1493 mutex_exit(&db->db_mtx);
1495 mutex_enter(&dn->dn_mtx);
1496 if (error == 0 && before)
1497 dn->dn_id_flags |= DN_ID_OLD_EXIST;
1498 if (error == 0 && !before)
1499 dn->dn_id_flags |= DN_ID_NEW_EXIST;
1501 if (have_spill) {
1502 dn->dn_id_flags |= DN_ID_CHKED_SPILL;
1503 } else {
1504 dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1506 mutex_exit(&dn->dn_mtx);
1507 if (have_spill)
1508 dmu_buf_rele((dmu_buf_t *)db, FTAG);
1511 boolean_t
1512 dmu_objset_userspace_present(objset_t *os)
1514 return (os->os_phys->os_flags &
1515 OBJSET_FLAG_USERACCOUNTING_COMPLETE);
1519 dmu_objset_userspace_upgrade(objset_t *os)
1521 uint64_t obj;
1522 int err = 0;
1524 if (dmu_objset_userspace_present(os))
1525 return (0);
1526 if (!dmu_objset_userused_enabled(os))
1527 return (SET_ERROR(ENOTSUP));
1528 if (dmu_objset_is_snapshot(os))
1529 return (SET_ERROR(EINVAL));
1532 * We simply need to mark every object dirty, so that it will be
1533  * synced out and thereby accounted. If this is called
1534 * concurrently, or if we already did some work before crashing,
1535 * that's fine, since we track each object's accounted state
1536 * independently.
1539 for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
1540 dmu_tx_t *tx;
1541 dmu_buf_t *db;
1542 int objerr;
1544 if (issig(JUSTLOOKING) && issig(FORREAL))
1545 return (SET_ERROR(EINTR));
1547 objerr = dmu_bonus_hold(os, obj, FTAG, &db);
1548 if (objerr != 0)
1549 continue;
1550 tx = dmu_tx_create(os);
1551 dmu_tx_hold_bonus(tx, obj);
1552 objerr = dmu_tx_assign(tx, TXG_WAIT);
1553 if (objerr != 0) {
1554 dmu_tx_abort(tx);
1555 continue;
1557 dmu_buf_will_dirty(db, tx);
1558 dmu_buf_rele(db, FTAG);
1559 dmu_tx_commit(tx);
1562 os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
1563 txg_wait_synced(dmu_objset_pool(os), 0);
1564 return (0);
1567 void
1568 dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
1569 uint64_t *usedobjsp, uint64_t *availobjsp)
1571 dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
1572 usedobjsp, availobjsp);
1575 uint64_t
1576 dmu_objset_fsid_guid(objset_t *os)
1578 return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
1581 void
1582 dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
1584 stat->dds_type = os->os_phys->os_type;
1585 if (os->os_dsl_dataset)
1586 dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
1589 void
1590 dmu_objset_stats(objset_t *os, nvlist_t *nv)
1592 ASSERT(os->os_dsl_dataset ||
1593 os->os_phys->os_type == DMU_OST_META);
1595 if (os->os_dsl_dataset != NULL)
1596 dsl_dataset_stats(os->os_dsl_dataset, nv);
1598 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
1599 os->os_phys->os_type);
1600 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
1601 dmu_objset_userspace_present(os));
1605 dmu_objset_is_snapshot(objset_t *os)
1607 if (os->os_dsl_dataset != NULL)
1608 return (os->os_dsl_dataset->ds_is_snapshot);
1609 else
1610 return (B_FALSE);
1614 dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
1615 boolean_t *conflict)
1617 dsl_dataset_t *ds = os->os_dsl_dataset;
1618 uint64_t ignored;
1620 if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
1621 return (SET_ERROR(ENOENT));
1623 return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
1624 dsl_dataset_phys(ds)->ds_snapnames_zapobj, name, 8, 1, &ignored,
1625 MT_FIRST, real, maxlen, conflict));
1629 dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
1630 uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
1632 dsl_dataset_t *ds = os->os_dsl_dataset;
1633 zap_cursor_t cursor;
1634 zap_attribute_t attr;
1636 ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));
1638 if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
1639 return (SET_ERROR(ENOENT));
1641 zap_cursor_init_serialized(&cursor,
1642 ds->ds_dir->dd_pool->dp_meta_objset,
1643 dsl_dataset_phys(ds)->ds_snapnames_zapobj, *offp);
1645 if (zap_cursor_retrieve(&cursor, &attr) != 0) {
1646 zap_cursor_fini(&cursor);
1647 return (SET_ERROR(ENOENT));
1650 if (strlen(attr.za_name) + 1 > namelen) {
1651 zap_cursor_fini(&cursor);
1652 return (SET_ERROR(ENAMETOOLONG));
1655 (void) strcpy(name, attr.za_name);
1656 if (idp)
1657 *idp = attr.za_first_integer;
1658 if (case_conflict)
1659 *case_conflict = attr.za_normalization_conflict;
1660 zap_cursor_advance(&cursor);
1661 *offp = zap_cursor_serialize(&cursor);
1662 zap_cursor_fini(&cursor);
1664 return (0);
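/*
 * Illustrative sketch (editor's addition): walking all snapshots of an
 * objset with the serialized cursor above.  *offp doubles as the resume
 * cookie, idp and case_conflict may be NULL, and the caller must hold the
 * pool config lock (see the ASSERT).
 */
#if 0
static void
example_list_snapshots(objset_t *os)
{
        char snap[ZFS_MAX_DATASET_NAME_LEN];
        uint64_t off = 0;

        while (dmu_snapshot_list_next(os, sizeof (snap), snap,
            NULL, &off, NULL) == 0) {
                /* ... consume snap ... */
        }
}
#endif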
1668 dmu_dir_list_next(objset_t *os, int namelen, char *name,
1669 uint64_t *idp, uint64_t *offp)
1671 dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
1672 zap_cursor_t cursor;
1673 zap_attribute_t attr;
1675 /* there is no next dir on a snapshot! */
1676 if (os->os_dsl_dataset->ds_object !=
1677 dsl_dir_phys(dd)->dd_head_dataset_obj)
1678 return (SET_ERROR(ENOENT));
1680 zap_cursor_init_serialized(&cursor,
1681 dd->dd_pool->dp_meta_objset,
1682 dsl_dir_phys(dd)->dd_child_dir_zapobj, *offp);
1684 if (zap_cursor_retrieve(&cursor, &attr) != 0) {
1685 zap_cursor_fini(&cursor);
1686 return (SET_ERROR(ENOENT));
1689 if (strlen(attr.za_name) + 1 > namelen) {
1690 zap_cursor_fini(&cursor);
1691 return (SET_ERROR(ENAMETOOLONG));
1694 (void) strcpy(name, attr.za_name);
1695 if (idp)
1696 *idp = attr.za_first_integer;
1697 zap_cursor_advance(&cursor);
1698 *offp = zap_cursor_serialize(&cursor);
1699 zap_cursor_fini(&cursor);
1701 return (0);
1704 typedef struct dmu_objset_find_ctx {
1705 taskq_t *dc_tq;
1706 dsl_pool_t *dc_dp;
1707 uint64_t dc_ddobj;
1708 char *dc_ddname; /* last component of ddobj's name */
1709 int (*dc_func)(dsl_pool_t *, dsl_dataset_t *, void *);
1710 void *dc_arg;
1711 int dc_flags;
1712 kmutex_t *dc_error_lock;
1713 int *dc_error;
1714 } dmu_objset_find_ctx_t;
1716 static void
1717 dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp)
1719 dsl_pool_t *dp = dcp->dc_dp;
1720 dsl_dir_t *dd;
1721 dsl_dataset_t *ds;
1722 zap_cursor_t zc;
1723 zap_attribute_t *attr;
1724 uint64_t thisobj;
1725 int err = 0;
1727 /* don't process if there already was an error */
1728 if (*dcp->dc_error != 0)
1729 goto out;
1732 * Note: passing the name (dc_ddname) here is optional, but it
1733 * improves performance because we don't need to call
1734 * zap_value_search() to determine the name.
1736 err = dsl_dir_hold_obj(dp, dcp->dc_ddobj, dcp->dc_ddname, FTAG, &dd);
1737 if (err != 0)
1738 goto out;
1740 /* Don't visit hidden ($MOS & $ORIGIN) objsets. */
1741 if (dd->dd_myname[0] == '$') {
1742 dsl_dir_rele(dd, FTAG);
1743 goto out;
1746 thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
1747 attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
1750 * Iterate over all children.
1752 if (dcp->dc_flags & DS_FIND_CHILDREN) {
1753 for (zap_cursor_init(&zc, dp->dp_meta_objset,
1754 dsl_dir_phys(dd)->dd_child_dir_zapobj);
1755 zap_cursor_retrieve(&zc, attr) == 0;
1756 (void) zap_cursor_advance(&zc)) {
1757 ASSERT3U(attr->za_integer_length, ==,
1758 sizeof (uint64_t));
1759 ASSERT3U(attr->za_num_integers, ==, 1);
1761 dmu_objset_find_ctx_t *child_dcp =
1762 kmem_alloc(sizeof (*child_dcp), KM_SLEEP);
1763 *child_dcp = *dcp;
1764 child_dcp->dc_ddobj = attr->za_first_integer;
1765 child_dcp->dc_ddname = spa_strdup(attr->za_name);
1766 if (dcp->dc_tq != NULL)
1767 (void) taskq_dispatch(dcp->dc_tq,
1768 dmu_objset_find_dp_cb, child_dcp, TQ_SLEEP);
1769 else
1770 dmu_objset_find_dp_impl(child_dcp);
1772 zap_cursor_fini(&zc);
1776 * Iterate over all snapshots.
1778 if (dcp->dc_flags & DS_FIND_SNAPSHOTS) {
1779 dsl_dataset_t *ds;
1780 err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
1782 if (err == 0) {
1783 uint64_t snapobj;
1785 snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
1786 dsl_dataset_rele(ds, FTAG);
1788 for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
1789 zap_cursor_retrieve(&zc, attr) == 0;
1790 (void) zap_cursor_advance(&zc)) {
1791 ASSERT3U(attr->za_integer_length, ==,
1792 sizeof (uint64_t));
1793 ASSERT3U(attr->za_num_integers, ==, 1);
1795 err = dsl_dataset_hold_obj(dp,
1796 attr->za_first_integer, FTAG, &ds);
1797 if (err != 0)
1798 break;
1799 err = dcp->dc_func(dp, ds, dcp->dc_arg);
1800 dsl_dataset_rele(ds, FTAG);
1801 if (err != 0)
1802 break;
1804 zap_cursor_fini(&zc);
1808 kmem_free(attr, sizeof (zap_attribute_t));
1810 if (err != 0) {
1811 dsl_dir_rele(dd, FTAG);
1812 goto out;
1816 * Apply to self.
1818 err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
1821 * Note: we hold the dir while calling dsl_dataset_hold_obj() so
1822 * that the dir will remain cached, and we won't have to re-instantiate
1823 * it (which could be expensive due to finding its name via
1824 * zap_value_search()).
1826 dsl_dir_rele(dd, FTAG);
1827 if (err != 0)
1828 goto out;
1829 err = dcp->dc_func(dp, ds, dcp->dc_arg);
1830 dsl_dataset_rele(ds, FTAG);
1832 out:
1833 if (err != 0) {
1834 mutex_enter(dcp->dc_error_lock);
1835 /* only keep first error */
1836 if (*dcp->dc_error == 0)
1837 *dcp->dc_error = err;
1838 mutex_exit(dcp->dc_error_lock);
1841 if (dcp->dc_ddname != NULL)
1842 spa_strfree(dcp->dc_ddname);
1843 kmem_free(dcp, sizeof (*dcp));
1846 static void
1847 dmu_objset_find_dp_cb(void *arg)
1849 dmu_objset_find_ctx_t *dcp = arg;
1850 dsl_pool_t *dp = dcp->dc_dp;
1853  * We need to take the pool_config_lock here, as there are several
1854  * assert(pool_config_held) checks down the stack. Getting a lock via
1855 * dsl_pool_config_enter is risky, as it might be stalled by a
1856 * pending writer. This would deadlock, as the write lock can
1857 * only be granted when our parent thread gives up the lock.
1858 * The _prio interface gives us priority over a pending writer.
1860 dsl_pool_config_enter_prio(dp, FTAG);
1862 dmu_objset_find_dp_impl(dcp);
1864 dsl_pool_config_exit(dp, FTAG);
1868 * Find objsets under and including ddobj, call func(ds) on each.
1869 * The order for the enumeration is completely undefined.
1870 * func is called with dsl_pool_config held.
1873 dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj,
1874 int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags)
1876 int error = 0;
1877 taskq_t *tq = NULL;
1878 int ntasks;
1879 dmu_objset_find_ctx_t *dcp;
1880 kmutex_t err_lock;
1882 mutex_init(&err_lock, NULL, MUTEX_DEFAULT, NULL);
1883 dcp = kmem_alloc(sizeof (*dcp), KM_SLEEP);
1884 dcp->dc_tq = NULL;
1885 dcp->dc_dp = dp;
1886 dcp->dc_ddobj = ddobj;
1887 dcp->dc_ddname = NULL;
1888 dcp->dc_func = func;
1889 dcp->dc_arg = arg;
1890 dcp->dc_flags = flags;
1891 dcp->dc_error_lock = &err_lock;
1892 dcp->dc_error = &error;
1894 if ((flags & DS_FIND_SERIALIZE) || dsl_pool_config_held_writer(dp)) {
1896 * In case a write lock is held we can't make use of
1897 * parallelism, as down the stack of the worker threads
1898 * the lock is asserted via dsl_pool_config_held.
1899 * In case of a read lock this is solved by getting a read
1900 * lock in each worker thread, which isn't possible in case
1901 * of a writer lock. So we fall back to the synchronous path
1902 * here.
1903 * In the future it might be possible to get some magic into
1904 * dsl_pool_config_held in a way that it returns true for
1905 * the worker threads so that a single lock held from this
1906 * thread suffices. For now, stay single threaded.
1908 dmu_objset_find_dp_impl(dcp);
1909 mutex_destroy(&err_lock);
1911 return (error);
1914 ntasks = dmu_find_threads;
1915 if (ntasks == 0)
1916 ntasks = vdev_count_leaves(dp->dp_spa) * 4;
1917 tq = taskq_create("dmu_objset_find", ntasks, minclsyspri, ntasks,
1918 INT_MAX, 0);
1919 if (tq == NULL) {
1920 kmem_free(dcp, sizeof (*dcp));
1921 mutex_destroy(&err_lock);
1923 return (SET_ERROR(ENOMEM));
1925 dcp->dc_tq = tq;
1927 /* dcp will be freed by task */
1928 (void) taskq_dispatch(tq, dmu_objset_find_dp_cb, dcp, TQ_SLEEP);
1931 * PORTING: this code relies on the property of taskq_wait to wait
1932 * until no more tasks are queued and no more tasks are active. As
1933  * we always queue new tasks from within other tasks, taskq_wait
1934 * reliably waits for the full recursion to finish, even though we
1935 * enqueue new tasks after taskq_wait has been called.
1936 * On platforms other than illumos, taskq_wait may not have this
1937 * property.
1939 taskq_wait(tq);
1940 taskq_destroy(tq);
1941 mutex_destroy(&err_lock);
1943 return (error);
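/*
 * Illustrative sketch (editor's addition): counting every dataset and
 * snapshot in a pool via the parallel traversal above.  The callback runs
 * with the pool config lock held and may run concurrently from several
 * taskq threads, hence the atomic increment.  Starting from
 * dp->dp_root_dir_obj is an assumption here, mirroring typical callers.
 */
#if 0
static int
count_ds_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
        atomic_inc_64(arg);     /* callbacks may run concurrently */
        return (0);
}

static uint64_t
count_datasets(dsl_pool_t *dp)
{
        uint64_t n = 0;

        (void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj, count_ds_cb,
            &n, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
        return (n);
}
#endif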
1947 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
1948 * The dp_config_rwlock must not be held when this is called, and it
1949 * will not be held when the callback is called.
1950 * Therefore this function should only be used when the pool is not changing
1951 * (e.g. in syncing context), or the callback can deal with the possible races.
1953 static int
1954 dmu_objset_find_impl(spa_t *spa, const char *name,
1955 int func(const char *, void *), void *arg, int flags)
1957 dsl_dir_t *dd;
1958 dsl_pool_t *dp = spa_get_dsl(spa);
1959 dsl_dataset_t *ds;
1960 zap_cursor_t zc;
1961 zap_attribute_t *attr;
1962 char *child;
1963 uint64_t thisobj;
1964 int err;
1966 dsl_pool_config_enter(dp, FTAG);
1968 err = dsl_dir_hold(dp, name, FTAG, &dd, NULL);
1969 if (err != 0) {
1970 dsl_pool_config_exit(dp, FTAG);
1971 return (err);
1974 /* Don't visit hidden ($MOS & $ORIGIN) objsets. */
1975 if (dd->dd_myname[0] == '$') {
1976 dsl_dir_rele(dd, FTAG);
1977 dsl_pool_config_exit(dp, FTAG);
1978 return (0);
1981 thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
1982 attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
1985 * Iterate over all children.
1987 if (flags & DS_FIND_CHILDREN) {
1988 for (zap_cursor_init(&zc, dp->dp_meta_objset,
1989 dsl_dir_phys(dd)->dd_child_dir_zapobj);
1990 zap_cursor_retrieve(&zc, attr) == 0;
1991 (void) zap_cursor_advance(&zc)) {
1992 ASSERT3U(attr->za_integer_length, ==,
1993 sizeof (uint64_t));
1994 ASSERT3U(attr->za_num_integers, ==, 1);
1996 child = kmem_asprintf("%s/%s", name, attr->za_name);
1997 dsl_pool_config_exit(dp, FTAG);
1998 err = dmu_objset_find_impl(spa, child,
1999 func, arg, flags);
2000 dsl_pool_config_enter(dp, FTAG);
2001 strfree(child);
2002 if (err != 0)
2003 break;
2005 zap_cursor_fini(&zc);
2007 if (err != 0) {
2008 dsl_dir_rele(dd, FTAG);
2009 dsl_pool_config_exit(dp, FTAG);
2010 kmem_free(attr, sizeof (zap_attribute_t));
2011 return (err);
2016 * Iterate over all snapshots.
2018 if (flags & DS_FIND_SNAPSHOTS) {
2019 err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
2021 if (err == 0) {
2022 uint64_t snapobj;
2024 snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
2025 dsl_dataset_rele(ds, FTAG);
2027 for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
2028 zap_cursor_retrieve(&zc, attr) == 0;
2029 (void) zap_cursor_advance(&zc)) {
2030 ASSERT3U(attr->za_integer_length, ==,
2031 sizeof (uint64_t));
2032 ASSERT3U(attr->za_num_integers, ==, 1);
2034 child = kmem_asprintf("%s@%s",
2035 name, attr->za_name);
2036 dsl_pool_config_exit(dp, FTAG);
2037 err = func(child, arg);
2038 dsl_pool_config_enter(dp, FTAG);
2039 strfree(child);
2040 if (err != 0)
2041 break;
2043 zap_cursor_fini(&zc);
2047 dsl_dir_rele(dd, FTAG);
2048 kmem_free(attr, sizeof (zap_attribute_t));
2049 dsl_pool_config_exit(dp, FTAG);
2051 if (err != 0)
2052 return (err);
2054 /* Apply to self. */
2055 return (func(name, arg));
2059 * See comment above dmu_objset_find_impl().
2062 dmu_objset_find(char *name, int func(const char *, void *), void *arg,
2063 int flags)
2065 spa_t *spa;
2066 int error;
2068 error = spa_open(name, &spa, FTAG);
2069 if (error != 0)
2070 return (error);
2071 error = dmu_objset_find_impl(spa, name, func, arg, flags);
2072 spa_close(spa, FTAG);
2073 return (error);
2076 void
2077 dmu_objset_set_user(objset_t *os, void *user_ptr)
2079 ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
2080 os->os_user_ptr = user_ptr;
2083 void *
2084 dmu_objset_get_user(objset_t *os)
2086 ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
2087 return (os->os_user_ptr);
2091 * Determine name of filesystem, given name of snapshot.
2092 * buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes
2095 dmu_fsname(const char *snapname, char *buf)
2097 char *atp = strchr(snapname, '@');
2098 if (atp == NULL)
2099 return (SET_ERROR(EINVAL));
2100 if (atp - snapname >= ZFS_MAX_DATASET_NAME_LEN)
2101 return (SET_ERROR(ENAMETOOLONG));
2102 (void) strlcpy(buf, snapname, atp - snapname + 1);
2103 return (0);
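/*
 * Illustrative sketch (editor's addition): dmu_fsname() copies only the
 * filesystem component, e.g. "tank/fs@today" yields "tank/fs" in buf; a
 * name without '@' fails with EINVAL.
 */
#if 0
static void
example_fsname(void)
{
        char buf[ZFS_MAX_DATASET_NAME_LEN];

        VERIFY0(dmu_fsname("tank/fs@today", buf));      /* buf = "tank/fs" */
}
#endif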