/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2015, STRATO AG, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 Nexenta Systems, Inc.
 */

/* Portions Copyright 2010 Robert Milkowski */
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/zfeature.h>
/*
 * Needed to close a window in dnode_move() that allows the objset to be
 * freed before it can be safely accessed.
 */
krwlock_t os_lock;
/*
 * Tunable to override the maximum number of threads for the
 * parallelization of dmu_objset_find_dp, needed to speed up the import
 * of pools with many datasets.
 * Default is 4 times the number of leaf vdevs.
 */
int dmu_find_threads = 0;
/*
 * Backfill lower metadnode objects after this many have been freed.
 * Backfilling negatively impacts object creation rates, so only do it
 * if there are enough holes to fill.
 */
int dmu_rescan_dnode_threshold = 131072;
static void dmu_objset_find_dp_cb(void *arg);
void
dmu_objset_init(void)
{
	rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
}
spa_t *
dmu_objset_spa(objset_t *os)
{
	return (os->os_spa);
}

zilog_t *
dmu_objset_zil(objset_t *os)
{
	return (os->os_zil);
}

dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
	dsl_dataset_t *ds;

	if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
		return (ds->ds_dir->dd_pool);
	else
		return (spa_get_dsl(os->os_spa));
}

dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
	return (os->os_dsl_dataset);
}

dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
	return (os->os_phys->os_type);
}

void
dmu_objset_name(objset_t *os, char *buf)
{
	dsl_dataset_name(os->os_dsl_dataset, buf);
}

uint64_t
dmu_objset_id(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	return (ds ? ds->ds_object : 0);
}

zfs_sync_type_t
dmu_objset_syncprop(objset_t *os)
{
	return (os->os_sync);
}

zfs_logbias_op_t
dmu_objset_logbias(objset_t *os)
{
	return (os->os_logbias);
}
static void
checksum_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}

static void
compression_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval != ZIO_COMPRESS_INHERIT);

	os->os_compress = zio_compress_select(os->os_spa, newval,
	    ZIO_COMPRESS_ON);
}

static void
copies_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval <= spa_max_replication(os->os_spa));

	os->os_copies = newval;
}

static void
dedup_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;
	spa_t *spa = os->os_spa;
	enum zio_checksum checksum;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);

	os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
	os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
}

static void
primary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_primary_cache = newval;
}

static void
secondary_cache_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
	    newval == ZFS_CACHE_METADATA);

	os->os_secondary_cache = newval;
}

static void
sync_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
	    newval == ZFS_SYNC_DISABLED);

	os->os_sync = newval;
	if (os->os_zil)
		zil_set_sync(os->os_zil, newval);
}

static void
redundant_metadata_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval == ZFS_REDUNDANT_METADATA_ALL ||
	    newval == ZFS_REDUNDANT_METADATA_MOST);

	os->os_redundant_metadata = newval;
}

static void
logbias_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
	    newval == ZFS_LOGBIAS_THROUGHPUT);
	os->os_logbias = newval;
	if (os->os_zil)
		zil_set_logbias(os->os_zil, newval);
}

static void
recordsize_changed_cb(void *arg, uint64_t newval)
{
	objset_t *os = arg;

	os->os_recordsize = newval;
}
void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
	osp->os_flags = BSWAP_64(osp->os_flags);
	if (size == sizeof (objset_phys_t)) {
		dnode_byteswap(&osp->os_userused_dnode);
		dnode_byteswap(&osp->os_groupused_dnode);
	}
}
/*
 * The hash is a CRC-based hash of the objset_t pointer and the object number.
 */
static uint64_t
dnode_hash(const objset_t *os, uint64_t obj)
{
	uintptr_t osv = (uintptr_t)os;
	uint64_t crc = -1ULL;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
	/*
	 * The low 6 bits of the pointer don't have much entropy, because
	 * the objset_t is larger than 2^6 bytes long.
	 */
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 16)) & 0xFF];

	crc ^= (osv >> 14) ^ (obj >> 24);

	return (crc);
}
static unsigned int
dnode_multilist_index_func(multilist_t *ml, void *obj)
{
	dnode_t *dn = obj;
	return (dnode_hash(dn->dn_objset, dn->dn_object) %
	    multilist_get_num_sublists(ml));
}
/*
 * Instantiates the objset_t in-memory structure corresponding to the
 * objset_phys_t that's pointed to by the specified blkptr_t.
 */
int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_t **osp)
{
	objset_t *os;
	int i, err;

	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

	/*
	 * The $ORIGIN dataset (if it exists) doesn't have an associated
	 * objset, so there's no reason to open it. The $ORIGIN dataset
	 * will not exist on pools older than SPA_VERSION_ORIGIN.
	 */
	if (ds != NULL && spa_get_dsl(spa) != NULL &&
	    spa_get_dsl(spa)->dp_origin_snap != NULL) {
		ASSERT3P(ds->ds_dir, !=,
		    spa_get_dsl(spa)->dp_origin_snap->ds_dir);
	}

	os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
	os->os_dsl_dataset = ds;
	os->os_spa = spa;
	os->os_rootbp = bp;
	if (!BP_IS_HOLE(os->os_rootbp)) {
		arc_flags_t aflags = ARC_FLAG_WAIT;
		zbookmark_phys_t zb;
		SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
		    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

		if (DMU_OS_IS_L2CACHEABLE(os))
			aflags |= ARC_FLAG_L2CACHE;

		dprintf_bp(os->os_rootbp, "reading %s", "");
		err = arc_read(NULL, spa, os->os_rootbp,
		    arc_getbuf_func, &os->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err != 0) {
			kmem_free(os, sizeof (objset_t));
			/* convert checksum errors into IO errors */
			if (err == ECKSUM)
				err = SET_ERROR(EIO);
			return (err);
		}

		/* Increase the blocksize if we are permitted. */
		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
		    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
			arc_buf_t *buf = arc_alloc_buf(spa, &os->os_phys_buf,
			    ARC_BUFC_METADATA, sizeof (objset_phys_t));
			bzero(buf->b_data, sizeof (objset_phys_t));
			bcopy(os->os_phys_buf->b_data, buf->b_data,
			    arc_buf_size(os->os_phys_buf));
			arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
			os->os_phys_buf = buf;
		}

		os->os_phys = os->os_phys_buf->b_data;
		os->os_flags = os->os_phys->os_flags;
	} else {
		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
		os->os_phys_buf = arc_alloc_buf(spa, &os->os_phys_buf,
		    ARC_BUFC_METADATA, size);
		os->os_phys = os->os_phys_buf->b_data;
		bzero(os->os_phys, size);
	}
	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off). Snapshots don't need to know about
	 * checksum/compression/copies.
	 */
	if (ds != NULL) {
		boolean_t needlock = B_FALSE;

		/*
		 * Note: it's valid to open the objset if the dataset is
		 * long-held, in which case the pool_config lock will not
		 * be held.
		 */
		if (!dsl_pool_config_held(dmu_objset_pool(os))) {
			needlock = B_TRUE;
			dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
		}
		err = dsl_prop_register(ds,
		    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
		    primary_cache_changed_cb, os);
		if (err == 0) {
			err = dsl_prop_register(ds,
			    zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
			    secondary_cache_changed_cb, os);
		}
		if (!ds->ds_is_snapshot) {
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
				    checksum_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    compression_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COPIES),
				    copies_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_DEDUP),
				    dedup_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
				    logbias_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_SYNC),
				    sync_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(
				    ZFS_PROP_REDUNDANT_METADATA),
				    redundant_metadata_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
				    recordsize_changed_cb, os);
			}
		}
		if (needlock)
			dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
		if (err != 0) {
			arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	} else {
		/* It's the meta-objset. */
		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		os->os_compress = ZIO_COMPRESS_ON;
		os->os_copies = spa_max_replication(spa);
		os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
		os->os_dedup_verify = B_FALSE;
		os->os_logbias = ZFS_LOGBIAS_LATENCY;
		os->os_sync = ZFS_SYNC_STANDARD;
		os->os_primary_cache = ZFS_CACHE_ALL;
		os->os_secondary_cache = ZFS_CACHE_ALL;
	}

	if (ds == NULL || !ds->ds_is_snapshot)
		os->os_zil_header = os->os_phys->os_zil_header;
	os->os_zil = zil_alloc(os, &os->os_zil_header);

	for (i = 0; i < TXG_SIZE; i++) {
		os->os_dirty_dnodes[i] = multilist_create(sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]),
		    dnode_multilist_index_func);
	}
	list_create(&os->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_userused_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);

	dnode_special_open(os, &os->os_phys->os_meta_dnode,
	    DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
	if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
		dnode_special_open(os, &os->os_phys->os_userused_dnode,
		    DMU_USERUSED_OBJECT, &os->os_userused_dnode);
		dnode_special_open(os, &os->os_phys->os_groupused_dnode,
		    DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode);
	}

	*osp = os;
	return (0);
}
int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
	int err = 0;

	/*
	 * We shouldn't be doing anything with dsl_dataset_t's unless the
	 * pool_config lock is held, or the dataset is long-held.
	 */
	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool) ||
	    dsl_dataset_long_held(ds));

	mutex_enter(&ds->ds_opening_lock);
	if (ds->ds_objset == NULL) {
		objset_t *os;
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, dsl_dataset_get_blkptr(ds), &os);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);

		if (err == 0) {
			mutex_enter(&ds->ds_lock);
			ASSERT(ds->ds_objset == NULL);
			ds->ds_objset = os;
			mutex_exit(&ds->ds_lock);
		}
	}
	*osp = ds->ds_objset;
	mutex_exit(&ds->ds_opening_lock);
	return (err);
}
/*
 * Holds the pool while the objset is held. Therefore only one objset
 * can be held at a time.
 */
int
dmu_objset_hold(const char *name, void *tag, objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;

	err = dsl_pool_hold(name, tag, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_hold(dp, name, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	err = dmu_objset_from_ds(ds, osp);
	if (err != 0) {
		dsl_dataset_rele(ds, tag);
		dsl_pool_rele(dp, tag);
	}

	return (err);
}
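
/*
 * Illustrative sketch (not part of the original file): the usual
 * hold/rele pairing for short-lived, read-only access to an objset.
 * The function name example_get_objset_type() is hypothetical.
 */
#ifdef DMU_OBJSET_EXAMPLES
static int
example_get_objset_type(const char *name, dmu_objset_type_t *typep)
{
	objset_t *os;
	int err;

	/* dmu_objset_hold() also holds the pool until the rele. */
	err = dmu_objset_hold(name, FTAG, &os);
	if (err != 0)
		return (err);
	*typep = dmu_objset_type(os);
	dmu_objset_rele(os, FTAG);
	return (0);
}
#endif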
static int
dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	int err;

	err = dmu_objset_from_ds(ds, osp);
	if (err != 0) {
		dsl_dataset_disown(ds, tag);
	} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
		dsl_dataset_disown(ds, tag);
		return (SET_ERROR(EINVAL));
	} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
		dsl_dataset_disown(ds, tag);
		return (SET_ERROR(EROFS));
	}
	return (err);
}
/*
 * dsl_pool must not be held when this is called.
 * Upon successful return, there will be a longhold on the dataset,
 * and the dsl_pool will not be held.
 */
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;

	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_own(dp, name, tag, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}
	err = dmu_objset_own_impl(ds, type, readonly, tag, osp);
	dsl_pool_rele(dp, FTAG);

	return (err);
}
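
/*
 * Illustrative sketch (not part of the original file): owning an objset
 * takes a longhold on the dataset and leaves the pool unheld, so the
 * owner may block on the pool (e.g. in dmu_tx_assign()) without
 * deadlocking. example_own_and_modify() is a hypothetical caller.
 */
#ifdef DMU_OBJSET_EXAMPLES
static int
example_own_and_modify(const char *name)
{
	objset_t *os;
	int err;

	err = dmu_objset_own(name, DMU_OST_ZFS, B_FALSE, FTAG, &os);
	if (err != 0)
		return (err);
	/* ... create transactions, dirty objects, etc. ... */
	dmu_objset_disown(os, FTAG);
	return (0);
}
#endif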
int
dmu_objset_own_obj(dsl_pool_t *dp, uint64_t obj, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_own_obj(dp, obj, tag, &ds);
	if (err != 0)
		return (err);

	return (dmu_objset_own_impl(ds, type, readonly, tag, osp));
}

void
dmu_objset_rele(objset_t *os, void *tag)
{
	dsl_pool_t *dp = dmu_objset_pool(os);
	dsl_dataset_rele(os->os_dsl_dataset, tag);
	dsl_pool_rele(dp, tag);
}
/*
 * When we are called, os MUST refer to an objset associated with a dataset
 * that is owned by 'tag'; that is, is held and long held by 'tag' and ds_owner
 * == tag. We will then release and reacquire ownership of the dataset while
 * holding the pool config_rwlock to avoid intervening namespace or ownership
 * changes.
 *
 * This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to
 * release the hold on its dataset and acquire a new one on the dataset of the
 * same name so that it can be partially torn down and reconstructed.
 */
void
dmu_objset_refresh_ownership(objset_t *os, void *tag)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds, *newds;
	char name[ZFS_MAX_DATASET_NAME_LEN];

	ds = os->os_dsl_dataset;
	VERIFY3P(ds, !=, NULL);
	VERIFY3P(ds->ds_owner, ==, tag);
	VERIFY(dsl_dataset_long_held(ds));

	dsl_dataset_name(ds, name);
	dp = dmu_objset_pool(os);
	dsl_pool_config_enter(dp, FTAG);
	dmu_objset_disown(os, tag);
	VERIFY0(dsl_dataset_own(dp, name, tag, &newds));
	VERIFY3P(newds, ==, os->os_dsl_dataset);
	dsl_pool_config_exit(dp, FTAG);
}

void
dmu_objset_disown(objset_t *os, void *tag)
{
	dsl_dataset_disown(os->os_dsl_dataset, tag);
}
void
dmu_objset_evict_dbufs(objset_t *os)
{
	dnode_t dn_marker;
	dnode_t *dn;

	mutex_enter(&os->os_lock);
	dn = list_head(&os->os_dnodes);
	while (dn != NULL) {
		/*
		 * Skip dnodes without holds. We have to do this dance
		 * because dnode_add_ref() only works if there is already a
		 * hold. If the dnode has no holds, then it has no dbufs.
		 */
		if (dnode_add_ref(dn, FTAG)) {
			list_insert_after(&os->os_dnodes, dn, &dn_marker);
			mutex_exit(&os->os_lock);

			dnode_evict_dbufs(dn);
			dnode_rele(dn, FTAG);

			mutex_enter(&os->os_lock);
			dn = list_next(&os->os_dnodes, &dn_marker);
			list_remove(&os->os_dnodes, &dn_marker);
		} else {
			dn = list_next(&os->os_dnodes, dn);
		}
	}
	mutex_exit(&os->os_lock);

	if (DMU_USERUSED_DNODE(os) != NULL) {
		dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os));
		dnode_evict_dbufs(DMU_USERUSED_DNODE(os));
	}
	dnode_evict_dbufs(DMU_META_DNODE(os));
}
/*
 * Objset eviction processing is split into two pieces.
 * The first marks the objset as evicting, evicts any dbufs that
 * have a refcount of zero, and then queues up the objset for the
 * second phase of eviction. Once os->os_dnodes has been cleared by
 * dnode_buf_pageout()->dnode_destroy(), the second phase is executed.
 * The second phase closes the special dnodes, dequeues the objset from
 * the list of those undergoing eviction, and finally frees the objset.
 *
 * NOTE: Due to asynchronous eviction processing (invocation of
 * dnode_buf_pageout()), it is possible for the meta dnode for the
 * objset to have no holds even though os->os_dnodes is not empty.
 */
void
dmu_objset_evict(objset_t *os)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;

	for (int t = 0; t < TXG_SIZE; t++)
		ASSERT(!dmu_objset_is_dirty(os, t));

	if (ds)
		dsl_prop_unregister_all(ds, os);

	dmu_objset_evict_dbufs(os);

	mutex_enter(&os->os_lock);
	spa_evicting_os_register(os->os_spa, os);
	if (list_is_empty(&os->os_dnodes)) {
		mutex_exit(&os->os_lock);
		dmu_objset_evict_done(os);
	} else {
		mutex_exit(&os->os_lock);
	}
}
void
dmu_objset_evict_done(objset_t *os)
{
	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);

	dnode_special_close(&os->os_meta_dnode);
	if (DMU_USERUSED_DNODE(os)) {
		dnode_special_close(&os->os_userused_dnode);
		dnode_special_close(&os->os_groupused_dnode);
	}
	zil_free(os->os_zil);

	arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);

	/*
	 * This is a barrier to prevent the objset from going away in
	 * dnode_move() until we can safely ensure that the objset is still in
	 * use. We consider the objset valid before the barrier and invalid
	 * after the barrier.
	 */
	rw_enter(&os_lock, RW_READER);
	rw_exit(&os_lock);

	mutex_destroy(&os->os_lock);
	mutex_destroy(&os->os_userused_lock);
	mutex_destroy(&os->os_obj_lock);
	mutex_destroy(&os->os_user_ptr_lock);
	for (int i = 0; i < TXG_SIZE; i++) {
		multilist_destroy(os->os_dirty_dnodes[i]);
	}
	spa_evicting_os_deregister(os->os_spa, os);
	kmem_free(os, sizeof (objset_t));
}

timestruc_t
dmu_objset_snap_cmtime(objset_t *os)
{
	return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
}
/* called from dsl for meta-objset */
objset_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_objset_type_t type, dmu_tx_t *tx)
{
	objset_t *os;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));

	if (ds != NULL)
		VERIFY0(dmu_objset_from_ds(ds, &os));
	else
		VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os));

	mdn = DMU_META_DNODE(os);

	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quiescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
	 */
	if (ds != NULL) {
		int levels = 1;

		/*
		 * Determine the number of levels necessary for the meta-dnode
		 * to contain DN_MAX_OBJECT dnodes. Note that in order to
		 * ensure that we do not overflow 64 bits, there has to be
		 * a nlevels that gives us a number of blocks > DN_MAX_OBJECT
		 * but < 2^64. Therefore,
		 * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT) (10) must be
		 * less than (64 - log2(DN_MAX_OBJECT)) (16).
		 */
		while ((uint64_t)mdn->dn_nblkptr <<
		    (mdn->dn_datablkshift - DNODE_SHIFT +
		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
		    DN_MAX_OBJECT)
			levels++;

		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
		    mdn->dn_nlevels = levels;
	}

	ASSERT(type != DMU_OST_NONE);
	ASSERT(type != DMU_OST_ANY);
	ASSERT(type < DMU_OST_NUMTYPES);
	os->os_phys->os_type = type;
	if (dmu_objset_userused_enabled(os)) {
		os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
		os->os_flags = os->os_phys->os_flags;
	}

	dsl_dataset_dirty(ds, tx);

	return (os);
}
typedef struct dmu_objset_create_arg {
	const char *doca_name;
	cred_t *doca_cred;
	void (*doca_userfunc)(objset_t *os, void *arg,
	    cred_t *cr, dmu_tx_t *tx);
	void *doca_userarg;
	dmu_objset_type_t doca_type;
	uint64_t doca_flags;
} dmu_objset_create_arg_t;
static int
dmu_objset_create_check(void *arg, dmu_tx_t *tx)
{
	dmu_objset_create_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	int error;

	if (strchr(doca->doca_name, '@') != NULL)
		return (SET_ERROR(EINVAL));

	if (strlen(doca->doca_name) >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));

	error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail);
	if (error != 0)
		return (error);
	if (tail == NULL) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EEXIST));
	}
	error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
	    doca->doca_cred);
	dsl_dir_rele(pdd, FTAG);

	return (error);
}
static void
dmu_objset_create_sync(void *arg, dmu_tx_t *tx)
{
	dmu_objset_create_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	dsl_dataset_t *ds;
	uint64_t obj;
	blkptr_t *bp;
	objset_t *os;

	VERIFY0(dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail));

	obj = dsl_dataset_create_sync(pdd, tail, NULL, doca->doca_flags,
	    doca->doca_cred, tx);

	VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	bp = dsl_dataset_get_blkptr(ds);
	os = dmu_objset_create_impl(pdd->dd_pool->dp_spa,
	    ds, bp, doca->doca_type, tx);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	if (doca->doca_userfunc != NULL) {
		doca->doca_userfunc(os, doca->doca_userarg,
		    doca->doca_cred, tx);
	}

	spa_history_log_internal_ds(ds, "create", tx, "");
	dsl_dataset_rele(ds, FTAG);
	dsl_dir_rele(pdd, FTAG);
}
int
dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
    void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
{
	dmu_objset_create_arg_t doca;

	doca.doca_name = name;
	doca.doca_cred = CRED();
	doca.doca_flags = flags;
	doca.doca_userfunc = func;
	doca.doca_userarg = arg;
	doca.doca_type = type;

	return (dsl_sync_task(name,
	    dmu_objset_create_check, dmu_objset_create_sync, &doca,
	    5, ZFS_SPACE_CHECK_NORMAL));
}
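
/*
 * Illustrative sketch (not part of the original file): creating a new
 * objset with a callback that runs in syncing context, inside the same
 * sync task that creates the dataset. example_setup_cb() and
 * example_create() are hypothetical names.
 */
#ifdef DMU_OBJSET_EXAMPLES
static void
example_setup_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	/* runs in syncing context; initial objects could be set up here */
}

static int
example_create(const char *name)
{
	return (dmu_objset_create(name, DMU_OST_ZFS, 0,
	    example_setup_cb, NULL));
}
#endif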
typedef struct dmu_objset_clone_arg {
	const char *doca_clone;
	const char *doca_origin;
	cred_t *doca_cred;
} dmu_objset_clone_arg_t;
static int
dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
{
	dmu_objset_clone_arg_t *doca = arg;
	dsl_dir_t *pdd;
	const char *tail;
	int error;
	dsl_dataset_t *origin;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	if (strchr(doca->doca_clone, '@') != NULL)
		return (SET_ERROR(EINVAL));

	if (strlen(doca->doca_clone) >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));

	error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail);
	if (error != 0)
		return (error);
	if (tail == NULL) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EEXIST));
	}

	error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
	    doca->doca_cred);
	if (error != 0) {
		dsl_dir_rele(pdd, FTAG);
		return (SET_ERROR(EDQUOT));
	}
	dsl_dir_rele(pdd, FTAG);

	error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin);
	if (error != 0)
		return (error);

	/* You can only clone snapshots, not the head datasets. */
	if (!origin->ds_is_snapshot) {
		dsl_dataset_rele(origin, FTAG);
		return (SET_ERROR(EINVAL));
	}
	dsl_dataset_rele(origin, FTAG);

	return (0);
}
static void
dmu_objset_clone_sync(void *arg, dmu_tx_t *tx)
{
	dmu_objset_clone_arg_t *doca = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *pdd;
	const char *tail;
	dsl_dataset_t *origin, *ds;
	uint64_t obj;
	char namebuf[ZFS_MAX_DATASET_NAME_LEN];

	VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail));
	VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin));

	obj = dsl_dataset_create_sync(pdd, tail, origin, 0,
	    doca->doca_cred, tx);

	VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
	dsl_dataset_name(origin, namebuf);
	spa_history_log_internal_ds(ds, "clone", tx,
	    "origin=%s (%llu)", namebuf, origin->ds_object);
	dsl_dataset_rele(ds, FTAG);
	dsl_dataset_rele(origin, FTAG);
	dsl_dir_rele(pdd, FTAG);
}
int
dmu_objset_clone(const char *clone, const char *origin)
{
	dmu_objset_clone_arg_t doca;

	doca.doca_clone = clone;
	doca.doca_origin = origin;
	doca.doca_cred = CRED();

	return (dsl_sync_task(clone,
	    dmu_objset_clone_check, dmu_objset_clone_sync, &doca,
	    5, ZFS_SPACE_CHECK_NORMAL));
}
static int
dmu_objset_remap_indirects_impl(objset_t *os, uint64_t last_removed_txg)
{
	int error = 0;

	uint64_t object = 0;
	while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
		error = dmu_object_remap_indirects(os, object,
		    last_removed_txg);
		/*
		 * If the ZPL removed the object before we managed to
		 * dnode_hold it, we would get an ENOENT. If the ZPL declares
		 * its intent to remove the object (dnode_free) before we
		 * manage to dnode_hold it, we would get an EEXIST. In either
		 * case, we want to continue remapping the other objects in
		 * the objset; in all other cases, we want to break early.
		 */
		if (error != 0 && error != ENOENT && error != EEXIST) {
			break;
		}
	}
	if (error == ESRCH) {
		/* No more objects to remap, we're done. */
		error = 0;
	}
	return (error);
}
int
dmu_objset_remap_indirects(const char *fsname)
{
	int error = 0;
	objset_t *os = NULL;
	uint64_t last_removed_txg;
	uint64_t remap_start_txg;
	dsl_dir_t *dd;

	error = dmu_objset_hold(fsname, FTAG, &os);
	if (error != 0)
		return (error);

	dd = dmu_objset_ds(os)->ds_dir;
	if (!spa_feature_is_enabled(dmu_objset_spa(os),
	    SPA_FEATURE_OBSOLETE_COUNTS)) {
		dmu_objset_rele(os, FTAG);
		return (SET_ERROR(ENOTSUP));
	}

	if (dsl_dataset_is_snapshot(dmu_objset_ds(os))) {
		dmu_objset_rele(os, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * If there has not been a removal, we're done.
	 */
	last_removed_txg = spa_get_last_removal_txg(dmu_objset_spa(os));
	if (last_removed_txg == -1ULL) {
		dmu_objset_rele(os, FTAG);
		return (0);
	}

	/*
	 * If we have remapped since the last removal, we're done.
	 */
	if (dsl_dir_is_zapified(dd)) {
		uint64_t last_remap_txg;
		if (zap_lookup(spa_meta_objset(dmu_objset_spa(os)),
		    dd->dd_object, DD_FIELD_LAST_REMAP_TXG,
		    sizeof (last_remap_txg), 1, &last_remap_txg) == 0 &&
		    last_remap_txg > last_removed_txg) {
			dmu_objset_rele(os, FTAG);
			return (0);
		}
	}

	dsl_dataset_long_hold(dmu_objset_ds(os), FTAG);
	dsl_pool_rele(dmu_objset_pool(os), FTAG);

	remap_start_txg = spa_last_synced_txg(dmu_objset_spa(os));
	error = dmu_objset_remap_indirects_impl(os, last_removed_txg);
	if (error == 0) {
		/*
		 * We update the last_remap_txg to be the start txg so that
		 * we can guarantee that every block older than last_remap_txg
		 * that can be remapped has been remapped.
		 */
		error = dsl_dir_update_last_remap_txg(dd, remap_start_txg);
	}

	dsl_dataset_long_rele(dmu_objset_ds(os), FTAG);
	dsl_dataset_rele(dmu_objset_ds(os), FTAG);

	return (error);
}
int
dmu_objset_snapshot_one(const char *fsname, const char *snapname)
{
	int err;
	char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
	nvlist_t *snaps = fnvlist_alloc();

	fnvlist_add_boolean(snaps, longsnap);
	strfree(longsnap);
	err = dsl_dataset_snapshot(snaps, NULL, NULL);
	fnvlist_free(snaps);
	return (err);
}
static void
dmu_objset_sync_dnodes(multilist_sublist_t *list, dmu_tx_t *tx)
{
	dnode_t *dn;

	while ((dn = multilist_sublist_head(list)) != NULL) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		ASSERT(dn->dn_dbuf->db_data_pending);
		/*
		 * Initialize dn_zio outside dnode_sync() because the
		 * meta-dnode needs to set it outside dnode_sync().
		 */
		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
		ASSERT(dn->dn_zio);

		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
		multilist_sublist_remove(list, dn);

		multilist_t *newlist = dn->dn_objset->os_synced_dnodes;
		if (newlist != NULL) {
			(void) dnode_add_ref(dn, newlist);
			multilist_insert(newlist, dn);
		}

		dnode_sync(dn, tx);
	}
}
*zio
, arc_buf_t
*abuf
, void *arg
)
1208 blkptr_t
*bp
= zio
->io_bp
;
1210 dnode_phys_t
*dnp
= &os
->os_phys
->os_meta_dnode
;
1212 ASSERT(!BP_IS_EMBEDDED(bp
));
1213 ASSERT3U(BP_GET_TYPE(bp
), ==, DMU_OT_OBJSET
);
1214 ASSERT0(BP_GET_LEVEL(bp
));
1217 * Update rootbp fill count: it should be the number of objects
1218 * allocated in the object set (not counting the "special"
1219 * objects that are stored in the objset_phys_t -- the meta
1220 * dnode and user/group accounting objects).
1223 for (int i
= 0; i
< dnp
->dn_nblkptr
; i
++)
1224 bp
->blk_fill
+= BP_GET_FILL(&dnp
->dn_blkptr
[i
]);
1225 if (os
->os_dsl_dataset
!= NULL
)
1226 rrw_enter(&os
->os_dsl_dataset
->ds_bp_rwlock
, RW_WRITER
, FTAG
);
1227 *os
->os_rootbp
= *bp
;
1228 if (os
->os_dsl_dataset
!= NULL
)
1229 rrw_exit(&os
->os_dsl_dataset
->ds_bp_rwlock
, FTAG
);
1234 dmu_objset_write_done(zio_t
*zio
, arc_buf_t
*abuf
, void *arg
)
1236 blkptr_t
*bp
= zio
->io_bp
;
1237 blkptr_t
*bp_orig
= &zio
->io_bp_orig
;
1240 if (zio
->io_flags
& ZIO_FLAG_IO_REWRITE
) {
1241 ASSERT(BP_EQUAL(bp
, bp_orig
));
1243 dsl_dataset_t
*ds
= os
->os_dsl_dataset
;
1244 dmu_tx_t
*tx
= os
->os_synctx
;
1246 (void) dsl_dataset_block_kill(ds
, bp_orig
, tx
, B_TRUE
);
1247 dsl_dataset_block_born(ds
, bp
, tx
);
1249 kmem_free(bp
, sizeof (*bp
));
typedef struct sync_dnodes_arg {
	multilist_t *sda_list;
	int sda_sublist_idx;
	multilist_t *sda_newlist;
	dmu_tx_t *sda_tx;
} sync_dnodes_arg_t;

static void
sync_dnodes_task(void *arg)
{
	sync_dnodes_arg_t *sda = arg;

	multilist_sublist_t *ms =
	    multilist_sublist_lock(sda->sda_list, sda->sda_sublist_idx);

	dmu_objset_sync_dnodes(ms, sda->sda_tx);

	multilist_sublist_unlock(ms);

	kmem_free(sda, sizeof (*sda));
}
/* called from dsl */
void
dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
{
	int txgoff;
	zbookmark_phys_t zb;
	zio_prop_t zp;
	zio_t *zio;
	list_t *list;
	dbuf_dirty_record_t *dr;
	blkptr_t *blkptr_copy = kmem_alloc(sizeof (*os->os_rootbp), KM_SLEEP);
	*blkptr_copy = *os->os_rootbp;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	ASSERT(dmu_tx_is_syncing(tx));
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	if (os->os_dsl_dataset == NULL) {
		/*
		 * This is the MOS. If we have upgraded,
		 * spa_max_replication() could change, so reset
		 * os_copies here.
		 */
		os->os_copies = spa_max_replication(os->os_spa);
	}

	/*
	 * Create the root block IO
	 */
	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	arc_release(os->os_phys_buf, &os->os_phys_buf);

	dmu_write_policy(os, NULL, 0, 0, &zp);

	zio = arc_write(pio, os->os_spa, tx->tx_txg,
	    blkptr_copy, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
	    &zp, dmu_objset_write_ready, NULL, NULL, dmu_objset_write_done,
	    os, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);

	/*
	 * Sync special dnodes - the parent IO for the sync is the root block
	 */
	DMU_META_DNODE(os)->dn_zio = zio;
	dnode_sync(DMU_META_DNODE(os), tx);

	os->os_phys->os_flags = os->os_flags;

	if (DMU_USERUSED_DNODE(os) &&
	    DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
		DMU_USERUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_USERUSED_DNODE(os), tx);
		DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
		dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
	}

	txgoff = tx->tx_txg & TXG_MASK;

	if (dmu_objset_userused_enabled(os)) {
		/*
		 * We must create the list here because it uses the
		 * dn_dirty_link[] of this txg. But it may already
		 * exist because we call dsl_dataset_sync() twice per txg.
		 */
		if (os->os_synced_dnodes == NULL) {
			os->os_synced_dnodes =
			    multilist_create(sizeof (dnode_t),
			    offsetof(dnode_t, dn_dirty_link[txgoff]),
			    dnode_multilist_index_func);
		} else {
			ASSERT3U(os->os_synced_dnodes->ml_offset, ==,
			    offsetof(dnode_t, dn_dirty_link[txgoff]));
		}
	}

	for (int i = 0;
	    i < multilist_get_num_sublists(os->os_dirty_dnodes[txgoff]); i++) {
		sync_dnodes_arg_t *sda = kmem_alloc(sizeof (*sda), KM_SLEEP);
		sda->sda_list = os->os_dirty_dnodes[txgoff];
		sda->sda_sublist_idx = i;
		sda->sda_tx = tx;
		(void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
		    sync_dnodes_task, sda, 0);
		/* callback frees sda */
	}
	taskq_wait(dmu_objset_pool(os)->dp_sync_taskq);

	list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
	while ((dr = list_head(list)) != NULL) {
		ASSERT0(dr->dr_dbuf->db_level);
		list_remove(list, dr);
		if (dr->dr_zio)
			zio_nowait(dr->dr_zio);
	}

	/* Enable dnode backfill if enough objects have been freed. */
	if (os->os_freed_dnodes >= dmu_rescan_dnode_threshold) {
		os->os_rescan_dnodes = B_TRUE;
		os->os_freed_dnodes = 0;
	}

	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);
	os->os_phys->os_zil_header = os->os_zil_header;
	zio_nowait(zio);
}
boolean_t
dmu_objset_is_dirty(objset_t *os, uint64_t txg)
{
	return (!multilist_is_empty(os->os_dirty_dnodes[txg & TXG_MASK]));
}
static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];

void
dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
{
	used_cbs[ost] = cb;
}

boolean_t
dmu_objset_userused_enabled(objset_t *os)
{
	return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
	    used_cbs[os->os_phys->os_type] != NULL &&
	    DMU_USERUSED_DNODE(os) != NULL);
}
typedef struct userquota_node {
	uint64_t uqn_id;
	int64_t uqn_delta;
	avl_node_t uqn_node;
} userquota_node_t;

typedef struct userquota_cache {
	avl_tree_t uqc_user_deltas;
	avl_tree_t uqc_group_deltas;
} userquota_cache_t;

static int
userquota_compare(const void *l, const void *r)
{
	const userquota_node_t *luqn = l;
	const userquota_node_t *ruqn = r;

	if (luqn->uqn_id < ruqn->uqn_id)
		return (-1);
	if (luqn->uqn_id > ruqn->uqn_id)
		return (1);
	return (0);
}
*os
, userquota_cache_t
*cache
, dmu_tx_t
*tx
)
1437 userquota_node_t
*uqn
;
1439 ASSERT(dmu_tx_is_syncing(tx
));
1442 while ((uqn
= avl_destroy_nodes(&cache
->uqc_user_deltas
,
1443 &cookie
)) != NULL
) {
1445 * os_userused_lock protects against concurrent calls to
1446 * zap_increment_int(). It's needed because zap_increment_int()
1447 * is not thread-safe (i.e. not atomic).
1449 mutex_enter(&os
->os_userused_lock
);
1450 VERIFY0(zap_increment_int(os
, DMU_USERUSED_OBJECT
,
1451 uqn
->uqn_id
, uqn
->uqn_delta
, tx
));
1452 mutex_exit(&os
->os_userused_lock
);
1453 kmem_free(uqn
, sizeof (*uqn
));
1455 avl_destroy(&cache
->uqc_user_deltas
);
1458 while ((uqn
= avl_destroy_nodes(&cache
->uqc_group_deltas
,
1459 &cookie
)) != NULL
) {
1460 mutex_enter(&os
->os_userused_lock
);
1461 VERIFY0(zap_increment_int(os
, DMU_GROUPUSED_OBJECT
,
1462 uqn
->uqn_id
, uqn
->uqn_delta
, tx
));
1463 mutex_exit(&os
->os_userused_lock
);
1464 kmem_free(uqn
, sizeof (*uqn
));
1466 avl_destroy(&cache
->uqc_group_deltas
);
static void
userquota_update_cache(avl_tree_t *avl, uint64_t id, int64_t delta)
{
	userquota_node_t search = { .uqn_id = id };
	avl_index_t idx;

	userquota_node_t *uqn = avl_find(avl, &search, &idx);
	if (uqn == NULL) {
		uqn = kmem_zalloc(sizeof (*uqn), KM_SLEEP);
		uqn->uqn_id = id;
		avl_insert(avl, uqn, idx);
	}
	uqn->uqn_delta += delta;
}

static void
do_userquota_update(userquota_cache_t *cache, uint64_t used, uint64_t flags,
    uint64_t user, uint64_t group, boolean_t subtract)
{
	if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
		int64_t delta = DNODE_SIZE + used;
		if (subtract)
			delta = -delta;

		userquota_update_cache(&cache->uqc_user_deltas, user, delta);
		userquota_update_cache(&cache->uqc_group_deltas, group, delta);
	}
}
typedef struct userquota_updates_arg {
	objset_t *uua_os;
	int uua_sublist_idx;
	dmu_tx_t *uua_tx;
} userquota_updates_arg_t;
static void
userquota_updates_task(void *arg)
{
	userquota_updates_arg_t *uua = arg;
	objset_t *os = uua->uua_os;
	dmu_tx_t *tx = uua->uua_tx;
	dnode_t *dn;
	userquota_cache_t cache = { 0 };

	multilist_sublist_t *list =
	    multilist_sublist_lock(os->os_synced_dnodes, uua->uua_sublist_idx);

	ASSERT(multilist_sublist_head(list) == NULL ||
	    dmu_objset_userused_enabled(os));
	avl_create(&cache.uqc_user_deltas, userquota_compare,
	    sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
	avl_create(&cache.uqc_group_deltas, userquota_compare,
	    sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));

	while ((dn = multilist_sublist_head(list)) != NULL) {
		int flags;
		ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
		ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
		    dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED);

		flags = dn->dn_id_flags;
		if (flags & DN_ID_OLD_EXIST) {
			do_userquota_update(&cache,
			    dn->dn_oldused, dn->dn_oldflags,
			    dn->dn_olduid, dn->dn_oldgid, B_TRUE);
		}
		if (flags & DN_ID_NEW_EXIST) {
			do_userquota_update(&cache,
			    DN_USED_BYTES(dn->dn_phys),
			    dn->dn_phys->dn_flags, dn->dn_newuid,
			    dn->dn_newgid, B_FALSE);
		}

		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = 0;
		dn->dn_oldflags = 0;
		if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
			dn->dn_olduid = dn->dn_newuid;
			dn->dn_oldgid = dn->dn_newgid;
			dn->dn_id_flags |= DN_ID_OLD_EXIST;
			if (dn->dn_bonuslen == 0)
				dn->dn_id_flags |= DN_ID_CHKED_SPILL;
			else
				dn->dn_id_flags |= DN_ID_CHKED_BONUS;
		}
		dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
		mutex_exit(&dn->dn_mtx);

		multilist_sublist_remove(list, dn);
		dnode_rele(dn, os->os_synced_dnodes);
	}
	do_userquota_cacheflush(os, &cache, tx);
	multilist_sublist_unlock(list);
	kmem_free(uua, sizeof (*uua));
}
void
dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
{
	if (!dmu_objset_userused_enabled(os))
		return;

	/* Allocate the user/groupused objects if necessary. */
	if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
		VERIFY0(zap_create_claim(os,
		    DMU_USERUSED_OBJECT,
		    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
		VERIFY0(zap_create_claim(os,
		    DMU_GROUPUSED_OBJECT,
		    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
	}

	for (int i = 0;
	    i < multilist_get_num_sublists(os->os_synced_dnodes); i++) {
		userquota_updates_arg_t *uua =
		    kmem_alloc(sizeof (*uua), KM_SLEEP);
		uua->uua_os = os;
		uua->uua_sublist_idx = i;
		uua->uua_tx = tx;
		/* note: caller does taskq_wait() */
		(void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
		    userquota_updates_task, uua, 0);
		/* callback frees uua */
	}
}
/*
 * Returns a pointer to data to find uid/gid from
 *
 * If a dirty record for transaction group that is syncing can't
 * be found then NULL is returned. In the NULL case it is assumed
 * the uid/gid aren't changing.
 */
static void *
dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr, **drp;
	void *data;

	if (db->db_dirtycnt == 0)
		return (db->db.db_data);  /* Nothing is changing */

	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg == tx->tx_txg)
			break;

	if (dr == NULL) {
		data = NULL;
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(dr->dr_dbuf);
		dn = DB_DNODE(dr->dr_dbuf);

		if (dn->dn_bonuslen == 0 &&
		    dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
			data = dr->dt.dl.dr_data->b_data;
		else
			data = dr->dt.dl.dr_data;

		DB_DNODE_EXIT(dr->dr_dbuf);
	}

	return (data);
}
void
dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	void *data = NULL;
	dmu_buf_impl_t *db = NULL;
	uint64_t *user = NULL;
	uint64_t *group = NULL;
	int flags = dn->dn_id_flags;
	int error;
	boolean_t have_spill = B_FALSE;

	if (!dmu_objset_userused_enabled(dn->dn_objset))
		return;

	if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
	    DN_ID_CHKED_SPILL)))
		return;

	if (before && dn->dn_bonuslen != 0)
		data = DN_BONUS(dn->dn_phys);
	else if (!before && dn->dn_bonuslen != 0) {
		if (dn->dn_bonus) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
			data = dmu_objset_userquota_find_data(db, tx);
		} else {
			data = DN_BONUS(dn->dn_phys);
		}
	} else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
		int rf = 0;

		if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
			rf |= DB_RF_HAVESTRUCT;
		error = dmu_spill_hold_by_dnode(dn,
		    rf | DB_RF_MUST_SUCCEED,
		    FTAG, (dmu_buf_t **)&db);
		ASSERT(error == 0);
		mutex_enter(&db->db_mtx);
		data = (before) ? db->db.db_data :
		    dmu_objset_userquota_find_data(db, tx);
		have_spill = B_TRUE;
	} else {
		mutex_enter(&dn->dn_mtx);
		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
		mutex_exit(&dn->dn_mtx);
		return;
	}

	if (before) {
		ASSERT(data);
		user = &dn->dn_olduid;
		group = &dn->dn_oldgid;
	} else if (data) {
		user = &dn->dn_newuid;
		group = &dn->dn_newgid;
	}

	/*
	 * Must always call the callback in case the object
	 * type has changed and that type isn't an object type to track
	 */
	error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data,
	    user, group);

	/*
	 * Preserve existing uid/gid when the callback can't determine
	 * what the new uid/gid are and the callback returned EEXIST.
	 * The EEXIST error tells us to just use the existing uid/gid.
	 * If we don't know what the old values are then just assign
	 * them to 0, since that is a new file being created.
	 */
	if (!before && data == NULL && error == EEXIST) {
		if (flags & DN_ID_OLD_EXIST) {
			dn->dn_newuid = dn->dn_olduid;
			dn->dn_newgid = dn->dn_oldgid;
		} else {
			dn->dn_newuid = 0;
			dn->dn_newgid = 0;
		}
		error = 0;
	}

	if (db)
		mutex_exit(&db->db_mtx);

	mutex_enter(&dn->dn_mtx);
	if (error == 0 && before)
		dn->dn_id_flags |= DN_ID_OLD_EXIST;
	if (error == 0 && !before)
		dn->dn_id_flags |= DN_ID_NEW_EXIST;

	if (have_spill) {
		dn->dn_id_flags |= DN_ID_CHKED_SPILL;
	} else {
		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
	}
	mutex_exit(&dn->dn_mtx);
	if (have_spill)
		dmu_buf_rele((dmu_buf_t *)db, FTAG);
}
boolean_t
dmu_objset_userspace_present(objset_t *os)
{
	return (os->os_phys->os_flags &
	    OBJSET_FLAG_USERACCOUNTING_COMPLETE);
}

int
dmu_objset_userspace_upgrade(objset_t *os)
{
	uint64_t obj;
	int err = 0;

	if (dmu_objset_userspace_present(os))
		return (0);
	if (!dmu_objset_userused_enabled(os))
		return (SET_ERROR(ENOTSUP));
	if (dmu_objset_is_snapshot(os))
		return (SET_ERROR(EINVAL));

	/*
	 * We simply need to mark every object dirty, so that it will be
	 * synced out and now accounted. If this is called
	 * concurrently, or if we already did some work before crashing,
	 * that's fine, since we track each object's accounted state
	 * independently.
	 */
	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
		dmu_tx_t *tx;
		dmu_buf_t *db;
		int objerr;

		if (issig(JUSTLOOKING) && issig(FORREAL))
			return (SET_ERROR(EINTR));

		objerr = dmu_bonus_hold(os, obj, FTAG, &db);
		if (objerr != 0)
			continue;
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, obj);
		objerr = dmu_tx_assign(tx, TXG_WAIT);
		if (objerr != 0) {
			dmu_tx_abort(tx);
			continue;
		}
		dmu_buf_will_dirty(db, tx);
		dmu_buf_rele(db, FTAG);
		dmu_tx_commit(tx);
	}

	os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}
void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
	    usedobjsp, availobjsp);
}

uint64_t
dmu_objset_fsid_guid(objset_t *os)
{
	return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
}

void
dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
{
	stat->dds_type = os->os_phys->os_type;
	if (os->os_dsl_dataset)
		dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
}
void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
	ASSERT(os->os_dsl_dataset ||
	    os->os_phys->os_type == DMU_OST_META);

	if (os->os_dsl_dataset != NULL)
		dsl_dataset_stats(os->os_dsl_dataset, nv);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
	    os->os_phys->os_type);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
	    dmu_objset_userspace_present(os));
}

boolean_t
dmu_objset_is_snapshot(objset_t *os)
{
	if (os->os_dsl_dataset != NULL)
		return (os->os_dsl_dataset->ds_is_snapshot);
	else
		return (B_FALSE);
}
int
dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
    boolean_t *conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	uint64_t ignored;

	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, name, 8, 1, &ignored,
	    MT_NORMALIZE, real, maxlen, conflict));
}
int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));

	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
		return (SET_ERROR(ENOENT));

	zap_cursor_init_serialized(&cursor,
	    ds->ds_dir->dd_pool->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	if (case_conflict)
		*case_conflict = attr.za_normalization_conflict;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}
int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	/* there is no next dir on a snapshot! */
	if (os->os_dsl_dataset->ds_object !=
	    dsl_dir_phys(dd)->dd_head_dataset_obj)
		return (SET_ERROR(ENOENT));

	zap_cursor_init_serialized(&cursor,
	    dd->dd_pool->dp_meta_objset,
	    dsl_dir_phys(dd)->dd_child_dir_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENOENT));
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (SET_ERROR(ENAMETOOLONG));
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}
{
1936 char *dc_ddname
; /* last component of ddobj's name */
1937 int (*dc_func
)(dsl_pool_t
*, dsl_dataset_t
*, void *);
1940 kmutex_t
*dc_error_lock
;
1942 } dmu_objset_find_ctx_t
;
static void
dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp)
{
	dsl_pool_t *dp = dcp->dc_dp;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	uint64_t thisobj;
	int err = 0;

	/* don't process if there already was an error */
	if (*dcp->dc_error != 0)
		goto out;

	/*
	 * Note: passing the name (dc_ddname) here is optional, but it
	 * improves performance because we don't need to call
	 * zap_value_search() to determine the name.
	 */
	err = dsl_dir_hold_obj(dp, dcp->dc_ddobj, dcp->dc_ddname, FTAG, &dd);
	if (err != 0)
		goto out;

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		goto out;
	}

	thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (dcp->dc_flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			dmu_objset_find_ctx_t *child_dcp =
			    kmem_alloc(sizeof (*child_dcp), KM_SLEEP);
			*child_dcp = *dcp;
			child_dcp->dc_ddobj = attr->za_first_integer;
			child_dcp->dc_ddname = spa_strdup(attr->za_name);
			if (dcp->dc_tq != NULL)
				(void) taskq_dispatch(dcp->dc_tq,
				    dmu_objset_find_dp_cb, child_dcp,
				    TQ_SLEEP);
			else
				dmu_objset_find_dp_impl(child_dcp);
		}
		zap_cursor_fini(&zc);
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (dcp->dc_flags & DS_FIND_SNAPSHOTS) {
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj;

			snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				err = dsl_dataset_hold_obj(dp,
				    attr->za_first_integer, FTAG, &ds);
				if (err != 0)
					break;
				err = dcp->dc_func(dp, ds, dcp->dc_arg);
				dsl_dataset_rele(ds, FTAG);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	kmem_free(attr, sizeof (zap_attribute_t));

	if (err != 0) {
		dsl_dir_rele(dd, FTAG);
		goto out;
	}

	/*
	 * Apply to self.
	 */
	err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

	/*
	 * Note: we hold the dir while calling dsl_dataset_hold_obj() so
	 * that the dir will remain cached, and we won't have to re-instantiate
	 * it (which could be expensive due to finding its name via
	 * zap_value_search()).
	 */
	dsl_dir_rele(dd, FTAG);
	if (err != 0)
		goto out;
	err = dcp->dc_func(dp, ds, dcp->dc_arg);
	dsl_dataset_rele(ds, FTAG);

out:
	mutex_enter(dcp->dc_error_lock);
	/* only keep first error */
	if (*dcp->dc_error == 0)
		*dcp->dc_error = err;
	mutex_exit(dcp->dc_error_lock);

	if (dcp->dc_ddname != NULL)
		spa_strfree(dcp->dc_ddname);
	kmem_free(dcp, sizeof (*dcp));
}
static void
dmu_objset_find_dp_cb(void *arg)
{
	dmu_objset_find_ctx_t *dcp = arg;
	dsl_pool_t *dp = dcp->dc_dp;

	/*
	 * We need to get a pool_config_lock here, as there are several
	 * assert(pool_config_held) down the stack. Getting a lock via
	 * dsl_pool_config_enter is risky, as it might be stalled by a
	 * pending writer. This would deadlock, as the write lock can
	 * only be granted when our parent thread gives up the lock.
	 * The _prio interface gives us priority over a pending writer.
	 */
	dsl_pool_config_enter_prio(dp, FTAG);

	dmu_objset_find_dp_impl(dcp);

	dsl_pool_config_exit(dp, FTAG);
}
/*
 * Find objsets under and including ddobj, call func(ds) on each.
 * The order for the enumeration is completely undefined.
 * func is called with dsl_pool_config held.
 */
int
dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj,
    int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags)
{
	int error = 0;
	taskq_t *tq = NULL;
	int ntasks;
	dmu_objset_find_ctx_t *dcp;
	kmutex_t err_lock;

	mutex_init(&err_lock, NULL, MUTEX_DEFAULT, NULL);
	dcp = kmem_alloc(sizeof (*dcp), KM_SLEEP);
	dcp->dc_tq = NULL;
	dcp->dc_dp = dp;
	dcp->dc_ddobj = ddobj;
	dcp->dc_ddname = NULL;
	dcp->dc_func = func;
	dcp->dc_arg = arg;
	dcp->dc_flags = flags;
	dcp->dc_error_lock = &err_lock;
	dcp->dc_error = &error;

	if ((flags & DS_FIND_SERIALIZE) || dsl_pool_config_held_writer(dp)) {
		/*
		 * In case a write lock is held we can't make use of
		 * parallelism, as down the stack of the worker threads
		 * the lock is asserted via dsl_pool_config_held.
		 * In case of a read lock this is solved by getting a read
		 * lock in each worker thread, which isn't possible in case
		 * of a writer lock. So we fall back to the synchronous path
		 * here.
		 * In the future it might be possible to get some magic into
		 * dsl_pool_config_held in a way that it returns true for
		 * the worker threads so that a single lock held from this
		 * thread suffices. For now, stay single threaded.
		 */
		dmu_objset_find_dp_impl(dcp);
		mutex_destroy(&err_lock);

		return (error);
	}

	ntasks = dmu_find_threads;
	if (ntasks == 0)
		ntasks = vdev_count_leaves(dp->dp_spa) * 4;
	tq = taskq_create("dmu_objset_find", ntasks, minclsyspri, ntasks,
	    INT_MAX, 0);
	if (tq == NULL) {
		kmem_free(dcp, sizeof (*dcp));
		mutex_destroy(&err_lock);

		return (SET_ERROR(ENOMEM));
	}
	dcp->dc_tq = tq;

	/* dcp will be freed by task */
	(void) taskq_dispatch(tq, dmu_objset_find_dp_cb, dcp, TQ_SLEEP);

	/*
	 * PORTING: this code relies on the property of taskq_wait to wait
	 * until no more tasks are queued and no more tasks are active. As
	 * we always queue new tasks from within other tasks, task_wait
	 * reliably waits for the full recursion to finish, even though we
	 * enqueue new tasks after taskq_wait has been called.
	 * On platforms other than illumos, taskq_wait may not have this
	 * property.
	 */
	taskq_wait(tq);
	taskq_destroy(tq);
	mutex_destroy(&err_lock);

	return (error);
}
/*
 * Find all objsets under name, and for each, call 'func(child_name, arg)'.
 * The dp_config_rwlock must not be held when this is called, and it
 * will not be held when the callback is called.
 * Therefore this function should only be used when the pool is not changing
 * (e.g. in syncing context), or the callback can deal with the possible races.
 */
static int
dmu_objset_find_impl(spa_t *spa, const char *name,
    int func(const char *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = spa_get_dsl(spa);
	dsl_dataset_t *ds;
	zap_cursor_t zc;
	zap_attribute_t *attr;
	char *child;
	uint64_t thisobj;
	int err;

	dsl_pool_config_enter(dp, FTAG);

	err = dsl_dir_hold(dp, name, FTAG, &dd, NULL);
	if (err != 0) {
		dsl_pool_config_exit(dp, FTAG);
		return (err);
	}

	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
	if (dd->dd_myname[0] == '$') {
		dsl_dir_rele(dd, FTAG);
		dsl_pool_config_exit(dp, FTAG);
		return (0);
	}

	thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/*
	 * Iterate over all children.
	 */
	if (flags & DS_FIND_CHILDREN) {
		for (zap_cursor_init(&zc, dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT3U(attr->za_integer_length, ==,
			    sizeof (uint64_t));
			ASSERT3U(attr->za_num_integers, ==, 1);

			child = kmem_asprintf("%s/%s", name, attr->za_name);
			dsl_pool_config_exit(dp, FTAG);
			err = dmu_objset_find_impl(spa, child,
			    func, arg, flags);
			dsl_pool_config_enter(dp, FTAG);
			strfree(child);
			if (err != 0)
				break;
		}
		zap_cursor_fini(&zc);

		if (err != 0) {
			dsl_dir_rele(dd, FTAG);
			dsl_pool_config_exit(dp, FTAG);
			kmem_free(attr, sizeof (zap_attribute_t));
			return (err);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if (flags & DS_FIND_SNAPSHOTS) {
		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);

		if (err == 0) {
			uint64_t snapobj;

			snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
			dsl_dataset_rele(ds, FTAG);

			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
			    zap_cursor_retrieve(&zc, attr) == 0;
			    (void) zap_cursor_advance(&zc)) {
				ASSERT3U(attr->za_integer_length, ==,
				    sizeof (uint64_t));
				ASSERT3U(attr->za_num_integers, ==, 1);

				child = kmem_asprintf("%s@%s",
				    name, attr->za_name);
				dsl_pool_config_exit(dp, FTAG);
				err = func(child, arg);
				dsl_pool_config_enter(dp, FTAG);
				strfree(child);
				if (err != 0)
					break;
			}
			zap_cursor_fini(&zc);
		}
	}

	dsl_dir_rele(dd, FTAG);
	kmem_free(attr, sizeof (zap_attribute_t));
	dsl_pool_config_exit(dp, FTAG);

	if (err != 0)
		return (err);

	/* Apply to self. */
	return (func(name, arg));
}
/*
 * See comment above dmu_objset_find_impl().
 */
int
dmu_objset_find(char *name, int func(const char *, void *), void *arg,
    int flags)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	error = dmu_objset_find_impl(spa, name, func, arg, flags);
	spa_close(spa, FTAG);
	return (error);
}
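
/*
 * Illustrative sketch (not part of the original file): a callback for
 * dmu_objset_find(). Per the comment above dmu_objset_find_impl(), the
 * pool config lock is dropped around the call, so the callback must
 * tolerate races. example_print_cb() is a hypothetical name.
 */
#ifdef DMU_OBJSET_EXAMPLES
static int
example_print_cb(const char *name, void *arg)
{
	(void) arg;
	(void) printf("%s\n", name);
	return (0);	/* a nonzero return would abort the enumeration */
}

/* e.g.: dmu_objset_find(pool, example_print_cb, NULL, DS_FIND_CHILDREN); */
#endif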
void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	os->os_user_ptr = user_ptr;
}

void *
dmu_objset_get_user(objset_t *os)
{
	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
	return (os->os_user_ptr);
}
/*
 * Determine name of filesystem, given name of snapshot.
 * buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes
 */
int
dmu_fsname(const char *snapname, char *buf)
{
	char *atp = strchr(snapname, '@');
	if (atp == NULL)
		return (SET_ERROR(EINVAL));
	if (atp - snapname >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));
	(void) strlcpy(buf, snapname, atp - snapname + 1);
	return (0);
}
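
/*
 * Illustrative sketch (not part of the original file): dmu_fsname()
 * splits "pool/fs@snap" at the '@', copying only the filesystem part.
 * The names below are hypothetical.
 */
#ifdef DMU_OBJSET_EXAMPLES
static void
example_fsname(void)
{
	char buf[ZFS_MAX_DATASET_NAME_LEN];

	if (dmu_fsname("tank/home@monday", buf) == 0) {
		/* buf now contains "tank/home" */
	}
}
#endif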
/*
 * Call when we think we're going to write/free space in open context to track
 * the amount of dirty data in the open txg, which is also the amount
 * of memory that can not be evicted until this txg syncs.
 */
void
dmu_objset_willuse_space(objset_t *os, int64_t space, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int64_t aspace = spa_get_worst_case_asize(os->os_spa, space);

	if (ds != NULL) {
		dsl_dir_willuse_space(ds->ds_dir, aspace, tx);
		dsl_pool_dirty_space(dmu_tx_pool(tx), space, tx);
	}
}