/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 RackTop Systems.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016, OmniTI Computer Consulting, Inc. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 */
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_scan.h>
#include <sys/dsl_deadlist.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_bookmark.h>
#include <sys/dmu_send.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <zfs_fletcher.h>
/*
 * The SPA supports block sizes up to 16MB. However, very large blocks
 * can have an impact on i/o latency (e.g. tying up a spinning disk for
 * ~300ms), and also potentially on the memory allocator. Therefore,
 * we do not allow the recordsize to be set larger than zfs_max_recordsize
 * (default 1MB). Larger blocks can be created by changing this tunable,
 * and pools with larger blocks can always be imported and used, regardless
 * of this setting.
 */
int zfs_max_recordsize = 1 * 1024 * 1024;

#define	SWITCH64(x, y) \
	{ \
		uint64_t __tmp = (x); \
		(x) = (y); \
		(y) = __tmp; \
	}

#define	DS_REF_MAX	(1ULL << 62)

extern inline dsl_dataset_phys_t *dsl_dataset_phys(dsl_dataset_t *ds);

extern int spa_asize_inflation;

static zil_header_t zero_zil;
/*
 * Figure out how much of this delta should be propagated to the dsl_dir
 * layer. If there's a refreservation, that space has already been
 * partially accounted for in our ancestors.
 */
static int64_t
parent_delta(dsl_dataset_t *ds, int64_t delta)
{
	dsl_dataset_phys_t *ds_phys;
	uint64_t old_bytes, new_bytes;

	if (ds->ds_reserved == 0)
		return (delta);

	ds_phys = dsl_dataset_phys(ds);
	old_bytes = MAX(ds_phys->ds_unique_bytes, ds->ds_reserved);
	new_bytes = MAX(ds_phys->ds_unique_bytes + delta, ds->ds_reserved);

	ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
	return (new_bytes - old_bytes);
}
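
/*
 * For illustration (hypothetical numbers, not from the code above): with
 * ds_reserved = 10G and ds_unique_bytes = 4G, a delta of +2G leaves
 * MAX(unique, reserved) at 10G, so parent_delta() returns 0 and nothing is
 * charged to the dsl_dir; with ds_unique_bytes = 9G the same +2G moves the
 * maximum from 10G to 11G and only 1G is propagated; once the unique space
 * exceeds the reservation, the full delta flows through.
 */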
void
dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	int compressed = BP_GET_PSIZE(bp);
	int uncompressed = BP_GET_UCSIZE(bp);
	int64_t delta;

	dprintf_bp(bp, "ds=%p", ds);

	ASSERT(dmu_tx_is_syncing(tx));
	/* It could have been compressed away to nothing */
	if (BP_IS_HOLE(bp))
		return;
	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
	ASSERT(DMU_OT_IS_VALID(BP_GET_TYPE(bp)));
	if (ds == NULL) {
		dsl_pool_mos_diduse_space(tx->tx_pool,
		    used, compressed, uncompressed);
		return;
	}

	ASSERT3U(bp->blk_birth, >, dsl_dataset_phys(ds)->ds_prev_snap_txg);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	mutex_enter(&ds->ds_lock);
	delta = parent_delta(ds, used);
	dsl_dataset_phys(ds)->ds_referenced_bytes += used;
	dsl_dataset_phys(ds)->ds_compressed_bytes += compressed;
	dsl_dataset_phys(ds)->ds_uncompressed_bytes += uncompressed;
	dsl_dataset_phys(ds)->ds_unique_bytes += used;

	if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE) {
		ds->ds_feature_activation_needed[SPA_FEATURE_LARGE_BLOCKS] =
		    B_TRUE;
	}

	spa_feature_t f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
	if (f != SPA_FEATURE_NONE)
		ds->ds_feature_activation_needed[f] = B_TRUE;

	mutex_exit(&ds->ds_lock);
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
	    compressed, uncompressed, tx);
	dsl_dir_transfer_space(ds->ds_dir, used - delta,
	    DD_USED_REFRSRV, DD_USED_HEAD, tx);
}
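
/*
 * Note on the accounting above: only the portion of "used" returned by
 * parent_delta() is charged to the dsl_dir as DD_USED_HEAD; the remainder
 * (used - delta) was already covered by the refreservation, so it is moved
 * from the DD_USED_REFRSRV bucket into DD_USED_HEAD rather than being
 * charged twice.
 */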
int
dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
    boolean_t async)
{
	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	int compressed = BP_GET_PSIZE(bp);
	int uncompressed = BP_GET_UCSIZE(bp);

	if (BP_IS_HOLE(bp))
		return (0);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(bp->blk_birth <= tx->tx_txg);

	if (ds == NULL) {
		dsl_free(tx->tx_pool, tx->tx_txg, bp);
		dsl_pool_mos_diduse_space(tx->tx_pool,
		    -used, -compressed, -uncompressed);
		return (used);
	}
	ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);

	ASSERT(!ds->ds_is_snapshot);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	if (bp->blk_birth > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
		int64_t delta;

		dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
		dsl_free(tx->tx_pool, tx->tx_txg, bp);

		mutex_enter(&ds->ds_lock);
		ASSERT(dsl_dataset_phys(ds)->ds_unique_bytes >= used ||
		    !DS_UNIQUE_IS_ACCURATE(ds));
		delta = parent_delta(ds, -used);
		dsl_dataset_phys(ds)->ds_unique_bytes -= used;
		mutex_exit(&ds->ds_lock);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    delta, -compressed, -uncompressed, tx);
		dsl_dir_transfer_space(ds->ds_dir, -used - delta,
		    DD_USED_REFRSRV, DD_USED_HEAD, tx);
	} else {
		dprintf_bp(bp, "putting on dead list: %s", "");
		if (async) {
			/*
			 * We are here as part of zio's write done callback,
			 * which means we're a zio interrupt thread.  We can't
			 * call dsl_deadlist_insert() now because it may block
			 * waiting for I/O.  Instead, put bp on the deferred
			 * queue and let dsl_pool_sync() finish the job.
			 */
			bplist_append(&ds->ds_pending_deadlist, bp);
		} else {
			dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
		}
		ASSERT3U(ds->ds_prev->ds_object, ==,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj);
		ASSERT(dsl_dataset_phys(ds->ds_prev)->ds_num_children > 0);
		/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
		if (dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
		    ds->ds_object && bp->blk_birth >
		    dsl_dataset_phys(ds->ds_prev)->ds_prev_snap_txg) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			mutex_enter(&ds->ds_prev->ds_lock);
			dsl_dataset_phys(ds->ds_prev)->ds_unique_bytes += used;
			mutex_exit(&ds->ds_prev->ds_lock);
		}
		if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
			dsl_dir_transfer_space(ds->ds_dir, used,
			    DD_USED_HEAD, DD_USED_SNAP, tx);
		}
	}
	mutex_enter(&ds->ds_lock);
	ASSERT3U(dsl_dataset_phys(ds)->ds_referenced_bytes, >=, used);
	dsl_dataset_phys(ds)->ds_referenced_bytes -= used;
	ASSERT3U(dsl_dataset_phys(ds)->ds_compressed_bytes, >=, compressed);
	dsl_dataset_phys(ds)->ds_compressed_bytes -= compressed;
	ASSERT3U(dsl_dataset_phys(ds)->ds_uncompressed_bytes, >=, uncompressed);
	dsl_dataset_phys(ds)->ds_uncompressed_bytes -= uncompressed;
	mutex_exit(&ds->ds_lock);

	return (used);
}
/*
 * We have to release the fsid synchronously or we risk that a subsequent
 * mount of the same dataset will fail to unique_insert the fsid.  This
 * failure would manifest itself as the fsid of this dataset changing
 * between mounts which makes NFS clients quite unhappy.
 */
static void
dsl_dataset_evict_sync(void *dbu)
{
	dsl_dataset_t *ds = dbu;

	ASSERT(ds->ds_owner == NULL);

	unique_remove(ds->ds_fsid_guid);
}
static void
dsl_dataset_evict_async(void *dbu)
{
	dsl_dataset_t *ds = dbu;

	ASSERT(ds->ds_owner == NULL);

	ds->ds_dbuf = NULL;

	if (ds->ds_objset != NULL)
		dmu_objset_evict(ds->ds_objset);

	if (ds->ds_prev) {
		dsl_dataset_rele(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	bplist_destroy(&ds->ds_pending_deadlist);
	if (ds->ds_deadlist.dl_os != NULL)
		dsl_deadlist_close(&ds->ds_deadlist);
	if (ds->ds_dir)
		dsl_dir_async_rele(ds->ds_dir, ds);

	ASSERT(!list_link_active(&ds->ds_synced_link));

	list_destroy(&ds->ds_prop_cbs);
	mutex_destroy(&ds->ds_lock);
	mutex_destroy(&ds->ds_opening_lock);
	mutex_destroy(&ds->ds_sendstream_lock);
	refcount_destroy(&ds->ds_longholds);
	rrw_destroy(&ds->ds_bp_rwlock);

	kmem_free(ds, sizeof (dsl_dataset_t));
}
int
dsl_dataset_get_snapname(dsl_dataset_t *ds)
{
	dsl_dataset_phys_t *headphys;
	int err;
	dmu_buf_t *headdbuf;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	if (ds->ds_snapname[0])
		return (0);
	if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0)
		return (0);

	err = dmu_bonus_hold(mos, dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj,
	    FTAG, &headdbuf);
	if (err != 0)
		return (err);
	headphys = headdbuf->db_data;
	err = zap_value_search(dp->dp_meta_objset,
	    headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
	dmu_buf_rele(headdbuf, FTAG);
	return (err);
}
int
dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
	matchtype_t mt = 0;
	int err;

	if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_NORMALIZE;

	err = zap_lookup_norm(mos, snapobj, name, 8, 1,
	    value, mt, NULL, 0, NULL);
	if (err == ENOTSUP && (mt & MT_NORMALIZE))
		err = zap_lookup(mos, snapobj, name, 8, 1, value);
	return (err);
}
int
dsl_dataset_snap_remove(dsl_dataset_t *ds, const char *name, dmu_tx_t *tx,
    boolean_t adj_cnt)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
	matchtype_t mt = 0;
	int err;

	dsl_dir_snap_cmtime_update(ds->ds_dir);

	if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_NORMALIZE;

	err = zap_remove_norm(mos, snapobj, name, mt, tx);
	if (err == ENOTSUP && (mt & MT_NORMALIZE))
		err = zap_remove(mos, snapobj, name, tx);

	if (err == 0 && adj_cnt)
		dsl_fs_ss_count_adjust(ds->ds_dir, -1,
		    DD_FIELD_SNAPSHOT_COUNT, tx);

	return (err);
}
boolean_t
dsl_dataset_try_add_ref(dsl_pool_t *dp, dsl_dataset_t *ds, void *tag)
{
	dmu_buf_t *dbuf = ds->ds_dbuf;
	boolean_t result = B_FALSE;

	if (dbuf != NULL && dmu_buf_try_add_ref(dbuf, dp->dp_meta_objset,
	    ds->ds_object, DMU_BONUS_BLKID, tag)) {

		if (ds == dmu_buf_get_user(dbuf))
			result = B_TRUE;
		else
			dmu_buf_rele(dbuf, tag);
	}

	return (result);
}
379 dsl_dataset_hold_obj(dsl_pool_t
*dp
, uint64_t dsobj
, void *tag
,
382 objset_t
*mos
= dp
->dp_meta_objset
;
386 dmu_object_info_t doi
;
388 ASSERT(dsl_pool_config_held(dp
));
390 err
= dmu_bonus_hold(mos
, dsobj
, tag
, &dbuf
);
394 /* Make sure dsobj has the correct object type. */
395 dmu_object_info_from_db(dbuf
, &doi
);
396 if (doi
.doi_bonus_type
!= DMU_OT_DSL_DATASET
) {
397 dmu_buf_rele(dbuf
, tag
);
398 return (SET_ERROR(EINVAL
));
401 ds
= dmu_buf_get_user(dbuf
);
403 dsl_dataset_t
*winner
= NULL
;
405 ds
= kmem_zalloc(sizeof (dsl_dataset_t
), KM_SLEEP
);
407 ds
->ds_object
= dsobj
;
408 ds
->ds_is_snapshot
= dsl_dataset_phys(ds
)->ds_num_children
!= 0;
410 mutex_init(&ds
->ds_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
411 mutex_init(&ds
->ds_opening_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
412 mutex_init(&ds
->ds_sendstream_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
413 rrw_init(&ds
->ds_bp_rwlock
, B_FALSE
);
414 refcount_create(&ds
->ds_longholds
);
416 bplist_create(&ds
->ds_pending_deadlist
);
417 dsl_deadlist_open(&ds
->ds_deadlist
,
418 mos
, dsl_dataset_phys(ds
)->ds_deadlist_obj
);
420 list_create(&ds
->ds_sendstreams
, sizeof (dmu_sendarg_t
),
421 offsetof(dmu_sendarg_t
, dsa_link
));
423 list_create(&ds
->ds_prop_cbs
, sizeof (dsl_prop_cb_record_t
),
424 offsetof(dsl_prop_cb_record_t
, cbr_ds_node
));
426 if (doi
.doi_type
== DMU_OTN_ZAP_METADATA
) {
427 for (spa_feature_t f
= 0; f
< SPA_FEATURES
; f
++) {
428 if (!(spa_feature_table
[f
].fi_flags
&
429 ZFEATURE_FLAG_PER_DATASET
))
431 err
= zap_contains(mos
, dsobj
,
432 spa_feature_table
[f
].fi_guid
);
434 ds
->ds_feature_inuse
[f
] = B_TRUE
;
436 ASSERT3U(err
, ==, ENOENT
);
442 err
= dsl_dir_hold_obj(dp
,
443 dsl_dataset_phys(ds
)->ds_dir_obj
, NULL
, ds
, &ds
->ds_dir
);
445 mutex_destroy(&ds
->ds_lock
);
446 mutex_destroy(&ds
->ds_opening_lock
);
447 mutex_destroy(&ds
->ds_sendstream_lock
);
448 refcount_destroy(&ds
->ds_longholds
);
449 bplist_destroy(&ds
->ds_pending_deadlist
);
450 dsl_deadlist_close(&ds
->ds_deadlist
);
451 kmem_free(ds
, sizeof (dsl_dataset_t
));
452 dmu_buf_rele(dbuf
, tag
);
456 if (!ds
->ds_is_snapshot
) {
457 ds
->ds_snapname
[0] = '\0';
458 if (dsl_dataset_phys(ds
)->ds_prev_snap_obj
!= 0) {
459 err
= dsl_dataset_hold_obj(dp
,
460 dsl_dataset_phys(ds
)->ds_prev_snap_obj
,
463 if (doi
.doi_type
== DMU_OTN_ZAP_METADATA
) {
464 int zaperr
= zap_lookup(mos
, ds
->ds_object
,
465 DS_FIELD_BOOKMARK_NAMES
,
466 sizeof (ds
->ds_bookmarks
), 1,
468 if (zaperr
!= ENOENT
)
472 if (zfs_flags
& ZFS_DEBUG_SNAPNAMES
)
473 err
= dsl_dataset_get_snapname(ds
);
475 dsl_dataset_phys(ds
)->ds_userrefs_obj
!= 0) {
477 ds
->ds_dir
->dd_pool
->dp_meta_objset
,
478 dsl_dataset_phys(ds
)->ds_userrefs_obj
,
483 if (err
== 0 && !ds
->ds_is_snapshot
) {
484 err
= dsl_prop_get_int_ds(ds
,
485 zfs_prop_to_name(ZFS_PROP_REFRESERVATION
),
488 err
= dsl_prop_get_int_ds(ds
,
489 zfs_prop_to_name(ZFS_PROP_REFQUOTA
),
493 ds
->ds_reserved
= ds
->ds_quota
= 0;
496 dmu_buf_init_user(&ds
->ds_dbu
, dsl_dataset_evict_sync
,
497 dsl_dataset_evict_async
, &ds
->ds_dbuf
);
499 winner
= dmu_buf_set_user_ie(dbuf
, &ds
->ds_dbu
);
501 if (err
!= 0 || winner
!= NULL
) {
502 bplist_destroy(&ds
->ds_pending_deadlist
);
503 dsl_deadlist_close(&ds
->ds_deadlist
);
505 dsl_dataset_rele(ds
->ds_prev
, ds
);
506 dsl_dir_rele(ds
->ds_dir
, ds
);
507 mutex_destroy(&ds
->ds_lock
);
508 mutex_destroy(&ds
->ds_opening_lock
);
509 mutex_destroy(&ds
->ds_sendstream_lock
);
510 refcount_destroy(&ds
->ds_longholds
);
511 kmem_free(ds
, sizeof (dsl_dataset_t
));
513 dmu_buf_rele(dbuf
, tag
);
519 unique_insert(dsl_dataset_phys(ds
)->ds_fsid_guid
);
520 if (ds
->ds_fsid_guid
!=
521 dsl_dataset_phys(ds
)->ds_fsid_guid
) {
522 zfs_dbgmsg("ds_fsid_guid changed from "
523 "%llx to %llx for pool %s dataset id %llu",
525 dsl_dataset_phys(ds
)->ds_fsid_guid
,
526 (long long)ds
->ds_fsid_guid
,
527 spa_name(dp
->dp_spa
),
532 ASSERT3P(ds
->ds_dbuf
, ==, dbuf
);
533 ASSERT3P(dsl_dataset_phys(ds
), ==, dbuf
->db_data
);
534 ASSERT(dsl_dataset_phys(ds
)->ds_prev_snap_obj
!= 0 ||
535 spa_version(dp
->dp_spa
) < SPA_VERSION_ORIGIN
||
536 dp
->dp_origin_snap
== NULL
|| ds
== dp
->dp_origin_snap
);
542 dsl_dataset_hold(dsl_pool_t
*dp
, const char *name
,
543 void *tag
, dsl_dataset_t
**dsp
)
546 const char *snapname
;
551 err
= dsl_dir_hold(dp
, name
, FTAG
, &dd
, &snapname
);
555 ASSERT(dsl_pool_config_held(dp
));
556 obj
= dsl_dir_phys(dd
)->dd_head_dataset_obj
;
558 err
= dsl_dataset_hold_obj(dp
, obj
, tag
, &ds
);
560 err
= SET_ERROR(ENOENT
);
562 /* we may be looking for a snapshot */
563 if (err
== 0 && snapname
!= NULL
) {
564 dsl_dataset_t
*snap_ds
;
566 if (*snapname
++ != '@') {
567 dsl_dataset_rele(ds
, tag
);
568 dsl_dir_rele(dd
, FTAG
);
569 return (SET_ERROR(ENOENT
));
572 dprintf("looking for snapshot '%s'\n", snapname
);
573 err
= dsl_dataset_snap_lookup(ds
, snapname
, &obj
);
575 err
= dsl_dataset_hold_obj(dp
, obj
, tag
, &snap_ds
);
576 dsl_dataset_rele(ds
, tag
);
579 mutex_enter(&snap_ds
->ds_lock
);
580 if (snap_ds
->ds_snapname
[0] == 0)
581 (void) strlcpy(snap_ds
->ds_snapname
, snapname
,
582 sizeof (snap_ds
->ds_snapname
));
583 mutex_exit(&snap_ds
->ds_lock
);
589 dsl_dir_rele(dd
, FTAG
);
594 dsl_dataset_own_obj(dsl_pool_t
*dp
, uint64_t dsobj
,
595 void *tag
, dsl_dataset_t
**dsp
)
597 int err
= dsl_dataset_hold_obj(dp
, dsobj
, tag
, dsp
);
600 if (!dsl_dataset_tryown(*dsp
, tag
)) {
601 dsl_dataset_rele(*dsp
, tag
);
603 return (SET_ERROR(EBUSY
));
609 dsl_dataset_own(dsl_pool_t
*dp
, const char *name
,
610 void *tag
, dsl_dataset_t
**dsp
)
612 int err
= dsl_dataset_hold(dp
, name
, tag
, dsp
);
615 if (!dsl_dataset_tryown(*dsp
, tag
)) {
616 dsl_dataset_rele(*dsp
, tag
);
617 return (SET_ERROR(EBUSY
));
/*
 * See the comment above dsl_pool_hold() for details.  In summary, a long
 * hold is used to prevent destruction of a dataset while the pool hold
 * is dropped, allowing other concurrent operations (e.g. spa_sync()).
 *
 * The dataset and pool must be held when this function is called.  After it
 * is called, the pool hold may be released while the dataset is still held
 * and accessed.
 */
632 dsl_dataset_long_hold(dsl_dataset_t
*ds
, void *tag
)
634 ASSERT(dsl_pool_config_held(ds
->ds_dir
->dd_pool
));
635 (void) refcount_add(&ds
->ds_longholds
, tag
);
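
/*
 * Illustrative usage sketch (not taken from this file; the argument order
 * for dsl_pool_hold()/dsl_pool_rele() is assumed from dsl_pool.h):
 *
 *	dsl_pool_hold(name, FTAG, &dp);
 *	dsl_dataset_hold(dp, name, FTAG, &ds);
 *	dsl_dataset_long_hold(ds, FTAG);
 *	dsl_pool_rele(dp, FTAG);
 *	... the dataset cannot be destroyed while the long hold is held ...
 *	dsl_pool_hold(name, FTAG, &dp);
 *	dsl_dataset_long_rele(ds, FTAG);
 *	dsl_dataset_rele(ds, FTAG);
 *	dsl_pool_rele(dp, FTAG);
 */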
639 dsl_dataset_long_rele(dsl_dataset_t
*ds
, void *tag
)
641 (void) refcount_remove(&ds
->ds_longholds
, tag
);
644 /* Return B_TRUE if there are any long holds on this dataset. */
646 dsl_dataset_long_held(dsl_dataset_t
*ds
)
648 return (!refcount_is_zero(&ds
->ds_longholds
));
652 dsl_dataset_name(dsl_dataset_t
*ds
, char *name
)
655 (void) strcpy(name
, "mos");
657 dsl_dir_name(ds
->ds_dir
, name
);
658 VERIFY0(dsl_dataset_get_snapname(ds
));
659 if (ds
->ds_snapname
[0]) {
660 VERIFY3U(strlcat(name
, "@", ZFS_MAX_DATASET_NAME_LEN
),
661 <, ZFS_MAX_DATASET_NAME_LEN
);
663 * We use a "recursive" mutex so that we
664 * can call dprintf_ds() with ds_lock held.
666 if (!MUTEX_HELD(&ds
->ds_lock
)) {
667 mutex_enter(&ds
->ds_lock
);
668 VERIFY3U(strlcat(name
, ds
->ds_snapname
,
669 ZFS_MAX_DATASET_NAME_LEN
), <,
670 ZFS_MAX_DATASET_NAME_LEN
);
671 mutex_exit(&ds
->ds_lock
);
673 VERIFY3U(strlcat(name
, ds
->ds_snapname
,
674 ZFS_MAX_DATASET_NAME_LEN
), <,
675 ZFS_MAX_DATASET_NAME_LEN
);
682 dsl_dataset_namelen(dsl_dataset_t
*ds
)
684 VERIFY0(dsl_dataset_get_snapname(ds
));
685 mutex_enter(&ds
->ds_lock
);
686 int len
= dsl_dir_namelen(ds
->ds_dir
) + 1 + strlen(ds
->ds_snapname
);
687 mutex_exit(&ds
->ds_lock
);
692 dsl_dataset_rele(dsl_dataset_t
*ds
, void *tag
)
694 dmu_buf_rele(ds
->ds_dbuf
, tag
);
698 dsl_dataset_disown(dsl_dataset_t
*ds
, void *tag
)
700 ASSERT3P(ds
->ds_owner
, ==, tag
);
701 ASSERT(ds
->ds_dbuf
!= NULL
);
703 mutex_enter(&ds
->ds_lock
);
705 mutex_exit(&ds
->ds_lock
);
706 dsl_dataset_long_rele(ds
, tag
);
707 dsl_dataset_rele(ds
, tag
);
711 dsl_dataset_tryown(dsl_dataset_t
*ds
, void *tag
)
713 boolean_t gotit
= FALSE
;
715 ASSERT(dsl_pool_config_held(ds
->ds_dir
->dd_pool
));
716 mutex_enter(&ds
->ds_lock
);
717 if (ds
->ds_owner
== NULL
&& !DS_IS_INCONSISTENT(ds
)) {
719 dsl_dataset_long_hold(ds
, tag
);
722 mutex_exit(&ds
->ds_lock
);
727 dsl_dataset_has_owner(dsl_dataset_t
*ds
)
730 mutex_enter(&ds
->ds_lock
);
731 rv
= (ds
->ds_owner
!= NULL
);
732 mutex_exit(&ds
->ds_lock
);
737 dsl_dataset_activate_feature(uint64_t dsobj
, spa_feature_t f
, dmu_tx_t
*tx
)
739 spa_t
*spa
= dmu_tx_pool(tx
)->dp_spa
;
740 objset_t
*mos
= dmu_tx_pool(tx
)->dp_meta_objset
;
743 VERIFY(spa_feature_table
[f
].fi_flags
& ZFEATURE_FLAG_PER_DATASET
);
745 spa_feature_incr(spa
, f
, tx
);
746 dmu_object_zapify(mos
, dsobj
, DMU_OT_DSL_DATASET
, tx
);
748 VERIFY0(zap_add(mos
, dsobj
, spa_feature_table
[f
].fi_guid
,
749 sizeof (zero
), 1, &zero
, tx
));
753 dsl_dataset_deactivate_feature(uint64_t dsobj
, spa_feature_t f
, dmu_tx_t
*tx
)
755 spa_t
*spa
= dmu_tx_pool(tx
)->dp_spa
;
756 objset_t
*mos
= dmu_tx_pool(tx
)->dp_meta_objset
;
758 VERIFY(spa_feature_table
[f
].fi_flags
& ZFEATURE_FLAG_PER_DATASET
);
760 VERIFY0(zap_remove(mos
, dsobj
, spa_feature_table
[f
].fi_guid
, tx
));
761 spa_feature_decr(spa
, f
, tx
);
765 dsl_dataset_create_sync_dd(dsl_dir_t
*dd
, dsl_dataset_t
*origin
,
766 uint64_t flags
, dmu_tx_t
*tx
)
768 dsl_pool_t
*dp
= dd
->dd_pool
;
770 dsl_dataset_phys_t
*dsphys
;
772 objset_t
*mos
= dp
->dp_meta_objset
;
775 origin
= dp
->dp_origin_snap
;
777 ASSERT(origin
== NULL
|| origin
->ds_dir
->dd_pool
== dp
);
778 ASSERT(origin
== NULL
|| dsl_dataset_phys(origin
)->ds_num_children
> 0);
779 ASSERT(dmu_tx_is_syncing(tx
));
780 ASSERT(dsl_dir_phys(dd
)->dd_head_dataset_obj
== 0);
782 dsobj
= dmu_object_alloc(mos
, DMU_OT_DSL_DATASET
, 0,
783 DMU_OT_DSL_DATASET
, sizeof (dsl_dataset_phys_t
), tx
);
784 VERIFY0(dmu_bonus_hold(mos
, dsobj
, FTAG
, &dbuf
));
785 dmu_buf_will_dirty(dbuf
, tx
);
786 dsphys
= dbuf
->db_data
;
787 bzero(dsphys
, sizeof (dsl_dataset_phys_t
));
788 dsphys
->ds_dir_obj
= dd
->dd_object
;
789 dsphys
->ds_flags
= flags
;
790 dsphys
->ds_fsid_guid
= unique_create();
791 (void) random_get_pseudo_bytes((void*)&dsphys
->ds_guid
,
792 sizeof (dsphys
->ds_guid
));
793 dsphys
->ds_snapnames_zapobj
=
794 zap_create_norm(mos
, U8_TEXTPREP_TOUPPER
, DMU_OT_DSL_DS_SNAP_MAP
,
796 dsphys
->ds_creation_time
= gethrestime_sec();
797 dsphys
->ds_creation_txg
= tx
->tx_txg
== TXG_INITIAL
? 1 : tx
->tx_txg
;
799 if (origin
== NULL
) {
800 dsphys
->ds_deadlist_obj
= dsl_deadlist_alloc(mos
, tx
);
802 dsl_dataset_t
*ohds
; /* head of the origin snapshot */
804 dsphys
->ds_prev_snap_obj
= origin
->ds_object
;
805 dsphys
->ds_prev_snap_txg
=
806 dsl_dataset_phys(origin
)->ds_creation_txg
;
807 dsphys
->ds_referenced_bytes
=
808 dsl_dataset_phys(origin
)->ds_referenced_bytes
;
809 dsphys
->ds_compressed_bytes
=
810 dsl_dataset_phys(origin
)->ds_compressed_bytes
;
811 dsphys
->ds_uncompressed_bytes
=
812 dsl_dataset_phys(origin
)->ds_uncompressed_bytes
;
813 rrw_enter(&origin
->ds_bp_rwlock
, RW_READER
, FTAG
);
814 dsphys
->ds_bp
= dsl_dataset_phys(origin
)->ds_bp
;
815 rrw_exit(&origin
->ds_bp_rwlock
, FTAG
);
818 * Inherit flags that describe the dataset's contents
819 * (INCONSISTENT) or properties (Case Insensitive).
821 dsphys
->ds_flags
|= dsl_dataset_phys(origin
)->ds_flags
&
822 (DS_FLAG_INCONSISTENT
| DS_FLAG_CI_DATASET
);
824 for (spa_feature_t f
= 0; f
< SPA_FEATURES
; f
++) {
825 if (origin
->ds_feature_inuse
[f
])
826 dsl_dataset_activate_feature(dsobj
, f
, tx
);
829 dmu_buf_will_dirty(origin
->ds_dbuf
, tx
);
830 dsl_dataset_phys(origin
)->ds_num_children
++;
832 VERIFY0(dsl_dataset_hold_obj(dp
,
833 dsl_dir_phys(origin
->ds_dir
)->dd_head_dataset_obj
,
835 dsphys
->ds_deadlist_obj
= dsl_deadlist_clone(&ohds
->ds_deadlist
,
836 dsphys
->ds_prev_snap_txg
, dsphys
->ds_prev_snap_obj
, tx
);
837 dsl_dataset_rele(ohds
, FTAG
);
839 if (spa_version(dp
->dp_spa
) >= SPA_VERSION_NEXT_CLONES
) {
840 if (dsl_dataset_phys(origin
)->ds_next_clones_obj
== 0) {
841 dsl_dataset_phys(origin
)->ds_next_clones_obj
=
843 DMU_OT_NEXT_CLONES
, DMU_OT_NONE
, 0, tx
);
845 VERIFY0(zap_add_int(mos
,
846 dsl_dataset_phys(origin
)->ds_next_clones_obj
,
850 dmu_buf_will_dirty(dd
->dd_dbuf
, tx
);
851 dsl_dir_phys(dd
)->dd_origin_obj
= origin
->ds_object
;
852 if (spa_version(dp
->dp_spa
) >= SPA_VERSION_DIR_CLONES
) {
853 if (dsl_dir_phys(origin
->ds_dir
)->dd_clones
== 0) {
854 dmu_buf_will_dirty(origin
->ds_dir
->dd_dbuf
, tx
);
855 dsl_dir_phys(origin
->ds_dir
)->dd_clones
=
857 DMU_OT_DSL_CLONES
, DMU_OT_NONE
, 0, tx
);
859 VERIFY0(zap_add_int(mos
,
860 dsl_dir_phys(origin
->ds_dir
)->dd_clones
,
865 if (spa_version(dp
->dp_spa
) >= SPA_VERSION_UNIQUE_ACCURATE
)
866 dsphys
->ds_flags
|= DS_FLAG_UNIQUE_ACCURATE
;
868 dmu_buf_rele(dbuf
, FTAG
);
870 dmu_buf_will_dirty(dd
->dd_dbuf
, tx
);
871 dsl_dir_phys(dd
)->dd_head_dataset_obj
= dsobj
;
877 dsl_dataset_zero_zil(dsl_dataset_t
*ds
, dmu_tx_t
*tx
)
881 VERIFY0(dmu_objset_from_ds(ds
, &os
));
882 if (bcmp(&os
->os_zil_header
, &zero_zil
, sizeof (zero_zil
)) != 0) {
883 dsl_pool_t
*dp
= ds
->ds_dir
->dd_pool
;
886 bzero(&os
->os_zil_header
, sizeof (os
->os_zil_header
));
888 zio
= zio_root(dp
->dp_spa
, NULL
, NULL
, ZIO_FLAG_MUSTSUCCEED
);
889 dsl_dataset_sync(ds
, zio
, tx
);
890 VERIFY0(zio_wait(zio
));
892 /* dsl_dataset_sync_done will drop this reference. */
893 dmu_buf_add_ref(ds
->ds_dbuf
, ds
);
894 dsl_dataset_sync_done(ds
, tx
);
899 dsl_dataset_create_sync(dsl_dir_t
*pdd
, const char *lastname
,
900 dsl_dataset_t
*origin
, uint64_t flags
, cred_t
*cr
, dmu_tx_t
*tx
)
902 dsl_pool_t
*dp
= pdd
->dd_pool
;
903 uint64_t dsobj
, ddobj
;
906 ASSERT(dmu_tx_is_syncing(tx
));
907 ASSERT(lastname
[0] != '@');
909 ddobj
= dsl_dir_create_sync(dp
, pdd
, lastname
, tx
);
910 VERIFY0(dsl_dir_hold_obj(dp
, ddobj
, lastname
, FTAG
, &dd
));
912 dsobj
= dsl_dataset_create_sync_dd(dd
, origin
,
913 flags
& ~DS_CREATE_FLAG_NODIRTY
, tx
);
915 dsl_deleg_set_create_perms(dd
, tx
, cr
);
918 * Since we're creating a new node we know it's a leaf, so we can
919 * initialize the counts if the limit feature is active.
921 if (spa_feature_is_active(dp
->dp_spa
, SPA_FEATURE_FS_SS_LIMIT
)) {
923 objset_t
*os
= dd
->dd_pool
->dp_meta_objset
;
925 dsl_dir_zapify(dd
, tx
);
926 VERIFY0(zap_add(os
, dd
->dd_object
, DD_FIELD_FILESYSTEM_COUNT
,
927 sizeof (cnt
), 1, &cnt
, tx
));
928 VERIFY0(zap_add(os
, dd
->dd_object
, DD_FIELD_SNAPSHOT_COUNT
,
929 sizeof (cnt
), 1, &cnt
, tx
));
932 dsl_dir_rele(dd
, FTAG
);
 * If we are creating a clone, make sure we zero out any stale
 * data from the origin snapshot's zil header.
938 if (origin
!= NULL
&& !(flags
& DS_CREATE_FLAG_NODIRTY
)) {
941 VERIFY0(dsl_dataset_hold_obj(dp
, dsobj
, FTAG
, &ds
));
942 dsl_dataset_zero_zil(ds
, tx
);
943 dsl_dataset_rele(ds
, FTAG
);
950 * The unique space in the head dataset can be calculated by subtracting
951 * the space used in the most recent snapshot, that is still being used
952 * in this file system, from the space currently in use. To figure out
953 * the space in the most recent snapshot still in use, we need to take
954 * the total space used in the snapshot and subtract out the space that
955 * has been freed up since the snapshot was taken.
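
/*
 * Worked example (hypothetical numbers): if the head references 100G, the
 * most recent snapshot references 80G, and 30G of that snapshot's data has
 * since been freed (and therefore lives on the head's deadlist), then the
 * snapshot still shares 80G - 30G = 50G with the head, and the head's
 * unique space is 100G - 50G = 50G.
 */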
958 dsl_dataset_recalc_head_uniq(dsl_dataset_t
*ds
)
961 uint64_t dlused
, dlcomp
, dluncomp
;
963 ASSERT(!ds
->ds_is_snapshot
);
965 if (dsl_dataset_phys(ds
)->ds_prev_snap_obj
!= 0)
966 mrs_used
= dsl_dataset_phys(ds
->ds_prev
)->ds_referenced_bytes
;
970 dsl_deadlist_space(&ds
->ds_deadlist
, &dlused
, &dlcomp
, &dluncomp
);
972 ASSERT3U(dlused
, <=, mrs_used
);
973 dsl_dataset_phys(ds
)->ds_unique_bytes
=
974 dsl_dataset_phys(ds
)->ds_referenced_bytes
- (mrs_used
- dlused
);
976 if (spa_version(ds
->ds_dir
->dd_pool
->dp_spa
) >=
977 SPA_VERSION_UNIQUE_ACCURATE
)
978 dsl_dataset_phys(ds
)->ds_flags
|= DS_FLAG_UNIQUE_ACCURATE
;
982 dsl_dataset_remove_from_next_clones(dsl_dataset_t
*ds
, uint64_t obj
,
985 objset_t
*mos
= ds
->ds_dir
->dd_pool
->dp_meta_objset
;
989 ASSERT(dsl_dataset_phys(ds
)->ds_num_children
>= 2);
990 err
= zap_remove_int(mos
, dsl_dataset_phys(ds
)->ds_next_clones_obj
,
993 * The err should not be ENOENT, but a bug in a previous version
994 * of the code could cause upgrade_clones_cb() to not set
995 * ds_next_snap_obj when it should, leading to a missing entry.
996 * If we knew that the pool was created after
997 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
998 * ENOENT. However, at least we can check that we don't have
999 * too many entries in the next_clones_obj even after failing to
1004 ASSERT0(zap_count(mos
, dsl_dataset_phys(ds
)->ds_next_clones_obj
,
1006 ASSERT3U(count
, <=, dsl_dataset_phys(ds
)->ds_num_children
- 2);
1011 dsl_dataset_get_blkptr(dsl_dataset_t
*ds
)
1013 return (&dsl_dataset_phys(ds
)->ds_bp
);
1017 dsl_dataset_get_spa(dsl_dataset_t
*ds
)
1019 return (ds
->ds_dir
->dd_pool
->dp_spa
);
1023 dsl_dataset_dirty(dsl_dataset_t
*ds
, dmu_tx_t
*tx
)
1027 if (ds
== NULL
) /* this is the meta-objset */
1030 ASSERT(ds
->ds_objset
!= NULL
);
1032 if (dsl_dataset_phys(ds
)->ds_next_snap_obj
!= 0)
1033 panic("dirtying snapshot!");
1035 /* Must not dirty a dataset in the same txg where it got snapshotted. */
1036 ASSERT3U(tx
->tx_txg
, >, dsl_dataset_phys(ds
)->ds_prev_snap_txg
);
1038 dp
= ds
->ds_dir
->dd_pool
;
1039 if (txg_list_add(&dp
->dp_dirty_datasets
, ds
, tx
->tx_txg
)) {
1040 /* up the hold count until we can be written out */
1041 dmu_buf_add_ref(ds
->ds_dbuf
, ds
);
1046 dsl_dataset_is_dirty(dsl_dataset_t
*ds
)
1048 for (int t
= 0; t
< TXG_SIZE
; t
++) {
1049 if (txg_list_member(&ds
->ds_dir
->dd_pool
->dp_dirty_datasets
,
1057 dsl_dataset_snapshot_reserve_space(dsl_dataset_t
*ds
, dmu_tx_t
*tx
)
1061 if (!dmu_tx_is_syncing(tx
))
1065 * If there's an fs-only reservation, any blocks that might become
1066 * owned by the snapshot dataset must be accommodated by space
1067 * outside of the reservation.
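
/*
 * Example of the check below (hypothetical numbers): with a 10G
 * refreservation and 4G currently unique to the head, a snapshot would hand
 * those 4G over to the snapshot, so asize = MIN(4G, 10G) = 4G of additional
 * space must be available outside the reservation or the snapshot fails
 * with ENOSPC.
 */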
1069 ASSERT(ds
->ds_reserved
== 0 || DS_UNIQUE_IS_ACCURATE(ds
));
1070 asize
= MIN(dsl_dataset_phys(ds
)->ds_unique_bytes
, ds
->ds_reserved
);
1071 if (asize
> dsl_dir_space_available(ds
->ds_dir
, NULL
, 0, TRUE
))
1072 return (SET_ERROR(ENOSPC
));
1075 * Propagate any reserved space for this snapshot to other
1076 * snapshot checks in this sync group.
1079 dsl_dir_willuse_space(ds
->ds_dir
, asize
, tx
);
1084 typedef struct dsl_dataset_snapshot_arg
{
1085 nvlist_t
*ddsa_snaps
;
1086 nvlist_t
*ddsa_props
;
1087 nvlist_t
*ddsa_errors
;
1089 } dsl_dataset_snapshot_arg_t
;
1092 dsl_dataset_snapshot_check_impl(dsl_dataset_t
*ds
, const char *snapname
,
1093 dmu_tx_t
*tx
, boolean_t recv
, uint64_t cnt
, cred_t
*cr
)
1098 ds
->ds_trysnap_txg
= tx
->tx_txg
;
1100 if (!dmu_tx_is_syncing(tx
))
1104 * We don't allow multiple snapshots of the same txg. If there
1105 * is already one, try again.
1107 if (dsl_dataset_phys(ds
)->ds_prev_snap_txg
>= tx
->tx_txg
)
1108 return (SET_ERROR(EAGAIN
));
1111 * Check for conflicting snapshot name.
1113 error
= dsl_dataset_snap_lookup(ds
, snapname
, &value
);
1115 return (SET_ERROR(EEXIST
));
1116 if (error
!= ENOENT
)
1120 * We don't allow taking snapshots of inconsistent datasets, such as
1121 * those into which we are currently receiving. However, if we are
1122 * creating this snapshot as part of a receive, this check will be
1123 * executed atomically with respect to the completion of the receive
1124 * itself but prior to the clearing of DS_FLAG_INCONSISTENT; in this
1125 * case we ignore this, knowing it will be fixed up for us shortly in
1126 * dmu_recv_end_sync().
1128 if (!recv
&& DS_IS_INCONSISTENT(ds
))
1129 return (SET_ERROR(EBUSY
));
1132 * Skip the check for temporary snapshots or if we have already checked
1133 * the counts in dsl_dataset_snapshot_check. This means we really only
1134 * check the count here when we're receiving a stream.
1136 if (cnt
!= 0 && cr
!= NULL
) {
1137 error
= dsl_fs_ss_limit_check(ds
->ds_dir
, cnt
,
1138 ZFS_PROP_SNAPSHOT_LIMIT
, NULL
, cr
);
1143 error
= dsl_dataset_snapshot_reserve_space(ds
, tx
);
1151 dsl_dataset_snapshot_check(void *arg
, dmu_tx_t
*tx
)
1153 dsl_dataset_snapshot_arg_t
*ddsa
= arg
;
1154 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
1159 * Pre-compute how many total new snapshots will be created for each
1160 * level in the tree and below. This is needed for validating the
1161 * snapshot limit when either taking a recursive snapshot or when
1162 * taking multiple snapshots.
1164 * The problem is that the counts are not actually adjusted when
1165 * we are checking, only when we finally sync. For a single snapshot,
1166 * this is easy, the count will increase by 1 at each node up the tree,
 * but it's more complicated for the recursive/multiple snapshot case.
1169 * The dsl_fs_ss_limit_check function does recursively check the count
1170 * at each level up the tree but since it is validating each snapshot
1171 * independently we need to be sure that we are validating the complete
1172 * count for the entire set of snapshots. We do this by rolling up the
1173 * counts for each component of the name into an nvlist and then
1174 * checking each of those cases with the aggregated count.
1176 * This approach properly handles not only the recursive snapshot
1177 * case (where we get all of those on the ddsa_snaps list) but also
1178 * the sibling case (e.g. snapshot a/b and a/c so that we will also
1179 * validate the limit on 'a' using a count of 2).
1181 * We validate the snapshot names in the third loop and only report
1184 if (dmu_tx_is_syncing(tx
)) {
1185 nvlist_t
*cnt_track
= NULL
;
1186 cnt_track
= fnvlist_alloc();
1188 /* Rollup aggregated counts into the cnt_track list */
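/*
 * For example, snapshot requests for a/b@today and a/c@today roll up to
 * { "a/b": 1, "a/c": 1, "a": 2 }, so the limit on 'a' is checked against
 * a count of 2 rather than twice against a count of 1.
 */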
1189 for (pair
= nvlist_next_nvpair(ddsa
->ddsa_snaps
, NULL
);
1191 pair
= nvlist_next_nvpair(ddsa
->ddsa_snaps
, pair
)) {
1194 char nm
[MAXPATHLEN
];
1196 (void) strlcpy(nm
, nvpair_name(pair
), sizeof (nm
));
1197 pdelim
= strchr(nm
, '@');
1203 if (nvlist_lookup_uint64(cnt_track
, nm
,
1205 /* update existing entry */
1206 fnvlist_add_uint64(cnt_track
, nm
,
1210 fnvlist_add_uint64(cnt_track
, nm
, 1);
1213 pdelim
= strrchr(nm
, '/');
1216 } while (pdelim
!= NULL
);
1219 /* Check aggregated counts at each level */
1220 for (pair
= nvlist_next_nvpair(cnt_track
, NULL
);
1221 pair
!= NULL
; pair
= nvlist_next_nvpair(cnt_track
, pair
)) {
1227 name
= nvpair_name(pair
);
1228 cnt
= fnvpair_value_uint64(pair
);
1231 error
= dsl_dataset_hold(dp
, name
, FTAG
, &ds
);
1233 error
= dsl_fs_ss_limit_check(ds
->ds_dir
, cnt
,
1234 ZFS_PROP_SNAPSHOT_LIMIT
, NULL
,
1236 dsl_dataset_rele(ds
, FTAG
);
1240 if (ddsa
->ddsa_errors
!= NULL
)
1241 fnvlist_add_int32(ddsa
->ddsa_errors
,
1244 /* only report one error for this check */
1248 nvlist_free(cnt_track
);
1251 for (pair
= nvlist_next_nvpair(ddsa
->ddsa_snaps
, NULL
);
1252 pair
!= NULL
; pair
= nvlist_next_nvpair(ddsa
->ddsa_snaps
, pair
)) {
1256 char dsname
[ZFS_MAX_DATASET_NAME_LEN
];
1258 name
= nvpair_name(pair
);
1259 if (strlen(name
) >= ZFS_MAX_DATASET_NAME_LEN
)
1260 error
= SET_ERROR(ENAMETOOLONG
);
1262 atp
= strchr(name
, '@');
1264 error
= SET_ERROR(EINVAL
);
1266 (void) strlcpy(dsname
, name
, atp
- name
+ 1);
1269 error
= dsl_dataset_hold(dp
, dsname
, FTAG
, &ds
);
1271 /* passing 0/NULL skips dsl_fs_ss_limit_check */
1272 error
= dsl_dataset_snapshot_check_impl(ds
,
1273 atp
+ 1, tx
, B_FALSE
, 0, NULL
);
1274 dsl_dataset_rele(ds
, FTAG
);
1278 if (ddsa
->ddsa_errors
!= NULL
) {
1279 fnvlist_add_int32(ddsa
->ddsa_errors
,
1290 dsl_dataset_snapshot_sync_impl(dsl_dataset_t
*ds
, const char *snapname
,
1293 dsl_pool_t
*dp
= ds
->ds_dir
->dd_pool
;
1295 dsl_dataset_phys_t
*dsphys
;
1296 uint64_t dsobj
, crtxg
;
1297 objset_t
*mos
= dp
->dp_meta_objset
;
1300 ASSERT(RRW_WRITE_HELD(&dp
->dp_config_rwlock
));
1303 * If we are on an old pool, the zil must not be active, in which
1304 * case it will be zeroed. Usually zil_suspend() accomplishes this.
1306 ASSERT(spa_version(dmu_tx_pool(tx
)->dp_spa
) >= SPA_VERSION_FAST_SNAP
||
1307 dmu_objset_from_ds(ds
, &os
) != 0 ||
1308 bcmp(&os
->os_phys
->os_zil_header
, &zero_zil
,
1309 sizeof (zero_zil
)) == 0);
1311 /* Should not snapshot a dirty dataset. */
1312 ASSERT(!txg_list_member(&ds
->ds_dir
->dd_pool
->dp_dirty_datasets
,
1315 dsl_fs_ss_count_adjust(ds
->ds_dir
, 1, DD_FIELD_SNAPSHOT_COUNT
, tx
);
1318 * The origin's ds_creation_txg has to be < TXG_INITIAL
1320 if (strcmp(snapname
, ORIGIN_DIR_NAME
) == 0)
1325 dsobj
= dmu_object_alloc(mos
, DMU_OT_DSL_DATASET
, 0,
1326 DMU_OT_DSL_DATASET
, sizeof (dsl_dataset_phys_t
), tx
);
1327 VERIFY0(dmu_bonus_hold(mos
, dsobj
, FTAG
, &dbuf
));
1328 dmu_buf_will_dirty(dbuf
, tx
);
1329 dsphys
= dbuf
->db_data
;
1330 bzero(dsphys
, sizeof (dsl_dataset_phys_t
));
1331 dsphys
->ds_dir_obj
= ds
->ds_dir
->dd_object
;
1332 dsphys
->ds_fsid_guid
= unique_create();
1333 (void) random_get_pseudo_bytes((void*)&dsphys
->ds_guid
,
1334 sizeof (dsphys
->ds_guid
));
1335 dsphys
->ds_prev_snap_obj
= dsl_dataset_phys(ds
)->ds_prev_snap_obj
;
1336 dsphys
->ds_prev_snap_txg
= dsl_dataset_phys(ds
)->ds_prev_snap_txg
;
1337 dsphys
->ds_next_snap_obj
= ds
->ds_object
;
1338 dsphys
->ds_num_children
= 1;
1339 dsphys
->ds_creation_time
= gethrestime_sec();
1340 dsphys
->ds_creation_txg
= crtxg
;
1341 dsphys
->ds_deadlist_obj
= dsl_dataset_phys(ds
)->ds_deadlist_obj
;
1342 dsphys
->ds_referenced_bytes
= dsl_dataset_phys(ds
)->ds_referenced_bytes
;
1343 dsphys
->ds_compressed_bytes
= dsl_dataset_phys(ds
)->ds_compressed_bytes
;
1344 dsphys
->ds_uncompressed_bytes
=
1345 dsl_dataset_phys(ds
)->ds_uncompressed_bytes
;
1346 dsphys
->ds_flags
= dsl_dataset_phys(ds
)->ds_flags
;
1347 rrw_enter(&ds
->ds_bp_rwlock
, RW_READER
, FTAG
);
1348 dsphys
->ds_bp
= dsl_dataset_phys(ds
)->ds_bp
;
1349 rrw_exit(&ds
->ds_bp_rwlock
, FTAG
);
1350 dmu_buf_rele(dbuf
, FTAG
);
1352 for (spa_feature_t f
= 0; f
< SPA_FEATURES
; f
++) {
1353 if (ds
->ds_feature_inuse
[f
])
1354 dsl_dataset_activate_feature(dsobj
, f
, tx
);
1357 ASSERT3U(ds
->ds_prev
!= 0, ==,
1358 dsl_dataset_phys(ds
)->ds_prev_snap_obj
!= 0);
1360 uint64_t next_clones_obj
=
1361 dsl_dataset_phys(ds
->ds_prev
)->ds_next_clones_obj
;
1362 ASSERT(dsl_dataset_phys(ds
->ds_prev
)->ds_next_snap_obj
==
1364 dsl_dataset_phys(ds
->ds_prev
)->ds_num_children
> 1);
1365 if (dsl_dataset_phys(ds
->ds_prev
)->ds_next_snap_obj
==
1367 dmu_buf_will_dirty(ds
->ds_prev
->ds_dbuf
, tx
);
1368 ASSERT3U(dsl_dataset_phys(ds
)->ds_prev_snap_txg
, ==,
1369 dsl_dataset_phys(ds
->ds_prev
)->ds_creation_txg
);
1370 dsl_dataset_phys(ds
->ds_prev
)->ds_next_snap_obj
= dsobj
;
1371 } else if (next_clones_obj
!= 0) {
1372 dsl_dataset_remove_from_next_clones(ds
->ds_prev
,
1373 dsphys
->ds_next_snap_obj
, tx
);
1374 VERIFY0(zap_add_int(mos
,
1375 next_clones_obj
, dsobj
, tx
));
1380 * If we have a reference-reservation on this dataset, we will
1381 * need to increase the amount of refreservation being charged
1382 * since our unique space is going to zero.
1384 if (ds
->ds_reserved
) {
1386 ASSERT(DS_UNIQUE_IS_ACCURATE(ds
));
1387 delta
= MIN(dsl_dataset_phys(ds
)->ds_unique_bytes
,
1389 dsl_dir_diduse_space(ds
->ds_dir
, DD_USED_REFRSRV
,
1393 dmu_buf_will_dirty(ds
->ds_dbuf
, tx
);
1394 dsl_dataset_phys(ds
)->ds_deadlist_obj
=
1395 dsl_deadlist_clone(&ds
->ds_deadlist
, UINT64_MAX
,
1396 dsl_dataset_phys(ds
)->ds_prev_snap_obj
, tx
);
1397 dsl_deadlist_close(&ds
->ds_deadlist
);
1398 dsl_deadlist_open(&ds
->ds_deadlist
, mos
,
1399 dsl_dataset_phys(ds
)->ds_deadlist_obj
);
1400 dsl_deadlist_add_key(&ds
->ds_deadlist
,
1401 dsl_dataset_phys(ds
)->ds_prev_snap_txg
, tx
);
1403 ASSERT3U(dsl_dataset_phys(ds
)->ds_prev_snap_txg
, <, tx
->tx_txg
);
1404 dsl_dataset_phys(ds
)->ds_prev_snap_obj
= dsobj
;
1405 dsl_dataset_phys(ds
)->ds_prev_snap_txg
= crtxg
;
1406 dsl_dataset_phys(ds
)->ds_unique_bytes
= 0;
1407 if (spa_version(dp
->dp_spa
) >= SPA_VERSION_UNIQUE_ACCURATE
)
1408 dsl_dataset_phys(ds
)->ds_flags
|= DS_FLAG_UNIQUE_ACCURATE
;
1410 VERIFY0(zap_add(mos
, dsl_dataset_phys(ds
)->ds_snapnames_zapobj
,
1411 snapname
, 8, 1, &dsobj
, tx
));
1414 dsl_dataset_rele(ds
->ds_prev
, ds
);
1415 VERIFY0(dsl_dataset_hold_obj(dp
,
1416 dsl_dataset_phys(ds
)->ds_prev_snap_obj
, ds
, &ds
->ds_prev
));
1418 dsl_scan_ds_snapshotted(ds
, tx
);
1420 dsl_dir_snap_cmtime_update(ds
->ds_dir
);
1422 spa_history_log_internal_ds(ds
->ds_prev
, "snapshot", tx
, "");
1426 dsl_dataset_snapshot_sync(void *arg
, dmu_tx_t
*tx
)
1428 dsl_dataset_snapshot_arg_t
*ddsa
= arg
;
1429 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
1432 for (pair
= nvlist_next_nvpair(ddsa
->ddsa_snaps
, NULL
);
1433 pair
!= NULL
; pair
= nvlist_next_nvpair(ddsa
->ddsa_snaps
, pair
)) {
1436 char dsname
[ZFS_MAX_DATASET_NAME_LEN
];
1438 name
= nvpair_name(pair
);
1439 atp
= strchr(name
, '@');
1440 (void) strlcpy(dsname
, name
, atp
- name
+ 1);
1441 VERIFY0(dsl_dataset_hold(dp
, dsname
, FTAG
, &ds
));
1443 dsl_dataset_snapshot_sync_impl(ds
, atp
+ 1, tx
);
1444 if (ddsa
->ddsa_props
!= NULL
) {
1445 dsl_props_set_sync_impl(ds
->ds_prev
,
1446 ZPROP_SRC_LOCAL
, ddsa
->ddsa_props
, tx
);
1448 dsl_dataset_rele(ds
, FTAG
);
1453 * The snapshots must all be in the same pool.
1454 * All-or-nothing: if there are any failures, nothing will be modified.
1457 dsl_dataset_snapshot(nvlist_t
*snaps
, nvlist_t
*props
, nvlist_t
*errors
)
1459 dsl_dataset_snapshot_arg_t ddsa
;
1461 boolean_t needsuspend
;
1465 nvlist_t
*suspended
= NULL
;
1467 pair
= nvlist_next_nvpair(snaps
, NULL
);
1470 firstname
= nvpair_name(pair
);
1472 error
= spa_open(firstname
, &spa
, FTAG
);
1475 needsuspend
= (spa_version(spa
) < SPA_VERSION_FAST_SNAP
);
1476 spa_close(spa
, FTAG
);
1479 suspended
= fnvlist_alloc();
1480 for (pair
= nvlist_next_nvpair(snaps
, NULL
); pair
!= NULL
;
1481 pair
= nvlist_next_nvpair(snaps
, pair
)) {
1482 char fsname
[ZFS_MAX_DATASET_NAME_LEN
];
1483 char *snapname
= nvpair_name(pair
);
1487 atp
= strchr(snapname
, '@');
1489 error
= SET_ERROR(EINVAL
);
1492 (void) strlcpy(fsname
, snapname
, atp
- snapname
+ 1);
1494 error
= zil_suspend(fsname
, &cookie
);
1497 fnvlist_add_uint64(suspended
, fsname
,
1502 ddsa
.ddsa_snaps
= snaps
;
1503 ddsa
.ddsa_props
= props
;
1504 ddsa
.ddsa_errors
= errors
;
1505 ddsa
.ddsa_cr
= CRED();
1508 error
= dsl_sync_task(firstname
, dsl_dataset_snapshot_check
,
1509 dsl_dataset_snapshot_sync
, &ddsa
,
1510 fnvlist_num_pairs(snaps
) * 3, ZFS_SPACE_CHECK_NORMAL
);
1513 if (suspended
!= NULL
) {
1514 for (pair
= nvlist_next_nvpair(suspended
, NULL
); pair
!= NULL
;
1515 pair
= nvlist_next_nvpair(suspended
, pair
)) {
1516 zil_resume((void *)(uintptr_t)
1517 fnvpair_value_uint64(pair
));
1519 fnvlist_free(suspended
);
1525 typedef struct dsl_dataset_snapshot_tmp_arg
{
1526 const char *ddsta_fsname
;
1527 const char *ddsta_snapname
;
1528 minor_t ddsta_cleanup_minor
;
1529 const char *ddsta_htag
;
1530 } dsl_dataset_snapshot_tmp_arg_t
;
1533 dsl_dataset_snapshot_tmp_check(void *arg
, dmu_tx_t
*tx
)
1535 dsl_dataset_snapshot_tmp_arg_t
*ddsta
= arg
;
1536 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
1540 error
= dsl_dataset_hold(dp
, ddsta
->ddsta_fsname
, FTAG
, &ds
);
1544 /* NULL cred means no limit check for tmp snapshot */
1545 error
= dsl_dataset_snapshot_check_impl(ds
, ddsta
->ddsta_snapname
,
1546 tx
, B_FALSE
, 0, NULL
);
1548 dsl_dataset_rele(ds
, FTAG
);
1552 if (spa_version(dp
->dp_spa
) < SPA_VERSION_USERREFS
) {
1553 dsl_dataset_rele(ds
, FTAG
);
1554 return (SET_ERROR(ENOTSUP
));
1556 error
= dsl_dataset_user_hold_check_one(NULL
, ddsta
->ddsta_htag
,
1559 dsl_dataset_rele(ds
, FTAG
);
1563 dsl_dataset_rele(ds
, FTAG
);
1568 dsl_dataset_snapshot_tmp_sync(void *arg
, dmu_tx_t
*tx
)
1570 dsl_dataset_snapshot_tmp_arg_t
*ddsta
= arg
;
1571 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
1574 VERIFY0(dsl_dataset_hold(dp
, ddsta
->ddsta_fsname
, FTAG
, &ds
));
1576 dsl_dataset_snapshot_sync_impl(ds
, ddsta
->ddsta_snapname
, tx
);
1577 dsl_dataset_user_hold_sync_one(ds
->ds_prev
, ddsta
->ddsta_htag
,
1578 ddsta
->ddsta_cleanup_minor
, gethrestime_sec(), tx
);
1579 dsl_destroy_snapshot_sync_impl(ds
->ds_prev
, B_TRUE
, tx
);
1581 dsl_dataset_rele(ds
, FTAG
);
1585 dsl_dataset_snapshot_tmp(const char *fsname
, const char *snapname
,
1586 minor_t cleanup_minor
, const char *htag
)
1588 dsl_dataset_snapshot_tmp_arg_t ddsta
;
1591 boolean_t needsuspend
;
1594 ddsta
.ddsta_fsname
= fsname
;
1595 ddsta
.ddsta_snapname
= snapname
;
1596 ddsta
.ddsta_cleanup_minor
= cleanup_minor
;
1597 ddsta
.ddsta_htag
= htag
;
1599 error
= spa_open(fsname
, &spa
, FTAG
);
1602 needsuspend
= (spa_version(spa
) < SPA_VERSION_FAST_SNAP
);
1603 spa_close(spa
, FTAG
);
1606 error
= zil_suspend(fsname
, &cookie
);
1611 error
= dsl_sync_task(fsname
, dsl_dataset_snapshot_tmp_check
,
1612 dsl_dataset_snapshot_tmp_sync
, &ddsta
, 3, ZFS_SPACE_CHECK_RESERVED
);
1621 dsl_dataset_sync(dsl_dataset_t
*ds
, zio_t
*zio
, dmu_tx_t
*tx
)
1623 ASSERT(dmu_tx_is_syncing(tx
));
1624 ASSERT(ds
->ds_objset
!= NULL
);
1625 ASSERT(dsl_dataset_phys(ds
)->ds_next_snap_obj
== 0);
1628 * in case we had to change ds_fsid_guid when we opened it,
1631 dmu_buf_will_dirty(ds
->ds_dbuf
, tx
);
1632 dsl_dataset_phys(ds
)->ds_fsid_guid
= ds
->ds_fsid_guid
;
1634 if (ds
->ds_resume_bytes
[tx
->tx_txg
& TXG_MASK
] != 0) {
1635 VERIFY0(zap_update(tx
->tx_pool
->dp_meta_objset
,
1636 ds
->ds_object
, DS_FIELD_RESUME_OBJECT
, 8, 1,
1637 &ds
->ds_resume_object
[tx
->tx_txg
& TXG_MASK
], tx
));
1638 VERIFY0(zap_update(tx
->tx_pool
->dp_meta_objset
,
1639 ds
->ds_object
, DS_FIELD_RESUME_OFFSET
, 8, 1,
1640 &ds
->ds_resume_offset
[tx
->tx_txg
& TXG_MASK
], tx
));
1641 VERIFY0(zap_update(tx
->tx_pool
->dp_meta_objset
,
1642 ds
->ds_object
, DS_FIELD_RESUME_BYTES
, 8, 1,
1643 &ds
->ds_resume_bytes
[tx
->tx_txg
& TXG_MASK
], tx
));
1644 ds
->ds_resume_object
[tx
->tx_txg
& TXG_MASK
] = 0;
1645 ds
->ds_resume_offset
[tx
->tx_txg
& TXG_MASK
] = 0;
1646 ds
->ds_resume_bytes
[tx
->tx_txg
& TXG_MASK
] = 0;
1649 dmu_objset_sync(ds
->ds_objset
, zio
, tx
);
1651 for (spa_feature_t f
= 0; f
< SPA_FEATURES
; f
++) {
1652 if (ds
->ds_feature_activation_needed
[f
]) {
1653 if (ds
->ds_feature_inuse
[f
])
1655 dsl_dataset_activate_feature(ds
->ds_object
, f
, tx
);
1656 ds
->ds_feature_inuse
[f
] = B_TRUE
;
1662 deadlist_enqueue_cb(void *arg
, const blkptr_t
*bp
, dmu_tx_t
*tx
)
1664 dsl_deadlist_t
*dl
= arg
;
1665 dsl_deadlist_insert(dl
, bp
, tx
);
1670 dsl_dataset_sync_done(dsl_dataset_t
*ds
, dmu_tx_t
*tx
)
1672 objset_t
*os
= ds
->ds_objset
;
1674 bplist_iterate(&ds
->ds_pending_deadlist
,
1675 deadlist_enqueue_cb
, &ds
->ds_deadlist
, tx
);
1677 if (os
->os_synced_dnodes
!= NULL
) {
1678 multilist_destroy(os
->os_synced_dnodes
);
1679 os
->os_synced_dnodes
= NULL
;
1682 ASSERT(!dmu_objset_is_dirty(os
, dmu_tx_get_txg(tx
)));
1684 dmu_buf_rele(ds
->ds_dbuf
, ds
);
1688 get_clones_stat(dsl_dataset_t
*ds
, nvlist_t
*nv
)
1691 objset_t
*mos
= ds
->ds_dir
->dd_pool
->dp_meta_objset
;
1694 nvlist_t
*propval
= fnvlist_alloc();
1697 ASSERT(dsl_pool_config_held(ds
->ds_dir
->dd_pool
));
1700 * We use nvlist_alloc() instead of fnvlist_alloc() because the
1701 * latter would allocate the list with NV_UNIQUE_NAME flag.
1702 * As a result, every time a clone name is appended to the list
 * it would be (linearly) searched for a duplicate name.
 * We already know that all clone names must be unique and we
 * want to avoid the quadratic complexity of double-checking that
1706 * because we can have a large number of clones.
1708 VERIFY0(nvlist_alloc(&val
, 0, KM_SLEEP
));
1711 * There may be missing entries in ds_next_clones_obj
1712 * due to a bug in a previous version of the code.
1713 * Only trust it if it has the right number of entries.
1715 if (dsl_dataset_phys(ds
)->ds_next_clones_obj
!= 0) {
1716 VERIFY0(zap_count(mos
, dsl_dataset_phys(ds
)->ds_next_clones_obj
,
1719 if (count
!= dsl_dataset_phys(ds
)->ds_num_children
- 1)
1721 for (zap_cursor_init(&zc
, mos
,
1722 dsl_dataset_phys(ds
)->ds_next_clones_obj
);
1723 zap_cursor_retrieve(&zc
, &za
) == 0;
1724 zap_cursor_advance(&zc
)) {
1725 dsl_dataset_t
*clone
;
1726 char buf
[ZFS_MAX_DATASET_NAME_LEN
];
1727 VERIFY0(dsl_dataset_hold_obj(ds
->ds_dir
->dd_pool
,
1728 za
.za_first_integer
, FTAG
, &clone
));
1729 dsl_dir_name(clone
->ds_dir
, buf
);
1730 fnvlist_add_boolean(val
, buf
);
1731 dsl_dataset_rele(clone
, FTAG
);
1733 zap_cursor_fini(&zc
);
1734 fnvlist_add_nvlist(propval
, ZPROP_VALUE
, val
);
1735 fnvlist_add_nvlist(nv
, zfs_prop_to_name(ZFS_PROP_CLONES
), propval
);
1738 nvlist_free(propval
);
1742 get_receive_resume_stats(dsl_dataset_t
*ds
, nvlist_t
*nv
)
1744 dsl_pool_t
*dp
= ds
->ds_dir
->dd_pool
;
1746 if (dsl_dataset_has_resume_receive_state(ds
)) {
1749 uint8_t *compressed
;
1751 nvlist_t
*token_nv
= fnvlist_alloc();
1752 size_t packed_size
, compressed_size
;
1754 if (zap_lookup(dp
->dp_meta_objset
, ds
->ds_object
,
1755 DS_FIELD_RESUME_FROMGUID
, sizeof (val
), 1, &val
) == 0) {
1756 fnvlist_add_uint64(token_nv
, "fromguid", val
);
1758 if (zap_lookup(dp
->dp_meta_objset
, ds
->ds_object
,
1759 DS_FIELD_RESUME_OBJECT
, sizeof (val
), 1, &val
) == 0) {
1760 fnvlist_add_uint64(token_nv
, "object", val
);
1762 if (zap_lookup(dp
->dp_meta_objset
, ds
->ds_object
,
1763 DS_FIELD_RESUME_OFFSET
, sizeof (val
), 1, &val
) == 0) {
1764 fnvlist_add_uint64(token_nv
, "offset", val
);
1766 if (zap_lookup(dp
->dp_meta_objset
, ds
->ds_object
,
1767 DS_FIELD_RESUME_BYTES
, sizeof (val
), 1, &val
) == 0) {
1768 fnvlist_add_uint64(token_nv
, "bytes", val
);
1770 if (zap_lookup(dp
->dp_meta_objset
, ds
->ds_object
,
1771 DS_FIELD_RESUME_TOGUID
, sizeof (val
), 1, &val
) == 0) {
1772 fnvlist_add_uint64(token_nv
, "toguid", val
);
1775 if (zap_lookup(dp
->dp_meta_objset
, ds
->ds_object
,
1776 DS_FIELD_RESUME_TONAME
, 1, sizeof (buf
), buf
) == 0) {
1777 fnvlist_add_string(token_nv
, "toname", buf
);
1779 if (zap_contains(dp
->dp_meta_objset
, ds
->ds_object
,
1780 DS_FIELD_RESUME_LARGEBLOCK
) == 0) {
1781 fnvlist_add_boolean(token_nv
, "largeblockok");
1783 if (zap_contains(dp
->dp_meta_objset
, ds
->ds_object
,
1784 DS_FIELD_RESUME_EMBEDOK
) == 0) {
1785 fnvlist_add_boolean(token_nv
, "embedok");
1787 if (zap_contains(dp
->dp_meta_objset
, ds
->ds_object
,
1788 DS_FIELD_RESUME_COMPRESSOK
) == 0) {
1789 fnvlist_add_boolean(token_nv
, "compressok");
1791 packed
= fnvlist_pack(token_nv
, &packed_size
);
1792 fnvlist_free(token_nv
);
1793 compressed
= kmem_alloc(packed_size
, KM_SLEEP
);
1795 compressed_size
= gzip_compress(packed
, compressed
,
1796 packed_size
, packed_size
, 6);
1799 fletcher_4_native(compressed
, compressed_size
, NULL
, &cksum
);
1801 str
= kmem_alloc(compressed_size
* 2 + 1, KM_SLEEP
);
1802 for (int i
= 0; i
< compressed_size
; i
++) {
1803 (void) sprintf(str
+ i
* 2, "%02x", compressed
[i
]);
1805 str
[compressed_size
* 2] = '\0';
1806 char *propval
= kmem_asprintf("%u-%llx-%llx-%s",
1807 ZFS_SEND_RESUME_TOKEN_VERSION
,
1808 (longlong_t
)cksum
.zc_word
[0],
1809 (longlong_t
)packed_size
, str
);
1810 dsl_prop_nvlist_add_string(nv
,
1811 ZFS_PROP_RECEIVE_RESUME_TOKEN
, propval
);
1812 kmem_free(packed
, packed_size
);
1813 kmem_free(str
, compressed_size
* 2 + 1);
1814 kmem_free(compressed
, packed_size
);
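
/*
 * The resulting receive_resume_token is therefore a string of the form
 * <version>-<fletcher4 word 0 of the compressed payload>-<packed nvlist
 * size>-<hex dump of the gzip-compressed packed nvlist>, where the nvlist
 * carries the fromguid/object/offset/bytes/toguid/toname fields and the
 * optional largeblockok/embedok/compressok flags gathered above.  The
 * checksum word and packed size are printed in hex; the version is decimal.
 */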
1820 dsl_dataset_stats(dsl_dataset_t
*ds
, nvlist_t
*nv
)
1822 dsl_pool_t
*dp
= ds
->ds_dir
->dd_pool
;
1823 uint64_t refd
, avail
, uobjs
, aobjs
, ratio
;
1825 ASSERT(dsl_pool_config_held(dp
));
1827 ratio
= dsl_dataset_phys(ds
)->ds_compressed_bytes
== 0 ? 100 :
1828 (dsl_dataset_phys(ds
)->ds_uncompressed_bytes
* 100 /
1829 dsl_dataset_phys(ds
)->ds_compressed_bytes
);
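
/*
 * The ratio is scaled by 100: e.g. 1000MB of logical (uncompressed) data
 * stored in 400MB of compressed space yields a refratio of 250, i.e. 2.50x.
 */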
1831 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_REFRATIO
, ratio
);
1832 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_LOGICALREFERENCED
,
1833 dsl_dataset_phys(ds
)->ds_uncompressed_bytes
);
1835 if (ds
->ds_is_snapshot
) {
1836 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_COMPRESSRATIO
, ratio
);
1837 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_USED
,
1838 dsl_dataset_phys(ds
)->ds_unique_bytes
);
1839 get_clones_stat(ds
, nv
);
1841 if (ds
->ds_prev
!= NULL
&& ds
->ds_prev
!= dp
->dp_origin_snap
) {
1842 char buf
[ZFS_MAX_DATASET_NAME_LEN
];
1843 dsl_dataset_name(ds
->ds_prev
, buf
);
1844 dsl_prop_nvlist_add_string(nv
, ZFS_PROP_PREV_SNAP
, buf
);
1847 dsl_dir_stats(ds
->ds_dir
, nv
);
1850 dsl_dataset_space(ds
, &refd
, &avail
, &uobjs
, &aobjs
);
1851 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_AVAILABLE
, avail
);
1852 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_REFERENCED
, refd
);
1854 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_CREATION
,
1855 dsl_dataset_phys(ds
)->ds_creation_time
);
1856 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_CREATETXG
,
1857 dsl_dataset_phys(ds
)->ds_creation_txg
);
1858 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_REFQUOTA
,
1860 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_REFRESERVATION
,
1862 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_GUID
,
1863 dsl_dataset_phys(ds
)->ds_guid
);
1864 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_UNIQUE
,
1865 dsl_dataset_phys(ds
)->ds_unique_bytes
);
1866 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_OBJSETID
,
1868 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_USERREFS
,
1870 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_DEFER_DESTROY
,
1871 DS_IS_DEFER_DESTROY(ds
) ? 1 : 0);
1873 if (dsl_dataset_phys(ds
)->ds_prev_snap_obj
!= 0) {
1874 uint64_t written
, comp
, uncomp
;
1875 dsl_pool_t
*dp
= ds
->ds_dir
->dd_pool
;
1876 dsl_dataset_t
*prev
;
1878 int err
= dsl_dataset_hold_obj(dp
,
1879 dsl_dataset_phys(ds
)->ds_prev_snap_obj
, FTAG
, &prev
);
1881 err
= dsl_dataset_space_written(prev
, ds
, &written
,
1883 dsl_dataset_rele(prev
, FTAG
);
1885 dsl_prop_nvlist_add_uint64(nv
, ZFS_PROP_WRITTEN
,
1891 if (!dsl_dataset_is_snapshot(ds
)) {
1893 * A failed "newfs" (e.g. full) resumable receive leaves
1894 * the stats set on this dataset. Check here for the prop.
1896 get_receive_resume_stats(ds
, nv
);
1899 * A failed incremental resumable receive leaves the
1900 * stats set on our child named "%recv". Check the child
1903 /* 6 extra bytes for /%recv */
1904 char recvname
[ZFS_MAX_DATASET_NAME_LEN
+ 6];
1905 dsl_dataset_t
*recv_ds
;
1906 dsl_dataset_name(ds
, recvname
);
1907 if (strlcat(recvname
, "/", sizeof (recvname
)) <
1908 sizeof (recvname
) &&
1909 strlcat(recvname
, recv_clone_name
, sizeof (recvname
)) <
1910 sizeof (recvname
) &&
1911 dsl_dataset_hold(dp
, recvname
, FTAG
, &recv_ds
) == 0) {
1912 get_receive_resume_stats(recv_ds
, nv
);
1913 dsl_dataset_rele(recv_ds
, FTAG
);
1919 dsl_dataset_fast_stat(dsl_dataset_t
*ds
, dmu_objset_stats_t
*stat
)
1921 dsl_pool_t
*dp
= ds
->ds_dir
->dd_pool
;
1922 ASSERT(dsl_pool_config_held(dp
));
1924 stat
->dds_creation_txg
= dsl_dataset_phys(ds
)->ds_creation_txg
;
1925 stat
->dds_inconsistent
=
1926 dsl_dataset_phys(ds
)->ds_flags
& DS_FLAG_INCONSISTENT
;
1927 stat
->dds_guid
= dsl_dataset_phys(ds
)->ds_guid
;
1928 stat
->dds_origin
[0] = '\0';
1929 if (ds
->ds_is_snapshot
) {
1930 stat
->dds_is_snapshot
= B_TRUE
;
1931 stat
->dds_num_clones
=
1932 dsl_dataset_phys(ds
)->ds_num_children
- 1;
1934 stat
->dds_is_snapshot
= B_FALSE
;
1935 stat
->dds_num_clones
= 0;
1937 if (dsl_dir_is_clone(ds
->ds_dir
)) {
1940 VERIFY0(dsl_dataset_hold_obj(dp
,
1941 dsl_dir_phys(ds
->ds_dir
)->dd_origin_obj
,
1943 dsl_dataset_name(ods
, stat
->dds_origin
);
1944 dsl_dataset_rele(ods
, FTAG
);
1950 dsl_dataset_fsid_guid(dsl_dataset_t
*ds
)
1952 return (ds
->ds_fsid_guid
);
1956 dsl_dataset_space(dsl_dataset_t
*ds
,
1957 uint64_t *refdbytesp
, uint64_t *availbytesp
,
1958 uint64_t *usedobjsp
, uint64_t *availobjsp
)
1960 *refdbytesp
= dsl_dataset_phys(ds
)->ds_referenced_bytes
;
1961 *availbytesp
= dsl_dir_space_available(ds
->ds_dir
, NULL
, 0, TRUE
);
1962 if (ds
->ds_reserved
> dsl_dataset_phys(ds
)->ds_unique_bytes
)
1964 ds
->ds_reserved
- dsl_dataset_phys(ds
)->ds_unique_bytes
;
1965 if (ds
->ds_quota
!= 0) {
1967 * Adjust available bytes according to refquota
1969 if (*refdbytesp
< ds
->ds_quota
)
1970 *availbytesp
= MIN(*availbytesp
,
1971 ds
->ds_quota
- *refdbytesp
);
1975 rrw_enter(&ds
->ds_bp_rwlock
, RW_READER
, FTAG
);
1976 *usedobjsp
= BP_GET_FILL(&dsl_dataset_phys(ds
)->ds_bp
);
1977 rrw_exit(&ds
->ds_bp_rwlock
, FTAG
);
1978 *availobjsp
= DN_MAX_OBJECT
- *usedobjsp
;
1982 dsl_dataset_modified_since_snap(dsl_dataset_t
*ds
, dsl_dataset_t
*snap
)
1984 dsl_pool_t
*dp
= ds
->ds_dir
->dd_pool
;
1987 ASSERT(dsl_pool_config_held(dp
));
1990 rrw_enter(&ds
->ds_bp_rwlock
, RW_READER
, FTAG
);
1991 birth
= dsl_dataset_get_blkptr(ds
)->blk_birth
;
1992 rrw_exit(&ds
->ds_bp_rwlock
, FTAG
);
1993 if (birth
> dsl_dataset_phys(snap
)->ds_creation_txg
) {
1994 objset_t
*os
, *os_snap
;
1996 * It may be that only the ZIL differs, because it was
1997 * reset in the head. Don't count that as being
2000 if (dmu_objset_from_ds(ds
, &os
) != 0)
2002 if (dmu_objset_from_ds(snap
, &os_snap
) != 0)
2004 return (bcmp(&os
->os_phys
->os_meta_dnode
,
2005 &os_snap
->os_phys
->os_meta_dnode
,
2006 sizeof (os
->os_phys
->os_meta_dnode
)) != 0);
typedef struct dsl_dataset_rename_snapshot_arg {
	const char *ddrsa_fsname;
	const char *ddrsa_oldsnapname;
	const char *ddrsa_newsnapname;
	boolean_t ddrsa_recursive;
	dmu_tx_t *ddrsa_tx;
} dsl_dataset_rename_snapshot_arg_t;

/* ARGSUSED */
static int
dsl_dataset_rename_snapshot_check_impl(dsl_pool_t *dp,
    dsl_dataset_t *hds, void *arg)
{
	dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
	int error;
	uint64_t val;

	error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_oldsnapname, &val);
	if (error != 0) {
		/* ignore nonexistent snapshots */
		return (error == ENOENT ? 0 : error);
	}

	/* new name should not exist */
	error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_newsnapname, &val);
	if (error == 0)
		error = SET_ERROR(EEXIST);
	else if (error == ENOENT)
		error = 0;

	/* dataset name + 1 for the "@" + the new snapshot name must fit */
	if (dsl_dir_namelen(hds->ds_dir) + 1 +
	    strlen(ddrsa->ddrsa_newsnapname) >= ZFS_MAX_DATASET_NAME_LEN)
		error = SET_ERROR(ENAMETOOLONG);

	return (error);
}

static int
dsl_dataset_rename_snapshot_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *hds;
	int error;

	error = dsl_dataset_hold(dp, ddrsa->ddrsa_fsname, FTAG, &hds);
	if (error != 0)
		return (error);

	if (ddrsa->ddrsa_recursive) {
		error = dmu_objset_find_dp(dp, hds->ds_dir->dd_object,
		    dsl_dataset_rename_snapshot_check_impl, ddrsa,
		    DS_FIND_CHILDREN);
	} else {
		error = dsl_dataset_rename_snapshot_check_impl(dp, hds, ddrsa);
	}
	dsl_dataset_rele(hds, FTAG);
	return (error);
}

static int
dsl_dataset_rename_snapshot_sync_impl(dsl_pool_t *dp,
    dsl_dataset_t *hds, void *arg)
{
	dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
	dsl_dataset_t *ds;
	uint64_t val;
	dmu_tx_t *tx = ddrsa->ddrsa_tx;
	int error;

	error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_oldsnapname, &val);
	ASSERT(error == 0 || error == ENOENT);
	if (error == ENOENT) {
		/* ignore nonexistent snapshots */
		return (0);
	}

	VERIFY0(dsl_dataset_hold_obj(dp, val, FTAG, &ds));

	/* log before we change the name */
	spa_history_log_internal_ds(ds, "rename", tx,
	    "-> @%s", ddrsa->ddrsa_newsnapname);

	VERIFY0(dsl_dataset_snap_remove(hds, ddrsa->ddrsa_oldsnapname, tx,
	    B_FALSE));
	mutex_enter(&ds->ds_lock);
	(void) strcpy(ds->ds_snapname, ddrsa->ddrsa_newsnapname);
	mutex_exit(&ds->ds_lock);
	VERIFY0(zap_add(dp->dp_meta_objset,
	    dsl_dataset_phys(hds)->ds_snapnames_zapobj,
	    ds->ds_snapname, 8, 1, &ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
dsl_dataset_rename_snapshot_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *hds;

	VERIFY0(dsl_dataset_hold(dp, ddrsa->ddrsa_fsname, FTAG, &hds));
	ddrsa->ddrsa_tx = tx;
	if (ddrsa->ddrsa_recursive) {
		VERIFY0(dmu_objset_find_dp(dp, hds->ds_dir->dd_object,
		    dsl_dataset_rename_snapshot_sync_impl, ddrsa,
		    DS_FIND_CHILDREN));
	} else {
		VERIFY0(dsl_dataset_rename_snapshot_sync_impl(dp, hds, ddrsa));
	}
	dsl_dataset_rele(hds, FTAG);
}

int
dsl_dataset_rename_snapshot(const char *fsname,
    const char *oldsnapname, const char *newsnapname, boolean_t recursive)
{
	dsl_dataset_rename_snapshot_arg_t ddrsa;

	ddrsa.ddrsa_fsname = fsname;
	ddrsa.ddrsa_oldsnapname = oldsnapname;
	ddrsa.ddrsa_newsnapname = newsnapname;
	ddrsa.ddrsa_recursive = recursive;

	return (dsl_sync_task(fsname, dsl_dataset_rename_snapshot_check,
	    dsl_dataset_rename_snapshot_sync, &ddrsa,
	    1, ZFS_SPACE_CHECK_RESERVED));
}
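
/*
 * Example of the ENAMETOOLONG check in
 * dsl_dataset_rename_snapshot_check_impl(), assuming (hypothetically)
 * that ZFS_MAX_DATASET_NAME_LEN is 256: a filesystem whose full name
 * is 200 characters long can accept snapshot names of at most 54
 * characters, since 200 (name) + 1 ("@") + 55 (snapname) would already
 * reach the 256-byte limit and fail the ">=" test.
 */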
/*
 * If we're doing an ownership handoff, we need to make sure that there is
 * only one long hold on the dataset. We're not allowed to change anything here
 * so we don't permanently release the long hold or regular hold here. We want
 * to do this only when syncing to avoid the dataset unexpectedly going away
 * when we release the long hold.
 */
static int
dsl_dataset_handoff_check(dsl_dataset_t *ds, void *owner, dmu_tx_t *tx)
{
	boolean_t held;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	if (owner != NULL) {
		VERIFY3P(ds->ds_owner, ==, owner);
		dsl_dataset_long_rele(ds, owner);
	}

	held = dsl_dataset_long_held(ds);

	if (owner != NULL)
		dsl_dataset_long_hold(ds, owner);

	if (held)
		return (SET_ERROR(EBUSY));

	return (0);
}
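
/*
 * Illustrative trace (hypothetical): suppose the owner holds the only
 * long hold. dsl_dataset_long_rele() drops it, dsl_dataset_long_held()
 * then reports no remaining long holds, and the hold is immediately
 * re-acquired, so the handoff is allowed. If some other user (e.g. a
 * mount or an in-progress send) also held a long hold, held would be
 * B_TRUE and the caller gets EBUSY.
 */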
typedef struct dsl_dataset_rollback_arg {
	const char *ddra_fsname;
	void *ddra_owner;
	nvlist_t *ddra_result;
} dsl_dataset_rollback_arg_t;

static int
dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_rollback_arg_t *ddra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int64_t unused_refres_delta;
	int error;

	error = dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds);
	if (error != 0)
		return (error);

	/* must not be a snapshot */
	if (ds->ds_is_snapshot) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/* must have a most recent snapshot */
	if (dsl_dataset_phys(ds)->ds_prev_snap_txg < TXG_INITIAL) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * No rollback to a snapshot created in the current txg, because
	 * the rollback may dirty the dataset and create blocks that are
	 * not reachable from the rootbp while having a birth txg that
	 * falls into the snapshot's range.
	 */
	if (dmu_tx_is_syncing(tx) &&
	    dsl_dataset_phys(ds)->ds_prev_snap_txg >= tx->tx_txg) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EAGAIN));
	}

	/* must not have any bookmarks after the most recent snapshot */
	nvlist_t *proprequest = fnvlist_alloc();
	fnvlist_add_boolean(proprequest, zfs_prop_to_name(ZFS_PROP_CREATETXG));
	nvlist_t *bookmarks = fnvlist_alloc();
	error = dsl_get_bookmarks_impl(ds, proprequest, bookmarks);
	fnvlist_free(proprequest);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}
	for (nvpair_t *pair = nvlist_next_nvpair(bookmarks, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(bookmarks, pair)) {
		nvlist_t *valuenv =
		    fnvlist_lookup_nvlist(fnvpair_value_nvlist(pair),
		    zfs_prop_to_name(ZFS_PROP_CREATETXG));
		uint64_t createtxg = fnvlist_lookup_uint64(valuenv, "value");
		if (createtxg > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
			fnvlist_free(bookmarks);
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(EEXIST));
		}
	}
	fnvlist_free(bookmarks);

	error = dsl_dataset_handoff_check(ds, ddra->ddra_owner, tx);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	/*
	 * Check if the snap we are rolling back to uses more than
	 * the refquota.
	 */
	if (ds->ds_quota != 0 &&
	    dsl_dataset_phys(ds->ds_prev)->ds_referenced_bytes > ds->ds_quota) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EDQUOT));
	}

	/*
	 * When we do the clone swap, we will temporarily use more space
	 * due to the refreservation (the head will no longer have any
	 * unique space, so the entire amount of the refreservation will need
	 * to be free). We will immediately destroy the clone, freeing
	 * this space, but the freeing happens over many txg's.
	 */
	unused_refres_delta = (int64_t)MIN(ds->ds_reserved,
	    dsl_dataset_phys(ds)->ds_unique_bytes);

	if (unused_refres_delta > 0 &&
	    unused_refres_delta >
	    dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}
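
/*
 * Worked example for the ENOSPC check above (hypothetical numbers):
 * with refreservation = 10G and ds_unique_bytes = 3G,
 * unused_refres_delta = MIN(10G, 3G) = 3G. During the clone swap the
 * head temporarily loses all of its unique space, so those 3G of
 * currently-consumed refreservation must be backed by free space; if
 * the dsl_dir has less than 3G available, the rollback fails up front.
 */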
static void
dsl_dataset_rollback_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_rollback_arg_t *ddra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds, *clone;
	uint64_t cloneobj;
	char namebuf[ZFS_MAX_DATASET_NAME_LEN];

	VERIFY0(dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds));

	dsl_dataset_name(ds->ds_prev, namebuf);
	fnvlist_add_string(ddra->ddra_result, "target", namebuf);

	cloneobj = dsl_dataset_create_sync(ds->ds_dir, "%rollback",
	    ds->ds_prev, DS_CREATE_FLAG_NODIRTY, kcred, tx);

	VERIFY0(dsl_dataset_hold_obj(dp, cloneobj, FTAG, &clone));

	dsl_dataset_clone_swap_sync_impl(clone, ds, tx);
	dsl_dataset_zero_zil(ds, tx);

	dsl_destroy_head_sync_impl(clone, tx);

	dsl_dataset_rele(clone, FTAG);
	dsl_dataset_rele(ds, FTAG);
}
/*
 * Rolls back the given filesystem or volume to the most recent snapshot.
 * The name of the most recent snapshot will be returned under key "target"
 * in the result nvlist.
 *
 * If owner != NULL:
 * - The existing dataset MUST be owned by the specified owner at entry
 * - Upon return, dataset will still be held by the same owner, whether we
 *   succeed or not.
 *
 * This mode is required any time the existing filesystem is mounted. See
 * notes above zfs_suspend_fs() for further details.
 */
int
dsl_dataset_rollback(const char *fsname, void *owner, nvlist_t *result)
{
	dsl_dataset_rollback_arg_t ddra;

	ddra.ddra_fsname = fsname;
	ddra.ddra_owner = owner;
	ddra.ddra_result = result;

	return (dsl_sync_task(fsname, dsl_dataset_rollback_check,
	    dsl_dataset_rollback_sync, &ddra,
	    1, ZFS_SPACE_CHECK_RESERVED));
}
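
/*
 * Example caller sketch (hypothetical; error handling elided). The
 * "target" entry is filled in by dsl_dataset_rollback_sync():
 *
 *	nvlist_t *result = fnvlist_alloc();
 *	int error = dsl_dataset_rollback("tank/fs", NULL, result);
 *	if (error == 0) {
 *		char *snap = fnvlist_lookup_string(result, "target");
 *		(void) printf("rolled back to %s\n", snap);
 *	}
 *	fnvlist_free(result);
 */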
struct promotenode {
	list_node_t link;
	dsl_dataset_t *ds;
};

typedef struct dsl_dataset_promote_arg {
	const char *ddpa_clonename;
	dsl_dataset_t *ddpa_clone;
	list_t shared_snaps, origin_snaps, clone_snaps;
	dsl_dataset_t *origin_origin; /* origin of the origin */
	uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
	char *err_ds;
	cred_t *cr;
} dsl_dataset_promote_arg_t;

static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
static int promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp,
    void *tag);
static void promote_rele(dsl_dataset_promote_arg_t *ddpa, void *tag);
static int
dsl_dataset_promote_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_promote_arg_t *ddpa = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *hds;
	struct promotenode *snap;
	dsl_dataset_t *origin_ds;
	int err;
	uint64_t unused;
	uint64_t ss_mv_cnt;
	size_t max_snap_len;

	err = promote_hold(ddpa, dp, FTAG);
	if (err != 0)
		return (err);

	hds = ddpa->ddpa_clone;
	max_snap_len = MAXNAMELEN - strlen(ddpa->ddpa_clonename) - 1;

	if (dsl_dataset_phys(hds)->ds_flags & DS_FLAG_NOPROMOTE) {
		promote_rele(ddpa, FTAG);
		return (SET_ERROR(EXDEV));
	}

	/*
	 * Compute and check the amount of space to transfer. Since this is
	 * so expensive, don't do the preliminary check.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		promote_rele(ddpa, FTAG);
		return (0);
	}

	snap = list_head(&ddpa->shared_snaps);
	origin_ds = snap->ds;

	/* compute origin's new unique space */
	snap = list_tail(&ddpa->clone_snaps);
	ASSERT3U(dsl_dataset_phys(snap->ds)->ds_prev_snap_obj, ==,
	    origin_ds->ds_object);
	dsl_deadlist_space_range(&snap->ds->ds_deadlist,
	    dsl_dataset_phys(origin_ds)->ds_prev_snap_txg, UINT64_MAX,
	    &ddpa->unique, &unused, &unused);

	/*
	 * Walk the snapshots that we are moving
	 *
	 * Compute space to transfer. Consider the incremental changes
	 * to used by each snapshot:
	 * (my used) = (prev's used) + (blocks born) - (blocks killed)
	 * So each snapshot gave birth to:
	 * (blocks born) = (my used) - (prev's used) + (blocks killed)
	 * So a sequence would look like:
	 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
	 * Which simplifies to:
	 * uN + kN + kN-1 + ... + k1 + k0
	 * Note however, if we stop before we reach the ORIGIN we get:
	 * uN + kN + kN-1 + ... + kM - uM-1
	 */
	ss_mv_cnt = 0;
	ddpa->used = dsl_dataset_phys(origin_ds)->ds_referenced_bytes;
	ddpa->comp = dsl_dataset_phys(origin_ds)->ds_compressed_bytes;
	ddpa->uncomp = dsl_dataset_phys(origin_ds)->ds_uncompressed_bytes;
	for (snap = list_head(&ddpa->shared_snaps); snap;
	    snap = list_next(&ddpa->shared_snaps, snap)) {
		uint64_t val, dlused, dlcomp, dluncomp;
		dsl_dataset_t *ds = snap->ds;

		ss_mv_cnt++;

		/*
		 * If there are long holds, we won't be able to evict
		 * the objset.
		 */
		if (dsl_dataset_long_held(ds)) {
			err = SET_ERROR(EBUSY);
			goto out;
		}

		/* Check that the snapshot name does not conflict */
		VERIFY0(dsl_dataset_get_snapname(ds));
		if (strlen(ds->ds_snapname) >= max_snap_len) {
			err = SET_ERROR(ENAMETOOLONG);
			goto out;
		}
		err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
		if (err == 0) {
			(void) strcpy(ddpa->err_ds, snap->ds->ds_snapname);
			err = SET_ERROR(EEXIST);
			goto out;
		}
		if (err != ENOENT)
			goto out;

		/* The very first snapshot does not have a deadlist */
		if (dsl_dataset_phys(ds)->ds_prev_snap_obj == 0)
			continue;

		dsl_deadlist_space(&ds->ds_deadlist,
		    &dlused, &dlcomp, &dluncomp);
		ddpa->used += dlused;
		ddpa->comp += dlcomp;
		ddpa->uncomp += dluncomp;
	}

	/*
	 * If we are a clone of a clone then we never reached ORIGIN,
	 * so we need to subtract out the clone origin's used space.
	 */
	if (ddpa->origin_origin) {
		ddpa->used -=
		    dsl_dataset_phys(ddpa->origin_origin)->ds_referenced_bytes;
		ddpa->comp -=
		    dsl_dataset_phys(ddpa->origin_origin)->ds_compressed_bytes;
		ddpa->uncomp -=
		    dsl_dataset_phys(ddpa->origin_origin)->
		    ds_uncompressed_bytes;
	}

	/* Check that there is enough space and limit headroom here */
	err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
	    0, ss_mv_cnt, ddpa->used, ddpa->cr);
	if (err != 0)
		goto out;

	/*
	 * Compute the amounts of space that will be used by snapshots
	 * after the promotion (for both origin and clone). For each,
	 * it is the amount of space that will be on all of their
	 * deadlists (that was not born before their new origin).
	 */
	if (dsl_dir_phys(hds->ds_dir)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		uint64_t space;

		/*
		 * Note, typically this will not be a clone of a clone,
		 * so dd_origin_txg will be < TXG_INITIAL, so
		 * these snaplist_space() -> dsl_deadlist_space_range()
		 * calls will be fast because they do not have to
		 * iterate over all bps.
		 */
		snap = list_head(&ddpa->origin_snaps);
		err = snaplist_space(&ddpa->shared_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &ddpa->cloneusedsnap);
		if (err != 0)
			goto out;

		err = snaplist_space(&ddpa->clone_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &space);
		if (err != 0)
			goto out;
		ddpa->cloneusedsnap += space;
	}
	if (dsl_dir_phys(origin_ds->ds_dir)->dd_flags &
	    DD_FLAG_USED_BREAKDOWN) {
		err = snaplist_space(&ddpa->origin_snaps,
		    dsl_dataset_phys(origin_ds)->ds_creation_txg,
		    &ddpa->originusedsnap);
		if (err != 0)
			goto out;
	}

out:
	promote_rele(ddpa, FTAG);
	return (err);
}
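
/*
 * Worked example of the space-to-transfer formula above (hypothetical
 * numbers). Suppose three snapshots are moved, with used space
 * u0 = 1G, u1 = 1.5G, u2 = 2G and deadlist (killed) space
 * k0 = 100M, k1 = 200M, k2 = 300M. The telescoping sum
 * (u2 - u1 + k2) + (u1 - u0 + k1) + (u0 - 0 + k0) collapses to
 * u2 + k2 + k1 + k0 = 2G + 600M, which is exactly what the loop
 * accumulates into ddpa->used: the origin's referenced bytes plus each
 * moved snapshot's deadlist space.
 */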
static void
dsl_dataset_promote_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_promote_arg_t *ddpa = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *hds;
	struct promotenode *snap;
	dsl_dataset_t *origin_ds;
	dsl_dataset_t *origin_head;
	dsl_dir_t *dd;
	dsl_dir_t *odd = NULL;
	uint64_t oldnext_obj;
	int64_t delta;

	VERIFY0(promote_hold(ddpa, dp, FTAG));
	hds = ddpa->ddpa_clone;

	ASSERT0(dsl_dataset_phys(hds)->ds_flags & DS_FLAG_NOPROMOTE);

	snap = list_head(&ddpa->shared_snaps);
	origin_ds = snap->ds;
	dd = hds->ds_dir;

	snap = list_head(&ddpa->origin_snaps);
	origin_head = snap->ds;

	/*
	 * We need to explicitly open odd, since origin_ds's dd will be
	 * changing.
	 */
	VERIFY0(dsl_dir_hold_obj(dp, origin_ds->ds_dir->dd_object,
	    NULL, FTAG, &odd));

	/* change origin's next snap */
	dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
	oldnext_obj = dsl_dataset_phys(origin_ds)->ds_next_snap_obj;
	snap = list_tail(&ddpa->clone_snaps);
	ASSERT3U(dsl_dataset_phys(snap->ds)->ds_prev_snap_obj, ==,
	    origin_ds->ds_object);
	dsl_dataset_phys(origin_ds)->ds_next_snap_obj = snap->ds->ds_object;

	/* change the origin's next clone */
	if (dsl_dataset_phys(origin_ds)->ds_next_clones_obj) {
		dsl_dataset_remove_from_next_clones(origin_ds,
		    snap->ds->ds_object, tx);
		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dataset_phys(origin_ds)->ds_next_clones_obj,
		    oldnext_obj, tx));
	}

	/* change origin */
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	ASSERT3U(dsl_dir_phys(dd)->dd_origin_obj, ==, origin_ds->ds_object);
	dsl_dir_phys(dd)->dd_origin_obj = dsl_dir_phys(odd)->dd_origin_obj;
	dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
	dmu_buf_will_dirty(odd->dd_dbuf, tx);
	dsl_dir_phys(odd)->dd_origin_obj = origin_ds->ds_object;
	origin_head->ds_dir->dd_origin_txg =
	    dsl_dataset_phys(origin_ds)->ds_creation_txg;

	/* change dd_clone entries */
	if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
		VERIFY0(zap_remove_int(dp->dp_meta_objset,
		    dsl_dir_phys(odd)->dd_clones, hds->ds_object, tx));
		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dir_phys(ddpa->origin_origin->ds_dir)->dd_clones,
		    hds->ds_object, tx));

		VERIFY0(zap_remove_int(dp->dp_meta_objset,
		    dsl_dir_phys(ddpa->origin_origin->ds_dir)->dd_clones,
		    origin_head->ds_object, tx));
		if (dsl_dir_phys(dd)->dd_clones == 0) {
			dsl_dir_phys(dd)->dd_clones =
			    zap_create(dp->dp_meta_objset, DMU_OT_DSL_CLONES,
			    DMU_OT_NONE, 0, tx);
		}
		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_clones, origin_head->ds_object, tx));
	}

	/* move snapshots to this dir */
	for (snap = list_head(&ddpa->shared_snaps); snap;
	    snap = list_next(&ddpa->shared_snaps, snap)) {
		dsl_dataset_t *ds = snap->ds;

		/*
		 * Property callbacks are registered to a particular
		 * dsl_dir. Since ours is changing, evict the objset
		 * so that they will be unregistered from the old dsl_dir.
		 */
		if (ds->ds_objset) {
			dmu_objset_evict(ds->ds_objset);
			ds->ds_objset = NULL;
		}

		/* move snap name entry */
		VERIFY0(dsl_dataset_get_snapname(ds));
		VERIFY0(dsl_dataset_snap_remove(origin_head,
		    ds->ds_snapname, tx, B_TRUE));
		VERIFY0(zap_add(dp->dp_meta_objset,
		    dsl_dataset_phys(hds)->ds_snapnames_zapobj, ds->ds_snapname,
		    8, 1, &ds->ds_object, tx));
		dsl_fs_ss_count_adjust(hds->ds_dir, 1,
		    DD_FIELD_SNAPSHOT_COUNT, tx);

		/* change containing dsl_dir */
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ASSERT3U(dsl_dataset_phys(ds)->ds_dir_obj, ==, odd->dd_object);
		dsl_dataset_phys(ds)->ds_dir_obj = dd->dd_object;
		ASSERT3P(ds->ds_dir, ==, odd);
		dsl_dir_rele(ds->ds_dir, ds);
		VERIFY0(dsl_dir_hold_obj(dp, dd->dd_object,
		    NULL, ds, &ds->ds_dir));

		/* move any clone references */
		if (dsl_dataset_phys(ds)->ds_next_clones_obj &&
		    spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			zap_cursor_t zc;
			zap_attribute_t za;

			for (zap_cursor_init(&zc, dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    zap_cursor_advance(&zc)) {
				dsl_dataset_t *cnds;
				uint64_t o;

				if (za.za_first_integer == oldnext_obj) {
					/*
					 * We've already moved the
					 * origin's reference.
					 */
					continue;
				}

				VERIFY0(dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &cnds));
				o = dsl_dir_phys(cnds->ds_dir)->
				    dd_head_dataset_obj;

				VERIFY0(zap_remove_int(dp->dp_meta_objset,
				    dsl_dir_phys(odd)->dd_clones, o, tx));
				VERIFY0(zap_add_int(dp->dp_meta_objset,
				    dsl_dir_phys(dd)->dd_clones, o, tx));
				dsl_dataset_rele(cnds, FTAG);
			}
			zap_cursor_fini(&zc);
		}

		ASSERT(!dsl_prop_hascb(ds));
	}

	/*
	 * Change space accounting.
	 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
	 * both be valid, or both be 0 (resulting in delta == 0). This
	 * is true for each of {clone,origin} independently.
	 */

	delta = ddpa->cloneusedsnap -
	    dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, >=, 0);
	ASSERT3U(ddpa->used, >=, delta);
	dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(dd, DD_USED_HEAD,
	    ddpa->used - delta, ddpa->comp, ddpa->uncomp, tx);

	delta = ddpa->originusedsnap -
	    dsl_dir_phys(odd)->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, <=, 0);
	ASSERT3U(ddpa->used, >=, -delta);
	dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(odd, DD_USED_HEAD,
	    -ddpa->used - delta, -ddpa->comp, -ddpa->uncomp, tx);

	dsl_dataset_phys(origin_ds)->ds_unique_bytes = ddpa->unique;

	/* log history record */
	spa_history_log_internal_ds(hds, "promote", tx, "");

	dsl_dir_rele(odd, FTAG);
	promote_rele(ddpa, FTAG);
}
/*
 * Make a list of dsl_dataset_t's for the snapshots between first_obj
 * (exclusive) and last_obj (inclusive). The list will be in reverse
 * order (last_obj will be the list_head()). If first_obj == 0, do all
 * snapshots back to this dataset's origin.
 */
static int
snaplist_make(dsl_pool_t *dp,
    uint64_t first_obj, uint64_t last_obj, list_t *l, void *tag)
{
	uint64_t obj = last_obj;

	list_create(l, sizeof (struct promotenode),
	    offsetof(struct promotenode, link));

	while (obj != first_obj) {
		dsl_dataset_t *ds;
		struct promotenode *snap;
		int err;

		err = dsl_dataset_hold_obj(dp, obj, tag, &ds);
		ASSERT(err != ENOENT);
		if (err != 0)
			return (err);

		if (first_obj == 0)
			first_obj = dsl_dir_phys(ds->ds_dir)->dd_origin_obj;

		snap = kmem_alloc(sizeof (*snap), KM_SLEEP);
		snap->ds = ds;
		list_insert_tail(l, snap);
		obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	}

	return (0);
}
static int
snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
{
	struct promotenode *snap;

	*spacep = 0;
	for (snap = list_head(l); snap; snap = list_next(l, snap)) {
		uint64_t used, comp, uncomp;
		dsl_deadlist_space_range(&snap->ds->ds_deadlist,
		    mintxg, UINT64_MAX, &used, &comp, &uncomp);
		*spacep += used;
	}
	return (0);
}
static void
snaplist_destroy(list_t *l, void *tag)
{
	struct promotenode *snap;

	if (l == NULL || !list_link_active(&l->list_head))
		return;

	while ((snap = list_tail(l)) != NULL) {
		list_remove(l, snap);
		dsl_dataset_rele(snap->ds, tag);
		kmem_free(snap, sizeof (*snap));
	}
	list_destroy(l);
}
static int
promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp, void *tag)
{
	dsl_dir_t *dd;
	struct promotenode *snap;
	int error;

	error = dsl_dataset_hold(dp, ddpa->ddpa_clonename, tag,
	    &ddpa->ddpa_clone);
	if (error != 0)
		return (error);
	dd = ddpa->ddpa_clone->ds_dir;

	if (ddpa->ddpa_clone->ds_is_snapshot ||
	    !dsl_dir_is_clone(dd)) {
		dsl_dataset_rele(ddpa->ddpa_clone, tag);
		return (SET_ERROR(EINVAL));
	}

	error = snaplist_make(dp, 0, dsl_dir_phys(dd)->dd_origin_obj,
	    &ddpa->shared_snaps, tag);
	if (error != 0)
		goto out;

	error = snaplist_make(dp, 0, ddpa->ddpa_clone->ds_object,
	    &ddpa->clone_snaps, tag);
	if (error != 0)
		goto out;

	snap = list_head(&ddpa->shared_snaps);
	ASSERT3U(snap->ds->ds_object, ==, dsl_dir_phys(dd)->dd_origin_obj);
	error = snaplist_make(dp, dsl_dir_phys(dd)->dd_origin_obj,
	    dsl_dir_phys(snap->ds->ds_dir)->dd_head_dataset_obj,
	    &ddpa->origin_snaps, tag);
	if (error != 0)
		goto out;

	if (dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj != 0) {
		error = dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj,
		    tag, &ddpa->origin_origin);
		if (error != 0)
			goto out;
	}
out:
	if (error != 0)
		promote_rele(ddpa, tag);
	return (error);
}

static void
promote_rele(dsl_dataset_promote_arg_t *ddpa, void *tag)
{
	snaplist_destroy(&ddpa->shared_snaps, tag);
	snaplist_destroy(&ddpa->clone_snaps, tag);
	snaplist_destroy(&ddpa->origin_snaps, tag);
	if (ddpa->origin_origin != NULL)
		dsl_dataset_rele(ddpa->origin_origin, tag);
	dsl_dataset_rele(ddpa->ddpa_clone, tag);
}
/*
 * Promote a clone.
 *
 * If it fails due to a conflicting snapshot name, "conflsnap" will be filled
 * in with the name. (It must be at least ZFS_MAX_DATASET_NAME_LEN bytes long.)
 */
int
dsl_dataset_promote(const char *name, char *conflsnap)
{
	dsl_dataset_promote_arg_t ddpa = { 0 };
	uint64_t numsnaps;
	int error;
	objset_t *os;

	/*
	 * We will modify space proportional to the number of
	 * snapshots. Compute numsnaps.
	 */
	error = dmu_objset_hold(name, FTAG, &os);
	if (error != 0)
		return (error);
	error = zap_count(dmu_objset_pool(os)->dp_meta_objset,
	    dsl_dataset_phys(dmu_objset_ds(os))->ds_snapnames_zapobj,
	    &numsnaps);
	dmu_objset_rele(os, FTAG);
	if (error != 0)
		return (error);

	ddpa.ddpa_clonename = name;
	ddpa.err_ds = conflsnap;
	ddpa.cr = CRED();

	return (dsl_sync_task(name, dsl_dataset_promote_check,
	    dsl_dataset_promote_sync, &ddpa,
	    2 + numsnaps, ZFS_SPACE_CHECK_RESERVED));
}
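
/*
 * Usage sketch (hypothetical; error handling elided):
 *
 *	char conflsnap[ZFS_MAX_DATASET_NAME_LEN];
 *	int error = dsl_dataset_promote("tank/clone", conflsnap);
 *
 * On EEXIST, conflsnap holds the name of the conflicting snapshot. The
 * sync task is charged 2 + numsnaps modified blocks because every
 * moved snapshot dirties its own name and dir entries.
 */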
static int
dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone,
    dsl_dataset_t *origin_head, boolean_t force, void *owner, dmu_tx_t *tx)
{
	/*
	 * "slack" factor for received datasets with refquota set on them.
	 * See the bottom of this function for details on its use.
	 */
	uint64_t refquota_slack = DMU_MAX_ACCESS * spa_asize_inflation;
	int64_t unused_refres_delta;

	/* they should both be heads */
	if (clone->ds_is_snapshot ||
	    origin_head->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/* if we are not forcing, the branch point should be just before them */
	if (!force && clone->ds_prev != origin_head->ds_prev)
		return (SET_ERROR(EINVAL));

	/* clone should be the clone (unless they are unrelated) */
	if (clone->ds_prev != NULL &&
	    clone->ds_prev != clone->ds_dir->dd_pool->dp_origin_snap &&
	    origin_head->ds_dir != clone->ds_prev->ds_dir)
		return (SET_ERROR(EINVAL));

	/* the clone should be a child of the origin */
	if (clone->ds_dir->dd_parent != origin_head->ds_dir)
		return (SET_ERROR(EINVAL));

	/* origin_head shouldn't be modified unless 'force' */
	if (!force &&
	    dsl_dataset_modified_since_snap(origin_head, origin_head->ds_prev))
		return (SET_ERROR(ETXTBSY));

	/* origin_head should have no long holds (e.g. is not mounted) */
	if (dsl_dataset_handoff_check(origin_head, owner, tx))
		return (SET_ERROR(EBUSY));

	/* check amount of any unconsumed refreservation */
	unused_refres_delta =
	    (int64_t)MIN(origin_head->ds_reserved,
	    dsl_dataset_phys(origin_head)->ds_unique_bytes) -
	    (int64_t)MIN(origin_head->ds_reserved,
	    dsl_dataset_phys(clone)->ds_unique_bytes);

	if (unused_refres_delta > 0 &&
	    unused_refres_delta >
	    dsl_dir_space_available(origin_head->ds_dir, NULL, 0, TRUE))
		return (SET_ERROR(ENOSPC));

	/*
	 * The clone can't be too much over the head's refquota.
	 *
	 * To ensure that the entire refquota can be used, we allow one
	 * transaction to exceed the refquota. Therefore, this check
	 * needs to also allow for the space referenced to be more than the
	 * refquota. The maximum amount of space that one transaction can use
	 * on disk is DMU_MAX_ACCESS * spa_asize_inflation. Allowing this
	 * overage ensures that we are able to receive a filesystem that
	 * exceeds the refquota on the source system.
	 *
	 * So that overage is the refquota_slack we use below.
	 */
	if (origin_head->ds_quota != 0 &&
	    dsl_dataset_phys(clone)->ds_referenced_bytes >
	    origin_head->ds_quota + refquota_slack)
		return (SET_ERROR(EDQUOT));

	return (0);
}
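
/*
 * Worked example of the refquota_slack headroom (hypothetical
 * numbers): with spa_asize_inflation at its default of 24, the slack
 * is 24 * DMU_MAX_ACCESS. A received clone referencing refquota + 1G
 * fails with EDQUOT only if 1G exceeds that slack; an overage within
 * one transaction's worth of writes is accepted, so a source
 * filesystem sitting exactly at its refquota can still be received.
 */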
void
dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
    dsl_dataset_t *origin_head, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	int64_t unused_refres_delta;

	ASSERT(clone->ds_reserved == 0);
	/*
	 * NOTE: On DEBUG kernels there could be a race between this and
	 * the check function if spa_asize_inflation is adjusted...
	 */
	ASSERT(origin_head->ds_quota == 0 ||
	    dsl_dataset_phys(clone)->ds_unique_bytes <= origin_head->ds_quota +
	    DMU_MAX_ACCESS * spa_asize_inflation);
	ASSERT3P(clone->ds_prev, ==, origin_head->ds_prev);

	/*
	 * Swap per-dataset feature flags.
	 */
	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (!(spa_feature_table[f].fi_flags &
		    ZFEATURE_FLAG_PER_DATASET)) {
			ASSERT(!clone->ds_feature_inuse[f]);
			ASSERT(!origin_head->ds_feature_inuse[f]);
			continue;
		}

		boolean_t clone_inuse = clone->ds_feature_inuse[f];
		boolean_t origin_head_inuse = origin_head->ds_feature_inuse[f];

		if (clone_inuse) {
			dsl_dataset_deactivate_feature(clone->ds_object, f, tx);
			clone->ds_feature_inuse[f] = B_FALSE;
		}
		if (origin_head_inuse) {
			dsl_dataset_deactivate_feature(origin_head->ds_object,
			    f, tx);
			origin_head->ds_feature_inuse[f] = B_FALSE;
		}
		if (clone_inuse) {
			dsl_dataset_activate_feature(origin_head->ds_object,
			    f, tx);
			origin_head->ds_feature_inuse[f] = B_TRUE;
		}
		if (origin_head_inuse) {
			dsl_dataset_activate_feature(clone->ds_object, f, tx);
			clone->ds_feature_inuse[f] = B_TRUE;
		}
	}

	dmu_buf_will_dirty(clone->ds_dbuf, tx);
	dmu_buf_will_dirty(origin_head->ds_dbuf, tx);

	if (clone->ds_objset != NULL) {
		dmu_objset_evict(clone->ds_objset);
		clone->ds_objset = NULL;
	}

	if (origin_head->ds_objset != NULL) {
		dmu_objset_evict(origin_head->ds_objset);
		origin_head->ds_objset = NULL;
	}

	unused_refres_delta =
	    (int64_t)MIN(origin_head->ds_reserved,
	    dsl_dataset_phys(origin_head)->ds_unique_bytes) -
	    (int64_t)MIN(origin_head->ds_reserved,
	    dsl_dataset_phys(clone)->ds_unique_bytes);

	/*
	 * Reset origin's unique bytes, if it exists.
	 */
	if (clone->ds_prev) {
		dsl_dataset_t *origin = clone->ds_prev;
		uint64_t comp, uncomp;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		dsl_deadlist_space_range(&clone->ds_deadlist,
		    dsl_dataset_phys(origin)->ds_prev_snap_txg, UINT64_MAX,
		    &dsl_dataset_phys(origin)->ds_unique_bytes, &comp, &uncomp);
	}

	/* swap blkptrs */
	{
		blkptr_t tmp;

		rrw_enter(&clone->ds_bp_rwlock, RW_WRITER, FTAG);
		rrw_enter(&origin_head->ds_bp_rwlock, RW_WRITER, FTAG);

		tmp = dsl_dataset_phys(origin_head)->ds_bp;
		dsl_dataset_phys(origin_head)->ds_bp =
		    dsl_dataset_phys(clone)->ds_bp;
		dsl_dataset_phys(clone)->ds_bp = tmp;
		rrw_exit(&origin_head->ds_bp_rwlock, FTAG);
		rrw_exit(&clone->ds_bp_rwlock, FTAG);
	}

	/* set dd_*_bytes */
	{
		int64_t dused, dcomp, duncomp;
		uint64_t cdl_used, cdl_comp, cdl_uncomp;
		uint64_t odl_used, odl_comp, odl_uncomp;

		ASSERT3U(dsl_dir_phys(clone->ds_dir)->
		    dd_used_breakdown[DD_USED_SNAP], ==, 0);

		dsl_deadlist_space(&clone->ds_deadlist,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space(&origin_head->ds_deadlist,
		    &odl_used, &odl_comp, &odl_uncomp);

		dused = dsl_dataset_phys(clone)->ds_referenced_bytes +
		    cdl_used -
		    (dsl_dataset_phys(origin_head)->ds_referenced_bytes +
		    odl_used);
		dcomp = dsl_dataset_phys(clone)->ds_compressed_bytes +
		    cdl_comp -
		    (dsl_dataset_phys(origin_head)->ds_compressed_bytes +
		    odl_comp);
		duncomp = dsl_dataset_phys(clone)->ds_uncompressed_bytes +
		    cdl_uncomp -
		    (dsl_dataset_phys(origin_head)->ds_uncompressed_bytes +
		    odl_uncomp);

		dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_HEAD,
		    dused, dcomp, duncomp, tx);
		dsl_dir_diduse_space(clone->ds_dir, DD_USED_HEAD,
		    -dused, -dcomp, -duncomp, tx);

		/*
		 * The difference in the space used by snapshots is the
		 * difference in snapshot space due to the head's
		 * deadlist (since that's the only thing that's
		 * changing that affects the snapused).
		 */
		dsl_deadlist_space_range(&clone->ds_deadlist,
		    origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space_range(&origin_head->ds_deadlist,
		    origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
		    &odl_used, &odl_comp, &odl_uncomp);
		dsl_dir_transfer_space(origin_head->ds_dir, cdl_used - odl_used,
		    DD_USED_HEAD, DD_USED_SNAP, tx);
	}

	/* swap ds_*_bytes */
	SWITCH64(dsl_dataset_phys(origin_head)->ds_referenced_bytes,
	    dsl_dataset_phys(clone)->ds_referenced_bytes);
	SWITCH64(dsl_dataset_phys(origin_head)->ds_compressed_bytes,
	    dsl_dataset_phys(clone)->ds_compressed_bytes);
	SWITCH64(dsl_dataset_phys(origin_head)->ds_uncompressed_bytes,
	    dsl_dataset_phys(clone)->ds_uncompressed_bytes);
	SWITCH64(dsl_dataset_phys(origin_head)->ds_unique_bytes,
	    dsl_dataset_phys(clone)->ds_unique_bytes);

	/* apply any parent delta for change in unconsumed refreservation */
	dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_REFRSRV,
	    unused_refres_delta, 0, 0, tx);

	/*
	 * Swap deadlists.
	 */
	dsl_deadlist_close(&clone->ds_deadlist);
	dsl_deadlist_close(&origin_head->ds_deadlist);
	SWITCH64(dsl_dataset_phys(origin_head)->ds_deadlist_obj,
	    dsl_dataset_phys(clone)->ds_deadlist_obj);
	dsl_deadlist_open(&clone->ds_deadlist, dp->dp_meta_objset,
	    dsl_dataset_phys(clone)->ds_deadlist_obj);
	dsl_deadlist_open(&origin_head->ds_deadlist, dp->dp_meta_objset,
	    dsl_dataset_phys(origin_head)->ds_deadlist_obj);

	dsl_scan_ds_clone_swapped(origin_head, clone, tx);

	spa_history_log_internal_ds(clone, "clone swap", tx,
	    "parent=%s", origin_head->ds_dir->dd_myname);
}
/*
 * Given a pool name and a dataset object number in that pool,
 * return the name of that dataset.
 */
int
dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	error = dsl_pool_hold(pname, FTAG, &dp);
	if (error != 0)
		return (error);

	error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
	if (error == 0) {
		dsl_dataset_name(ds, buf);
		dsl_dataset_rele(ds, FTAG);
	}
	dsl_pool_rele(dp, FTAG);

	return (error);
}
int
dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
    uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
{
	int error = 0;

	ASSERT3S(asize, >, 0);

	/*
	 * *ref_rsrv is the portion of asize that will come from any
	 * unconsumed refreservation space.
	 */
	*ref_rsrv = 0;

	mutex_enter(&ds->ds_lock);
	/*
	 * Make a space adjustment for reserved bytes.
	 */
	if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes) {
		ASSERT3U(*used, >=,
		    ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
		*used -=
		    (ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
		*ref_rsrv =
		    asize - MIN(asize, parent_delta(ds, asize + inflight));
	}

	if (!check_quota || ds->ds_quota == 0) {
		mutex_exit(&ds->ds_lock);
		return (0);
	}
	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (dsl_dataset_phys(ds)->ds_referenced_bytes + inflight >=
	    ds->ds_quota) {
		if (inflight > 0 ||
		    dsl_dataset_phys(ds)->ds_referenced_bytes < ds->ds_quota)
			error = SET_ERROR(ERESTART);
		else
			error = SET_ERROR(EDQUOT);
	}
	mutex_exit(&ds->ds_lock);

	return (error);
}
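
/*
 * Worked example (hypothetical numbers): with refreservation = 10G,
 * ds_unique_bytes = 4G and refquota = 20G, the first adjustment
 * subtracts the 6G of still-unconsumed refreservation from *used,
 * since that space is already charged to this dataset. If
 * ds_referenced_bytes + inflight then reaches the 20G quota, the
 * caller sees ERESTART while pending changes might still make room
 * (inflight > 0, or on-disk still under quota), and EDQUOT only once
 * the overage is firmly on disk.
 */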
typedef struct dsl_dataset_set_qr_arg {
	const char *ddsqra_name;
	zprop_source_t ddsqra_source;
	uint64_t ddsqra_value;
} dsl_dataset_set_qr_arg_t;
static int
dsl_dataset_set_refquota_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;
	uint64_t newval;

	if (spa_version(dp->dp_spa) < SPA_VERSION_REFQUOTA)
		return (SET_ERROR(ENOTSUP));

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
	if (error != 0)
		return (error);

	if (ds->ds_is_snapshot) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	error = dsl_prop_predict(ds->ds_dir,
	    zfs_prop_to_name(ZFS_PROP_REFQUOTA),
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	if (newval == 0) {
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	if (newval < dsl_dataset_phys(ds)->ds_referenced_bytes ||
	    newval < ds->ds_reserved) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}
static void
dsl_dataset_set_refquota_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	uint64_t newval;

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));

	dsl_prop_set_sync_impl(ds,
	    zfs_prop_to_name(ZFS_PROP_REFQUOTA),
	    ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
	    &ddsqra->ddsqra_value, tx);

	VERIFY0(dsl_prop_get_int_ds(ds,
	    zfs_prop_to_name(ZFS_PROP_REFQUOTA), &newval));

	if (ds->ds_quota != newval) {
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_quota = newval;
	}
	dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_set_refquota(const char *dsname, zprop_source_t source,
    uint64_t refquota)
{
	dsl_dataset_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = dsname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = refquota;

	return (dsl_sync_task(dsname, dsl_dataset_set_refquota_check,
	    dsl_dataset_set_refquota_sync, &ddsqra, 0, ZFS_SPACE_CHECK_NONE));
}
static int
dsl_dataset_set_refreservation_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;
	uint64_t newval, unique;

	if (spa_version(dp->dp_spa) < SPA_VERSION_REFRESERVATION)
		return (SET_ERROR(ENOTSUP));

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
	if (error != 0)
		return (error);

	if (ds->ds_is_snapshot) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	error = dsl_prop_predict(ds->ds_dir,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	mutex_enter(&ds->ds_lock);
	if (!DS_UNIQUE_IS_ACCURATE(ds))
		dsl_dataset_recalc_head_uniq(ds);
	unique = dsl_dataset_phys(ds)->ds_unique_bytes;
	mutex_exit(&ds->ds_lock);

	if (MAX(unique, newval) > MAX(unique, ds->ds_reserved)) {
		uint64_t delta = MAX(unique, newval) -
		    MAX(unique, ds->ds_reserved);

		if (delta >
		    dsl_dir_space_available(ds->ds_dir, NULL, 0, B_TRUE) ||
		    (ds->ds_quota > 0 && newval > ds->ds_quota)) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(ENOSPC));
		}
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}
void
dsl_dataset_set_refreservation_sync_impl(dsl_dataset_t *ds,
    zprop_source_t source, uint64_t value, dmu_tx_t *tx)
{
	uint64_t newval;
	uint64_t unique;
	int64_t delta;

	dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
	    source, sizeof (value), 1, &value, tx);

	VERIFY0(dsl_prop_get_int_ds(ds,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &newval));

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
	unique = dsl_dataset_phys(ds)->ds_unique_bytes;
	delta = MAX(0, (int64_t)(newval - unique)) -
	    MAX(0, (int64_t)(ds->ds_reserved - unique));
	ds->ds_reserved = newval;
	mutex_exit(&ds->ds_lock);

	dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
	mutex_exit(&ds->ds_dir->dd_lock);
}
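
/*
 * Worked example of the delta computation above (hypothetical
 * numbers): raising refreservation from 6G to 10G on a dataset with
 * unique = 3G gives delta = MAX(0, 10G - 3G) - MAX(0, 6G - 3G) =
 * 7G - 3G = 4G, i.e. 4G more of parent space becomes charged to
 * DD_USED_REFRSRV. Lowering the property produces a negative delta
 * and returns that space to the parent.
 */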
static void
dsl_dataset_set_refreservation_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
	dsl_dataset_set_refreservation_sync_impl(ds,
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, tx);
	dsl_dataset_rele(ds, FTAG);
}

int
dsl_dataset_set_refreservation(const char *dsname, zprop_source_t source,
    uint64_t refreservation)
{
	dsl_dataset_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = dsname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = refreservation;

	return (dsl_sync_task(dsname, dsl_dataset_set_refreservation_check,
	    dsl_dataset_set_refreservation_sync, &ddsqra,
	    0, ZFS_SPACE_CHECK_NONE));
}
/*
 * Return (in *usedp) the amount of space written in new that is not
 * present in oldsnap. New may be a snapshot or the head. Old must be
 * a snapshot before new, in new's filesystem (or its origin). If not then
 * fail and return EINVAL.
 *
 * The written space is calculated by considering two components: First, we
 * ignore any freed space, and calculate the written as new's used space
 * minus old's used space. Next, we add in the amount of space that was freed
 * between the two snapshots, thus reducing new's used space relative to old's.
 * Specifically, this is the space that was born before old->ds_creation_txg,
 * and freed before new (ie. on new's deadlist or a previous deadlist).
 *
 * space freed                         [---------------------]
 * snapshots                       ---O-------O--------O-------O------
 *                                         oldsnap            new
 */
int
dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	int err = 0;
	uint64_t snapobj;
	dsl_pool_t *dp = new->ds_dir->dd_pool;

	ASSERT(dsl_pool_config_held(dp));

	*usedp = 0;
	*usedp += dsl_dataset_phys(new)->ds_referenced_bytes;
	*usedp -= dsl_dataset_phys(oldsnap)->ds_referenced_bytes;

	*compp = 0;
	*compp += dsl_dataset_phys(new)->ds_compressed_bytes;
	*compp -= dsl_dataset_phys(oldsnap)->ds_compressed_bytes;

	*uncompp = 0;
	*uncompp += dsl_dataset_phys(new)->ds_uncompressed_bytes;
	*uncompp -= dsl_dataset_phys(oldsnap)->ds_uncompressed_bytes;

	snapobj = new->ds_object;
	while (snapobj != oldsnap->ds_object) {
		dsl_dataset_t *snap;
		uint64_t used, comp, uncomp;

		if (snapobj == new->ds_object) {
			snap = new;
		} else {
			err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
			if (err != 0)
				break;
		}

		if (dsl_dataset_phys(snap)->ds_prev_snap_txg ==
		    dsl_dataset_phys(oldsnap)->ds_creation_txg) {
			/*
			 * The blocks in the deadlist can not be born after
			 * ds_prev_snap_txg, so get the whole deadlist space,
			 * which is more efficient (especially for old-format
			 * deadlists). Unfortunately the deadlist code
			 * doesn't have enough information to make this
			 * optimization itself.
			 */
			dsl_deadlist_space(&snap->ds_deadlist,
			    &used, &comp, &uncomp);
		} else {
			dsl_deadlist_space_range(&snap->ds_deadlist,
			    0, dsl_dataset_phys(oldsnap)->ds_creation_txg,
			    &used, &comp, &uncomp);
		}
		*usedp += used;
		*compp += comp;
		*uncompp += uncomp;

		/*
		 * If we get to the beginning of the chain of snapshots
		 * (ds_prev_snap_obj == 0) before oldsnap, then oldsnap
		 * was not a snapshot of/before new.
		 */
		snapobj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
		if (snap != new)
			dsl_dataset_rele(snap, FTAG);
		if (snapobj == 0) {
			err = SET_ERROR(EINVAL);
			break;
		}
	}
	return (err);
}
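
/*
 * Worked example (hypothetical numbers): oldsnap references 10G, new
 * references 12G, and 1G that was born before oldsnap's creation txg
 * was freed between the two (so it sits on the deadlists the loop
 * visits). Then written = 12G - 10G + 1G = 3G: 3G of new data were
 * written even though the net growth is only 2G.
 */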
/*
 * Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
 * lastsnap, and all snapshots in between are deleted.
 *
 * blocks that would be freed            [---------------------------]
 * snapshots                       ---O-------O--------O-------O--------O
 *                                        firstsnap        lastsnap
 *
 * This is the set of blocks that were born after the snap before firstsnap,
 * (birth > firstsnap->prev_snap_txg) and died before the snap after the
 * last snap (ie, is on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
 * We calculate this by iterating over the relevant deadlists (from the snap
 * after lastsnap, backward to the snap after firstsnap), summing up the
 * space on the deadlist that was born after the snap before firstsnap.
 */
int
dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
    dsl_dataset_t *lastsnap,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	int err = 0;
	uint64_t snapobj;
	dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;

	ASSERT(firstsnap->ds_is_snapshot);
	ASSERT(lastsnap->ds_is_snapshot);

	/*
	 * Check that the snapshots are in the same dsl_dir, and firstsnap
	 * is before lastsnap.
	 */
	if (firstsnap->ds_dir != lastsnap->ds_dir ||
	    dsl_dataset_phys(firstsnap)->ds_creation_txg >
	    dsl_dataset_phys(lastsnap)->ds_creation_txg)
		return (SET_ERROR(EINVAL));

	*usedp = *compp = *uncompp = 0;

	snapobj = dsl_dataset_phys(lastsnap)->ds_next_snap_obj;
	while (snapobj != firstsnap->ds_object) {
		dsl_dataset_t *ds;
		uint64_t used, comp, uncomp;

		err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
		if (err != 0)
			break;

		dsl_deadlist_space_range(&ds->ds_deadlist,
		    dsl_dataset_phys(firstsnap)->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		*usedp += used;
		*compp += comp;
		*uncompp += uncomp;

		snapobj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
		ASSERT3U(snapobj, !=, 0);
		dsl_dataset_rele(ds, FTAG);
	}
	return (err);
}
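
/*
 * Worked example (hypothetical): deleting firstsnap..lastsnap frees
 * exactly the blocks born after the snapshot preceding firstsnap that
 * died by the snapshot following lastsnap. If those deadlists hold 2G
 * born in that txg range, *usedp comes back as 2G; blocks still
 * visible to an older or newer snapshot are not counted.
 */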
/*
 * Return TRUE if 'earlier' is an earlier snapshot in 'later's timeline.
 * For example, they could both be snapshots of the same filesystem, and
 * 'earlier' is before 'later'. Or 'earlier' could be the origin of
 * 'later's filesystem. Or 'earlier' could be an older snapshot in the origin's
 * filesystem. Or 'earlier' could be the origin's origin.
 *
 * If non-zero, earlier_txg is used instead of earlier's ds_creation_txg.
 */
boolean_t
dsl_dataset_is_before(dsl_dataset_t *later, dsl_dataset_t *earlier,
    uint64_t earlier_txg)
{
	dsl_pool_t *dp = later->ds_dir->dd_pool;
	int error;
	boolean_t ret;

	ASSERT(dsl_pool_config_held(dp));
	ASSERT(earlier->ds_is_snapshot || earlier_txg != 0);

	if (earlier_txg == 0)
		earlier_txg = dsl_dataset_phys(earlier)->ds_creation_txg;

	if (later->ds_is_snapshot &&
	    earlier_txg >= dsl_dataset_phys(later)->ds_creation_txg)
		return (B_FALSE);

	if (later->ds_dir == earlier->ds_dir)
		return (B_TRUE);
	if (!dsl_dir_is_clone(later->ds_dir))
		return (B_FALSE);

	if (dsl_dir_phys(later->ds_dir)->dd_origin_obj == earlier->ds_object)
		return (B_TRUE);
	dsl_dataset_t *origin;
	error = dsl_dataset_hold_obj(dp,
	    dsl_dir_phys(later->ds_dir)->dd_origin_obj, FTAG, &origin);
	if (error != 0)
		return (B_FALSE);
	ret = dsl_dataset_is_before(origin, earlier, earlier_txg);
	dsl_dataset_rele(origin, FTAG);
	return (ret);
}
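
/*
 * Example (hypothetical pool layout): with tank/fs@base, a clone
 * tank/c1 of @base, and a clone tank/c2 of tank/c1@mid,
 * dsl_dataset_is_before(tank/c2@now, tank/fs@base, 0) first fails the
 * same-dsl_dir and direct-origin tests, then recurses through
 * tank/c1@mid to @base and returns B_TRUE.
 */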
void
dsl_dataset_zapify(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;

	dmu_object_zapify(mos, ds->ds_object, DMU_OT_DSL_DATASET, tx);
}

boolean_t
dsl_dataset_is_zapified(dsl_dataset_t *ds)
{
	dmu_object_info_t doi;

	dmu_object_info_from_db(ds->ds_dbuf, &doi);
	return (doi.doi_type == DMU_OTN_ZAP_METADATA);
}

boolean_t
dsl_dataset_has_resume_receive_state(dsl_dataset_t *ds)
{
	return (dsl_dataset_is_zapified(ds) &&
	    zap_contains(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_object, DS_FIELD_RESUME_TOGUID) == 0);
}