/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2013 by Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_destroy.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
#include <sys/zcp.h>

int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (dsl_dataset_long_held(ds))
		return (SET_ERROR(EBUSY));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (SET_ERROR(ENOTSUP));
		return (0);
	}

	/*
	 * If this snapshot has an elevated user reference count,
	 * we can't destroy it yet.
	 */
	if (ds->ds_userrefs > 0)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete a branch point.
	 */
	if (dsl_dataset_phys(ds)->ds_num_children > 1)
		return (SET_ERROR(EEXIST));

	return (0);
}

int
dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_snapshot_arg_t *ddsa = arg;
	const char *dsname = ddsa->ddsa_name;
	boolean_t defer = ddsa->ddsa_defer;

	dsl_pool_t *dp = dmu_tx_pool(tx);
	int error = 0;
	dsl_dataset_t *ds;

	error = dsl_dataset_hold(dp, dsname, FTAG, &ds);

	/*
	 * If the snapshot does not exist, silently ignore it, and
	 * dsl_destroy_snapshot_sync() will be a no-op
	 * (it's "already destroyed").
	 */
	if (error == ENOENT)
		return (0);

	if (error == 0) {
		error = dsl_destroy_snapshot_check_impl(ds, defer);
		dsl_dataset_rele(ds, FTAG);
	}

	return (error);
}

struct process_old_arg {
	dsl_dataset_t *ds;
	dsl_dataset_t *ds_prev;
	boolean_t after_branch_point;
	zio_t *pio;
	uint64_t used, comp, uncomp;
};
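
/*
 * Callback invoked for each entry on the next snapshot's old-format
 * deadlist.  Blocks born before the destroyed snapshot's previous
 * snapshot are still shared, so they stay on its deadlist (crediting
 * prev's unique bytes where appropriate); blocks born after it were
 * unique to the destroyed snapshot and are freed immediately.
 */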
static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	ASSERT(!BP_IS_HOLE(bp));

	if (bp->blk_birth <= dsl_dataset_phys(poa->ds)->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    dsl_dataset_phys(poa->ds_prev)->ds_prev_snap_txg) {
			dsl_dataset_phys(poa->ds_prev)->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}

	return (0);
}
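
/*
 * Handle pre-SPA_VERSION_DEADLISTS deadlists: walk every entry on
 * ds_next's deadlist through process_old_cb(), then swap the deadlist
 * objects so that ds_next keeps the surviving entries and the leftover
 * object is freed along with ds by the caller.
 */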
static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t deadlist_obj;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY0(zio_wait(poa.pio));
	ASSERT3U(poa.used, ==, dsl_dataset_phys(ds)->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
	dsl_dataset_phys(ds)->ds_deadlist_obj =
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj;
	dsl_dataset_phys(ds_next)->ds_deadlist_obj = deadlist_obj;
	dsl_deadlist_open(&ds->ds_deadlist, mos,
	    dsl_dataset_phys(ds)->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj);
}
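
/*
 * Walk this dataset's dd_clones zap and, for each clone that branched
 * off after mintxg, collapse the deadlist entry at mintxg (and its
 * remap-deadlist counterpart, if one exists), recursing into clones of
 * clones.
 */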
static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but dsl_deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (dsl_dir_phys(ds->ds_dir)->dd_clones == 0)
		return;

	for (zap_cursor_init(&zc, mos, dsl_dir_phys(ds->ds_dir)->dd_clones);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_dataset_t *clone;

		VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za.za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			if (dsl_dataset_remap_deadlist_exists(clone)) {
				dsl_deadlist_remove_key(
				    &clone->ds_remap_deadlist, mintxg, tx);
			}
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(&zc);
}
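
/*
 * Fold the snapshot's remap deadlists (which track blocks rewritten by
 * device removal) into its neighbors: blocks obsoleted by ds_next move
 * to the pool-wide obsolete bpobj, and ds's own remap deadlist is
 * merged into ds_next's.
 */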
static void
dsl_destroy_snapshot_handle_remaps(dsl_dataset_t *ds, dsl_dataset_t *ds_next,
    dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* Move blocks to be obsoleted to pool's obsolete list. */
	if (dsl_dataset_remap_deadlist_exists(ds_next)) {
		if (!bpobj_is_open(&dp->dp_obsolete_bpobj))
			dsl_pool_create_obsolete_bpobj(dp, tx);

		dsl_deadlist_move_bpobj(&ds_next->ds_remap_deadlist,
		    &dp->dp_obsolete_bpobj,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
	}

	/* Merge our deadlist into next's and free it. */
	if (dsl_dataset_remap_deadlist_exists(ds)) {
		uint64_t remap_deadlist_object =
		    dsl_dataset_get_remap_deadlist_object(ds);
		ASSERT(remap_deadlist_object != 0);

		mutex_enter(&ds_next->ds_remap_deadlist_lock);
		if (!dsl_dataset_remap_deadlist_exists(ds_next))
			dsl_dataset_create_remap_deadlist(ds_next, tx);
		mutex_exit(&ds_next->ds_remap_deadlist_lock);

		dsl_deadlist_merge(&ds_next->ds_remap_deadlist,
		    remap_deadlist_object, tx);
		dsl_dataset_destroy_remap_deadlist(ds, tx);
	}
}
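
/*
 * Sync-phase snapshot destroy.  If "defer" is set and the snapshot
 * still has user holds or clones, only DS_FLAG_DEFER_DESTROY is set and
 * the actual destroy happens when the last hold or clone goes away;
 * otherwise the snapshot is unlinked from its neighbors and its on-disk
 * state is freed here.
 */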
void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	uint64_t obj;

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	ASSERT(refcount_is_zero(&ds->ds_longholds));

	if (defer &&
	    (ds->ds_userrefs > 0 ||
	    dsl_dataset_phys(ds)->ds_num_children > 1)) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_DEFER_DESTROY;
		spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
		return;
	}
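
	/*
	 * Past this point the destroy is unconditional: unlink the
	 * snapshot from its neighbors, fold its deadlist into ds_next,
	 * fix up the space accounting, and free its objects.
	 */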
	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (ds->ds_feature_inuse[f]) {
			dsl_dataset_deactivate_feature(obj, f, tx);
			ds->ds_feature_inuse[f] = B_FALSE;
		}
	}
	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		ASSERT3P(ds->ds_prev, ==, NULL);
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &ds_prev));
		after_branch_point =
		    (dsl_dataset_phys(ds_prev)->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    dsl_dataset_phys(ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
			if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
				VERIFY0(zap_add_int(mos,
				    dsl_dataset_phys(ds_prev)->
				    ds_next_clones_obj,
				    dsl_dataset_phys(ds)->ds_next_snap_obj,
				    tx));
			}
		}
		if (!after_branch_point) {
			dsl_dataset_phys(ds_prev)->ds_next_snap_obj =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
		}
	}

	dsl_dataset_t *ds_next;
	uint64_t old_unique;
	uint64_t used = 0, comp = 0, uncomp = 0;

	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &ds_next));
	ASSERT3U(dsl_dataset_phys(ds_next)->ds_prev_snap_obj, ==, obj);

	old_unique = dsl_dataset_phys(ds_next)->ds_unique_bytes;

	dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
	dsl_dataset_phys(ds_next)->ds_prev_snap_obj =
	    dsl_dataset_phys(ds)->ds_prev_snap_obj;
	dsl_dataset_phys(ds_next)->ds_prev_snap_txg =
	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
	    ds_prev ? dsl_dataset_phys(ds_prev)->ds_creation_txg : 0);

	if (ds_next->ds_deadlist.dl_oldfmt) {
		process_old_deadlist(ds, ds_prev, ds_next,
		    after_branch_point, tx);
	} else {
		/* Adjust prev's unique space. */
		if (ds_prev && !after_branch_point) {
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    dsl_dataset_phys(ds_prev)->ds_prev_snap_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg,
			    &used, &comp, &uncomp);
			dsl_dataset_phys(ds_prev)->ds_unique_bytes += used;
		}

		/* Adjust snapused. */
		dsl_deadlist_space_range(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
		    -used, -comp, -uncomp, tx);

		/* Move blocks to be freed to pool's free list. */
		dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
		    &dp->dp_free_bpobj, dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    tx);
		dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
		    DD_USED_HEAD, used, comp, uncomp, tx);

		/* Merge our deadlist into next's and free it. */
		dsl_deadlist_merge(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	}

	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	dsl_destroy_snapshot_handle_remaps(ds, ds_next, tx);

	/* Collapse range in clone heads */
	dsl_dataset_remove_clones_key(ds,
	    dsl_dataset_phys(ds)->ds_creation_txg, tx);

	if (ds_next->ds_is_snapshot) {
		dsl_dataset_t *ds_nextnext;

		/*
		 * Update next's unique to include blocks which
		 * were previously shared by only this snapshot
		 * and it.  Those blocks will be born after the
		 * prev snap and before this snap, and will have
		 * died after the next snap and before the one
		 * after that (ie. be on the snap after next's
		 * deadlist).
		 */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds_next)->ds_next_snap_obj,
		    FTAG, &ds_nextnext));
		dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    dsl_dataset_phys(ds)->ds_creation_txg,
		    &used, &comp, &uncomp);
		dsl_dataset_phys(ds_next)->ds_unique_bytes += used;
		dsl_dataset_rele(ds_nextnext, FTAG);
		ASSERT3P(ds_next->ds_prev, ==, NULL);

		/* Collapse range in this head. */
		dsl_dataset_t *hds;
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &hds));
		dsl_deadlist_remove_key(&hds->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_creation_txg, tx);
		if (dsl_dataset_remap_deadlist_exists(hds)) {
			dsl_deadlist_remove_key(&hds->ds_remap_deadlist,
			    dsl_dataset_phys(ds)->ds_creation_txg, tx);
		}
		dsl_dataset_rele(hds, FTAG);
	} else {
		ASSERT3P(ds_next->ds_prev, ==, ds);
		dsl_dataset_rele(ds_next->ds_prev, ds_next);
		ds_next->ds_prev = NULL;
		if (ds_prev) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds_next, &ds_next->ds_prev));
		}

		dsl_dataset_recalc_head_uniq(ds_next);

		/*
		 * Reduce the amount of our unconsumed refreservation
		 * being charged to our parent by the amount of
		 * new unique data we have gained.
		 */
		if (old_unique < ds_next->ds_reserved) {
			int64_t mrsdelta;
			uint64_t new_unique =
			    dsl_dataset_phys(ds_next)->ds_unique_bytes;

			ASSERT(old_unique <= new_unique);
			mrsdelta = MIN(new_unique - old_unique,
			    ds_next->ds_reserved - old_unique);
			dsl_dir_diduse_space(ds->ds_dir,
			    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
		}
	}
	dsl_dataset_rele(ds_next, FTAG);

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* remove from snapshot namespace */
	dsl_dataset_t *ds_head;
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
	VERIFY0(dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
	{
		uint64_t val;
		int err;

		err = dsl_dataset_snap_lookup(ds_head,
		    ds->ds_snapname, &val);
		ASSERT0(err);
		ASSERT3U(val, ==, obj);
	}
#endif
	VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx, B_TRUE));
	dsl_dataset_rele(ds_head, FTAG);

	if (ds_prev != NULL)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
		uint64_t count;

		ASSERT0(zap_count(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, &count) &&
		    count == 0);
		VERIFY0(dmu_object_free(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, tx));
	}
	if (dsl_dataset_phys(ds)->ds_props_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_props_obj,
		    tx));
	if (dsl_dataset_phys(ds)->ds_userrefs_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_userrefs_obj,
		    tx));
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);
}

void
dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_snapshot_arg_t *ddsa = arg;
	const char *dsname = ddsa->ddsa_name;
	boolean_t defer = ddsa->ddsa_defer;

	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	int error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
	if (error != 0)
		return;

	dsl_destroy_snapshot_sync_impl(ds, defer, tx);
	dsl_dataset_rele(ds, FTAG);
}

/*
 * The semantics of this function are described in the comment above
 * lzc_destroy_snaps().  To summarize:
 *
 * The snapshots must all be in the same pool.
 *
 * Snapshots that don't exist will be silently ignored (considered to be
 * "already deleted").
 *
 * On success, all snaps will be destroyed and this will return 0.
 * On failure, no snaps will be destroyed, the errlist will be filled in,
 * and this will return an errno.
 */
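
/*
 * The checks and the destroys are executed as a single Lua channel
 * program below: every snapshot is checked first, and only if all
 * checks pass are the sync-phase destroys issued, which is what makes
 * the operation all-or-nothing.
 */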
int
dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
    nvlist_t *errlist)
{
	if (nvlist_next_nvpair(snaps, NULL) == NULL)
		return (0);

	/*
	 * lzc_destroy_snaps() is documented to take an nvlist whose
	 * values "don't matter".  We need to convert that nvlist to
	 * one that we know can be converted to LUA.  We also don't
	 * care about any duplicate entries because the nvlist will
	 * be converted to a LUA table which should take care of this.
	 */
	nvlist_t *snaps_normalized;
	VERIFY0(nvlist_alloc(&snaps_normalized, 0, KM_SLEEP));
	for (nvpair_t *pair = nvlist_next_nvpair(snaps, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(snaps, pair)) {
		fnvlist_add_boolean_value(snaps_normalized,
		    nvpair_name(pair), B_TRUE);
	}

	nvlist_t *arg;
	VERIFY0(nvlist_alloc(&arg, 0, KM_SLEEP));
	fnvlist_add_nvlist(arg, "snaps", snaps_normalized);
	fnvlist_free(snaps_normalized);
	fnvlist_add_boolean_value(arg, "defer", defer);

	nvlist_t *wrapper;
	VERIFY0(nvlist_alloc(&wrapper, 0, KM_SLEEP));
	fnvlist_add_nvlist(wrapper, ZCP_ARG_ARGLIST, arg);
	fnvlist_free(arg);

	const char *program =
	    "arg = ...\n"
	    "snaps = arg['snaps']\n"
	    "defer = arg['defer']\n"
	    "errors = { }\n"
	    "has_errors = false\n"
	    "for snap, v in pairs(snaps) do\n"
	    "    errno = zfs.check.destroy{snap, defer=defer}\n"
	    "    zfs.debug('snap: ' .. snap .. ' errno: ' .. errno)\n"
	    "    if errno == ENOENT then\n"
	    "        snaps[snap] = nil\n"
	    "    elseif errno ~= 0 then\n"
	    "        errors[snap] = errno\n"
	    "        has_errors = true\n"
	    "    end\n"
	    "end\n"
	    "if has_errors then\n"
	    "    return errors\n"
	    "end\n"
	    "for snap, v in pairs(snaps) do\n"
	    "    errno = zfs.sync.destroy{snap, defer=defer}\n"
	    "    assert(errno == 0)\n"
	    "end\n"
	    "return { }\n";

	nvlist_t *result = fnvlist_alloc();
	int error = zcp_eval(nvpair_name(nvlist_next_nvpair(snaps, NULL)),
	    program,
	    B_TRUE,
	    0,
	    zfs_lua_max_memlimit,
	    nvlist_next_nvpair(wrapper, NULL), result);
	if (error != 0) {
		char *errorstr = NULL;
		(void) nvlist_lookup_string(result, ZCP_RET_ERROR, &errorstr);
		if (errorstr != NULL) {
			zfs_dbgmsg(errorstr);
		}
		return (error);
	}
	fnvlist_free(wrapper);

	/*
	 * lzc_destroy_snaps() is documented to fill the errlist with
	 * int32 values, so we need to convert the int64 values that are
	 * returned from LUA.
	 */
	nvlist_t *errlist_raw = fnvlist_lookup_nvlist(result, ZCP_RET_RETURN);
	for (nvpair_t *pair = nvlist_next_nvpair(errlist_raw, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(errlist_raw, pair)) {
		int32_t val = (int32_t)fnvpair_value_int64(pair);

		VERIFY3S(val, !=, 0);
		fnvlist_add_int32(errlist, nvpair_name(pair), val);
	}
	fnvlist_free(result);
	return (0);
}

int
dsl_destroy_snapshot(const char *name, boolean_t defer)
{
	int error;
	nvlist_t *nvl = fnvlist_alloc();
	nvlist_t *errlist = fnvlist_alloc();

	fnvlist_add_boolean(nvl, name);
	error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
	fnvlist_free(errlist);
	fnvlist_free(nvl);
	return (error);
}
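
/*
 * kill_blkptr() is the traverse_dataset() callback used by
 * old_synchronous_dataset_destroy(); struct killarg carries the dataset
 * and transaction it needs.  Intent-log blocks have no accounting and
 * are freed directly, everything else goes through
 * dsl_dataset_block_kill().
 */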
struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};

/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log.  It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		ASSERT3U(bp->blk_birth, >,
		    dsl_dataset_phys(ka->ds)->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}

static void
old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	struct killarg ka;

	/*
	 * Free everything that we point to (that's born after
	 * the previous snapshot, if we are a clone)
	 *
	 * NB: this should be very quick, because we already
	 * freed all the objects in open context.
	 */
	ka.ds = ds;
	ka.tx = tx;
	VERIFY0(traverse_dataset(ds,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg, TRAVERSE_POST,
	    kill_blkptr, &ka));
	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
	    dsl_dataset_phys(ds)->ds_unique_bytes == 0);
}

int
dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
{
	int error;
	uint64_t count;
	objset_t *mos;

	ASSERT(!ds->ds_is_snapshot);
	if (ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (refcount_count(&ds->ds_longholds) != expected_holds)
		return (SET_ERROR(EBUSY));

	mos = ds->ds_dir->dd_pool->dp_meta_objset;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj == ds->ds_object)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete if there are children of this fs.
	 */
	error = zap_count(mos,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &count);
	if (error != 0)
		return (error);
	if (count != 0)
		return (SET_ERROR(EEXIST));

	if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0) {
		/* We need to remove the origin snapshot as well. */
		if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
			return (SET_ERROR(EBUSY));
	}

	return (0);
}

int
dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
	if (error != 0)
		return (error);

	error = dsl_destroy_head_check_impl(ds, 0);
	dsl_dataset_rele(ds, FTAG);
	return (error);
}
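
/*
 * Tear down the on-disk state of a dsl_dir once its head dataset is
 * gone: fix up the filesystem counts, drop the reservation, destroy the
 * child, props, and delegation zaps, and unlink it from its parent.
 */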
static void
dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	dd_used_t t;

	ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));

	VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));

	ASSERT0(dsl_dir_phys(dd)->dd_head_dataset_obj);

	/*
	 * Decrement the filesystem count for all parent filesystems.
	 *
	 * When we receive an incremental stream into a filesystem that already
	 * exists, a temporary clone is created.  We never count this temporary
	 * clone, whose name begins with a '%'.
	 */
	if (dd->dd_myname[0] != '%' && dd->dd_parent != NULL)
		dsl_fs_ss_count_adjust(dd->dd_parent, -1,
		    DD_FIELD_FILESYSTEM_COUNT, tx);

	/*
	 * Remove our reservation.  The impl() routine avoids setting the
	 * actual property, which would require the (already destroyed) ds.
	 */
	dsl_dir_set_reservation_sync_impl(dd, 0, tx);

	ASSERT0(dsl_dir_phys(dd)->dd_used_bytes);
	ASSERT0(dsl_dir_phys(dd)->dd_reserved);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT0(dsl_dir_phys(dd)->dd_used_breakdown[t]);

	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_child_dir_zapobj, tx));
	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_props_zapobj, tx));
	VERIFY0(dsl_deleg_destroy(mos, dsl_dir_phys(dd)->dd_deleg_zapobj, tx));
	VERIFY0(zap_remove(mos,
	    dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
	    dd->dd_myname, tx));

	dsl_dir_rele(dd, FTAG);
	dmu_object_free_zapified(mos, ddobj, tx);
}
void
dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	uint64_t obj, ddobj, prevobj = 0;
	boolean_t rmorigin;

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
	ASSERT(ds->ds_prev == NULL ||
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj != ds->ds_object);
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
	    DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0);

	/* Remove our reservation. */
	if (ds->ds_reserved != 0) {
		dsl_dataset_set_refreservation_sync_impl(ds,
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    0, tx);
		ASSERT0(ds->ds_reserved);
	}

	obj = ds->ds_object;

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (ds->ds_feature_inuse[f]) {
			dsl_dataset_deactivate_feature(obj, f, tx);
			ds->ds_feature_inuse[f] = B_FALSE;
		}
	}

	dsl_scan_ds_destroyed(ds, tx);

	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		/* This is a clone */
		ASSERT(ds->ds_prev != NULL);
		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj, !=,
		    obj);
		ASSERT0(dsl_dataset_phys(ds)->ds_next_snap_obj);

		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		if (dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds->ds_prev,
			    obj, tx);
		}

		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_num_children, >, 1);
		dsl_dataset_phys(ds->ds_prev)->ds_num_children--;
	}

	/*
	 * Destroy the deadlist.  Unless it's a clone, the
	 * deadlist should be empty since the dataset has no snapshots.
	 * (If it's a clone, it's safe to ignore the deadlist contents
	 * since they are still referenced by the origin snapshot.)
	 */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	if (dsl_dataset_remap_deadlist_exists(ds))
		dsl_dataset_destroy_remap_deadlist(ds, tx);

	objset_t *os;
	VERIFY0(dmu_objset_from_ds(ds, &os));

	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		old_synchronous_dataset_destroy(ds, tx);
	} else {
		/*
		 * Move the bptree into the pool's list of trees to
		 * clean up and update space accounting information.
		 */
		uint64_t used, comp, uncomp;

		zil_destroy_sync(dmu_objset_zil(os), tx);

		if (!spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_ASYNC_DESTROY)) {
			dsl_scan_t *scn = dp->dp_scan;
			spa_feature_incr(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY,
			    tx);
			dp->dp_bptree_obj = bptree_alloc(mos, tx);
			VERIFY0(zap_add(mos,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
			    &dp->dp_bptree_obj, tx));
			ASSERT(!scn->scn_async_destroying);
			scn->scn_async_destroying = B_TRUE;
		}

		used = dsl_dir_phys(ds->ds_dir)->dd_used_bytes;
		comp = dsl_dir_phys(ds->ds_dir)->dd_compressed_bytes;
		uncomp = dsl_dir_phys(ds->ds_dir)->dd_uncompressed_bytes;

		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
		    dsl_dataset_phys(ds)->ds_unique_bytes == used);

		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		bptree_add(mos, dp->dp_bptree_obj,
		    &dsl_dataset_phys(ds)->ds_bp,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    used, comp, uncomp, tx);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    -used, -comp, -uncomp, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    used, comp, uncomp, tx);
	}

	if (ds->ds_prev != NULL) {
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			VERIFY0(zap_remove_int(mos,
			    dsl_dir_phys(ds->ds_prev->ds_dir)->dd_clones,
			    ds->ds_object, tx));
		}
		prevobj = ds->ds_prev->ds_object;
		dsl_dataset_rele(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* Erase the link in the dir */
	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj = 0;
	ddobj = ds->ds_dir->dd_object;
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0);
	VERIFY0(zap_destroy(mos,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, tx));

	if (ds->ds_bookmarks != 0) {
		VERIFY0(zap_destroy(mos, ds->ds_bookmarks, tx));
		spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
	}

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	ASSERT0(dsl_dataset_phys(ds)->ds_next_clones_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_props_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_userrefs_obj);
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);

	dsl_dir_destroy_sync(ddobj, tx);

	if (rmorigin) {
		dsl_dataset_t *prev;

		VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
		dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
		dsl_dataset_rele(prev, FTAG);
	}
}

void
dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
	dsl_destroy_head_sync_impl(ds, tx);
	dsl_dataset_rele(ds, FTAG);
}
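
/*
 * First phase of a destroy on pools without SPA_FEATURE_ASYNC_DESTROY:
 * flag the dataset DS_FLAG_INCONSISTENT on disk so that if we crash
 * while freeing its objects in open context,
 * dsl_destroy_inconsistent() can finish the destroy later.
 */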
static void
dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal_ds(ds, "destroy begin", tx, "");
	dsl_dataset_rele(ds, FTAG);
}

int
dsl_destroy_head(const char *name)
{
	dsl_destroy_head_arg_t ddha;
	int error;
	spa_t *spa;
	boolean_t isenabled;

#ifdef _KERNEL
	zfs_destroy_unmount_origin(name);
#endif

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	isenabled = spa_feature_is_enabled(spa, SPA_FEATURE_ASYNC_DESTROY);
	spa_close(spa, FTAG);

	ddha.ddha_name = name;

	if (!isenabled) {
		objset_t *os;

		error = dsl_sync_task(name, dsl_destroy_head_check,
		    dsl_destroy_head_begin_sync, &ddha,
		    0, ZFS_SPACE_CHECK_NONE);
		if (error != 0)
			return (error);

		/*
		 * Head deletion is processed in one txg on old pools;
		 * remove the objects from open context so that the txg sync
		 * is not too long.
		 */
		error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, FTAG, &os);
		if (error == 0) {
			uint64_t prev_snap_txg =
			    dsl_dataset_phys(dmu_objset_ds(os))->
			    ds_prev_snap_txg;
			for (uint64_t obj = 0; error == 0;
			    error = dmu_object_next(os, &obj, FALSE,
			    prev_snap_txg))
				(void) dmu_free_long_object(os, obj);
			/* sync out all frees */
			txg_wait_synced(dmu_objset_pool(os), 0);
			dmu_objset_disown(os, FTAG);
		}
	}

	return (dsl_sync_task(name, dsl_destroy_head_check,
	    dsl_destroy_head_sync, &ddha, 0, ZFS_SPACE_CHECK_NONE));
}

/*
 * Note, this function is used as the callback for dmu_objset_find().  We
 * always return 0 so that we will continue to find and process
 * inconsistent datasets, even if we encounter an error trying to
 * process one of them.
 */
/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	objset_t *os;

	if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
		boolean_t need_destroy = DS_IS_INCONSISTENT(dmu_objset_ds(os));

		/*
		 * If the dataset is inconsistent because a resumable receive
		 * has failed, then do not destroy it.
		 */
		if (dsl_dataset_has_resume_receive_state(dmu_objset_ds(os)))
			need_destroy = B_FALSE;

		dmu_objset_rele(os, FTAG);
		if (need_destroy)
			(void) dsl_destroy_head(dsname);
	}

	return (0);
}