6905188 panic: kernel heap corruption when doing "zfs rename -r"
unleashed.git: usr/src/uts/common/fs/zfs/dsl_dataset.c
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 #include <sys/dmu_objset.h>
27 #include <sys/dsl_dataset.h>
28 #include <sys/dsl_dir.h>
29 #include <sys/dsl_prop.h>
30 #include <sys/dsl_synctask.h>
31 #include <sys/dmu_traverse.h>
32 #include <sys/dmu_tx.h>
33 #include <sys/arc.h>
34 #include <sys/zio.h>
35 #include <sys/zap.h>
36 #include <sys/unique.h>
37 #include <sys/zfs_context.h>
38 #include <sys/zfs_ioctl.h>
39 #include <sys/spa.h>
40 #include <sys/zfs_znode.h>
41 #include <sys/zvol.h>
43 static char *dsl_reaper = "the grim reaper";
45 static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
46 static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
47 static dsl_syncfunc_t dsl_dataset_set_reservation_sync;
49 #define DS_REF_MAX (1ULL << 62)
51 #define DSL_DEADLIST_BLOCKSIZE SPA_MAXBLOCKSIZE
53 #define DSL_DATASET_IS_DESTROYED(ds) ((ds)->ds_owner == dsl_reaper)
57 * Figure out how much of this delta should be propagated to the dsl_dir
58 * layer. If there's a refreservation, that space has already been
59 * partially accounted for in our ancestors.
61 static int64_t
62 parent_delta(dsl_dataset_t *ds, int64_t delta)
64 uint64_t old_bytes, new_bytes;
66 if (ds->ds_reserved == 0)
67 return (delta);
69 old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
70 new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
72 ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
73 return (new_bytes - old_bytes);
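/*
 * Worked example (hypothetical numbers, not from the source): with
 * ds_reserved = 100, ds_unique_bytes = 40 and delta = +30, both old_bytes
 * and new_bytes clamp to the 100-byte refreservation, so parent_delta()
 * returns 0; that space is already covered by the reservation charged to
 * our ancestors.  With delta = +80, new_bytes = 120 and the function
 * returns 20, the portion that now exceeds the reservation and must be
 * newly charged at the dsl_dir layer.
 */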
76 void
77 dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
79 int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
80 int compressed = BP_GET_PSIZE(bp);
81 int uncompressed = BP_GET_UCSIZE(bp);
82 int64_t delta;
84 dprintf_bp(bp, "born, ds=%p\n", ds);
86 ASSERT(dmu_tx_is_syncing(tx));
87 /* It could have been compressed away to nothing */
88 if (BP_IS_HOLE(bp))
89 return;
90 ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
91 ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
92 if (ds == NULL) {
94 * Account for the meta-objset space in its placeholder
95 * dsl_dir.
97 ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
98 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
99 used, compressed, uncompressed, tx);
100 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
101 return;
103 dmu_buf_will_dirty(ds->ds_dbuf, tx);
104 mutex_enter(&ds->ds_dir->dd_lock);
105 mutex_enter(&ds->ds_lock);
106 delta = parent_delta(ds, used);
107 ds->ds_phys->ds_used_bytes += used;
108 ds->ds_phys->ds_compressed_bytes += compressed;
109 ds->ds_phys->ds_uncompressed_bytes += uncompressed;
110 ds->ds_phys->ds_unique_bytes += used;
111 mutex_exit(&ds->ds_lock);
112 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
113 compressed, uncompressed, tx);
114 dsl_dir_transfer_space(ds->ds_dir, used - delta,
115 DD_USED_REFRSRV, DD_USED_HEAD, tx);
116 mutex_exit(&ds->ds_dir->dd_lock);
120 dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
121 boolean_t async)
123 if (BP_IS_HOLE(bp))
124 return (0);
126 ASSERT(dmu_tx_is_syncing(tx));
127 ASSERT(bp->blk_birth <= tx->tx_txg);
129 int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
130 int compressed = BP_GET_PSIZE(bp);
131 int uncompressed = BP_GET_UCSIZE(bp);
133 ASSERT(used > 0);
134 if (ds == NULL) {
136 * Account for the meta-objset space in its placeholder
137 * dataset.
139 dsl_free(tx->tx_pool, tx->tx_txg, bp);
141 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
142 -used, -compressed, -uncompressed, tx);
143 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
144 return (used);
146 ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
148 ASSERT(!dsl_dataset_is_snapshot(ds));
149 dmu_buf_will_dirty(ds->ds_dbuf, tx);
151 if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
152 int64_t delta;
154 dprintf_bp(bp, "freeing: %s", "");
155 dsl_free(tx->tx_pool, tx->tx_txg, bp);
157 mutex_enter(&ds->ds_dir->dd_lock);
158 mutex_enter(&ds->ds_lock);
159 ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
160 !DS_UNIQUE_IS_ACCURATE(ds));
161 delta = parent_delta(ds, -used);
162 ds->ds_phys->ds_unique_bytes -= used;
163 mutex_exit(&ds->ds_lock);
164 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
165 delta, -compressed, -uncompressed, tx);
166 dsl_dir_transfer_space(ds->ds_dir, -used - delta,
167 DD_USED_REFRSRV, DD_USED_HEAD, tx);
168 mutex_exit(&ds->ds_dir->dd_lock);
169 } else {
170 dprintf_bp(bp, "putting on dead list: %s", "");
171 if (async) {
173 * We are here as part of zio's write done callback,
174 * which means we're a zio interrupt thread. We can't
175 * call bplist_enqueue() now because it may block
176 * waiting for I/O. Instead, put bp on the deferred
177 * queue and let dsl_pool_sync() finish the job.
179 bplist_enqueue_deferred(&ds->ds_deadlist, bp);
180 } else {
181 VERIFY(0 == bplist_enqueue(&ds->ds_deadlist, bp, tx));
183 ASSERT3U(ds->ds_prev->ds_object, ==,
184 ds->ds_phys->ds_prev_snap_obj);
185 ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
186 /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
187 if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
188 ds->ds_object && bp->blk_birth >
189 ds->ds_prev->ds_phys->ds_prev_snap_txg) {
190 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
191 mutex_enter(&ds->ds_prev->ds_lock);
192 ds->ds_prev->ds_phys->ds_unique_bytes += used;
193 mutex_exit(&ds->ds_prev->ds_lock);
195 if (bp->blk_birth > ds->ds_origin_txg) {
196 dsl_dir_transfer_space(ds->ds_dir, used,
197 DD_USED_HEAD, DD_USED_SNAP, tx);
200 mutex_enter(&ds->ds_lock);
201 ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
202 ds->ds_phys->ds_used_bytes -= used;
203 ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
204 ds->ds_phys->ds_compressed_bytes -= compressed;
205 ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
206 ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
207 mutex_exit(&ds->ds_lock);
209 return (used);
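/*
 * In dsl_dataset_block_kill() above, note that both paths subtract the
 * block from ds_used_bytes, ds_compressed_bytes and ds_uncompressed_bytes;
 * only blocks born after the previous snapshot also reduce ds_unique_bytes
 * and are freed immediately, while older blocks are merely moved onto the
 * dataset's deadlist so they can be charged against the snapshots.
 */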
212 uint64_t
213 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
215 uint64_t trysnap = 0;
217 if (ds == NULL)
218 return (0);
220 * The snapshot creation could fail, but that would cause an
221 * incorrect FALSE return, which would only result in an
222 * overestimation of the amount of space that an operation would
223 * consume, which is OK.
225 * There's also a small window where we could miss a pending
226 * snapshot, because we could set the sync task in the quiescing
227 * phase. So this should only be used as a guess.
229 if (ds->ds_trysnap_txg >
230 spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
231 trysnap = ds->ds_trysnap_txg;
232 return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
235 boolean_t
236 dsl_dataset_block_freeable(dsl_dataset_t *ds, uint64_t blk_birth)
238 return (blk_birth > dsl_dataset_prev_snap_txg(ds));
241 /* ARGSUSED */
242 static void
243 dsl_dataset_evict(dmu_buf_t *db, void *dsv)
245 dsl_dataset_t *ds = dsv;
247 ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));
249 unique_remove(ds->ds_fsid_guid);
251 if (ds->ds_objset != NULL)
252 dmu_objset_evict(ds->ds_objset);
254 if (ds->ds_prev) {
255 dsl_dataset_drop_ref(ds->ds_prev, ds);
256 ds->ds_prev = NULL;
259 bplist_close(&ds->ds_deadlist);
260 if (ds->ds_dir)
261 dsl_dir_close(ds->ds_dir, ds);
263 ASSERT(!list_link_active(&ds->ds_synced_link));
265 mutex_destroy(&ds->ds_lock);
266 mutex_destroy(&ds->ds_recvlock);
267 mutex_destroy(&ds->ds_opening_lock);
268 rw_destroy(&ds->ds_rwlock);
269 cv_destroy(&ds->ds_exclusive_cv);
270 bplist_fini(&ds->ds_deadlist);
272 kmem_free(ds, sizeof (dsl_dataset_t));
275 static int
276 dsl_dataset_get_snapname(dsl_dataset_t *ds)
278 dsl_dataset_phys_t *headphys;
279 int err;
280 dmu_buf_t *headdbuf;
281 dsl_pool_t *dp = ds->ds_dir->dd_pool;
282 objset_t *mos = dp->dp_meta_objset;
284 if (ds->ds_snapname[0])
285 return (0);
286 if (ds->ds_phys->ds_next_snap_obj == 0)
287 return (0);
289 err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
290 FTAG, &headdbuf);
291 if (err)
292 return (err);
293 headphys = headdbuf->db_data;
294 err = zap_value_search(dp->dp_meta_objset,
295 headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
296 dmu_buf_rele(headdbuf, FTAG);
297 return (err);
300 static int
301 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
303 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
304 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
305 matchtype_t mt;
306 int err;
308 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
309 mt = MT_FIRST;
310 else
311 mt = MT_EXACT;
313 err = zap_lookup_norm(mos, snapobj, name, 8, 1,
314 value, mt, NULL, 0, NULL);
315 if (err == ENOTSUP && mt == MT_FIRST)
316 err = zap_lookup(mos, snapobj, name, 8, 1, value);
317 return (err);
320 static int
321 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
323 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
324 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
325 matchtype_t mt;
326 int err;
328 dsl_dir_snap_cmtime_update(ds->ds_dir);
330 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
331 mt = MT_FIRST;
332 else
333 mt = MT_EXACT;
335 err = zap_remove_norm(mos, snapobj, name, mt, tx);
336 if (err == ENOTSUP && mt == MT_FIRST)
337 err = zap_remove(mos, snapobj, name, tx);
338 return (err);
341 static int
342 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
343 dsl_dataset_t **dsp)
345 objset_t *mos = dp->dp_meta_objset;
346 dmu_buf_t *dbuf;
347 dsl_dataset_t *ds;
348 int err;
350 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
351 dsl_pool_sync_context(dp));
353 err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
354 if (err)
355 return (err);
356 ds = dmu_buf_get_user(dbuf);
357 if (ds == NULL) {
358 dsl_dataset_t *winner;
360 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
361 ds->ds_dbuf = dbuf;
362 ds->ds_object = dsobj;
363 ds->ds_phys = dbuf->db_data;
365 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
366 mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
367 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
368 rw_init(&ds->ds_rwlock, 0, 0, 0);
369 cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
370 bplist_init(&ds->ds_deadlist);
372 err = bplist_open(&ds->ds_deadlist,
373 mos, ds->ds_phys->ds_deadlist_obj);
374 if (err == 0) {
375 err = dsl_dir_open_obj(dp,
376 ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
378 if (err) {
380 * we don't really need to close the bplist if we
381 * just opened it.
383 mutex_destroy(&ds->ds_lock);
384 mutex_destroy(&ds->ds_recvlock);
385 mutex_destroy(&ds->ds_opening_lock);
386 rw_destroy(&ds->ds_rwlock);
387 cv_destroy(&ds->ds_exclusive_cv);
388 bplist_fini(&ds->ds_deadlist);
389 kmem_free(ds, sizeof (dsl_dataset_t));
390 dmu_buf_rele(dbuf, tag);
391 return (err);
394 if (!dsl_dataset_is_snapshot(ds)) {
395 ds->ds_snapname[0] = '\0';
396 if (ds->ds_phys->ds_prev_snap_obj) {
397 err = dsl_dataset_get_ref(dp,
398 ds->ds_phys->ds_prev_snap_obj,
399 ds, &ds->ds_prev);
402 if (err == 0 && dsl_dir_is_clone(ds->ds_dir)) {
403 dsl_dataset_t *origin;
405 err = dsl_dataset_hold_obj(dp,
406 ds->ds_dir->dd_phys->dd_origin_obj,
407 FTAG, &origin);
408 if (err == 0) {
409 ds->ds_origin_txg =
410 origin->ds_phys->ds_creation_txg;
411 dsl_dataset_rele(origin, FTAG);
414 } else {
415 if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
416 err = dsl_dataset_get_snapname(ds);
417 if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
418 err = zap_count(
419 ds->ds_dir->dd_pool->dp_meta_objset,
420 ds->ds_phys->ds_userrefs_obj,
421 &ds->ds_userrefs);
425 if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
427 * In sync context, we're called with either no lock
428 * or with the write lock. If we're not syncing,
429 * we're always called with the read lock held.
431 boolean_t need_lock =
432 !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
433 dsl_pool_sync_context(dp);
435 if (need_lock)
436 rw_enter(&dp->dp_config_rwlock, RW_READER);
438 err = dsl_prop_get_ds(ds,
439 "refreservation", sizeof (uint64_t), 1,
440 &ds->ds_reserved, NULL);
441 if (err == 0) {
442 err = dsl_prop_get_ds(ds,
443 "refquota", sizeof (uint64_t), 1,
444 &ds->ds_quota, NULL);
447 if (need_lock)
448 rw_exit(&dp->dp_config_rwlock);
449 } else {
450 ds->ds_reserved = ds->ds_quota = 0;
453 if (err == 0) {
454 winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
455 dsl_dataset_evict);
457 if (err || winner) {
458 bplist_close(&ds->ds_deadlist);
459 if (ds->ds_prev)
460 dsl_dataset_drop_ref(ds->ds_prev, ds);
461 dsl_dir_close(ds->ds_dir, ds);
462 mutex_destroy(&ds->ds_lock);
463 mutex_destroy(&ds->ds_recvlock);
464 mutex_destroy(&ds->ds_opening_lock);
465 rw_destroy(&ds->ds_rwlock);
466 cv_destroy(&ds->ds_exclusive_cv);
467 bplist_fini(&ds->ds_deadlist);
468 kmem_free(ds, sizeof (dsl_dataset_t));
469 if (err) {
470 dmu_buf_rele(dbuf, tag);
471 return (err);
473 ds = winner;
474 } else {
475 ds->ds_fsid_guid =
476 unique_insert(ds->ds_phys->ds_fsid_guid);
479 ASSERT3P(ds->ds_dbuf, ==, dbuf);
480 ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
481 ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
482 spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
483 dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
484 mutex_enter(&ds->ds_lock);
485 if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
486 mutex_exit(&ds->ds_lock);
487 dmu_buf_rele(ds->ds_dbuf, tag);
488 return (ENOENT);
490 mutex_exit(&ds->ds_lock);
491 *dsp = ds;
492 return (0);
495 static int
496 dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
498 dsl_pool_t *dp = ds->ds_dir->dd_pool;
501 * In syncing context we don't want the rwlock: there
502 * may be an existing writer waiting for sync phase to
503 * finish. We don't need to worry about such writers, since
504 * sync phase is single-threaded, so the writer can't be
505 * doing anything while we are active.
507 if (dsl_pool_sync_context(dp)) {
508 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
509 return (0);
513 * Normal users will hold the ds_rwlock as a READER until they
514 * are finished (i.e., call dsl_dataset_rele()). "Owners" will
515 * drop their READER lock after they set the ds_owner field.
517 * If the dataset is being destroyed, the destroy thread will
518 * obtain a WRITER lock for exclusive access after it's done its
519 * open-context work and then change the ds_owner to
520 * dsl_reaper once destruction is assured. So threads
521 * may block here temporarily, until the "destructability" of
522 * the dataset is determined.
524 ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
525 mutex_enter(&ds->ds_lock);
526 while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
527 rw_exit(&dp->dp_config_rwlock);
528 cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
529 if (DSL_DATASET_IS_DESTROYED(ds)) {
530 mutex_exit(&ds->ds_lock);
531 dsl_dataset_drop_ref(ds, tag);
532 rw_enter(&dp->dp_config_rwlock, RW_READER);
533 return (ENOENT);
535 rw_enter(&dp->dp_config_rwlock, RW_READER);
537 mutex_exit(&ds->ds_lock);
538 return (0);
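/*
 * Note that dsl_dataset_hold_ref() drops dp_config_rwlock around the
 * cv_wait() and re-takes it as READER before every return, so the
 * caller's locking state is unchanged on both the success path and the
 * ENOENT (dataset destroyed) path.
 */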
542 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
543 dsl_dataset_t **dsp)
545 int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
547 if (err)
548 return (err);
549 return (dsl_dataset_hold_ref(*dsp, tag));
553 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
554 void *tag, dsl_dataset_t **dsp)
556 int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
557 if (err)
558 return (err);
559 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
560 dsl_dataset_rele(*dsp, tag);
561 *dsp = NULL;
562 return (EBUSY);
564 return (0);
568 dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
570 dsl_dir_t *dd;
571 dsl_pool_t *dp;
572 const char *snapname;
573 uint64_t obj;
574 int err = 0;
576 err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
577 if (err)
578 return (err);
580 dp = dd->dd_pool;
581 obj = dd->dd_phys->dd_head_dataset_obj;
582 rw_enter(&dp->dp_config_rwlock, RW_READER);
583 if (obj)
584 err = dsl_dataset_get_ref(dp, obj, tag, dsp);
585 else
586 err = ENOENT;
587 if (err)
588 goto out;
590 err = dsl_dataset_hold_ref(*dsp, tag);
592 /* we may be looking for a snapshot */
593 if (err == 0 && snapname != NULL) {
594 dsl_dataset_t *ds = NULL;
596 if (*snapname++ != '@') {
597 dsl_dataset_rele(*dsp, tag);
598 err = ENOENT;
599 goto out;
602 dprintf("looking for snapshot '%s'\n", snapname);
603 err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
604 if (err == 0)
605 err = dsl_dataset_get_ref(dp, obj, tag, &ds);
606 dsl_dataset_rele(*dsp, tag);
608 ASSERT3U((err == 0), ==, (ds != NULL));
610 if (ds) {
611 mutex_enter(&ds->ds_lock);
612 if (ds->ds_snapname[0] == 0)
613 (void) strlcpy(ds->ds_snapname, snapname,
614 sizeof (ds->ds_snapname));
615 mutex_exit(&ds->ds_lock);
616 err = dsl_dataset_hold_ref(ds, tag);
617 *dsp = err ? NULL : ds;
620 out:
621 rw_exit(&dp->dp_config_rwlock);
622 dsl_dir_close(dd, FTAG);
623 return (err);
627 dsl_dataset_own(const char *name, boolean_t inconsistentok,
628 void *tag, dsl_dataset_t **dsp)
630 int err = dsl_dataset_hold(name, tag, dsp);
631 if (err)
632 return (err);
633 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
634 dsl_dataset_rele(*dsp, tag);
635 return (EBUSY);
637 return (0);
640 void
641 dsl_dataset_name(dsl_dataset_t *ds, char *name)
643 if (ds == NULL) {
644 (void) strcpy(name, "mos");
645 } else {
646 dsl_dir_name(ds->ds_dir, name);
647 VERIFY(0 == dsl_dataset_get_snapname(ds));
648 if (ds->ds_snapname[0]) {
649 (void) strcat(name, "@");
651 * We use a "recursive" mutex so that we
652 * can call dprintf_ds() with ds_lock held.
654 if (!MUTEX_HELD(&ds->ds_lock)) {
655 mutex_enter(&ds->ds_lock);
656 (void) strcat(name, ds->ds_snapname);
657 mutex_exit(&ds->ds_lock);
658 } else {
659 (void) strcat(name, ds->ds_snapname);
665 static int
666 dsl_dataset_namelen(dsl_dataset_t *ds)
668 int result;
670 if (ds == NULL) {
671 result = 3; /* "mos" */
672 } else {
673 result = dsl_dir_namelen(ds->ds_dir);
674 VERIFY(0 == dsl_dataset_get_snapname(ds));
675 if (ds->ds_snapname[0]) {
676 ++result; /* adding one for the @-sign */
677 if (!MUTEX_HELD(&ds->ds_lock)) {
678 mutex_enter(&ds->ds_lock);
679 result += strlen(ds->ds_snapname);
680 mutex_exit(&ds->ds_lock);
681 } else {
682 result += strlen(ds->ds_snapname);
687 return (result);
690 void
691 dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
693 dmu_buf_rele(ds->ds_dbuf, tag);
696 void
697 dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
699 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
700 rw_exit(&ds->ds_rwlock);
702 dsl_dataset_drop_ref(ds, tag);
705 void
706 dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
708 ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
709 (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));
711 mutex_enter(&ds->ds_lock);
712 ds->ds_owner = NULL;
713 if (RW_WRITE_HELD(&ds->ds_rwlock)) {
714 rw_exit(&ds->ds_rwlock);
715 cv_broadcast(&ds->ds_exclusive_cv);
717 mutex_exit(&ds->ds_lock);
718 if (ds->ds_dbuf)
719 dsl_dataset_drop_ref(ds, tag);
720 else
721 dsl_dataset_evict(ds->ds_dbuf, ds);
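/*
 * In the destroyed case above, ds_dbuf has already been released and
 * cleared (see dsl_dataset_drain_refs()), so there is no dbuf hold left
 * to drop; instead the in-memory dsl_dataset_t is torn down directly
 * through dsl_dataset_evict().
 */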
724 boolean_t
725 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
727 boolean_t gotit = FALSE;
729 mutex_enter(&ds->ds_lock);
730 if (ds->ds_owner == NULL &&
731 (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
732 ds->ds_owner = tag;
733 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
734 rw_exit(&ds->ds_rwlock);
735 gotit = TRUE;
737 mutex_exit(&ds->ds_lock);
738 return (gotit);
741 void
742 dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
744 ASSERT3P(owner, ==, ds->ds_owner);
745 if (!RW_WRITE_HELD(&ds->ds_rwlock))
746 rw_enter(&ds->ds_rwlock, RW_WRITER);
749 uint64_t
750 dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
751 uint64_t flags, dmu_tx_t *tx)
753 dsl_pool_t *dp = dd->dd_pool;
754 dmu_buf_t *dbuf;
755 dsl_dataset_phys_t *dsphys;
756 uint64_t dsobj;
757 objset_t *mos = dp->dp_meta_objset;
759 if (origin == NULL)
760 origin = dp->dp_origin_snap;
762 ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
763 ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
764 ASSERT(dmu_tx_is_syncing(tx));
765 ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);
767 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
768 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
769 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
770 dmu_buf_will_dirty(dbuf, tx);
771 dsphys = dbuf->db_data;
772 bzero(dsphys, sizeof (dsl_dataset_phys_t));
773 dsphys->ds_dir_obj = dd->dd_object;
774 dsphys->ds_flags = flags;
775 dsphys->ds_fsid_guid = unique_create();
776 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
777 sizeof (dsphys->ds_guid));
778 dsphys->ds_snapnames_zapobj =
779 zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
780 DMU_OT_NONE, 0, tx);
781 dsphys->ds_creation_time = gethrestime_sec();
782 dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
783 dsphys->ds_deadlist_obj =
784 bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
786 if (origin) {
787 dsphys->ds_prev_snap_obj = origin->ds_object;
788 dsphys->ds_prev_snap_txg =
789 origin->ds_phys->ds_creation_txg;
790 dsphys->ds_used_bytes =
791 origin->ds_phys->ds_used_bytes;
792 dsphys->ds_compressed_bytes =
793 origin->ds_phys->ds_compressed_bytes;
794 dsphys->ds_uncompressed_bytes =
795 origin->ds_phys->ds_uncompressed_bytes;
796 dsphys->ds_bp = origin->ds_phys->ds_bp;
797 dsphys->ds_flags |= origin->ds_phys->ds_flags;
799 dmu_buf_will_dirty(origin->ds_dbuf, tx);
800 origin->ds_phys->ds_num_children++;
802 if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
803 if (origin->ds_phys->ds_next_clones_obj == 0) {
804 origin->ds_phys->ds_next_clones_obj =
805 zap_create(mos,
806 DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
808 VERIFY(0 == zap_add_int(mos,
809 origin->ds_phys->ds_next_clones_obj,
810 dsobj, tx));
813 dmu_buf_will_dirty(dd->dd_dbuf, tx);
814 dd->dd_phys->dd_origin_obj = origin->ds_object;
817 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
818 dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
820 dmu_buf_rele(dbuf, FTAG);
822 dmu_buf_will_dirty(dd->dd_dbuf, tx);
823 dd->dd_phys->dd_head_dataset_obj = dsobj;
825 return (dsobj);
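/*
 * When an origin is supplied (or dp_origin_snap is used), the new dataset
 * is wired up as a clone of it: it inherits the origin's block pointer and
 * byte counts, the origin's ds_num_children is bumped, and on pools at
 * SPA_VERSION_NEXT_CLONES or later the new object is also recorded in the
 * origin's next_clones ZAP.
 */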
828 uint64_t
829 dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
830 dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
832 dsl_pool_t *dp = pdd->dd_pool;
833 uint64_t dsobj, ddobj;
834 dsl_dir_t *dd;
836 ASSERT(lastname[0] != '@');
838 ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
839 VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));
841 dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);
843 dsl_deleg_set_create_perms(dd, tx, cr);
845 dsl_dir_close(dd, FTAG);
847 return (dsobj);
850 struct destroyarg {
851 dsl_sync_task_group_t *dstg;
852 char *snapname;
853 char *failed;
854 boolean_t defer;
857 static int
858 dsl_snapshot_destroy_one(const char *name, void *arg)
860 struct destroyarg *da = arg;
861 dsl_dataset_t *ds;
862 int err;
863 char *dsname;
865 dsname = kmem_asprintf("%s@%s", name, da->snapname);
866 err = dsl_dataset_own(dsname, B_TRUE, da->dstg, &ds);
867 strfree(dsname);
868 if (err == 0) {
869 struct dsl_ds_destroyarg *dsda;
871 dsl_dataset_make_exclusive(ds, da->dstg);
872 if (ds->ds_objset != NULL) {
873 dmu_objset_evict(ds->ds_objset);
874 ds->ds_objset = NULL;
876 dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg), KM_SLEEP);
877 dsda->ds = ds;
878 dsda->defer = da->defer;
879 dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check,
880 dsl_dataset_destroy_sync, dsda, da->dstg, 0);
881 } else if (err == ENOENT) {
882 err = 0;
883 } else {
884 (void) strcpy(da->failed, name);
886 return (err);
890 * Destroy 'snapname' in all descendants of 'fsname'.
892 #pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy
894 dsl_snapshots_destroy(char *fsname, char *snapname, boolean_t defer)
896 int err;
897 struct destroyarg da;
898 dsl_sync_task_t *dst;
899 spa_t *spa;
901 err = spa_open(fsname, &spa, FTAG);
902 if (err)
903 return (err);
904 da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
905 da.snapname = snapname;
906 da.failed = fsname;
907 da.defer = defer;
909 err = dmu_objset_find(fsname,
910 dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN);
912 if (err == 0)
913 err = dsl_sync_task_group_wait(da.dstg);
915 for (dst = list_head(&da.dstg->dstg_tasks); dst;
916 dst = list_next(&da.dstg->dstg_tasks, dst)) {
917 struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
918 dsl_dataset_t *ds = dsda->ds;
921 * Return the file system name that triggered the error
923 if (dst->dst_err) {
924 dsl_dataset_name(ds, fsname);
925 *strchr(fsname, '@') = '\0';
927 ASSERT3P(dsda->rm_origin, ==, NULL);
928 dsl_dataset_disown(ds, da.dstg);
929 kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
932 dsl_sync_task_group_destroy(da.dstg);
933 spa_close(spa, FTAG);
934 return (err);
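/*
 * Usage sketch (hypothetical names): dsl_snapshots_destroy("tank/fs",
 * "snap1", B_FALSE) walks tank/fs and its descendants via dmu_objset_find()
 * with DS_FIND_CHILDREN, creates one destroy sync task per existing
 * "@snap1" snapshot (datasets without it are skipped via the ENOENT case),
 * and runs them as a single sync task group; on failure, 'fsname' is
 * overwritten with the name of the dataset that triggered the error.
 */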
937 static boolean_t
938 dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
940 boolean_t might_destroy = B_FALSE;
942 mutex_enter(&ds->ds_lock);
943 if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
944 DS_IS_DEFER_DESTROY(ds))
945 might_destroy = B_TRUE;
946 mutex_exit(&ds->ds_lock);
948 return (might_destroy);
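/*
 * ds_num_children on a snapshot counts the next snapshot (or head) in its
 * chain plus any clones, so a count of exactly 2 here presumably means the
 * origin has a single clone: the one currently being destroyed.
 */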
952 * If we're removing a clone, and these three conditions are true:
953 * 1) the clone's origin has no other children
954 * 2) the clone's origin has no user references
955 * 3) the clone's origin has been marked for deferred destruction
956 * Then, prepare to remove the origin as part of this sync task group.
958 static int
959 dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
961 dsl_dataset_t *ds = dsda->ds;
962 dsl_dataset_t *origin = ds->ds_prev;
964 if (dsl_dataset_might_destroy_origin(origin)) {
965 char *name;
966 int namelen;
967 int error;
969 namelen = dsl_dataset_namelen(origin) + 1;
970 name = kmem_alloc(namelen, KM_SLEEP);
971 dsl_dataset_name(origin, name);
972 #ifdef _KERNEL
973 error = zfs_unmount_snap(name, NULL);
974 if (error) {
975 kmem_free(name, namelen);
976 return (error);
978 #endif
979 error = dsl_dataset_own(name, B_TRUE, tag, &origin);
980 kmem_free(name, namelen);
981 if (error)
982 return (error);
983 dsda->rm_origin = origin;
984 dsl_dataset_make_exclusive(origin, tag);
986 if (origin->ds_objset != NULL) {
987 dmu_objset_evict(origin->ds_objset);
988 origin->ds_objset = NULL;
992 return (0);
996 * ds must be opened as OWNER. On return (whether successful or not),
997 * ds will be closed and caller can no longer dereference it.
1000 dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
1002 int err;
1003 dsl_sync_task_group_t *dstg;
1004 objset_t *os;
1005 dsl_dir_t *dd;
1006 uint64_t obj;
1007 struct dsl_ds_destroyarg dsda = { 0 };
1008 dsl_dataset_t dummy_ds = { 0 };
1010 dsda.ds = ds;
1012 if (dsl_dataset_is_snapshot(ds)) {
1013 /* Destroying a snapshot is simpler */
1014 dsl_dataset_make_exclusive(ds, tag);
1016 if (ds->ds_objset != NULL) {
1017 dmu_objset_evict(ds->ds_objset);
1018 ds->ds_objset = NULL;
1020 dsda.defer = defer;
1021 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1022 dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
1023 &dsda, tag, 0);
1024 ASSERT3P(dsda.rm_origin, ==, NULL);
1025 goto out;
1026 } else if (defer) {
1027 err = EINVAL;
1028 goto out;
1031 dd = ds->ds_dir;
1032 dummy_ds.ds_dir = dd;
1033 dummy_ds.ds_object = ds->ds_object;
1036 * Check for errors and mark this ds as inconsistent, in
1037 * case we crash while freeing the objects.
1039 err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
1040 dsl_dataset_destroy_begin_sync, ds, NULL, 0);
1041 if (err)
1042 goto out;
1044 err = dmu_objset_from_ds(ds, &os);
1045 if (err)
1046 goto out;
1049 * remove the objects in open context, so that we won't
1050 * have too much to do in syncing context.
1052 for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
1053 ds->ds_phys->ds_prev_snap_txg)) {
1055 * Ignore errors, if there is not enough disk space
1056 * we will deal with it in dsl_dataset_destroy_sync().
1058 (void) dmu_free_object(os, obj);
1062 * We need to sync out all in-flight IO before we try to evict
1063 * (the dataset evict func is trying to clear the cached entries
1064 * for this dataset in the ARC).
1066 txg_wait_synced(dd->dd_pool, 0);
1069 * If we managed to free all the objects in open
1070 * context, the user space accounting should be zero.
1072 if (ds->ds_phys->ds_bp.blk_fill == 0 &&
1073 dmu_objset_userused_enabled(os)) {
1074 uint64_t count;
1076 ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 ||
1077 count == 0);
1078 ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 ||
1079 count == 0);
1082 if (err != ESRCH)
1083 goto out;
1085 rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
1086 err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
1087 rw_exit(&dd->dd_pool->dp_config_rwlock);
1089 if (err)
1090 goto out;
1092 if (ds->ds_objset) {
1094 * We need to sync out all in-flight IO before we try
1095 * to evict (the dataset evict func is trying to clear
1096 * the cached entries for this dataset in the ARC).
1098 txg_wait_synced(dd->dd_pool, 0);
1102 * Blow away the dsl_dir + head dataset.
1104 dsl_dataset_make_exclusive(ds, tag);
1105 if (ds->ds_objset) {
1106 dmu_objset_evict(ds->ds_objset);
1107 ds->ds_objset = NULL;
1111 * If we're removing a clone, we might also need to remove its
1112 * origin.
1114 do {
1115 dsda.need_prep = B_FALSE;
1116 if (dsl_dir_is_clone(dd)) {
1117 err = dsl_dataset_origin_rm_prep(&dsda, tag);
1118 if (err) {
1119 dsl_dir_close(dd, FTAG);
1120 goto out;
1124 dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
1125 dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
1126 dsl_dataset_destroy_sync, &dsda, tag, 0);
1127 dsl_sync_task_create(dstg, dsl_dir_destroy_check,
1128 dsl_dir_destroy_sync, &dummy_ds, FTAG, 0);
1129 err = dsl_sync_task_group_wait(dstg);
1130 dsl_sync_task_group_destroy(dstg);
1133 * We could be racing against 'zfs release' or 'zfs destroy -d'
1134 * on the origin snap, in which case we can get EBUSY if we
1135 * needed to destroy the origin snap but were not ready to
1136 * do so.
1138 if (dsda.need_prep) {
1139 ASSERT(err == EBUSY);
1140 ASSERT(dsl_dir_is_clone(dd));
1141 ASSERT(dsda.rm_origin == NULL);
1143 } while (dsda.need_prep);
1145 if (dsda.rm_origin != NULL)
1146 dsl_dataset_disown(dsda.rm_origin, tag);
1148 /* if it is successful, dsl_dir_destroy_sync will close the dd */
1149 if (err)
1150 dsl_dir_close(dd, FTAG);
1151 out:
1152 dsl_dataset_disown(ds, tag);
1153 return (err);
1156 blkptr_t *
1157 dsl_dataset_get_blkptr(dsl_dataset_t *ds)
1159 return (&ds->ds_phys->ds_bp);
1162 void
1163 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1165 ASSERT(dmu_tx_is_syncing(tx));
1166 /* If it's the meta-objset, set dp_meta_rootbp */
1167 if (ds == NULL) {
1168 tx->tx_pool->dp_meta_rootbp = *bp;
1169 } else {
1170 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1171 ds->ds_phys->ds_bp = *bp;
1175 spa_t *
1176 dsl_dataset_get_spa(dsl_dataset_t *ds)
1178 return (ds->ds_dir->dd_pool->dp_spa);
1181 void
1182 dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
1184 dsl_pool_t *dp;
1186 if (ds == NULL) /* this is the meta-objset */
1187 return;
1189 ASSERT(ds->ds_objset != NULL);
1191 if (ds->ds_phys->ds_next_snap_obj != 0)
1192 panic("dirtying snapshot!");
1194 dp = ds->ds_dir->dd_pool;
1196 if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
1197 /* up the hold count until we can be written out */
1198 dmu_buf_add_ref(ds->ds_dbuf, ds);
1203 * The unique space in the head dataset can be calculated by subtracting
1204 * the portion of the most recent snapshot's space that is still being
1205 * used by this file system from the space currently in use. To figure out
1206 * the space in the most recent snapshot still in use, we need to take
1207 * the total space used in the snapshot and subtract out the space that
1208 * has been freed up since the snapshot was taken.
1210 static void
1211 dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
1213 uint64_t mrs_used;
1214 uint64_t dlused, dlcomp, dluncomp;
1216 ASSERT(ds->ds_object == ds->ds_dir->dd_phys->dd_head_dataset_obj);
1218 if (ds->ds_phys->ds_prev_snap_obj != 0)
1219 mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
1220 else
1221 mrs_used = 0;
1223 VERIFY(0 == bplist_space(&ds->ds_deadlist, &dlused, &dlcomp,
1224 &dluncomp));
1226 ASSERT3U(dlused, <=, mrs_used);
1227 ds->ds_phys->ds_unique_bytes =
1228 ds->ds_phys->ds_used_bytes - (mrs_used - dlused);
1230 if (!DS_UNIQUE_IS_ACCURATE(ds) &&
1231 spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1232 SPA_VERSION_UNIQUE_ACCURATE)
1233 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
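/*
 * Worked example (hypothetical numbers): if the head references 10G
 * (ds_used_bytes), the most recent snapshot referenced 8G when it was
 * taken, and 3G of that has since been freed onto the head's deadlist
 * (dlused), then the snapshot still shares 8G - 3G = 5G with the head,
 * leaving 10G - 5G = 5G unique to the head.
 */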
1236 static uint64_t
1237 dsl_dataset_unique(dsl_dataset_t *ds)
1239 if (!DS_UNIQUE_IS_ACCURATE(ds) && !dsl_dataset_is_snapshot(ds))
1240 dsl_dataset_recalc_head_uniq(ds);
1242 return (ds->ds_phys->ds_unique_bytes);
1245 struct killarg {
1246 dsl_dataset_t *ds;
1247 dmu_tx_t *tx;
1250 /* ARGSUSED */
1251 static int
1252 kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
1253 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
1255 struct killarg *ka = arg;
1256 dmu_tx_t *tx = ka->tx;
1258 if (bp == NULL)
1259 return (0);
1261 if (zb->zb_level == ZB_ZIL_LEVEL) {
1262 ASSERT(zilog != NULL);
1264 * It's a block in the intent log. It has no
1265 * accounting, so just free it.
1267 dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
1268 } else {
1269 ASSERT(zilog == NULL);
1270 ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
1271 (void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
1274 return (0);
1277 /* ARGSUSED */
1278 static int
1279 dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
1281 dsl_dataset_t *ds = arg1;
1282 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1283 uint64_t count;
1284 int err;
1287 * Can't delete a head dataset if there are snapshots of it.
1288 * (Except if the only snapshots are from the branch we cloned
1289 * from.)
1291 if (ds->ds_prev != NULL &&
1292 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1293 return (EBUSY);
1296 * This is really a dsl_dir thing, but check it here so that
1297 * we'll be less likely to leave this dataset inconsistent &
1298 * nearly destroyed.
1300 err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
1301 if (err)
1302 return (err);
1303 if (count != 0)
1304 return (EEXIST);
1306 return (0);
1309 /* ARGSUSED */
1310 static void
1311 dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
1313 dsl_dataset_t *ds = arg1;
1314 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1316 /* Mark it as inconsistent on-disk, in case we crash */
1317 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1318 ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
1320 spa_history_internal_log(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
1321 cr, "dataset = %llu", ds->ds_object);
1324 static int
1325 dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
1326 dmu_tx_t *tx)
1328 dsl_dataset_t *ds = dsda->ds;
1329 dsl_dataset_t *ds_prev = ds->ds_prev;
1331 if (dsl_dataset_might_destroy_origin(ds_prev)) {
1332 struct dsl_ds_destroyarg ndsda = {0};
1335 * If we're not prepared to remove the origin, don't remove
1336 * the clone either.
1338 if (dsda->rm_origin == NULL) {
1339 dsda->need_prep = B_TRUE;
1340 return (EBUSY);
1343 ndsda.ds = ds_prev;
1344 ndsda.is_origin_rm = B_TRUE;
1345 return (dsl_dataset_destroy_check(&ndsda, tag, tx));
1349 * If we're not going to remove the origin after all,
1350 * undo the open context setup.
1352 if (dsda->rm_origin != NULL) {
1353 dsl_dataset_disown(dsda->rm_origin, tag);
1354 dsda->rm_origin = NULL;
1357 return (0);
1360 /* ARGSUSED */
1362 dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
1364 struct dsl_ds_destroyarg *dsda = arg1;
1365 dsl_dataset_t *ds = dsda->ds;
1367 /* we have an owner hold, so no one else can destroy us */
1368 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
1371 * Only allow deferred destroy on pools that support it.
1372 * NOTE: deferred destroy is only supported on snapshots.
1374 if (dsda->defer) {
1375 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
1376 SPA_VERSION_USERREFS)
1377 return (ENOTSUP);
1378 ASSERT(dsl_dataset_is_snapshot(ds));
1379 return (0);
1383 * Can't delete a head dataset if there are snapshots of it.
1384 * (Except if the only snapshots are from the branch we cloned
1385 * from.)
1387 if (ds->ds_prev != NULL &&
1388 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1389 return (EBUSY);
1392 * If we made changes this txg, traverse_dsl_dataset won't find
1393 * them. Try again.
1395 if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
1396 return (EAGAIN);
1398 if (dsl_dataset_is_snapshot(ds)) {
1400 * If this snapshot has an elevated user reference count,
1401 * we can't destroy it yet.
1403 if (ds->ds_userrefs > 0 && !dsda->releasing)
1404 return (EBUSY);
1406 mutex_enter(&ds->ds_lock);
1408 * Can't delete a branch point. However, if we're destroying
1409 * a clone and removing its origin due to it having a user
1410 * hold count of 0 and having been marked for deferred destroy,
1411 * it's OK for the origin to have a single clone.
1413 if (ds->ds_phys->ds_num_children >
1414 (dsda->is_origin_rm ? 2 : 1)) {
1415 mutex_exit(&ds->ds_lock);
1416 return (EEXIST);
1418 mutex_exit(&ds->ds_lock);
1419 } else if (dsl_dir_is_clone(ds->ds_dir)) {
1420 return (dsl_dataset_origin_check(dsda, arg2, tx));
1423 /* XXX we should do some i/o error checking... */
1424 return (0);
1427 struct refsarg {
1428 kmutex_t lock;
1429 boolean_t gone;
1430 kcondvar_t cv;
1433 /* ARGSUSED */
1434 static void
1435 dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
1437 struct refsarg *arg = argv;
1439 mutex_enter(&arg->lock);
1440 arg->gone = TRUE;
1441 cv_signal(&arg->cv);
1442 mutex_exit(&arg->lock);
1445 static void
1446 dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
1448 struct refsarg arg;
1450 mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
1451 cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
1452 arg.gone = FALSE;
1453 (void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
1454 dsl_dataset_refs_gone);
1455 dmu_buf_rele(ds->ds_dbuf, tag);
1456 mutex_enter(&arg.lock);
1457 while (!arg.gone)
1458 cv_wait(&arg.cv, &arg.lock);
1459 ASSERT(arg.gone);
1460 mutex_exit(&arg.lock);
1461 ds->ds_dbuf = NULL;
1462 ds->ds_phys = NULL;
1463 mutex_destroy(&arg.lock);
1464 cv_destroy(&arg.cv);
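/*
 * dsl_dataset_drain_refs() replaces the dbuf's user record with the local
 * refsarg and drops its own hold; once the remaining holds are released
 * and the user data is evicted, dsl_dataset_refs_gone() sets arg.gone and
 * signals the cv, guaranteeing that no other thread still references
 * ds_phys before the dataset object is freed.
 */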
1467 static void
1468 remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
1470 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1471 uint64_t count;
1472 int err;
1474 ASSERT(ds->ds_phys->ds_num_children >= 2);
1475 err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
1477 * The err should not be ENOENT, but a bug in a previous version
1478 * of the code could cause upgrade_clones_cb() to not set
1479 * ds_next_snap_obj when it should, leading to a missing entry.
1480 * If we knew that the pool was created after
1481 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
1482 * ENOENT. However, at least we can check that we don't have
1483 * too many entries in the next_clones_obj even after failing to
1484 * remove this one.
1486 if (err != ENOENT) {
1487 VERIFY3U(err, ==, 0);
1489 ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
1490 &count));
1491 ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
1494 void
1495 dsl_dataset_destroy_sync(void *arg1, void *tag, cred_t *cr, dmu_tx_t *tx)
1497 struct dsl_ds_destroyarg *dsda = arg1;
1498 dsl_dataset_t *ds = dsda->ds;
1499 int err;
1500 int after_branch_point = FALSE;
1501 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1502 objset_t *mos = dp->dp_meta_objset;
1503 dsl_dataset_t *ds_prev = NULL;
1504 uint64_t obj;
1506 ASSERT(ds->ds_owner);
1507 ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
1508 ASSERT(ds->ds_prev == NULL ||
1509 ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
1510 ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
1512 if (dsda->defer) {
1513 ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1514 if (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1) {
1515 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1516 ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
1517 return;
1521 /* signal any waiters that this dataset is going away */
1522 mutex_enter(&ds->ds_lock);
1523 ds->ds_owner = dsl_reaper;
1524 cv_broadcast(&ds->ds_exclusive_cv);
1525 mutex_exit(&ds->ds_lock);
1527 /* Remove our reservation */
1528 if (ds->ds_reserved != 0) {
1529 dsl_prop_setarg_t psa;
1530 uint64_t value = 0;
1532 dsl_prop_setarg_init_uint64(&psa, "refreservation",
1533 (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
1534 &value);
1535 psa.psa_effective_value = 0; /* predict default value */
1537 dsl_dataset_set_reservation_sync(ds, &psa, cr, tx);
1538 ASSERT3U(ds->ds_reserved, ==, 0);
1541 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1543 dsl_pool_ds_destroyed(ds, tx);
1545 obj = ds->ds_object;
1547 if (ds->ds_phys->ds_prev_snap_obj != 0) {
1548 if (ds->ds_prev) {
1549 ds_prev = ds->ds_prev;
1550 } else {
1551 VERIFY(0 == dsl_dataset_hold_obj(dp,
1552 ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
1554 after_branch_point =
1555 (ds_prev->ds_phys->ds_next_snap_obj != obj);
1557 dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
1558 if (after_branch_point &&
1559 ds_prev->ds_phys->ds_next_clones_obj != 0) {
1560 remove_from_next_clones(ds_prev, obj, tx);
1561 if (ds->ds_phys->ds_next_snap_obj != 0) {
1562 VERIFY(0 == zap_add_int(mos,
1563 ds_prev->ds_phys->ds_next_clones_obj,
1564 ds->ds_phys->ds_next_snap_obj, tx));
1567 if (after_branch_point &&
1568 ds->ds_phys->ds_next_snap_obj == 0) {
1569 /* This clone is toast. */
1570 ASSERT(ds_prev->ds_phys->ds_num_children > 1);
1571 ds_prev->ds_phys->ds_num_children--;
1574 * If the clone's origin has no other clones, no
1575 * user holds, and has been marked for deferred
1576 * deletion, then we should have done the necessary
1577 * destroy setup for it.
1579 if (ds_prev->ds_phys->ds_num_children == 1 &&
1580 ds_prev->ds_userrefs == 0 &&
1581 DS_IS_DEFER_DESTROY(ds_prev)) {
1582 ASSERT3P(dsda->rm_origin, !=, NULL);
1583 } else {
1584 ASSERT3P(dsda->rm_origin, ==, NULL);
1586 } else if (!after_branch_point) {
1587 ds_prev->ds_phys->ds_next_snap_obj =
1588 ds->ds_phys->ds_next_snap_obj;
1592 if (ds->ds_phys->ds_next_snap_obj != 0) {
1593 blkptr_t bp;
1594 dsl_dataset_t *ds_next;
1595 uint64_t itor = 0;
1596 uint64_t old_unique;
1597 int64_t used = 0, compressed = 0, uncompressed = 0;
1599 VERIFY(0 == dsl_dataset_hold_obj(dp,
1600 ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
1601 ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
1603 old_unique = dsl_dataset_unique(ds_next);
1605 dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
1606 ds_next->ds_phys->ds_prev_snap_obj =
1607 ds->ds_phys->ds_prev_snap_obj;
1608 ds_next->ds_phys->ds_prev_snap_txg =
1609 ds->ds_phys->ds_prev_snap_txg;
1610 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1611 ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
1614 * Transfer to our deadlist (which will become next's
1615 * new deadlist) any entries from next's current
1616 * deadlist which were born before prev, and free the
1617 * other entries.
1619 * XXX we're doing this long task with the config lock held
1621 while (bplist_iterate(&ds_next->ds_deadlist, &itor, &bp) == 0) {
1622 if (bp.blk_birth <= ds->ds_phys->ds_prev_snap_txg) {
1623 VERIFY(0 == bplist_enqueue(&ds->ds_deadlist,
1624 &bp, tx));
1625 if (ds_prev && !after_branch_point &&
1626 bp.blk_birth >
1627 ds_prev->ds_phys->ds_prev_snap_txg) {
1628 ds_prev->ds_phys->ds_unique_bytes +=
1629 bp_get_dsize_sync(dp->dp_spa, &bp);
1631 } else {
1632 used += bp_get_dsize_sync(dp->dp_spa, &bp);
1633 compressed += BP_GET_PSIZE(&bp);
1634 uncompressed += BP_GET_UCSIZE(&bp);
1635 dsl_free(dp, tx->tx_txg, &bp);
1639 ASSERT3U(used, ==, ds->ds_phys->ds_unique_bytes);
1641 /* change snapused */
1642 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1643 -used, -compressed, -uncompressed, tx);
1645 /* free next's deadlist */
1646 bplist_close(&ds_next->ds_deadlist);
1647 bplist_destroy(mos, ds_next->ds_phys->ds_deadlist_obj, tx);
1649 /* set next's deadlist to our deadlist */
1650 bplist_close(&ds->ds_deadlist);
1651 ds_next->ds_phys->ds_deadlist_obj =
1652 ds->ds_phys->ds_deadlist_obj;
1653 VERIFY(0 == bplist_open(&ds_next->ds_deadlist, mos,
1654 ds_next->ds_phys->ds_deadlist_obj));
1655 ds->ds_phys->ds_deadlist_obj = 0;
1657 if (ds_next->ds_phys->ds_next_snap_obj != 0) {
1659 * Update next's unique to include blocks which
1660 * were previously shared by only this snapshot
1661 * and it. Those blocks will be born after the
1662 * prev snap and before this snap, and will have
1663 * died after the next snap and before the one
1664 * after that (i.e. be on the snap after next's
1665 * deadlist).
1667 * XXX we're doing this long task with the
1668 * config lock held
1670 dsl_dataset_t *ds_after_next;
1671 uint64_t space;
1673 VERIFY(0 == dsl_dataset_hold_obj(dp,
1674 ds_next->ds_phys->ds_next_snap_obj,
1675 FTAG, &ds_after_next));
1677 VERIFY(0 ==
1678 bplist_space_birthrange(&ds_after_next->ds_deadlist,
1679 ds->ds_phys->ds_prev_snap_txg,
1680 ds->ds_phys->ds_creation_txg, &space));
1681 ds_next->ds_phys->ds_unique_bytes += space;
1683 dsl_dataset_rele(ds_after_next, FTAG);
1684 ASSERT3P(ds_next->ds_prev, ==, NULL);
1685 } else {
1686 ASSERT3P(ds_next->ds_prev, ==, ds);
1687 dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
1688 ds_next->ds_prev = NULL;
1689 if (ds_prev) {
1690 VERIFY(0 == dsl_dataset_get_ref(dp,
1691 ds->ds_phys->ds_prev_snap_obj,
1692 ds_next, &ds_next->ds_prev));
1695 dsl_dataset_recalc_head_uniq(ds_next);
1698 * Reduce the amount of our unconsumed refreservation
1699 * being charged to our parent by the amount of
1700 * new unique data we have gained.
1702 if (old_unique < ds_next->ds_reserved) {
1703 int64_t mrsdelta;
1704 uint64_t new_unique =
1705 ds_next->ds_phys->ds_unique_bytes;
1707 ASSERT(old_unique <= new_unique);
1708 mrsdelta = MIN(new_unique - old_unique,
1709 ds_next->ds_reserved - old_unique);
1710 dsl_dir_diduse_space(ds->ds_dir,
1711 DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
1714 dsl_dataset_rele(ds_next, FTAG);
1715 } else {
1717 * There's no next snapshot, so this is a head dataset.
1718 * Destroy the deadlist. Unless it's a clone, the
1719 * deadlist should be empty. (If it's a clone, it's
1720 * safe to ignore the deadlist contents.)
1722 struct killarg ka;
1724 ASSERT(after_branch_point || bplist_empty(&ds->ds_deadlist));
1725 bplist_close(&ds->ds_deadlist);
1726 bplist_destroy(mos, ds->ds_phys->ds_deadlist_obj, tx);
1727 ds->ds_phys->ds_deadlist_obj = 0;
1730 * Free everything that we point to (that's born after
1731 * the previous snapshot, if we are a clone)
1733 * NB: this should be very quick, because we already
1734 * freed all the objects in open context.
1736 ka.ds = ds;
1737 ka.tx = tx;
1738 err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
1739 TRAVERSE_POST, kill_blkptr, &ka);
1740 ASSERT3U(err, ==, 0);
1741 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
1742 ds->ds_phys->ds_unique_bytes == 0);
1744 if (ds->ds_prev != NULL) {
1745 dsl_dataset_rele(ds->ds_prev, ds);
1746 ds->ds_prev = ds_prev = NULL;
1750 if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
1751 /* Erase the link in the dir */
1752 dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1753 ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
1754 ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
1755 err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
1756 ASSERT(err == 0);
1757 } else {
1758 /* remove from snapshot namespace */
1759 dsl_dataset_t *ds_head;
1760 ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
1761 VERIFY(0 == dsl_dataset_hold_obj(dp,
1762 ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
1763 VERIFY(0 == dsl_dataset_get_snapname(ds));
1764 #ifdef ZFS_DEBUG
1766 uint64_t val;
1768 err = dsl_dataset_snap_lookup(ds_head,
1769 ds->ds_snapname, &val);
1770 ASSERT3U(err, ==, 0);
1771 ASSERT3U(val, ==, obj);
1773 #endif
1774 err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
1775 ASSERT(err == 0);
1776 dsl_dataset_rele(ds_head, FTAG);
1779 if (ds_prev && ds->ds_prev != ds_prev)
1780 dsl_dataset_rele(ds_prev, FTAG);
1782 spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
1783 spa_history_internal_log(LOG_DS_DESTROY, dp->dp_spa, tx,
1784 cr, "dataset = %llu", ds->ds_object);
1786 if (ds->ds_phys->ds_next_clones_obj != 0) {
1787 uint64_t count;
1788 ASSERT(0 == zap_count(mos,
1789 ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
1790 VERIFY(0 == dmu_object_free(mos,
1791 ds->ds_phys->ds_next_clones_obj, tx));
1793 if (ds->ds_phys->ds_props_obj != 0)
1794 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
1795 if (ds->ds_phys->ds_userrefs_obj != 0)
1796 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
1797 dsl_dir_close(ds->ds_dir, ds);
1798 ds->ds_dir = NULL;
1799 dsl_dataset_drain_refs(ds, tag);
1800 VERIFY(0 == dmu_object_free(mos, obj, tx));
1802 if (dsda->rm_origin) {
1804 * Remove the origin of the clone we just destroyed.
1806 struct dsl_ds_destroyarg ndsda = {0};
1808 ndsda.ds = dsda->rm_origin;
1809 dsl_dataset_destroy_sync(&ndsda, tag, cr, tx);
1813 static int
1814 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
1816 uint64_t asize;
1818 if (!dmu_tx_is_syncing(tx))
1819 return (0);
1822 * If there's an fs-only reservation, any blocks that might become
1823 * owned by the snapshot dataset must be accommodated by space
1824 * outside of the reservation.
1826 asize = MIN(dsl_dataset_unique(ds), ds->ds_reserved);
1827 if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, FALSE))
1828 return (ENOSPC);
1831 * Propagate any reserved space for this snapshot to other
1832 * snapshot checks in this sync group.
1834 if (asize > 0)
1835 dsl_dir_willuse_space(ds->ds_dir, asize, tx);
1837 return (0);
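/*
 * Example (hypothetical numbers): with 2G of unique data and a 5G
 * refreservation, taking a snapshot moves that 2G out from under the
 * reservation, so asize = MIN(2G, 5G) = 2G of additional space must be
 * available outside the reservation or the snapshot fails with ENOSPC.
 */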
1840 /* ARGSUSED */
1842 dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
1844 dsl_dataset_t *ds = arg1;
1845 const char *snapname = arg2;
1846 int err;
1847 uint64_t value;
1850 * We don't allow multiple snapshots of the same txg. If there
1851 * is already one, try again.
1853 if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
1854 return (EAGAIN);
1857 * Check for a conflicting snapshot name.
1859 err = dsl_dataset_snap_lookup(ds, snapname, &value);
1860 if (err == 0)
1861 return (EEXIST);
1862 if (err != ENOENT)
1863 return (err);
1866 * Check that the snapshot's full name is not too long. It consists of
1867 * the dataset name's length + 1 for the @-sign + the snapshot name's length.
1869 if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
1870 return (ENAMETOOLONG);
1872 err = dsl_dataset_snapshot_reserve_space(ds, tx);
1873 if (err)
1874 return (err);
1876 ds->ds_trysnap_txg = tx->tx_txg;
1877 return (0);
1880 void
1881 dsl_dataset_snapshot_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
1883 dsl_dataset_t *ds = arg1;
1884 const char *snapname = arg2;
1885 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1886 dmu_buf_t *dbuf;
1887 dsl_dataset_phys_t *dsphys;
1888 uint64_t dsobj, crtxg;
1889 objset_t *mos = dp->dp_meta_objset;
1890 int err;
1892 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1895 * The origin's ds_creation_txg has to be < TXG_INITIAL
1897 if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
1898 crtxg = 1;
1899 else
1900 crtxg = tx->tx_txg;
1902 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
1903 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
1904 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
1905 dmu_buf_will_dirty(dbuf, tx);
1906 dsphys = dbuf->db_data;
1907 bzero(dsphys, sizeof (dsl_dataset_phys_t));
1908 dsphys->ds_dir_obj = ds->ds_dir->dd_object;
1909 dsphys->ds_fsid_guid = unique_create();
1910 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
1911 sizeof (dsphys->ds_guid));
1912 dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
1913 dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
1914 dsphys->ds_next_snap_obj = ds->ds_object;
1915 dsphys->ds_num_children = 1;
1916 dsphys->ds_creation_time = gethrestime_sec();
1917 dsphys->ds_creation_txg = crtxg;
1918 dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
1919 dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
1920 dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
1921 dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
1922 dsphys->ds_flags = ds->ds_phys->ds_flags;
1923 dsphys->ds_bp = ds->ds_phys->ds_bp;
1924 dmu_buf_rele(dbuf, FTAG);
1926 ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
1927 if (ds->ds_prev) {
1928 uint64_t next_clones_obj =
1929 ds->ds_prev->ds_phys->ds_next_clones_obj;
1930 ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
1931 ds->ds_object ||
1932 ds->ds_prev->ds_phys->ds_num_children > 1);
1933 if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
1934 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1935 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1936 ds->ds_prev->ds_phys->ds_creation_txg);
1937 ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
1938 } else if (next_clones_obj != 0) {
1939 remove_from_next_clones(ds->ds_prev,
1940 dsphys->ds_next_snap_obj, tx);
1941 VERIFY3U(0, ==, zap_add_int(mos,
1942 next_clones_obj, dsobj, tx));
1947 * If we have a reference-reservation on this dataset, we will
1948 * need to increase the amount of refreservation being charged
1949 * since our unique space is going to zero.
1951 if (ds->ds_reserved) {
1952 int64_t add = MIN(dsl_dataset_unique(ds), ds->ds_reserved);
1953 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
1954 add, 0, 0, tx);
1957 bplist_close(&ds->ds_deadlist);
1958 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1959 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
1960 ds->ds_phys->ds_prev_snap_obj = dsobj;
1961 ds->ds_phys->ds_prev_snap_txg = crtxg;
1962 ds->ds_phys->ds_unique_bytes = 0;
1963 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
1964 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1965 ds->ds_phys->ds_deadlist_obj =
1966 bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
1967 VERIFY(0 == bplist_open(&ds->ds_deadlist, mos,
1968 ds->ds_phys->ds_deadlist_obj));
1970 dprintf("snap '%s' -> obj %llu\n", snapname, dsobj);
1971 err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
1972 snapname, 8, 1, &dsobj, tx);
1973 ASSERT(err == 0);
1975 if (ds->ds_prev)
1976 dsl_dataset_drop_ref(ds->ds_prev, ds);
1977 VERIFY(0 == dsl_dataset_get_ref(dp,
1978 ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
1980 dsl_pool_ds_snapshotted(ds, tx);
1982 dsl_dir_snap_cmtime_update(ds->ds_dir);
1984 spa_history_internal_log(LOG_DS_SNAPSHOT, dp->dp_spa, tx, cr,
1985 "dataset = %llu", dsobj);
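/*
 * To summarize dsl_dataset_snapshot_sync(): the new snapshot object takes
 * over the head's current block pointer, byte counts and deadlist; it is
 * linked in between the previous snapshot and the head; and the head
 * continues with a fresh, empty deadlist and ds_unique_bytes reset to zero.
 */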
1988 void
1989 dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
1991 ASSERT(dmu_tx_is_syncing(tx));
1992 ASSERT(ds->ds_objset != NULL);
1993 ASSERT(ds->ds_phys->ds_next_snap_obj == 0);
1996 * in case we had to change ds_fsid_guid when we opened it,
1997 * sync it out now.
1999 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2000 ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;
2002 dsl_dir_dirty(ds->ds_dir, tx);
2003 dmu_objset_sync(ds->ds_objset, zio, tx);
2006 void
2007 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
2009 uint64_t refd, avail, uobjs, aobjs;
2011 dsl_dir_stats(ds->ds_dir, nv);
2013 dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
2014 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
2015 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);
2017 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
2018 ds->ds_phys->ds_creation_time);
2019 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
2020 ds->ds_phys->ds_creation_txg);
2021 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
2022 ds->ds_quota);
2023 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
2024 ds->ds_reserved);
2025 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
2026 ds->ds_phys->ds_guid);
2027 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
2028 dsl_dataset_unique(ds));
2029 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
2030 ds->ds_object);
2031 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
2032 ds->ds_userrefs);
2033 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
2034 DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
2036 if (ds->ds_phys->ds_next_snap_obj) {
2038 * This is a snapshot; override the dd's space used with
2039 * our unique space and compression ratio.
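 *
 * The ratio below is expressed as a percentage; e.g. (hypothetical
 * numbers) 200MB uncompressed over 80MB compressed gives
 * 200 * 100 / 80 = 250, i.e. a 2.50x compression ratio.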
2041 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
2042 ds->ds_phys->ds_unique_bytes);
2043 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
2044 ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
2045 (ds->ds_phys->ds_uncompressed_bytes * 100 /
2046 ds->ds_phys->ds_compressed_bytes));
2050 void
2051 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
2053 stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
2054 stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
2055 stat->dds_guid = ds->ds_phys->ds_guid;
2056 if (ds->ds_phys->ds_next_snap_obj) {
2057 stat->dds_is_snapshot = B_TRUE;
2058 stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
2059 } else {
2060 stat->dds_is_snapshot = B_FALSE;
2061 stat->dds_num_clones = 0;
2064 /* clone origin is really a dsl_dir thing... */
2065 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2066 if (dsl_dir_is_clone(ds->ds_dir)) {
2067 dsl_dataset_t *ods;
2069 VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
2070 ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
2071 dsl_dataset_name(ods, stat->dds_origin);
2072 dsl_dataset_drop_ref(ods, FTAG);
2073 } else {
2074 stat->dds_origin[0] = '\0';
2076 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2079 uint64_t
2080 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2082 return (ds->ds_fsid_guid);
2085 void
2086 dsl_dataset_space(dsl_dataset_t *ds,
2087 uint64_t *refdbytesp, uint64_t *availbytesp,
2088 uint64_t *usedobjsp, uint64_t *availobjsp)
2090 *refdbytesp = ds->ds_phys->ds_used_bytes;
2091 *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
2092 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
2093 *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
2094 if (ds->ds_quota != 0) {
2096 * Adjust available bytes according to refquota
2098 if (*refdbytesp < ds->ds_quota)
2099 *availbytesp = MIN(*availbytesp,
2100 ds->ds_quota - *refdbytesp);
2101 else
2102 *availbytesp = 0;
2104 *usedobjsp = ds->ds_phys->ds_bp.blk_fill;
2105 *availobjsp = DN_MAX_OBJECT - *usedobjsp;
2108 boolean_t
2109 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2111 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2113 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2114 dsl_pool_sync_context(dp));
2115 if (ds->ds_prev == NULL)
2116 return (B_FALSE);
2117 if (ds->ds_phys->ds_bp.blk_birth >
2118 ds->ds_prev->ds_phys->ds_creation_txg)
2119 return (B_TRUE);
2120 return (B_FALSE);
2123 /* ARGSUSED */
2124 static int
2125 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2127 dsl_dataset_t *ds = arg1;
2128 char *newsnapname = arg2;
2129 dsl_dir_t *dd = ds->ds_dir;
2130 dsl_dataset_t *hds;
2131 uint64_t val;
2132 int err;
2134 err = dsl_dataset_hold_obj(dd->dd_pool,
2135 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2136 if (err)
2137 return (err);
2139 /* new name better not be in use */
2140 err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2141 dsl_dataset_rele(hds, FTAG);
2143 if (err == 0)
2144 err = EEXIST;
2145 else if (err == ENOENT)
2146 err = 0;
2148 /* dataset name + 1 for the "@" + the new snapshot name must fit */
2149 if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2150 err = ENAMETOOLONG;
2152 return (err);
2155 static void
2156 dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2,
2157 cred_t *cr, dmu_tx_t *tx)
2159 dsl_dataset_t *ds = arg1;
2160 const char *newsnapname = arg2;
2161 dsl_dir_t *dd = ds->ds_dir;
2162 objset_t *mos = dd->dd_pool->dp_meta_objset;
2163 dsl_dataset_t *hds;
2164 int err;
2166 ASSERT(ds->ds_phys->ds_next_snap_obj != 0);
2168 VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
2169 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));
2171 VERIFY(0 == dsl_dataset_get_snapname(ds));
2172 err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
2173 ASSERT3U(err, ==, 0);
2174 mutex_enter(&ds->ds_lock);
2175 (void) strcpy(ds->ds_snapname, newsnapname);
2176 mutex_exit(&ds->ds_lock);
2177 err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
2178 ds->ds_snapname, 8, 1, &ds->ds_object, tx);
2179 ASSERT3U(err, ==, 0);
2181 spa_history_internal_log(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
2182 cr, "dataset = %llu", ds->ds_object);
2183 dsl_dataset_rele(hds, FTAG);
2186 struct renamesnaparg {
2187 dsl_sync_task_group_t *dstg;
2188 char failed[MAXPATHLEN];
2189 char *oldsnap;
2190 char *newsnap;
2193 static int
2194 dsl_snapshot_rename_one(const char *name, void *arg)
2196 struct renamesnaparg *ra = arg;
2197 dsl_dataset_t *ds = NULL;
2198 char *snapname;
2199 int err;
2201 snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
2202 (void) strlcpy(ra->failed, snapname, sizeof (ra->failed));
2205 * For recursive snapshot renames the parent won't be changing,
2206 * so we just pass name for both the to/from arguments.
2208 err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
2209 if (err != 0) {
2210 strfree(snapname);
2211 return (err == ENOENT ? 0 : err);
2214 #ifdef _KERNEL
2216 * For all filesystems undergoing rename, we'll need to unmount them.
2218 (void) zfs_unmount_snap(snapname, NULL);
2219 #endif
2220 err = dsl_dataset_hold(snapname, ra->dstg, &ds);
2221 if (err != 0) {
2222 strfree(snapname);
2223 return (err == ENOENT ? 0 : err);
2226 dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
2227 dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
2229 strfree(snapname);
2230 return (0);
2233 static int
2234 dsl_recursive_rename(char *oldname, const char *newname)
2236 int err;
2237 struct renamesnaparg *ra;
2238 dsl_sync_task_t *dst;
2239 spa_t *spa;
2240 char *cp, *fsname = spa_strdup(oldname);
2241 int len = strlen(oldname) + 1;
2243 /* truncate the snapshot name to get the fsname */
2244 cp = strchr(fsname, '@');
2245 *cp = '\0';
2247 err = spa_open(fsname, &spa, FTAG);
2248 if (err) {
2249 kmem_free(fsname, len);
2250 return (err);
2252 ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
2253 ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
2255 ra->oldsnap = strchr(oldname, '@') + 1;
2256 ra->newsnap = strchr(newname, '@') + 1;
2257 *ra->failed = '\0';
2259 err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
2260 DS_FIND_CHILDREN);
2261 kmem_free(fsname, len);
2263 if (err == 0) {
2264 err = dsl_sync_task_group_wait(ra->dstg);
2267 for (dst = list_head(&ra->dstg->dstg_tasks); dst;
2268 dst = list_next(&ra->dstg->dstg_tasks, dst)) {
2269 dsl_dataset_t *ds = dst->dst_arg1;
2270 if (dst->dst_err) {
2271 dsl_dir_name(ds->ds_dir, ra->failed);
2272 (void) strlcat(ra->failed, "@", sizeof (ra->failed));
2273 (void) strlcat(ra->failed, ra->newsnap,
2274 sizeof (ra->failed));
2276 dsl_dataset_rele(ds, ra->dstg);
2279 if (err)
2280 (void) strlcpy(oldname, ra->failed, sizeof (ra->failed));
2282 dsl_sync_task_group_destroy(ra->dstg);
2283 kmem_free(ra, sizeof (struct renamesnaparg));
2284 spa_close(spa, FTAG);
2285 return (err);
2288 static int
2289 dsl_valid_rename(const char *oldname, void *arg)
2291 int delta = *(int *)arg;
2293 if (strlen(oldname) + delta >= MAXNAMELEN)
2294 return (ENAMETOOLONG);
2296 return (0);
2299 #pragma weak dmu_objset_rename = dsl_dataset_rename
2301 dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
2303 dsl_dir_t *dd;
2304 dsl_dataset_t *ds;
2305 const char *tail;
2306 int err;
2308 err = dsl_dir_open(oldname, FTAG, &dd, &tail);
2309 if (err)
2310 return (err);
2312 * If there are more than 2 references, there may be holds
2313 * hanging around that haven't been cleared out yet.
2315 if (dmu_buf_refcount(dd->dd_dbuf) > 2)
2316 txg_wait_synced(dd->dd_pool, 0);
2317 if (tail == NULL) {
2318 int delta = strlen(newname) - strlen(oldname);
2320 /* if we're growing, validate child name lengths */
2321 if (delta > 0)
2322 err = dmu_objset_find(oldname, dsl_valid_rename,
2323 &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2325 if (!err)
2326 err = dsl_dir_rename(dd, newname);
2327 dsl_dir_close(dd, FTAG);
2328 return (err);
2330 if (tail[0] != '@') {
2331 /* the name ended in a nonexistent component */
2332 dsl_dir_close(dd, FTAG);
2333 return (ENOENT);
2336 dsl_dir_close(dd, FTAG);
2338 /* new name must be a snapshot in the same filesystem */
2339 tail = strchr(newname, '@');
2340 if (tail == NULL)
2341 return (EINVAL);
2342 tail++;
2343 if (strncmp(oldname, newname, tail - newname) != 0)
2344 return (EXDEV);
2346 if (recursive) {
2347 err = dsl_recursive_rename(oldname, newname);
2348 } else {
2349 err = dsl_dataset_hold(oldname, FTAG, &ds);
2350 if (err)
2351 return (err);
2353 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2354 dsl_dataset_snapshot_rename_check,
2355 dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
2357 dsl_dataset_rele(ds, FTAG);
2360 return (err);
2363 struct promotenode {
2364 list_node_t link;
2365 dsl_dataset_t *ds;
2368 struct promotearg {
2369 list_t shared_snaps, origin_snaps, clone_snaps;
2370 dsl_dataset_t *origin_origin, *origin_head;
2371 uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
2372 char *err_ds;
2375 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2377 /* ARGSUSED */
2378 static int
2379 dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
2381 dsl_dataset_t *hds = arg1;
2382 struct promotearg *pa = arg2;
2383 struct promotenode *snap = list_head(&pa->shared_snaps);
2384 dsl_dataset_t *origin_ds = snap->ds;
2385 int err;
2387 /* Check that it is a real clone */
2388 if (!dsl_dir_is_clone(hds->ds_dir))
2389 return (EINVAL);
2391 /* Since this is so expensive, don't do the preliminary check */
2392 if (!dmu_tx_is_syncing(tx))
2393 return (0);
2395 if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
2396 return (EXDEV);
2398 /* compute origin's new unique space */
2399 snap = list_tail(&pa->clone_snaps);
2400 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2401 err = bplist_space_birthrange(&snap->ds->ds_deadlist,
2402 origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX, &pa->unique);
2403 if (err)
2404 return (err);
2407 * Walk the snapshots that we are moving
2409 * Compute space to transfer. Consider the incremental changes
2410 * to used for each snapshot:
2411 * (my used) = (prev's used) + (blocks born) - (blocks killed)
2412 * So each snapshot gave birth to:
2413 * (blocks born) = (my used) - (prev's used) + (blocks killed)
2414 * So a sequence would look like:
2415 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
2416 * Which simplifies to:
2417 * uN + kN + kN-1 + ... + k1 + k0
2418 * Note however, if we stop before we reach the ORIGIN we get:
2419 * uN + kN + kN-1 + ... + kM - uM-1
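 *
 * Worked example (editor's sketch, hypothetical numbers): three
 * shared snapshots with used u0=1G, u1=3G, u2=4G and killed
 * k0=0, k1=2G, k2=1G.  Summing the per-snapshot births gives
 * (4-3+1) + (3-1+2) + (1-0+0) = 2 + 4 + 1 = 7G, which telescopes
 * to u2 + k2 + k1 + k0 = 4 + 1 + 2 + 0 = 7G.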
2421 pa->used = origin_ds->ds_phys->ds_used_bytes;
2422 pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
2423 pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
2424 for (snap = list_head(&pa->shared_snaps); snap;
2425 snap = list_next(&pa->shared_snaps, snap)) {
2426 uint64_t val, dlused, dlcomp, dluncomp;
2427 dsl_dataset_t *ds = snap->ds;
2429 /* Check that the snapshot name does not conflict */
2430 VERIFY(0 == dsl_dataset_get_snapname(ds));
2431 err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
2432 if (err == 0) {
2433 err = EEXIST;
2434 goto out;
2436 if (err != ENOENT)
2437 goto out;
2439 /* The very first snapshot does not have a deadlist */
2440 if (ds->ds_phys->ds_prev_snap_obj == 0)
2441 continue;
2443 if (err = bplist_space(&ds->ds_deadlist,
2444 &dlused, &dlcomp, &dluncomp))
2445 goto out;
2446 pa->used += dlused;
2447 pa->comp += dlcomp;
2448 pa->uncomp += dluncomp;
2452 * If we are a clone of a clone then we never reached ORIGIN,
2453 * so we need to subtract out the clone origin's used space.
2455 if (pa->origin_origin) {
2456 pa->used -= pa->origin_origin->ds_phys->ds_used_bytes;
2457 pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
2458 pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
2461 /* Check that there is enough space here */
2462 err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2463 pa->used);
2464 if (err)
2465 return (err);
2468 * Compute the amounts of space that will be used by snapshots
2469 * after the promotion (for both origin and clone). For each,
2470 * it is the amount of space that will be on all of their
2471 * deadlists (that was not born before their new origin).
2473 if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2474 uint64_t space;
2477 * Note, typically this will not be a clone of a clone,
2478 * so snap->ds->ds_origin_txg will be < TXG_INITIAL, so
2479 * these snaplist_space() -> bplist_space_birthrange()
2480 * calls will be fast because they do not have to
2481 * iterate over all bps.
2483 snap = list_head(&pa->origin_snaps);
2484 err = snaplist_space(&pa->shared_snaps,
2485 snap->ds->ds_origin_txg, &pa->cloneusedsnap);
2486 if (err)
2487 return (err);
2489 err = snaplist_space(&pa->clone_snaps,
2490 snap->ds->ds_origin_txg, &space);
2491 if (err)
2492 return (err);
2493 pa->cloneusedsnap += space;
2495 if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2496 err = snaplist_space(&pa->origin_snaps,
2497 origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
2498 if (err)
2499 return (err);
2502 return (0);
2503 out:
2504 pa->err_ds = snap->ds->ds_snapname;
2505 return (err);
2508 static void
2509 dsl_dataset_promote_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
2511 dsl_dataset_t *hds = arg1;
2512 struct promotearg *pa = arg2;
2513 struct promotenode *snap = list_head(&pa->shared_snaps);
2514 dsl_dataset_t *origin_ds = snap->ds;
2515 dsl_dataset_t *origin_head;
2516 dsl_dir_t *dd = hds->ds_dir;
2517 dsl_pool_t *dp = hds->ds_dir->dd_pool;
2518 dsl_dir_t *odd = NULL;
2519 uint64_t oldnext_obj;
2520 int64_t delta;
2522 ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));
2524 snap = list_head(&pa->origin_snaps);
2525 origin_head = snap->ds;
2528 * We need to explicitly open odd, since origin_ds's dd will be
2529 * changing.
2531 VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
2532 NULL, FTAG, &odd));
2534 /* change origin's next snap */
2535 dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
2536 oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
2537 snap = list_tail(&pa->clone_snaps);
2538 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2539 origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;
2541 /* change the origin's next clone */
2542 if (origin_ds->ds_phys->ds_next_clones_obj) {
2543 remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
2544 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2545 origin_ds->ds_phys->ds_next_clones_obj,
2546 oldnext_obj, tx));
2549 /* change origin */
2550 dmu_buf_will_dirty(dd->dd_dbuf, tx);
2551 ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
2552 dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
2553 hds->ds_origin_txg = origin_head->ds_origin_txg;
2554 dmu_buf_will_dirty(odd->dd_dbuf, tx);
2555 odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
2556 origin_head->ds_origin_txg = origin_ds->ds_phys->ds_creation_txg;
2558 /* move snapshots to this dir */
2559 for (snap = list_head(&pa->shared_snaps); snap;
2560 snap = list_next(&pa->shared_snaps, snap)) {
2561 dsl_dataset_t *ds = snap->ds;
2563 /* unregister props as dsl_dir is changing */
2564 if (ds->ds_objset) {
2565 dmu_objset_evict(ds->ds_objset);
2566 ds->ds_objset = NULL;
2568 /* move snap name entry */
2569 VERIFY(0 == dsl_dataset_get_snapname(ds));
2570 VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2571 ds->ds_snapname, tx));
2572 VERIFY(0 == zap_add(dp->dp_meta_objset,
2573 hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2574 8, 1, &ds->ds_object, tx));
2575 /* change containing dsl_dir */
2576 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2577 ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2578 ds->ds_phys->ds_dir_obj = dd->dd_object;
2579 ASSERT3P(ds->ds_dir, ==, odd);
2580 dsl_dir_close(ds->ds_dir, ds);
2581 VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2582 NULL, ds, &ds->ds_dir));
2584 ASSERT3U(dsl_prop_numcb(ds), ==, 0);
2588 * Change space accounting.
2589 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
2590 * both be valid, or both be 0 (resulting in delta == 0). This
2591 * is true for each of {clone,origin} independently.
2594 delta = pa->cloneusedsnap -
2595 dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2596 ASSERT3S(delta, >=, 0);
2597 ASSERT3U(pa->used, >=, delta);
2598 dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
2599 dsl_dir_diduse_space(dd, DD_USED_HEAD,
2600 pa->used - delta, pa->comp, pa->uncomp, tx);
2602 delta = pa->originusedsnap -
2603 odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2604 ASSERT3S(delta, <=, 0);
2605 ASSERT3U(pa->used, >=, -delta);
2606 dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
2607 dsl_dir_diduse_space(odd, DD_USED_HEAD,
2608 -pa->used - delta, -pa->comp, -pa->uncomp, tx);
2610 origin_ds->ds_phys->ds_unique_bytes = pa->unique;
2612 /* log history record */
2613 spa_history_internal_log(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
2614 cr, "dataset = %llu", hds->ds_object);
2616 dsl_dir_close(odd, FTAG);
2619 static char *snaplist_tag = "snaplist";
2621 * Make a list of dsl_dataset_t's for the snapshots between first_obj
2622 * (exclusive) and last_obj (inclusive). The list will be in reverse
2623 * order (last_obj will be the list_head()). If first_obj == 0, do all
2624 * snapshots back to this dataset's origin.
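 *
 * For example (editor's sketch): given the snapshot chain
 * origin -> s1 -> s2 -> s3, calling snaplist_make() with
 * first_obj = s1 and last_obj = s3 builds the list [s3, s2], with
 * s3 at the list_head(); s1 itself is excluded because first_obj
 * is exclusive.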
2626 static int
2627 snaplist_make(dsl_pool_t *dp, boolean_t own,
2628 uint64_t first_obj, uint64_t last_obj, list_t *l)
2630 uint64_t obj = last_obj;
2632 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));
2634 list_create(l, sizeof (struct promotenode),
2635 offsetof(struct promotenode, link));
2637 while (obj != first_obj) {
2638 dsl_dataset_t *ds;
2639 struct promotenode *snap;
2640 int err;
2642 if (own) {
2643 err = dsl_dataset_own_obj(dp, obj,
2644 0, snaplist_tag, &ds);
2645 if (err == 0)
2646 dsl_dataset_make_exclusive(ds, snaplist_tag);
2647 } else {
2648 err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
2650 if (err == ENOENT) {
2651 /* lost race with snapshot destroy */
2652 struct promotenode *last = list_tail(l);
2653 ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
2654 obj = last->ds->ds_phys->ds_prev_snap_obj;
2655 continue;
2656 } else if (err) {
2657 return (err);
2660 if (first_obj == 0)
2661 first_obj = ds->ds_dir->dd_phys->dd_origin_obj;
2663 snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
2664 snap->ds = ds;
2665 list_insert_tail(l, snap);
2666 obj = ds->ds_phys->ds_prev_snap_obj;
2669 return (0);
2672 static int
2673 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
2675 struct promotenode *snap;
2677 *spacep = 0;
2678 for (snap = list_head(l); snap; snap = list_next(l, snap)) {
2679 uint64_t used;
2680 int err = bplist_space_birthrange(&snap->ds->ds_deadlist,
2681 mintxg, UINT64_MAX, &used);
2682 if (err)
2683 return (err);
2684 *spacep += used;
2686 return (0);
2689 static void
2690 snaplist_destroy(list_t *l, boolean_t own)
2692 struct promotenode *snap;
2694 if (!l || !list_link_active(&l->list_head))
2695 return;
2697 while ((snap = list_tail(l)) != NULL) {
2698 list_remove(l, snap);
2699 if (own)
2700 dsl_dataset_disown(snap->ds, snaplist_tag);
2701 else
2702 dsl_dataset_rele(snap->ds, snaplist_tag);
2703 kmem_free(snap, sizeof (struct promotenode));
2705 list_destroy(l);
2709 * Promote a clone. Nomenclature note:
2710 * "clone" or "cds": the original clone which is being promoted
2711 * "origin" or "ods": the snapshot which is originally clone's origin
2712 * "origin head" or "ohds": the dataset which is the head
2713 * (filesystem/volume) for the origin
2714 * "origin origin": the origin of the origin's filesystem (typically
2715 * NULL, indicating that the clone is not a clone of a clone).
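 *
 * Hypothetical example (editor's sketch): if pool/clone was created
 * from pool/fs@snap, then promoting pool/clone makes cds = pool/clone,
 * ods = pool/fs@snap and ohds = pool/fs.  Afterwards @snap (and any
 * older snapshots) belong to pool/clone, and pool/fs becomes a clone
 * of pool/clone@snap.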
2718 dsl_dataset_promote(const char *name, char *conflsnap)
2720 dsl_dataset_t *ds;
2721 dsl_dir_t *dd;
2722 dsl_pool_t *dp;
2723 dmu_object_info_t doi;
2724 struct promotearg pa = { 0 };
2725 struct promotenode *snap;
2726 int err;
2728 err = dsl_dataset_hold(name, FTAG, &ds);
2729 if (err)
2730 return (err);
2731 dd = ds->ds_dir;
2732 dp = dd->dd_pool;
2734 err = dmu_object_info(dp->dp_meta_objset,
2735 ds->ds_phys->ds_snapnames_zapobj, &doi);
2736 if (err) {
2737 dsl_dataset_rele(ds, FTAG);
2738 return (err);
2741 if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
2742 dsl_dataset_rele(ds, FTAG);
2743 return (EINVAL);
2747 * We are going to inherit all the snapshots taken before our
2748 * origin (i.e., our new origin will be our parent's origin).
2749 * Take ownership of them so that we can rename them into our
2750 * namespace.
2752 rw_enter(&dp->dp_config_rwlock, RW_READER);
2754 err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
2755 &pa.shared_snaps);
2756 if (err != 0)
2757 goto out;
2759 err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
2760 if (err != 0)
2761 goto out;
2763 snap = list_head(&pa.shared_snaps);
2764 ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
2765 err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
2766 snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
2767 if (err != 0)
2768 goto out;
2770 if (dsl_dir_is_clone(snap->ds->ds_dir)) {
2771 err = dsl_dataset_own_obj(dp,
2772 snap->ds->ds_dir->dd_phys->dd_origin_obj,
2773 0, FTAG, &pa.origin_origin);
2774 if (err != 0)
2775 goto out;
2778 out:
2779 rw_exit(&dp->dp_config_rwlock);
2782 * Add in 128x the snapnames zapobj size, since we will be moving
2783 * a bunch of snapnames to the promoted ds, and dirtying their
2784 * bonus buffers.
2786 if (err == 0) {
2787 err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
2788 dsl_dataset_promote_sync, ds, &pa,
2789 2 + 2 * doi.doi_physical_blocks_512);
2790 if (err && pa.err_ds && conflsnap)
2791 (void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
2794 snaplist_destroy(&pa.shared_snaps, B_TRUE);
2795 snaplist_destroy(&pa.clone_snaps, B_FALSE);
2796 snaplist_destroy(&pa.origin_snaps, B_FALSE);
2797 if (pa.origin_origin)
2798 dsl_dataset_disown(pa.origin_origin, FTAG);
2799 dsl_dataset_rele(ds, FTAG);
2800 return (err);
2803 struct cloneswaparg {
2804 dsl_dataset_t *cds; /* clone dataset */
2805 dsl_dataset_t *ohds; /* origin's head dataset */
2806 boolean_t force;
2807 int64_t unused_refres_delta; /* change in unconsumed refreservation */
2810 /* ARGSUSED */
2811 static int
2812 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
2814 struct cloneswaparg *csa = arg1;
2816 /* they should both be heads */
2817 if (dsl_dataset_is_snapshot(csa->cds) ||
2818 dsl_dataset_is_snapshot(csa->ohds))
2819 return (EINVAL);
2821 /* the branch point should be just before them */
2822 if (csa->cds->ds_prev != csa->ohds->ds_prev)
2823 return (EINVAL);
2825 /* cds should be the clone (unless they are unrelated) */
2826 if (csa->cds->ds_prev != NULL &&
2827 csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
2828 csa->ohds->ds_object !=
2829 csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
2830 return (EINVAL);
2832 /* the clone should be a child of the origin */
2833 if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
2834 return (EINVAL);
2836 /* ohds shouldn't be modified unless 'force' */
2837 if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
2838 return (ETXTBSY);
2840 /* adjust amount of any unconsumed refreservation */
2841 csa->unused_refres_delta =
2842 (int64_t)MIN(csa->ohds->ds_reserved,
2843 csa->ohds->ds_phys->ds_unique_bytes) -
2844 (int64_t)MIN(csa->ohds->ds_reserved,
2845 csa->cds->ds_phys->ds_unique_bytes);
2847 if (csa->unused_refres_delta > 0 &&
2848 csa->unused_refres_delta >
2849 dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
2850 return (ENOSPC);
2852 if (csa->ohds->ds_quota != 0 &&
2853 csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
2854 return (EDQUOT);
2856 return (0);
2859 /* ARGSUSED */
2860 static void
2861 dsl_dataset_clone_swap_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
2863 struct cloneswaparg *csa = arg1;
2864 dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;
2866 ASSERT(csa->cds->ds_reserved == 0);
2867 ASSERT(csa->ohds->ds_quota == 0 ||
2868 csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);
2870 dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
2871 dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
2873 if (csa->cds->ds_objset != NULL) {
2874 dmu_objset_evict(csa->cds->ds_objset);
2875 csa->cds->ds_objset = NULL;
2878 if (csa->ohds->ds_objset != NULL) {
2879 dmu_objset_evict(csa->ohds->ds_objset);
2880 csa->ohds->ds_objset = NULL;
2884 * Reset origin's unique bytes, if it exists.
2886 if (csa->cds->ds_prev) {
2887 dsl_dataset_t *origin = csa->cds->ds_prev;
2888 dmu_buf_will_dirty(origin->ds_dbuf, tx);
2889 VERIFY(0 == bplist_space_birthrange(&csa->cds->ds_deadlist,
2890 origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
2891 &origin->ds_phys->ds_unique_bytes));
2894 /* swap blkptrs */
2896 blkptr_t tmp;
2897 tmp = csa->ohds->ds_phys->ds_bp;
2898 csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
2899 csa->cds->ds_phys->ds_bp = tmp;
2902 /* set dd_*_bytes */
2904 int64_t dused, dcomp, duncomp;
2905 uint64_t cdl_used, cdl_comp, cdl_uncomp;
2906 uint64_t odl_used, odl_comp, odl_uncomp;
2908 ASSERT3U(csa->cds->ds_dir->dd_phys->
2909 dd_used_breakdown[DD_USED_SNAP], ==, 0);
2911 VERIFY(0 == bplist_space(&csa->cds->ds_deadlist, &cdl_used,
2912 &cdl_comp, &cdl_uncomp));
2913 VERIFY(0 == bplist_space(&csa->ohds->ds_deadlist, &odl_used,
2914 &odl_comp, &odl_uncomp));
2916 dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
2917 (csa->ohds->ds_phys->ds_used_bytes + odl_used);
2918 dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
2919 (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
2920 duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
2921 cdl_uncomp -
2922 (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);
2924 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
2925 dused, dcomp, duncomp, tx);
2926 dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
2927 -dused, -dcomp, -duncomp, tx);
2930 * The difference in the space used by snapshots is the
2931 * difference in snapshot space due to the head's
2932 * deadlist (since that's the only thing that's
2933 * changing that affects the snapused).
2935 VERIFY(0 == bplist_space_birthrange(&csa->cds->ds_deadlist,
2936 csa->ohds->ds_origin_txg, UINT64_MAX, &cdl_used));
2937 VERIFY(0 == bplist_space_birthrange(&csa->ohds->ds_deadlist,
2938 csa->ohds->ds_origin_txg, UINT64_MAX, &odl_used));
2939 dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
2940 DD_USED_HEAD, DD_USED_SNAP, tx);
2943 #define SWITCH64(x, y) \
2945 uint64_t __tmp = (x); \
2946 (x) = (y); \
2947 (y) = __tmp; \
2950 /* swap ds_*_bytes */
2951 SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
2952 csa->cds->ds_phys->ds_used_bytes);
2953 SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
2954 csa->cds->ds_phys->ds_compressed_bytes);
2955 SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
2956 csa->cds->ds_phys->ds_uncompressed_bytes);
2957 SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
2958 csa->cds->ds_phys->ds_unique_bytes);
2960 /* apply any parent delta for change in unconsumed refreservation */
2961 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
2962 csa->unused_refres_delta, 0, 0, tx);
2964 /* swap deadlists */
2965 bplist_close(&csa->cds->ds_deadlist);
2966 bplist_close(&csa->ohds->ds_deadlist);
2967 SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
2968 csa->cds->ds_phys->ds_deadlist_obj);
2969 VERIFY(0 == bplist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
2970 csa->cds->ds_phys->ds_deadlist_obj));
2971 VERIFY(0 == bplist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
2972 csa->ohds->ds_phys->ds_deadlist_obj));
2974 dsl_pool_ds_clone_swapped(csa->ohds, csa->cds, tx);
2978 * Swap 'clone' with its origin head dataset. Used at the end of "zfs
2979 * recv" into an existing fs to swizzle the file system to the new
2980 * version, and by "zfs rollback". Can also be used to swap two
2981 * independent head datasets if neither has any snapshots.
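 *
 * Usage sketch (editor's note): both datasets must already be owned
 * (see the ASSERTs below); a caller then simply does
 *	error = dsl_dataset_clone_swap(clone, origin_head, force);
 * and the swap runs as a single sync task.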
2984 dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
2985 boolean_t force)
2987 struct cloneswaparg csa;
2988 int error;
2990 ASSERT(clone->ds_owner);
2991 ASSERT(origin_head->ds_owner);
2992 retry:
2993 /* Need exclusive access for the swap */
2994 rw_enter(&clone->ds_rwlock, RW_WRITER);
2995 if (!rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
2996 rw_exit(&clone->ds_rwlock);
2997 rw_enter(&origin_head->ds_rwlock, RW_WRITER);
2998 if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
2999 rw_exit(&origin_head->ds_rwlock);
3000 goto retry;
3003 csa.cds = clone;
3004 csa.ohds = origin_head;
3005 csa.force = force;
3006 error = dsl_sync_task_do(clone->ds_dir->dd_pool,
3007 dsl_dataset_clone_swap_check,
3008 dsl_dataset_clone_swap_sync, &csa, NULL, 9);
3009 return (error);
3013 * Given a pool name and a dataset object number in that pool,
3014 * return the name of that dataset.
3017 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
3019 spa_t *spa;
3020 dsl_pool_t *dp;
3021 dsl_dataset_t *ds;
3022 int error;
3024 if ((error = spa_open(pname, &spa, FTAG)) != 0)
3025 return (error);
3026 dp = spa_get_dsl(spa);
3027 rw_enter(&dp->dp_config_rwlock, RW_READER);
3028 if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
3029 dsl_dataset_name(ds, buf);
3030 dsl_dataset_rele(ds, FTAG);
3032 rw_exit(&dp->dp_config_rwlock);
3033 spa_close(spa, FTAG);
3035 return (error);
3039 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
3040 uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
3042 int error = 0;
3044 ASSERT3S(asize, >, 0);
3047 * *ref_rsrv is the portion of asize that will come from any
3048 * unconsumed refreservation space.
3050 *ref_rsrv = 0;
3052 mutex_enter(&ds->ds_lock);
3054 * Make a space adjustment for reserved bytes.
3056 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
3057 ASSERT3U(*used, >=,
3058 ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3059 *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3060 *ref_rsrv =
3061 asize - MIN(asize, parent_delta(ds, asize + inflight));
3064 if (!check_quota || ds->ds_quota == 0) {
3065 mutex_exit(&ds->ds_lock);
3066 return (0);
3069 * If they are requesting more space, and our current estimate
3070 * is over quota, they get to try again unless the actual
3071 * on-disk is over quota and there are no pending changes (which
3072 * may free up space for us).
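 *
 * For example (hypothetical numbers): with refquota = 100G,
 * on-disk used = 95G and inflight = 10G, the estimate (105G) is over
 * quota but the on-disk usage is not, so we return ERESTART and the
 * caller retries after the pending txg syncs.  EDQUOT is returned
 * only when the on-disk usage itself is at or over quota and nothing
 * is in flight.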
3074 if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
3075 if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
3076 error = ERESTART;
3077 else
3078 error = EDQUOT;
3080 mutex_exit(&ds->ds_lock);
3082 return (error);
3085 /* ARGSUSED */
3086 static int
3087 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
3089 dsl_dataset_t *ds = arg1;
3090 dsl_prop_setarg_t *psa = arg2;
3091 int err;
3093 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
3094 return (ENOTSUP);
3096 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3097 return (err);
3099 if (psa->psa_effective_value == 0)
3100 return (0);
3102 if (psa->psa_effective_value < ds->ds_phys->ds_used_bytes ||
3103 psa->psa_effective_value < ds->ds_reserved)
3104 return (ENOSPC);
3106 return (0);
3109 extern void dsl_prop_set_sync(void *, void *, cred_t *, dmu_tx_t *);
3111 void
3112 dsl_dataset_set_quota_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
3114 dsl_dataset_t *ds = arg1;
3115 dsl_prop_setarg_t *psa = arg2;
3116 uint64_t effective_value = psa->psa_effective_value;
3118 dsl_prop_set_sync(ds, psa, cr, tx);
3119 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3121 if (ds->ds_quota != effective_value) {
3122 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3123 ds->ds_quota = effective_value;
3125 spa_history_internal_log(LOG_DS_REFQUOTA,
3126 ds->ds_dir->dd_pool->dp_spa, tx, cr, "%lld dataset = %llu ",
3127 (longlong_t)ds->ds_quota, ds->ds_object);
3132 dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
3134 dsl_dataset_t *ds;
3135 dsl_prop_setarg_t psa;
3136 int err;
3138 dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);
3140 err = dsl_dataset_hold(dsname, FTAG, &ds);
3141 if (err)
3142 return (err);
3145 * If someone removes a file, then tries to set the quota, we
3146 * want to make sure the file freeing takes effect.
3148 txg_wait_open(ds->ds_dir->dd_pool, 0);
3150 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3151 dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3152 ds, &psa, 0);
3154 dsl_dataset_rele(ds, FTAG);
3155 return (err);
3158 static int
3159 dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
3161 dsl_dataset_t *ds = arg1;
3162 dsl_prop_setarg_t *psa = arg2;
3163 uint64_t effective_value;
3164 uint64_t unique;
3165 int err;
3167 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
3168 SPA_VERSION_REFRESERVATION)
3169 return (ENOTSUP);
3171 if (dsl_dataset_is_snapshot(ds))
3172 return (EINVAL);
3174 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3175 return (err);
3177 effective_value = psa->psa_effective_value;
3180 * If we are doing the preliminary check in open context, the
3181 * space estimates may be inaccurate.
3183 if (!dmu_tx_is_syncing(tx))
3184 return (0);
3186 mutex_enter(&ds->ds_lock);
3187 unique = dsl_dataset_unique(ds);
3188 mutex_exit(&ds->ds_lock);
3190 if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
3191 uint64_t delta = MAX(unique, effective_value) -
3192 MAX(unique, ds->ds_reserved);
3194 if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
3195 return (ENOSPC);
3196 if (ds->ds_quota > 0 &&
3197 effective_value > ds->ds_quota)
3198 return (ENOSPC);
3201 return (0);
3204 /* ARGSUSED */
3205 static void
3206 dsl_dataset_set_reservation_sync(void *arg1, void *arg2, cred_t *cr,
3207 dmu_tx_t *tx)
3209 dsl_dataset_t *ds = arg1;
3210 dsl_prop_setarg_t *psa = arg2;
3211 uint64_t effective_value = psa->psa_effective_value;
3212 uint64_t unique;
3213 int64_t delta;
3215 dsl_prop_set_sync(ds, psa, cr, tx);
3216 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3218 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3220 mutex_enter(&ds->ds_dir->dd_lock);
3221 mutex_enter(&ds->ds_lock);
3222 unique = dsl_dataset_unique(ds);
3223 delta = MAX(0, (int64_t)(effective_value - unique)) -
3224 MAX(0, (int64_t)(ds->ds_reserved - unique));
3225 ds->ds_reserved = effective_value;
3226 mutex_exit(&ds->ds_lock);
3228 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
3229 mutex_exit(&ds->ds_dir->dd_lock);
3231 spa_history_internal_log(LOG_DS_REFRESERV,
3232 ds->ds_dir->dd_pool->dp_spa, tx, cr, "%lld dataset = %llu",
3233 (longlong_t)effective_value, ds->ds_object);
3237 dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
3238 uint64_t reservation)
3240 dsl_dataset_t *ds;
3241 dsl_prop_setarg_t psa;
3242 int err;
3244 dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
3245 &reservation);
3247 err = dsl_dataset_hold(dsname, FTAG, &ds);
3248 if (err)
3249 return (err);
3251 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3252 dsl_dataset_set_reservation_check,
3253 dsl_dataset_set_reservation_sync, ds, &psa, 0);
3255 dsl_dataset_rele(ds, FTAG);
3256 return (err);
3259 struct dsl_ds_holdarg {
3260 dsl_sync_task_group_t *dstg;
3261 char *htag;
3262 char *snapname;
3263 boolean_t recursive;
3264 boolean_t gotone;
3265 boolean_t temphold;
3266 char failed[MAXPATHLEN];
3270 * The max length of a temporary tag prefix is the number of hex digits
3271 * required to express UINT64_MAX plus one for the hyphen.
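 * (UINT64_MAX takes 16 hexadecimal digits; 16 + 1 = 17.)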
3273 #define MAX_TAG_PREFIX_LEN 17
3275 static int
3276 dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
3278 dsl_dataset_t *ds = arg1;
3279 struct dsl_ds_holdarg *ha = arg2;
3280 char *htag = ha->htag;
3281 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3282 int error = 0;
3284 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3285 return (ENOTSUP);
3287 if (!dsl_dataset_is_snapshot(ds))
3288 return (EINVAL);
3290 /* tags must be unique */
3291 mutex_enter(&ds->ds_lock);
3292 if (ds->ds_phys->ds_userrefs_obj) {
3293 error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
3294 8, 1, tx);
3295 if (error == 0)
3296 error = EEXIST;
3297 else if (error == ENOENT)
3298 error = 0;
3300 mutex_exit(&ds->ds_lock);
3302 if (error == 0 && ha->temphold &&
3303 strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
3304 error = E2BIG;
3306 return (error);
3309 static void
3310 dsl_dataset_user_hold_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
3312 dsl_dataset_t *ds = arg1;
3313 struct dsl_ds_holdarg *ha = arg2;
3314 char *htag = ha->htag;
3315 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3316 objset_t *mos = dp->dp_meta_objset;
3317 uint64_t now = gethrestime_sec();
3318 uint64_t zapobj;
3320 mutex_enter(&ds->ds_lock);
3321 if (ds->ds_phys->ds_userrefs_obj == 0) {
3323 * This is the first user hold for this dataset. Create
3324 * the userrefs zap object.
3326 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3327 zapobj = ds->ds_phys->ds_userrefs_obj =
3328 zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
3329 } else {
3330 zapobj = ds->ds_phys->ds_userrefs_obj;
3332 ds->ds_userrefs++;
3333 mutex_exit(&ds->ds_lock);
3335 VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));
3337 if (ha->temphold) {
3338 VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
3339 htag, &now, tx));
3342 spa_history_internal_log(LOG_DS_USER_HOLD,
3343 dp->dp_spa, tx, cr, "<%s> temp = %d dataset = %llu", htag,
3344 (int)ha->temphold, ds->ds_object);
3347 static int
3348 dsl_dataset_user_hold_one(const char *dsname, void *arg)
3350 struct dsl_ds_holdarg *ha = arg;
3351 dsl_dataset_t *ds;
3352 int error;
3353 char *name;
3355 /* alloc a buffer to hold dsname@snapname plus the terminating NUL */
3356 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3357 error = dsl_dataset_hold(name, ha->dstg, &ds);
3358 strfree(name);
3359 if (error == 0) {
3360 ha->gotone = B_TRUE;
3361 dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
3362 dsl_dataset_user_hold_sync, ds, ha, 0);
3363 } else if (error == ENOENT && ha->recursive) {
3364 error = 0;
3365 } else {
3366 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3368 return (error);
3372 dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
3373 boolean_t recursive, boolean_t temphold)
3375 struct dsl_ds_holdarg *ha;
3376 dsl_sync_task_t *dst;
3377 spa_t *spa;
3378 int error;
3380 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3382 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3384 error = spa_open(dsname, &spa, FTAG);
3385 if (error) {
3386 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3387 return (error);
3390 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3391 ha->htag = htag;
3392 ha->snapname = snapname;
3393 ha->recursive = recursive;
3394 ha->temphold = temphold;
3395 if (recursive) {
3396 error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
3397 ha, DS_FIND_CHILDREN);
3398 } else {
3399 error = dsl_dataset_user_hold_one(dsname, ha);
3401 if (error == 0)
3402 error = dsl_sync_task_group_wait(ha->dstg);
3404 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3405 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3406 dsl_dataset_t *ds = dst->dst_arg1;
3408 if (dst->dst_err) {
3409 dsl_dataset_name(ds, ha->failed);
3410 *strchr(ha->failed, '@') = '\0';
3412 dsl_dataset_rele(ds, ha->dstg);
3415 if (error == 0 && recursive && !ha->gotone)
3416 error = ENOENT;
3418 if (error)
3419 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3421 dsl_sync_task_group_destroy(ha->dstg);
3422 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3423 spa_close(spa, FTAG);
3424 return (error);
3427 struct dsl_ds_releasearg {
3428 dsl_dataset_t *ds;
3429 const char *htag;
3430 boolean_t own; /* do we own or just hold ds? */
3433 static int
3434 dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
3435 boolean_t *might_destroy)
3437 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3438 uint64_t zapobj;
3439 uint64_t tmp;
3440 int error;
3442 *might_destroy = B_FALSE;
3444 mutex_enter(&ds->ds_lock);
3445 zapobj = ds->ds_phys->ds_userrefs_obj;
3446 if (zapobj == 0) {
3447 /* The tag can't possibly exist */
3448 mutex_exit(&ds->ds_lock);
3449 return (ESRCH);
3452 /* Make sure the tag exists */
3453 error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
3454 if (error) {
3455 mutex_exit(&ds->ds_lock);
3456 if (error == ENOENT)
3457 error = ESRCH;
3458 return (error);
3461 if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
3462 DS_IS_DEFER_DESTROY(ds))
3463 *might_destroy = B_TRUE;
3465 mutex_exit(&ds->ds_lock);
3466 return (0);
3469 static int
3470 dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
3472 struct dsl_ds_releasearg *ra = arg1;
3473 dsl_dataset_t *ds = ra->ds;
3474 boolean_t might_destroy;
3475 int error;
3477 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3478 return (ENOTSUP);
3480 error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
3481 if (error)
3482 return (error);
3484 if (might_destroy) {
3485 struct dsl_ds_destroyarg dsda = {0};
3487 if (dmu_tx_is_syncing(tx)) {
3489 * If we're not prepared to remove the snapshot,
3490 * we can't allow the release to happen right now.
3492 if (!ra->own)
3493 return (EBUSY);
3494 if (ds->ds_objset) {
3495 dmu_objset_evict(ds->ds_objset);
3496 ds->ds_objset = NULL;
3499 dsda.ds = ds;
3500 dsda.releasing = B_TRUE;
3501 return (dsl_dataset_destroy_check(&dsda, tag, tx));
3504 return (0);
3507 static void
3508 dsl_dataset_user_release_sync(void *arg1, void *tag, cred_t *cr, dmu_tx_t *tx)
3510 struct dsl_ds_releasearg *ra = arg1;
3511 dsl_dataset_t *ds = ra->ds;
3512 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3513 objset_t *mos = dp->dp_meta_objset;
3514 uint64_t zapobj;
3515 uint64_t dsobj = ds->ds_object;
3516 uint64_t refs;
3517 int error;
3519 mutex_enter(&ds->ds_lock);
3520 ds->ds_userrefs--;
3521 refs = ds->ds_userrefs;
3522 mutex_exit(&ds->ds_lock);
3523 error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
3524 VERIFY(error == 0 || error == ENOENT);
3525 zapobj = ds->ds_phys->ds_userrefs_obj;
3526 VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
3527 if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
3528 DS_IS_DEFER_DESTROY(ds)) {
3529 struct dsl_ds_destroyarg dsda = {0};
3531 ASSERT(ra->own);
3532 dsda.ds = ds;
3533 dsda.releasing = B_TRUE;
3534 /* We already did the destroy_check */
3535 dsl_dataset_destroy_sync(&dsda, tag, cr, tx);
3538 spa_history_internal_log(LOG_DS_USER_RELEASE,
3539 dp->dp_spa, tx, cr, "<%s> %lld dataset = %llu",
3540 ra->htag, (longlong_t)refs, dsobj);
3543 static int
3544 dsl_dataset_user_release_one(const char *dsname, void *arg)
3546 struct dsl_ds_holdarg *ha = arg;
3547 struct dsl_ds_releasearg *ra;
3548 dsl_dataset_t *ds;
3549 int error;
3550 void *dtag = ha->dstg;
3551 char *name;
3552 boolean_t own = B_FALSE;
3553 boolean_t might_destroy;
3555 /* alloc a buffer to hold dsname@snapname, plus the terminating NUL */
3556 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3557 error = dsl_dataset_hold(name, dtag, &ds);
3558 strfree(name);
3559 if (error == ENOENT && ha->recursive)
3560 return (0);
3561 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3562 if (error)
3563 return (error);
3565 ha->gotone = B_TRUE;
3567 ASSERT(dsl_dataset_is_snapshot(ds));
3569 error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
3570 if (error) {
3571 dsl_dataset_rele(ds, dtag);
3572 return (error);
3575 if (might_destroy) {
3576 #ifdef _KERNEL
3577 error = zfs_unmount_snap(name, NULL);
3578 if (error) {
3579 dsl_dataset_rele(ds, dtag);
3580 return (error);
3582 #endif
3583 if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
3584 dsl_dataset_rele(ds, dtag);
3585 return (EBUSY);
3586 } else {
3587 own = B_TRUE;
3588 dsl_dataset_make_exclusive(ds, dtag);
3592 ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
3593 ra->ds = ds;
3594 ra->htag = ha->htag;
3595 ra->own = own;
3596 dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
3597 dsl_dataset_user_release_sync, ra, dtag, 0);
3599 return (0);
3603 dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
3604 boolean_t recursive)
3606 struct dsl_ds_holdarg *ha;
3607 dsl_sync_task_t *dst;
3608 spa_t *spa;
3609 int error;
3611 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3613 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3615 error = spa_open(dsname, &spa, FTAG);
3616 if (error) {
3617 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3618 return (error);
3621 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3622 ha->htag = htag;
3623 ha->snapname = snapname;
3624 ha->recursive = recursive;
3625 if (recursive) {
3626 error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
3627 ha, DS_FIND_CHILDREN);
3628 } else {
3629 error = dsl_dataset_user_release_one(dsname, ha);
3631 if (error == 0)
3632 error = dsl_sync_task_group_wait(ha->dstg);
3634 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3635 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3636 struct dsl_ds_releasearg *ra = dst->dst_arg1;
3637 dsl_dataset_t *ds = ra->ds;
3639 if (dst->dst_err)
3640 dsl_dataset_name(ds, ha->failed);
3642 if (ra->own)
3643 dsl_dataset_disown(ds, ha->dstg);
3644 else
3645 dsl_dataset_rele(ds, ha->dstg);
3647 kmem_free(ra, sizeof (struct dsl_ds_releasearg));
3650 if (error == 0 && recursive && !ha->gotone)
3651 error = ENOENT;
3653 if (error)
3654 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3656 dsl_sync_task_group_destroy(ha->dstg);
3657 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3658 spa_close(spa, FTAG);
3659 return (error);
3663 * Called at spa_load time to release a stale temporary user hold.
3666 dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag)
3668 dsl_dataset_t *ds;
3669 char *snap;
3670 char *name;
3671 int namelen;
3672 int error;
3674 rw_enter(&dp->dp_config_rwlock, RW_READER);
3675 error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
3676 rw_exit(&dp->dp_config_rwlock);
3677 if (error)
3678 return (error);
3679 namelen = dsl_dataset_namelen(ds)+1;
3680 name = kmem_alloc(namelen, KM_SLEEP);
3681 dsl_dataset_name(ds, name);
3682 dsl_dataset_rele(ds, FTAG);
3684 snap = strchr(name, '@');
3685 *snap = '\0';
3686 ++snap;
3687 return (dsl_dataset_user_release(name, snap, htag, B_FALSE));
3691 dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
3693 dsl_dataset_t *ds;
3694 int err;
3696 err = dsl_dataset_hold(dsname, FTAG, &ds);
3697 if (err)
3698 return (err);
3700 VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
3701 if (ds->ds_phys->ds_userrefs_obj != 0) {
3702 zap_attribute_t *za;
3703 zap_cursor_t zc;
3705 za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
3706 for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
3707 ds->ds_phys->ds_userrefs_obj);
3708 zap_cursor_retrieve(&zc, za) == 0;
3709 zap_cursor_advance(&zc)) {
3710 VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
3711 za->za_first_integer));
3713 zap_cursor_fini(&zc);
3714 kmem_free(za, sizeof (zap_attribute_t));
3716 dsl_dataset_rele(ds, FTAG);
3717 return (0);
3721 * Note, this function is used as the callback for dmu_objset_find(). We
3722 * always return 0 so that we will continue to find and process
3723 * inconsistent datasets, even if we encounter an error trying to
3724 * process one of them.
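 *
 * Usage sketch (editor's note, not taken from this file): a caller
 * sweeping an entire pool might invoke it as
 *	(void) dmu_objset_find(spa_name(spa), dsl_destroy_inconsistent,
 *	    NULL, DS_FIND_CHILDREN);
 * and ignore the return value, since per-dataset errors are already
 * swallowed here.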
3726 /* ARGSUSED */
3728 dsl_destroy_inconsistent(const char *dsname, void *arg)
3730 dsl_dataset_t *ds;
3732 if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
3733 if (DS_IS_INCONSISTENT(ds))
3734 (void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
3735 else
3736 dsl_dataset_disown(ds, FTAG);
3738 return (0);