/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013 Martin Matuska. All rights reserved.
 * Copyright (c) 2014 Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
#include <sys/spa.h>
#include <sys/metaslab.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/sunddi.h>
#include <sys/zfeature.h>
#include <sys/policy.h>
#include <sys/zfs_znode.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"

/*
 * Filesystem and Snapshot Limits
 * ------------------------------
 *
 * These limits are used to restrict the number of filesystems and/or snapshots
 * that can be created at a given level in the tree or below. A typical
 * use-case is with a delegated dataset where the administrator wants to ensure
 * that a user within the zone is not creating too many additional filesystems
 * or snapshots, even though they're not exceeding their space quota.
 *
 * The filesystem and snapshot counts are stored as extensible properties. This
 * capability is controlled by a feature flag and must be enabled to be used.
 * Once enabled, the feature is not active until the first limit is set. At
 * that point, future operations to create/destroy filesystems or snapshots
 * will validate and update the counts.
 *
 * Because the count properties will not exist before the feature is active,
 * the counts are updated when a limit is first set on an uninitialized
 * dsl_dir node in the tree (The filesystem/snapshot count on a node includes
 * all of the nested filesystems/snapshots. Thus, a new leaf node has a
 * filesystem count of 0 and a snapshot count of 0. Non-existent filesystem and
 * snapshot count properties on a node indicate uninitialized counts on that
 * node.) When first setting a limit on an uninitialized node, the code starts
 * at the filesystem with the new limit and descends into all sub-filesystems
 * to add the count properties.
 *
 * In practice this is lightweight since a limit is typically set when the
 * filesystem is created and thus has no children. Once valid, changing the
 * limit value won't require a re-traversal since the counts are already valid.
 * When recursively fixing the counts, if a node with a limit is encountered
 * during the descent, the counts are known to be valid and there is no need to
 * descend into that filesystem's children. The counts on filesystems above the
 * one with the new limit will still be uninitialized, unless a limit is
 * eventually set on one of those filesystems. Setting a limit always triggers
 * the recursive count update, unless the counts on that node are already
 * initialized. When a new limit value is set on a filesystem with an existing
 * limit, it is possible for the new limit to be less than the current count
 * at that level since a user who can change the limit is also allowed to
 * exceed the limit.
 *
 * Once the feature is active, then whenever a filesystem or snapshot is
 * created, the code recurses up the tree, validating the new count against the
 * limit at each initialized level. In practice, most levels will not have a
 * limit set. If there is a limit at any initialized level up the tree, the
 * check must pass or the creation will fail. Likewise, when a filesystem or
 * snapshot is destroyed, the counts are recursively adjusted all the way up
 * the initialized nodes in the tree. Renaming a filesystem into a different
 * point in the tree will first validate, then update the counts on each branch
 * up to the common ancestor. A receive will also validate the counts and then
 * update them.
 *
 * An exception to the above behavior is that the limit is not enforced if the
 * user has permission to modify the limit. This is primarily so that
 * recursive snapshots in the global zone always work. We want to prevent a
 * denial-of-service in which a lower level delegated dataset could max out its
 * limit and thus block recursive snapshots from being taken in the global
 * zone. Because of this, it is possible for the snapshot count to be over the
 * limit and snapshots taken in the global zone could cause a lower level
 * dataset to hit or exceed its limit. The administrator taking the global
 * zone recursive snapshot should be aware of this side-effect and behave
 * accordingly. For consistency, the filesystem limit is also not enforced if
 * the user can modify the limit.
 *
 * The filesystem and snapshot limits are validated by dsl_fs_ss_limit_check()
 * and updated by dsl_fs_ss_count_adjust(). A new limit value is setup in
 * dsl_dir_activate_fs_ss_limit() and the counts are adjusted, if necessary, by
 * dsl_dir_init_fs_ss_count().
 *
 * There is a special case when we receive a filesystem that already exists. In
 * this case a temporary clone name of %X is created (see dmu_recv_begin). We
 * never update the filesystem counts for temporary clones.
 *
 * Likewise, we do not update the snapshot counts for temporary snapshots,
 * such as those created by zfs diff.
 */

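/*
 * Illustrative call pattern (a sketch, not code reached from this file):
 * a create-style sync task would validate against the limits in its check
 * function and roll the new object into the counts in its sync function,
 * roughly:
 *
 *	error = dsl_fs_ss_limit_check(parent_dd, 1,
 *	    ZFS_PROP_FILESYSTEM_LIMIT, NULL, cr);	-- check context
 *	...
 *	dsl_fs_ss_count_adjust(parent_dd, 1,
 *	    DD_FIELD_FILESYSTEM_COUNT, tx);		-- syncing context
 */
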
extern inline dsl_dir_phys_t *dsl_dir_phys(dsl_dir_t *dd);

static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);

static void
dsl_dir_evict(void *dbu)
{
	dsl_dir_t *dd = dbu;
	dsl_pool_t *dp = dd->dd_pool;
	int t;

	dd->dd_dbuf = NULL;

	for (t = 0; t < TXG_SIZE; t++) {
		ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
		ASSERT(dd->dd_tempreserved[t] == 0);
		ASSERT(dd->dd_space_towrite[t] == 0);
	}

	if (dd->dd_parent)
		dsl_dir_async_rele(dd->dd_parent, dd);

	spa_async_close(dd->dd_pool->dp_spa, dd);

	/*
	 * The props callback list should have been cleaned up by
	 * objset_evict().
	 */
	list_destroy(&dd->dd_prop_cbs);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
}

int
dsl_dir_hold_obj(dsl_pool_t *dp, uint64_t ddobj,
    const char *tail, void *tag, dsl_dir_t **ddp)
{
	dmu_buf_t *dbuf;
	dsl_dir_t *dd;
	int err;

	ASSERT(dsl_pool_config_held(dp));

	err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
	if (err != 0)
		return (err);
	dd = dmu_buf_get_user(dbuf);
#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbuf, &doi);
		ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_DSL_DIR);
		ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));
	}
#endif
	if (dd == NULL) {
		dsl_dir_t *winner;

		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
		dd->dd_object = ddobj;
		dd->dd_dbuf = dbuf;
		dd->dd_pool = dp;
		mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);

		list_create(&dd->dd_prop_cbs, sizeof (dsl_prop_cb_record_t),
		    offsetof(dsl_prop_cb_record_t, cbr_node));

		dsl_dir_snap_cmtime_update(dd);

		if (dsl_dir_phys(dd)->dd_parent_obj) {
			err = dsl_dir_hold_obj(dp,
			    dsl_dir_phys(dd)->dd_parent_obj, NULL, dd,
			    &dd->dd_parent);
			if (err != 0)
				goto errout;
			if (tail) {
#ifdef ZFS_DEBUG
				uint64_t foundobj;

				err = zap_lookup(dp->dp_meta_objset,
				    dsl_dir_phys(dd->dd_parent)->
				    dd_child_dir_zapobj, tail,
				    sizeof (foundobj), 1, &foundobj);
				ASSERT(err || foundobj == ddobj);
#endif
				(void) strcpy(dd->dd_myname, tail);
			} else {
				err = zap_value_search(dp->dp_meta_objset,
				    dsl_dir_phys(dd->dd_parent)->
				    dd_child_dir_zapobj,
				    ddobj, 0, dd->dd_myname);
			}
			if (err != 0)
				goto errout;
		} else {
			(void) strcpy(dd->dd_myname, spa_name(dp->dp_spa));
		}

		if (dsl_dir_is_clone(dd)) {
			dmu_buf_t *origin_bonus;
			dsl_dataset_phys_t *origin_phys;

			/*
			 * We can't open the origin dataset, because
			 * that would require opening this dsl_dir.
			 * Just look at its phys directly instead.
			 */
			err = dmu_bonus_hold(dp->dp_meta_objset,
			    dsl_dir_phys(dd)->dd_origin_obj, FTAG,
			    &origin_bonus);
			if (err != 0)
				goto errout;
			origin_phys = origin_bonus->db_data;
			dd->dd_origin_txg =
			    origin_phys->ds_creation_txg;
			dmu_buf_rele(origin_bonus, FTAG);
		}

		dmu_buf_init_user(&dd->dd_dbu, dsl_dir_evict, &dd->dd_dbuf);
		winner = dmu_buf_set_user_ie(dbuf, &dd->dd_dbu);
		if (winner != NULL) {
			if (dd->dd_parent)
				dsl_dir_rele(dd->dd_parent, dd);
			mutex_destroy(&dd->dd_lock);
			kmem_free(dd, sizeof (dsl_dir_t));
			dd = winner;
		} else {
			spa_open_ref(dp->dp_spa, dd);
		}
	}

	/*
	 * The dsl_dir_t has both open-to-close and instantiate-to-evict
	 * holds on the spa.  We need the open-to-close holds because
	 * otherwise the spa_refcnt wouldn't change when we open a
	 * dir which the spa also has open, so we could incorrectly
	 * think it was OK to unload/export/destroy the pool.  We need
	 * the instantiate-to-evict hold because the dsl_dir_t has a
	 * pointer to the dd_pool, which has a pointer to the spa_t.
	 */
	spa_open_ref(dp->dp_spa, tag);
	ASSERT3P(dd->dd_pool, ==, dp);
	ASSERT3U(dd->dd_object, ==, ddobj);
	ASSERT3P(dd->dd_dbuf, ==, dbuf);
	*ddp = dd;
	return (0);

errout:
	if (dd->dd_parent)
		dsl_dir_rele(dd->dd_parent, dd);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
	dmu_buf_rele(dbuf, tag);
	return (err);
}

void
dsl_dir_rele(dsl_dir_t *dd, void *tag)
{
	dprintf_dd(dd, "%s\n", "");
	spa_close(dd->dd_pool->dp_spa, tag);
	dmu_buf_rele(dd->dd_dbuf, tag);
}

/*
 * Remove a reference to the given dsl dir that is being asynchronously
 * released.  Async releases occur from a taskq performing eviction of
 * dsl datasets and dirs.  This process is identical to a normal release
 * with the exception of using the async API for releasing the reference on
 * the spa.
 */
void
dsl_dir_async_rele(dsl_dir_t *dd, void *tag)
{
	dprintf_dd(dd, "%s\n", "");
	spa_async_close(dd->dd_pool->dp_spa, tag);
	dmu_buf_rele(dd->dd_dbuf, tag);
}

/* buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
	if (dd->dd_parent) {
		dsl_dir_name(dd->dd_parent, buf);
		(void) strcat(buf, "/");
	} else {
		buf[0] = '\0';
	}
	if (!MUTEX_HELD(&dd->dd_lock)) {
		/*
		 * recursive mutex so that we can use
		 * dprintf_dd() with dd_lock held
		 */
		mutex_enter(&dd->dd_lock);
		(void) strcat(buf, dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		(void) strcat(buf, dd->dd_myname);
	}
}

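/*
 * For illustration: calling dsl_dir_name() on a dir "b" whose parent chain
 * is pool -> a -> b first writes "pool" at the root of the recursion, then
 * appends "/" and "a", then "/" and "b", leaving buf = "pool/a/b".
 */
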
/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
int
dsl_dir_namelen(dsl_dir_t *dd)
{
	int result = 0;

	if (dd->dd_parent) {
		/* parent's name + 1 for the "/" */
		result = dsl_dir_namelen(dd->dd_parent) + 1;
	}

	if (!MUTEX_HELD(&dd->dd_lock)) {
		/* see dsl_dir_name */
		mutex_enter(&dd->dd_lock);
		result += strlen(dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		result += strlen(dd->dd_myname);
	}

	return (result);
}

static int
getcomponent(const char *path, char *component, const char **nextp)
{
	char *p;

	if ((path == NULL) || (path[0] == '\0'))
		return (SET_ERROR(ENOENT));
	/* This would be a good place to reserve some namespace... */
	p = strpbrk(path, "/@");
	if (p && (p[1] == '/' || p[1] == '@')) {
		/* two separators in a row */
		return (SET_ERROR(EINVAL));
	}
	if (p == NULL || p == path) {
		/*
		 * if the first thing is an @ or /, it had better be an
		 * @ and it had better not have any more ats or slashes,
		 * and it had better have something after the @.
		 */
		if (p != NULL &&
		    (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
			return (SET_ERROR(EINVAL));
		if (strlen(path) >= MAXNAMELEN)
			return (SET_ERROR(ENAMETOOLONG));
		(void) strcpy(component, path);
		p = NULL;
	} else if (p[0] == '/') {
		if (p - path >= MAXNAMELEN)
			return (SET_ERROR(ENAMETOOLONG));
		(void) strncpy(component, path, p - path);
		component[p - path] = '\0';
		p++;
	} else if (p[0] == '@') {
		/*
		 * if the next separator is an @, there better not be
		 * any more slashes.
		 */
		if (strchr(path, '/'))
			return (SET_ERROR(EINVAL));
		if (p - path >= MAXNAMELEN)
			return (SET_ERROR(ENAMETOOLONG));
		(void) strncpy(component, path, p - path);
		component[p - path] = '\0';
	} else {
		panic("invalid p=%p", (void *)p);
	}
	*nextp = p;
	return (0);
}

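/*
 * Example of getcomponent() behavior (illustrative): given "pool/fs@snap",
 * the first call copies "pool" into component and sets *nextp to
 * "fs@snap"; a second call on "fs@snap" copies "fs" and sets *nextp to
 * "@snap"; the "@snap" remainder is then left for the caller to handle.
 */
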
/*
 * Return the dsl_dir_t, and possibly the last component which couldn't
 * be found in *tail.  The name must be in the specified dsl_pool_t.  This
 * thread must hold the dp_config_rwlock for the pool.  Returns NULL if the
 * path is bogus, or if tail==NULL and we couldn't parse the whole name.
 * (*tail)[0] == '@' means that the last component is a snapshot.
 */
int
dsl_dir_hold(dsl_pool_t *dp, const char *name, void *tag,
    dsl_dir_t **ddp, const char **tailp)
{
	char buf[MAXNAMELEN];
	const char *spaname, *next, *nextnext = NULL;
	int err;
	dsl_dir_t *dd;
	uint64_t ddobj;

	err = getcomponent(name, buf, &next);
	if (err != 0)
		return (err);

	/* Make sure the name is in the specified pool. */
	spaname = spa_name(dp->dp_spa);
	if (strcmp(buf, spaname) != 0)
		return (SET_ERROR(EINVAL));

	ASSERT(dsl_pool_config_held(dp));

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
	if (err != 0) {
		return (err);
	}

	while (next != NULL) {
		dsl_dir_t *child_dd;
		err = getcomponent(next, buf, &nextnext);
		if (err != 0)
			break;
		ASSERT(next[0] != '\0');
		if (next[0] == '@')
			break;
		dprintf("looking up %s in obj%lld\n",
		    buf, dsl_dir_phys(dd)->dd_child_dir_zapobj);

		err = zap_lookup(dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj,
		    buf, sizeof (ddobj), 1, &ddobj);
		if (err != 0) {
			if (err == ENOENT)
				err = 0;
			break;
		}

		err = dsl_dir_hold_obj(dp, ddobj, buf, tag, &child_dd);
		if (err != 0)
			break;
		dsl_dir_rele(dd, tag);
		dd = child_dd;
		next = nextnext;
	}

	if (err != 0) {
		dsl_dir_rele(dd, tag);
		return (err);
	}

	/*
	 * It's an error if there's more than one component left, or
	 * tailp==NULL and there's any component left.
	 */
	if (next != NULL &&
	    (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
		/* bad path name */
		dsl_dir_rele(dd, tag);
		dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
		err = SET_ERROR(ENOENT);
	}
	if (tailp != NULL)
		*tailp = next;
	*ddp = dd;
	return (err);
}

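/*
 * Typical usage of dsl_dir_hold() (a sketch under the pool config lock;
 * error handling elided):
 *
 *	dsl_dir_t *dd;
 *	const char *tail;
 *
 *	dsl_pool_config_enter(dp, FTAG);
 *	err = dsl_dir_hold(dp, "pool/a/b", FTAG, &dd, &tail);
 *	-- tail == NULL if the full name resolved; it points at "b" if that
 *	-- component was not found, or at "@snap" for a snapshot name
 *	if (err == 0)
 *		dsl_dir_rele(dd, FTAG);
 *	dsl_pool_config_exit(dp, FTAG);
 */
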
/*
 * If the counts are already initialized for this filesystem and its
 * descendants then do nothing, otherwise initialize the counts.
 *
 * The counts on this filesystem, and those below, may be uninitialized due to
 * either the use of a pre-existing pool which did not support the
 * filesystem/snapshot limit feature, or one in which the feature had not yet
 * been enabled.
 *
 * Recursively descend the filesystem tree and update the filesystem/snapshot
 * counts on each filesystem below, then update the cumulative count on the
 * current filesystem.  If the filesystem already has a count set on it,
 * then we know that its counts, and the counts on the filesystems below it,
 * are already correct, so we don't have to update this filesystem.
 */
static void
dsl_dir_init_fs_ss_count(dsl_dir_t *dd, dmu_tx_t *tx)
{
	uint64_t my_fs_cnt = 0;
	uint64_t my_ss_cnt = 0;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *os = dp->dp_meta_objset;
	zap_cursor_t *zc;
	zap_attribute_t *za;
	dsl_dataset_t *ds;

	ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT));
	ASSERT(dsl_pool_config_held(dp));
	ASSERT(dmu_tx_is_syncing(tx));

	dsl_dir_zapify(dd, tx);

	/*
	 * If the filesystem count has already been initialized then we
	 * don't need to recurse down any further.
	 */
	if (zap_contains(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT) == 0)
		return;

	zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
	za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/* Iterate my child dirs */
	for (zap_cursor_init(zc, os, dsl_dir_phys(dd)->dd_child_dir_zapobj);
	    zap_cursor_retrieve(zc, za) == 0; zap_cursor_advance(zc)) {
		dsl_dir_t *chld_dd;
		uint64_t count;

		VERIFY0(dsl_dir_hold_obj(dp, za->za_first_integer, NULL, FTAG,
		    &chld_dd));

		/*
		 * Ignore hidden ($FREE, $MOS & $ORIGIN) objsets and
		 * temporary datasets.
		 */
		if (chld_dd->dd_myname[0] == '$' ||
		    chld_dd->dd_myname[0] == '%') {
			dsl_dir_rele(chld_dd, FTAG);
			continue;
		}

		my_fs_cnt++;	/* count this child */

		dsl_dir_init_fs_ss_count(chld_dd, tx);

		VERIFY0(zap_lookup(os, chld_dd->dd_object,
		    DD_FIELD_FILESYSTEM_COUNT, sizeof (count), 1, &count));
		my_fs_cnt += count;
		VERIFY0(zap_lookup(os, chld_dd->dd_object,
		    DD_FIELD_SNAPSHOT_COUNT, sizeof (count), 1, &count));
		my_ss_cnt += count;

		dsl_dir_rele(chld_dd, FTAG);
	}
	zap_cursor_fini(zc);
	/* Count my snapshots (we counted children's snapshots above) */
	VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
	    dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds));

	for (zap_cursor_init(zc, os, dsl_dataset_phys(ds)->ds_snapnames_zapobj);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {
		/* Don't count temporary snapshots */
		if (za->za_name[0] != '%')
			my_ss_cnt++;
	}
	zap_cursor_fini(zc);

	dsl_dataset_rele(ds, FTAG);

	kmem_free(zc, sizeof (zap_cursor_t));
	kmem_free(za, sizeof (zap_attribute_t));

	/* we're in a sync task, update counts */
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
	    sizeof (my_fs_cnt), 1, &my_fs_cnt, tx));
	VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
	    sizeof (my_ss_cnt), 1, &my_ss_cnt, tx));
}

static int
dsl_dir_actv_fs_ss_limit_check(void *arg, dmu_tx_t *tx)
{
	char *ddname = (char *)arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	dsl_dir_t *dd;
	int error;

	error = dsl_dataset_hold(dp, ddname, FTAG, &ds);
	if (error != 0)
		return (error);

	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(ENOTSUP));
	}

	dd = ds->ds_dir;
	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT) &&
	    dsl_dir_is_zapified(dd) &&
	    zap_contains(dp->dp_meta_objset, dd->dd_object,
	    DD_FIELD_FILESYSTEM_COUNT) == 0) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EALREADY));
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
dsl_dir_actv_fs_ss_limit_sync(void *arg, dmu_tx_t *tx)
{
	char *ddname = (char *)arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	spa_t *spa;

	VERIFY0(dsl_dataset_hold(dp, ddname, FTAG, &ds));

	spa = dsl_dataset_get_spa(ds);

	if (!spa_feature_is_active(spa, SPA_FEATURE_FS_SS_LIMIT)) {
		/*
		 * Since the feature was not active and we're now setting a
		 * limit, increment the feature-active counter so that the
		 * feature becomes active for the first time.
		 *
		 * We are already in a sync task so we can update the MOS.
		 */
		spa_feature_incr(spa, SPA_FEATURE_FS_SS_LIMIT, tx);
	}

	/*
	 * Since we are now setting a non-UINT64_MAX limit on the filesystem,
	 * we need to ensure the counts are correct.  Descend down the tree
	 * from this point and update all of the counts to be accurate.
	 */
	dsl_dir_init_fs_ss_count(ds->ds_dir, tx);

	dsl_dataset_rele(ds, FTAG);
}

/*
 * Make sure the feature is enabled and activate it if necessary.
 * Since we're setting a limit, ensure the on-disk counts are valid.
 * This is only called by the ioctl path when setting a limit value.
 *
 * We do not need to validate the new limit, since users who can change the
 * limit are also allowed to exceed the limit.
 */
int
dsl_dir_activate_fs_ss_limit(const char *ddname)
{
	int error;

	error = dsl_sync_task(ddname, dsl_dir_actv_fs_ss_limit_check,
	    dsl_dir_actv_fs_ss_limit_sync, (void *)ddname, 0,
	    ZFS_SPACE_CHECK_RESERVED);

	if (error == EALREADY)
		error = 0;

	return (error);
}

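/*
 * For context (a hedged sketch of the call path, none of which is defined
 * in this file): a command such as "zfs set filesystem_limit=100 pool/fs"
 * reaches dsl_dir_activate_fs_ss_limit() via the property-set ioctl; that
 * first call is what activates SPA_FEATURE_FS_SS_LIMIT on the pool and
 * triggers the one-time count initialization above.
 */
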
/*
 * Used to determine if the filesystem_limit or snapshot_limit should be
 * enforced.  We allow the limit to be exceeded if the user has permission to
 * write the property value.  We pass in the creds that we got in the open
 * context since we will always be the GZ root in syncing context.  We also
 * have to handle the case where we are allowed to change the limit on the
 * current dataset, but there may be another limit in the tree above.
 *
 * We can never modify these two properties within a non-global zone.  In
 * addition, the other checks are modeled on zfs_secpolicy_write_perms.  We
 * can't use that function since we are already holding the dp_config_rwlock.
 * In addition, we already have the dd and dealing with snapshots is simplified
 * in this code.
 */

typedef enum {
	ENFORCE_ALWAYS,
	ENFORCE_NEVER,
	ENFORCE_ABOVE
} enforce_res_t;

static enforce_res_t
dsl_enforce_ds_ss_limits(dsl_dir_t *dd, zfs_prop_t prop, cred_t *cr)
{
	enforce_res_t enforce = ENFORCE_ALWAYS;
	uint64_t obj;
	dsl_dataset_t *ds;
	uint64_t zoned;

	ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
	    prop == ZFS_PROP_SNAPSHOT_LIMIT);

#ifdef _KERNEL
	if (crgetzoneid(cr) != GLOBAL_ZONEID)
		return (ENFORCE_ALWAYS);

	if (secpolicy_zfs(cr) == 0)
		return (ENFORCE_NEVER);
#endif

	if ((obj = dsl_dir_phys(dd)->dd_head_dataset_obj) == 0)
		return (ENFORCE_ALWAYS);

	ASSERT(dsl_pool_config_held(dd->dd_pool));

	if (dsl_dataset_hold_obj(dd->dd_pool, obj, FTAG, &ds) != 0)
		return (ENFORCE_ALWAYS);

	if (dsl_prop_get_ds(ds, "zoned", 8, 1, &zoned, NULL) || zoned) {
		/* Only root can access zoned fs's from the GZ */
		enforce = ENFORCE_ALWAYS;
	} else {
		if (dsl_deleg_access_impl(ds, zfs_prop_to_name(prop), cr) == 0)
			enforce = ENFORCE_ABOVE;
	}

	dsl_dataset_rele(ds, FTAG);
	return (enforce);
}

/*
 * Check if adding additional child filesystem(s) would exceed any filesystem
 * limits or adding additional snapshot(s) would exceed any snapshot limits.
 * The prop argument indicates which limit to check.
 *
 * Note that all filesystem limits up to the root (or the highest
 * initialized) filesystem or the given ancestor must be satisfied.
 */
int
dsl_fs_ss_limit_check(dsl_dir_t *dd, uint64_t delta, zfs_prop_t prop,
    dsl_dir_t *ancestor, cred_t *cr)
{
	objset_t *os = dd->dd_pool->dp_meta_objset;
	uint64_t limit, count;
	char *count_prop;
	enforce_res_t enforce;
	int err = 0;

	ASSERT(dsl_pool_config_held(dd->dd_pool));
	ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
	    prop == ZFS_PROP_SNAPSHOT_LIMIT);

	/*
	 * If we're allowed to change the limit, don't enforce the limit
	 * e.g. this can happen if a snapshot is taken by an administrative
	 * user in the global zone (i.e. a recursive snapshot by root).
	 * However, we must handle the case of delegated permissions where we
	 * are allowed to change the limit on the current dataset, but there
	 * is another limit in the tree above.
	 */
	enforce = dsl_enforce_ds_ss_limits(dd, prop, cr);
	if (enforce == ENFORCE_NEVER)
		return (0);

	/*
	 * e.g. if renaming a dataset with no snapshots, count adjustment
	 * is 0.
	 */
	if (delta == 0)
		return (0);

	if (prop == ZFS_PROP_SNAPSHOT_LIMIT) {
		/*
		 * We don't enforce the limit for temporary snapshots. This is
		 * indicated by a NULL cred_t argument.
		 */
		if (cr == NULL)
			return (0);

		count_prop = DD_FIELD_SNAPSHOT_COUNT;
	} else {
		count_prop = DD_FIELD_FILESYSTEM_COUNT;
	}

	/*
	 * If an ancestor has been provided, stop checking the limit once we
	 * hit that dir.  We need this during rename so that we don't overcount
	 * the check once we recurse up to the common ancestor.
	 */
	if (ancestor == dd)
		return (0);

	/*
	 * If we hit an uninitialized node while recursing up the tree, we can
	 * stop since we know there is no limit here (or above).  The counts
	 * are not valid on this node and we know we won't touch this node's
	 * counts.
	 */
	if (!dsl_dir_is_zapified(dd) || zap_lookup(os, dd->dd_object,
	    count_prop, sizeof (count), 1, &count) == ENOENT)
		return (0);

	err = dsl_prop_get_dd(dd, zfs_prop_to_name(prop), 8, 1, &limit, NULL,
	    B_FALSE);
	if (err != 0)
		return (err);

	/* Is there a limit which we've hit? */
	if (enforce == ENFORCE_ALWAYS && (count + delta) > limit)
		return (SET_ERROR(EDQUOT));

	if (dd->dd_parent != NULL)
		err = dsl_fs_ss_limit_check(dd->dd_parent, delta, prop,
		    ancestor, cr);

	return (err);
}

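/*
 * Worked example (illustrative): with a tree pool/a/b where a limit was
 * set on "a" (so "a", "b" and everything below them have initialized
 * counts), creating pool/a/b/c runs dsl_fs_ss_limit_check(b, 1,
 * ZFS_PROP_FILESYSTEM_LIMIT, NULL, cr).  "b" has counts, so its own limit
 * (if any) is checked and the walk recurses to "a", where
 * count + 1 > filesystem_limit would fail with EDQUOT.  "pool" itself is
 * still uninitialized, so the walk stops there with success.
 */
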
/*
 * Adjust the filesystem or snapshot count for the specified dsl_dir_t and all
 * parents.  When a new filesystem/snapshot is created, increment the count on
 * all parents, and when a filesystem/snapshot is destroyed, decrement the
 * count.
 */
void
dsl_fs_ss_count_adjust(dsl_dir_t *dd, int64_t delta, const char *prop,
    dmu_tx_t *tx)
{
	int err;
	objset_t *os = dd->dd_pool->dp_meta_objset;
	uint64_t count;

	ASSERT(dsl_pool_config_held(dd->dd_pool));
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(strcmp(prop, DD_FIELD_FILESYSTEM_COUNT) == 0 ||
	    strcmp(prop, DD_FIELD_SNAPSHOT_COUNT) == 0);

	/*
	 * When we receive an incremental stream into a filesystem that already
	 * exists, a temporary clone is created.  We don't count this temporary
	 * clone, whose name begins with a '%'.  We also ignore hidden ($FREE,
	 * $MOS & $ORIGIN) objsets.
	 */
	if ((dd->dd_myname[0] == '%' || dd->dd_myname[0] == '$') &&
	    strcmp(prop, DD_FIELD_FILESYSTEM_COUNT) == 0)
		return;

	/*
	 * e.g. if renaming a dataset with no snapshots, count adjustment is 0
	 */
	if (delta == 0)
		return;

	/*
	 * If we hit an uninitialized node while recursing up the tree, we can
	 * stop since we know the counts are not valid on this node and we
	 * know we shouldn't touch this node's counts.  An uninitialized count
	 * on the node indicates that either the feature has not yet been
	 * activated or there are no limits on this part of the tree.
	 */
	if (!dsl_dir_is_zapified(dd) || (err = zap_lookup(os, dd->dd_object,
	    prop, sizeof (count), 1, &count)) == ENOENT)
		return;
	VERIFY0(err);

	count += delta;
	/* Use a signed verify to make sure we're not neg. */
	VERIFY3S(count, >=, 0);

	VERIFY0(zap_update(os, dd->dd_object, prop, sizeof (count), 1, &count,
	    tx));

	/* Roll up this additional count into our ancestors */
	if (dd->dd_parent != NULL)
		dsl_fs_ss_count_adjust(dd->dd_parent, delta, prop, tx);
}

uint64_t
dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,
    dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t ddobj;
	dsl_dir_phys_t *ddphys;
	dmu_buf_t *dbuf;

	ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
	if (pds) {
		VERIFY(0 == zap_add(mos, dsl_dir_phys(pds)->dd_child_dir_zapobj,
		    name, sizeof (uint64_t), 1, &ddobj, tx));
	} else {
		/* it's the root dir */
		VERIFY(0 == zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));
	}
	VERIFY(0 == dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	ddphys = dbuf->db_data;

	ddphys->dd_creation_time = gethrestime_sec();
	if (pds) {
		ddphys->dd_parent_obj = pds->dd_object;

		/* update the filesystem counts */
		dsl_fs_ss_count_adjust(pds, 1, DD_FIELD_FILESYSTEM_COUNT, tx);
	}
	ddphys->dd_props_zapobj = zap_create(mos,
	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
	ddphys->dd_child_dir_zapobj = zap_create(mos,
	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
	if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
		ddphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;
	dmu_buf_rele(dbuf, FTAG);

	return (ddobj);
}

boolean_t
dsl_dir_is_clone(dsl_dir_t *dd)
{
	return (dsl_dir_phys(dd)->dd_origin_obj &&
	    (dd->dd_pool->dp_origin_snap == NULL ||
	    dsl_dir_phys(dd)->dd_origin_obj !=
	    dd->dd_pool->dp_origin_snap->ds_object));
}

void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
	mutex_enter(&dd->dd_lock);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
	    dsl_dir_phys(dd)->dd_used_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA,
	    dsl_dir_phys(dd)->dd_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
	    dsl_dir_phys(dd)->dd_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
	    dsl_dir_phys(dd)->dd_compressed_bytes == 0 ? 100 :
	    (dsl_dir_phys(dd)->dd_uncompressed_bytes * 100 /
	    dsl_dir_phys(dd)->dd_compressed_bytes));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALUSED,
	    dsl_dir_phys(dd)->dd_uncompressed_bytes);
	if (dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
		    dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
		    dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_HEAD]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
		    dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_REFRSRV]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
		    dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD] +
		    dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD_RSRV]);
	}
	mutex_exit(&dd->dd_lock);

	if (dsl_dir_is_zapified(dd)) {
		uint64_t count;
		objset_t *os = dd->dd_pool->dp_meta_objset;

		if (zap_lookup(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
		    sizeof (count), 1, &count) == 0) {
			dsl_prop_nvlist_add_uint64(nv,
			    ZFS_PROP_FILESYSTEM_COUNT, count);
		}
		if (zap_lookup(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
		    sizeof (count), 1, &count) == 0) {
			dsl_prop_nvlist_add_uint64(nv,
			    ZFS_PROP_SNAPSHOT_COUNT, count);
		}
	}

	if (dsl_dir_is_clone(dd)) {
		dsl_dataset_t *ds;
		char buf[MAXNAMELEN];

		VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
		    dsl_dir_phys(dd)->dd_origin_obj, FTAG, &ds));
		dsl_dataset_name(ds, buf);
		dsl_dataset_rele(ds, FTAG);
		dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
	}
}

void
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;

	ASSERT(dsl_dir_phys(dd));

	if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg)) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(dd->dd_dbuf, dd);
	}
}

static int64_t
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
{
	uint64_t old_accounted = MAX(used, dsl_dir_phys(dd)->dd_reserved);
	uint64_t new_accounted =
	    MAX(used + delta, dsl_dir_phys(dd)->dd_reserved);
	return (new_accounted - old_accounted);
}

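/*
 * Worked example for parent_delta() (illustrative): with
 * dd_used_bytes == 10 and dd_reserved == 25, a delta of +10 moves the
 * accounted amount from MAX(10, 25) == 25 to MAX(20, 25) == 25, so
 * nothing new is charged to the parent; a delta of +20 yields
 * MAX(30, 25) - MAX(10, 25) == 5, charging only the part that exceeds
 * the reservation the parent has already accounted for.
 */
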
void
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));

	mutex_enter(&dd->dd_lock);
	ASSERT0(dd->dd_tempreserved[tx->tx_txg&TXG_MASK]);
	dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
	    dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024);
	dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0;
	mutex_exit(&dd->dd_lock);

	/* release the hold from dsl_dir_dirty */
	dmu_buf_rele(dd->dd_dbuf, dd);
}

static uint64_t
dsl_dir_space_towrite(dsl_dir_t *dd)
{
	uint64_t space = 0;
	int i;

	ASSERT(MUTEX_HELD(&dd->dd_lock));

	for (i = 0; i < TXG_SIZE; i++) {
		space += dd->dd_space_towrite[i&TXG_MASK];
		ASSERT3U(dd->dd_space_towrite[i&TXG_MASK], >=, 0);
	}
	return (space);
}

/*
 * How much space would dd have available if ancestor had delta applied
 * to it?  If ondiskonly is set, we're only interested in what's
 * on-disk, not estimated pending changes.
 */
uint64_t
dsl_dir_space_available(dsl_dir_t *dd,
    dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
{
	uint64_t parentspace, myspace, quota, used;

	/*
	 * If there are no restrictions otherwise, assume we have
	 * unlimited space available.
	 */
	quota = UINT64_MAX;
	parentspace = UINT64_MAX;

	if (dd->dd_parent != NULL) {
		parentspace = dsl_dir_space_available(dd->dd_parent,
		    ancestor, delta, ondiskonly);
	}

	mutex_enter(&dd->dd_lock);
	if (dsl_dir_phys(dd)->dd_quota != 0)
		quota = dsl_dir_phys(dd)->dd_quota;
	used = dsl_dir_phys(dd)->dd_used_bytes;
	if (!ondiskonly)
		used += dsl_dir_space_towrite(dd);

	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, FALSE);
		quota = MIN(quota, poolsize);
	}

	if (dsl_dir_phys(dd)->dd_reserved > used && parentspace != UINT64_MAX) {
		/*
		 * We have some space reserved, in addition to what our
		 * parent gave us.
		 */
		parentspace += dsl_dir_phys(dd)->dd_reserved - used;
	}

	if (dd == ancestor) {
		ASSERT(delta <= 0);
		ASSERT(used >= -delta);
		used += delta;
		if (parentspace != UINT64_MAX)
			parentspace -= delta;
	}

	if (used > quota) {
		/* over quota */
		myspace = 0;
	} else {
		/*
		 * the lesser of the space provided by our parent and
		 * the space left in our quota
		 */
		myspace = MIN(parentspace, quota - used);
	}

	mutex_exit(&dd->dd_lock);

	return (myspace);
}

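/*
 * Example (illustrative, ignoring reservations): for pool/a with
 * quota == 100 and used == 60, a child pool/a/b with no quota of its own
 * sees parentspace == 40 from the recursion, so even though b's own quota
 * is unlimited, at most 40 bytes are available to it (less any in-flight
 * writes when ondiskonly is not set).
 */
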
struct tempreserve {
	list_node_t tr_node;
	dsl_dir_t *tr_ds;
	uint64_t tr_size;
};

static int
dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
    boolean_t ignorequota, boolean_t checkrefquota, list_t *tr_list,
    dmu_tx_t *tx, boolean_t first)
{
	uint64_t txg = tx->tx_txg;
	uint64_t est_inflight, used_on_disk, quota, parent_rsrv;
	uint64_t deferred = 0;
	struct tempreserve *tr;
	int retval = EDQUOT;
	int txgidx = txg & TXG_MASK;
	int i;
	uint64_t ref_rsrv = 0;

	ASSERT3U(txg, !=, 0);
	ASSERT3S(asize, >, 0);

	mutex_enter(&dd->dd_lock);

	/*
	 * Check against the dsl_dir's quota.  We don't add in the delta
	 * when checking for over-quota because they get one free hit.
	 */
	est_inflight = dsl_dir_space_towrite(dd);
	for (i = 0; i < TXG_SIZE; i++)
		est_inflight += dd->dd_tempreserved[i];
	used_on_disk = dsl_dir_phys(dd)->dd_used_bytes;

	/*
	 * On the first iteration, fetch the dataset's used-on-disk and
	 * refreservation values.  Also, if checkrefquota is set, test if
	 * allocating this space would exceed the dataset's refquota.
	 */
	if (first && tx->tx_objset) {
		int error;
		dsl_dataset_t *ds = tx->tx_objset->os_dsl_dataset;

		error = dsl_dataset_check_quota(ds, checkrefquota,
		    asize, est_inflight, &used_on_disk, &ref_rsrv);
		if (error) {
			mutex_exit(&dd->dd_lock);
			return (error);
		}
	}

	/*
	 * If this transaction will result in a net free of space,
	 * we want to let it through.
	 */
	if (ignorequota || netfree || dsl_dir_phys(dd)->dd_quota == 0)
		quota = UINT64_MAX;
	else
		quota = dsl_dir_phys(dd)->dd_quota;

	/*
	 * Adjust the quota against the actual pool size at the root
	 * minus any outstanding deferred frees.
	 * To ensure that it's possible to remove files from a full
	 * pool without inducing transient overcommits, we throttle
	 * netfree transactions against a quota that is slightly larger,
	 * but still within the pool's allocation slop.  In cases where
	 * we're very close to full, this will allow a steady trickle of
	 * removes to get through.
	 */
	if (dd->dd_parent == NULL) {
		spa_t *spa = dd->dd_pool->dp_spa;
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, netfree);
		deferred = metaslab_class_get_deferred(spa_normal_class(spa));
		if (poolsize - deferred < quota) {
			quota = poolsize - deferred;
			retval = ENOSPC;
		}
	}

	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (used_on_disk + est_inflight >= quota) {
		if (est_inflight > 0 || used_on_disk < quota ||
		    (retval == ENOSPC && used_on_disk < quota + deferred))
			retval = ERESTART;
		dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
		    "quota=%lluK tr=%lluK err=%d\n",
		    used_on_disk>>10, est_inflight>>10,
		    quota>>10, asize>>10, retval);
		mutex_exit(&dd->dd_lock);
		return (SET_ERROR(retval));
	}

	/* We need to up our estimated delta before dropping dd_lock */
	dd->dd_tempreserved[txgidx] += asize;

	parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
	    asize - ref_rsrv);
	mutex_exit(&dd->dd_lock);

	tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
	tr->tr_ds = dd;
	tr->tr_size = asize;
	list_insert_tail(tr_list, tr);

	/* see if it's OK with our parent */
	if (dd->dd_parent && parent_rsrv) {
		boolean_t ismos = (dsl_dir_phys(dd)->dd_head_dataset_obj == 0);

		return (dsl_dir_tempreserve_impl(dd->dd_parent,
		    parent_rsrv, netfree, ismos, TRUE, tr_list, tx, FALSE));
	} else {
		return (0);
	}
}

/*
 * Reserve space in this dsl_dir, to be used in this tx's txg.
 * After the space has been dirtied (and dsl_dir_willuse_space()
 * has been called), the reservation should be canceled, using
 * dsl_dir_tempreserve_clear().
 */
int
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
    uint64_t fsize, uint64_t usize, void **tr_cookiep, dmu_tx_t *tx)
{
	int err;
	list_t *tr_list;

	if (asize == 0) {
		*tr_cookiep = NULL;
		return (0);
	}

	tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
	list_create(tr_list, sizeof (struct tempreserve),
	    offsetof(struct tempreserve, tr_node));
	ASSERT3S(asize, >, 0);
	ASSERT3S(fsize, >=, 0);

	err = arc_tempreserve_space(lsize, tx->tx_txg);
	if (err == 0) {
		struct tempreserve *tr;

		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
		tr->tr_size = lsize;
		list_insert_tail(tr_list, tr);
	} else {
		if (err == EAGAIN) {
			/*
			 * If arc_memory_throttle() detected that pageout
			 * is running and we are low on memory, we delay new
			 * non-pageout transactions to give pageout an
			 * advantage.
			 *
			 * It is unfortunate to be delaying while the caller's
			 * locks are held.
			 */
			txg_delay(dd->dd_pool, tx->tx_txg,
			    MSEC2NSEC(10), MSEC2NSEC(10));
			err = SET_ERROR(ERESTART);
		}
	}

	if (err == 0) {
		err = dsl_dir_tempreserve_impl(dd, asize, fsize >= asize,
		    FALSE, asize > usize, tr_list, tx, TRUE);
	}

	if (err != 0)
		dsl_dir_tempreserve_clear(tr_list, tx);
	else
		*tr_cookiep = tr_list;

	return (err);
}

/*
 * Clear a temporary reservation that we previously made with
 * dsl_dir_tempreserve_space().
 */
void
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
{
	int txgidx = tx->tx_txg & TXG_MASK;
	list_t *tr_list = tr_cookie;
	struct tempreserve *tr;

	ASSERT3U(tx->tx_txg, !=, 0);

	if (tr_cookie == NULL)
		return;

	while ((tr = list_head(tr_list)) != NULL) {
		if (tr->tr_ds) {
			mutex_enter(&tr->tr_ds->dd_lock);
			ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
			    tr->tr_size);
			tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
			mutex_exit(&tr->tr_ds->dd_lock);
		} else {
			arc_tempreserve_clear(tr->tr_size);
		}
		list_remove(tr_list, tr);
		kmem_free(tr, sizeof (struct tempreserve));
	}

	kmem_free(tr_list, sizeof (list_t));
}

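/*
 * Usage sketch (hedged; in practice this pairing is driven by the DMU
 * transaction assign/commit path rather than by callers in this file):
 *
 *	void *tr_cookie;
 *
 *	err = dsl_dir_tempreserve_space(dd, lsize, asize, fsize, usize,
 *	    &tr_cookie, tx);
 *	...dirty the data while the reservation is held...
 *	dsl_dir_tempreserve_clear(tr_cookie, tx);	-- always paired
 */
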
/*
 * This should be called from open context when we think we're going to write
 * or free space, for example when dirtying data.  Be conservative; it's okay
 * to write less space or free more, but we don't want to write more or free
 * less than the amount specified.
 */
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	int64_t parent_space;
	uint64_t est_used;

	mutex_enter(&dd->dd_lock);
	if (space > 0)
		dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;

	est_used = dsl_dir_space_towrite(dd) + dsl_dir_phys(dd)->dd_used_bytes;
	parent_space = parent_delta(dd, est_used, space);
	mutex_exit(&dd->dd_lock);

	/* Make sure that we clean up dd_space_to* */
	dsl_dir_dirty(dd, tx);

	/* XXX this is potentially expensive and unnecessary... */
	if (parent_space && dd->dd_parent)
		dsl_dir_willuse_space(dd->dd_parent, parent_space, tx);
}

/* call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
    int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
	int64_t accounted_delta;

	/*
	 * dsl_dataset_set_refreservation_sync_impl() calls this with
	 * dd_lock held, so that it can atomically update
	 * ds->ds_reserved and the dsl_dir accounting, so that
	 * dsl_dataset_check_quota() can see dataset and dir accounting
	 * consistently.
	 */
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(type < DD_USED_NUM);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	if (needlock)
		mutex_enter(&dd->dd_lock);
	accounted_delta =
	    parent_delta(dd, dsl_dir_phys(dd)->dd_used_bytes, used);
	ASSERT(used >= 0 || dsl_dir_phys(dd)->dd_used_bytes >= -used);
	ASSERT(compressed >= 0 ||
	    dsl_dir_phys(dd)->dd_compressed_bytes >= -compressed);
	ASSERT(uncompressed >= 0 ||
	    dsl_dir_phys(dd)->dd_uncompressed_bytes >= -uncompressed);
	dsl_dir_phys(dd)->dd_used_bytes += used;
	dsl_dir_phys(dd)->dd_uncompressed_bytes += uncompressed;
	dsl_dir_phys(dd)->dd_compressed_bytes += compressed;

	if (dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		ASSERT(used > 0 ||
		    dsl_dir_phys(dd)->dd_used_breakdown[type] >= -used);
		dsl_dir_phys(dd)->dd_used_breakdown[type] += used;
#ifdef DEBUG
		dd_used_t t;
		uint64_t u = 0;
		for (t = 0; t < DD_USED_NUM; t++)
			u += dsl_dir_phys(dd)->dd_used_breakdown[t];
		ASSERT3U(u, ==, dsl_dir_phys(dd)->dd_used_bytes);
#endif
	}
	if (needlock)
		mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    accounted_delta, compressed, uncompressed, tx);
		dsl_dir_transfer_space(dd->dd_parent,
		    used - accounted_delta,
		    DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
	}
}

void
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
    dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(oldtype < DD_USED_NUM);
	ASSERT(newtype < DD_USED_NUM);

	if (delta == 0 ||
	    !(dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN))
		return;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	mutex_enter(&dd->dd_lock);
	ASSERT(delta > 0 ?
	    dsl_dir_phys(dd)->dd_used_breakdown[oldtype] >= delta :
	    dsl_dir_phys(dd)->dd_used_breakdown[newtype] >= -delta);
	ASSERT(dsl_dir_phys(dd)->dd_used_bytes >= ABS(delta));
	dsl_dir_phys(dd)->dd_used_breakdown[oldtype] -= delta;
	dsl_dir_phys(dd)->dd_used_breakdown[newtype] += delta;
	mutex_exit(&dd->dd_lock);
}

typedef struct dsl_dir_set_qr_arg {
	const char *ddsqra_name;
	zprop_source_t ddsqra_source;
	uint64_t ddsqra_value;
} dsl_dir_set_qr_arg_t;

static int
dsl_dir_set_quota_check(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;
	uint64_t towrite, newval;

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
	if (error != 0)
		return (error);

	error = dsl_prop_predict(ds->ds_dir, "quota",
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	if (newval == 0) {
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	mutex_enter(&ds->ds_dir->dd_lock);
	/*
	 * If we are doing the preliminary check in open context, and
	 * there are pending changes, then don't fail it, since the
	 * pending changes could under-estimate the amount of space to be
	 * freed up.
	 */
	towrite = dsl_dir_space_towrite(ds->ds_dir);
	if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
	    (newval < dsl_dir_phys(ds->ds_dir)->dd_reserved ||
	    newval < dsl_dir_phys(ds->ds_dir)->dd_used_bytes + towrite)) {
		error = SET_ERROR(ENOSPC);
	}
	mutex_exit(&ds->ds_dir->dd_lock);
	dsl_dataset_rele(ds, FTAG);
	return (error);
}

static void
dsl_dir_set_quota_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	uint64_t newval;

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));

	if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
		dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_QUOTA),
		    ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
		    &ddsqra->ddsqra_value, tx);

		VERIFY0(dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_QUOTA), &newval));
	} else {
		newval = ddsqra->ddsqra_value;
		spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
		    zfs_prop_to_name(ZFS_PROP_QUOTA), (longlong_t)newval);
	}

	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	mutex_enter(&ds->ds_dir->dd_lock);
	dsl_dir_phys(ds->ds_dir)->dd_quota = newval;
	mutex_exit(&ds->ds_dir->dd_lock);
	dsl_dataset_rele(ds, FTAG);
}

int
dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota)
{
	dsl_dir_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = ddname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = quota;

	return (dsl_sync_task(ddname, dsl_dir_set_quota_check,
	    dsl_dir_set_quota_sync, &ddsqra, 0, ZFS_SPACE_CHECK_NONE));
}

static int
dsl_dir_set_reservation_check(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	dsl_dir_t *dd;
	uint64_t newval, used, avail;
	int error;

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
	if (error != 0)
		return (error);
	dd = ds->ds_dir;

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	error = dsl_prop_predict(ds->ds_dir,
	    zfs_prop_to_name(ZFS_PROP_RESERVATION),
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	mutex_enter(&dd->dd_lock);
	used = dsl_dir_phys(dd)->dd_used_bytes;
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent) {
		avail = dsl_dir_space_available(dd->dd_parent,
		    NULL, 0, FALSE);
	} else {
		avail = dsl_pool_adjustedsize(dd->dd_pool, B_FALSE) - used;
	}

	if (MAX(used, newval) > MAX(used, dsl_dir_phys(dd)->dd_reserved)) {
		uint64_t delta = MAX(used, newval) -
		    MAX(used, dsl_dir_phys(dd)->dd_reserved);

		if (delta > avail ||
		    (dsl_dir_phys(dd)->dd_quota > 0 &&
		    newval > dsl_dir_phys(dd)->dd_quota))
			error = SET_ERROR(ENOSPC);
	}

	dsl_dataset_rele(ds, FTAG);
	return (error);
}

void
dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd, uint64_t value, dmu_tx_t *tx)
{
	uint64_t used;
	int64_t delta;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	used = dsl_dir_phys(dd)->dd_used_bytes;
	delta = MAX(used, value) - MAX(used, dsl_dir_phys(dd)->dd_reserved);
	dsl_dir_phys(dd)->dd_reserved = value;

	if (dd->dd_parent != NULL) {
		/* Roll up this additional usage into our ancestors */
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
		    delta, 0, 0, tx);
	}
	mutex_exit(&dd->dd_lock);
}

static void
dsl_dir_set_reservation_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	uint64_t newval;

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));

	if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
		dsl_prop_set_sync_impl(ds,
		    zfs_prop_to_name(ZFS_PROP_RESERVATION),
		    ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
		    &ddsqra->ddsqra_value, tx);

		VERIFY0(dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_RESERVATION), &newval));
	} else {
		newval = ddsqra->ddsqra_value;
		spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
		    zfs_prop_to_name(ZFS_PROP_RESERVATION),
		    (longlong_t)newval);
	}

	dsl_dir_set_reservation_sync_impl(ds->ds_dir, newval, tx);
	dsl_dataset_rele(ds, FTAG);
}

int
dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
    uint64_t reservation)
{
	dsl_dir_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = ddname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = reservation;

	return (dsl_sync_task(ddname, dsl_dir_set_reservation_check,
	    dsl_dir_set_reservation_sync, &ddsqra, 0, ZFS_SPACE_CHECK_NONE));
}

static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
	for (; ds1; ds1 = ds1->dd_parent) {
		dsl_dir_t *dd;
		for (dd = ds2; dd; dd = dd->dd_parent) {
			if (ds1 == dd)
				return (dd);
		}
	}
	return (NULL);
}

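/*
 * Example (illustrative): for dirs pool/a/b and pool/a/c,
 * closest_common_ancestor() walks b's parent chain and returns pool/a,
 * the first dir that also appears in c's chain; two dirs in the same
 * pool always share at least the root dir.
 */
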
/*
 * If delta is applied to dd, how much of that delta would be applied to
 * ancestor?  Syncing context only.
 */
static int64_t
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
{
	if (dd == ancestor)
		return (delta);

	mutex_enter(&dd->dd_lock);
	delta = parent_delta(dd, dsl_dir_phys(dd)->dd_used_bytes, delta);
	mutex_exit(&dd->dd_lock);
	return (would_change(dd->dd_parent, delta, ancestor));
}

typedef struct dsl_dir_rename_arg {
	const char *ddra_oldname;
	const char *ddra_newname;
	cred_t *ddra_cred;
} dsl_dir_rename_arg_t;

/* ARGSUSED */
static int
dsl_valid_rename(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	int *deltap = arg;
	char namebuf[MAXNAMELEN];

	dsl_dataset_name(ds, namebuf);

	if (strlen(namebuf) + *deltap >= MAXNAMELEN)
		return (SET_ERROR(ENAMETOOLONG));
	return (0);
}

static int
dsl_dir_rename_check(void *arg, dmu_tx_t *tx)
{
	dsl_dir_rename_arg_t *ddra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *dd, *newparent;
	const char *mynewname;
	int error;
	int delta = strlen(ddra->ddra_newname) - strlen(ddra->ddra_oldname);

	/* target dir should exist */
	error = dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL);
	if (error != 0)
		return (error);

	/* new parent should exist */
	error = dsl_dir_hold(dp, ddra->ddra_newname, FTAG,
	    &newparent, &mynewname);
	if (error != 0) {
		dsl_dir_rele(dd, FTAG);
		return (error);
	}

	/* can't rename to different pool */
	if (dd->dd_pool != newparent->dd_pool) {
		dsl_dir_rele(newparent, FTAG);
		dsl_dir_rele(dd, FTAG);
		return (SET_ERROR(ENXIO));
	}

	/* new name should not already exist */
	if (mynewname == NULL) {
		dsl_dir_rele(newparent, FTAG);
		dsl_dir_rele(dd, FTAG);
		return (SET_ERROR(EEXIST));
	}

	/* if the name length is growing, validate child name lengths */
	if (delta > 0) {
		error = dmu_objset_find_dp(dp, dd->dd_object, dsl_valid_rename,
		    &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
		if (error != 0) {
			dsl_dir_rele(newparent, FTAG);
			dsl_dir_rele(dd, FTAG);
			return (error);
		}
	}

	if (dmu_tx_is_syncing(tx)) {
		if (spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_FS_SS_LIMIT)) {
			/*
			 * Although this is the check function and we don't
			 * normally make on-disk changes in check functions,
			 * we need to do that here.
			 *
			 * Ensure this portion of the tree's counts have been
			 * initialized in case the new parent has limits set.
			 */
			dsl_dir_init_fs_ss_count(dd, tx);
		}
	}

	if (newparent != dd->dd_parent) {
		/* is there enough space? */
		uint64_t myspace =
		    MAX(dsl_dir_phys(dd)->dd_used_bytes,
		    dsl_dir_phys(dd)->dd_reserved);
		objset_t *os = dd->dd_pool->dp_meta_objset;
		uint64_t fs_cnt = 0;
		uint64_t ss_cnt = 0;

		if (dsl_dir_is_zapified(dd)) {
			int err;

			err = zap_lookup(os, dd->dd_object,
			    DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,
			    &fs_cnt);
			if (err != ENOENT && err != 0) {
				dsl_dir_rele(newparent, FTAG);
				dsl_dir_rele(dd, FTAG);
				return (err);
			}

			/*
			 * have to add 1 for the filesystem itself that we're
			 * moving
			 */
			fs_cnt++;

			err = zap_lookup(os, dd->dd_object,
			    DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,
			    &ss_cnt);
			if (err != ENOENT && err != 0) {
				dsl_dir_rele(newparent, FTAG);
				dsl_dir_rele(dd, FTAG);
				return (err);
			}
		}

		/* no rename into our descendant */
		if (closest_common_ancestor(dd, newparent) == dd) {
			dsl_dir_rele(newparent, FTAG);
			dsl_dir_rele(dd, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = dsl_dir_transfer_possible(dd->dd_parent,
		    newparent, fs_cnt, ss_cnt, myspace, ddra->ddra_cred);
		if (error != 0) {
			dsl_dir_rele(newparent, FTAG);
			dsl_dir_rele(dd, FTAG);
			return (error);
		}
	}

	dsl_dir_rele(newparent, FTAG);
	dsl_dir_rele(dd, FTAG);
	return (0);
}

static void
dsl_dir_rename_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dir_rename_arg_t *ddra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *dd, *newparent;
	const char *mynewname;
	int error;
	objset_t *mos = dp->dp_meta_objset;

	VERIFY0(dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL));
	VERIFY0(dsl_dir_hold(dp, ddra->ddra_newname, FTAG, &newparent,
	    &mynewname));

	/* Log this before we change the name. */
	spa_history_log_internal_dd(dd, "rename", tx,
	    "-> %s", ddra->ddra_newname);

	if (newparent != dd->dd_parent) {
		objset_t *os = dd->dd_pool->dp_meta_objset;
		uint64_t fs_cnt = 0;
		uint64_t ss_cnt = 0;

		/*
		 * We already made sure the dd counts were initialized in the
		 * check function.
		 */
		if (spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_FS_SS_LIMIT)) {
			VERIFY0(zap_lookup(os, dd->dd_object,
			    DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,
			    &fs_cnt));
			/* add 1 for the filesystem itself that we're moving */
			fs_cnt++;

			VERIFY0(zap_lookup(os, dd->dd_object,
			    DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,
			    &ss_cnt));
		}

		dsl_fs_ss_count_adjust(dd->dd_parent, -fs_cnt,
		    DD_FIELD_FILESYSTEM_COUNT, tx);
		dsl_fs_ss_count_adjust(newparent, fs_cnt,
		    DD_FIELD_FILESYSTEM_COUNT, tx);

		dsl_fs_ss_count_adjust(dd->dd_parent, -ss_cnt,
		    DD_FIELD_SNAPSHOT_COUNT, tx);
		dsl_fs_ss_count_adjust(newparent, ss_cnt,
		    DD_FIELD_SNAPSHOT_COUNT, tx);

		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    -dsl_dir_phys(dd)->dd_used_bytes,
		    -dsl_dir_phys(dd)->dd_compressed_bytes,
		    -dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(newparent, DD_USED_CHILD,
		    dsl_dir_phys(dd)->dd_used_bytes,
		    dsl_dir_phys(dd)->dd_compressed_bytes,
		    dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);

		if (dsl_dir_phys(dd)->dd_reserved >
		    dsl_dir_phys(dd)->dd_used_bytes) {
			uint64_t unused_rsrv = dsl_dir_phys(dd)->dd_reserved -
			    dsl_dir_phys(dd)->dd_used_bytes;

			dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
			    -unused_rsrv, 0, 0, tx);
			dsl_dir_diduse_space(newparent, DD_USED_CHILD_RSRV,
			    unused_rsrv, 0, 0, tx);
		}
	}

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	/* remove from old parent zapobj */
	error = zap_remove(mos,
	    dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
	    dd->dd_myname, tx);
	ASSERT0(error);

	(void) strcpy(dd->dd_myname, mynewname);
	dsl_dir_rele(dd->dd_parent, dd);
	dsl_dir_phys(dd)->dd_parent_obj = newparent->dd_object;
	VERIFY0(dsl_dir_hold_obj(dp,
	    newparent->dd_object, NULL, dd, &dd->dd_parent));

	/* add to new parent zapobj */
	VERIFY0(zap_add(mos, dsl_dir_phys(newparent)->dd_child_dir_zapobj,
	    dd->dd_myname, 8, 1, &dd->dd_object, tx));

	dsl_prop_notify_all(dd);

	dsl_dir_rele(newparent, FTAG);
	dsl_dir_rele(dd, FTAG);
}

int
dsl_dir_rename(const char *oldname, const char *newname)
{
	dsl_dir_rename_arg_t ddra;

	ddra.ddra_oldname = oldname;
	ddra.ddra_newname = newname;
	ddra.ddra_cred = CRED();

	return (dsl_sync_task(oldname,
	    dsl_dir_rename_check, dsl_dir_rename_sync, &ddra,
	    3, ZFS_SPACE_CHECK_RESERVED));
}

int
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd,
    uint64_t fs_cnt, uint64_t ss_cnt, uint64_t space, cred_t *cr)
{
	dsl_dir_t *ancestor;
	int64_t adelta;
	uint64_t avail;
	int err;

	ancestor = closest_common_ancestor(sdd, tdd);
	adelta = would_change(sdd, -space, ancestor);
	avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
	if (avail < space)
		return (SET_ERROR(ENOSPC));

	err = dsl_fs_ss_limit_check(tdd, fs_cnt, ZFS_PROP_FILESYSTEM_LIMIT,
	    ancestor, cr);
	if (err != 0)
		return (err);
	err = dsl_fs_ss_limit_check(tdd, ss_cnt, ZFS_PROP_SNAPSHOT_LIMIT,
	    ancestor, cr);
	if (err != 0)
		return (err);

	return (0);
}

timestruc_t
dsl_dir_snap_cmtime(dsl_dir_t *dd)
{
	timestruc_t t;

	mutex_enter(&dd->dd_lock);
	t = dd->dd_snap_cmtime;
	mutex_exit(&dd->dd_lock);

	return (t);
}

void
dsl_dir_snap_cmtime_update(dsl_dir_t *dd)
{
	timestruc_t t;

	gethrestime(&t);
	mutex_enter(&dd->dd_lock);
	dd->dd_snap_cmtime = t;
	mutex_exit(&dd->dd_lock);
}

void
dsl_dir_zapify(dsl_dir_t *dd, dmu_tx_t *tx)
{
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	dmu_object_zapify(mos, dd->dd_object, DMU_OT_DSL_DIR, tx);
}

boolean_t
dsl_dir_is_zapified(dsl_dir_t *dd)
{
	dmu_object_info_t doi;

	dmu_object_info_from_db(dd->dd_dbuf, &doi);
	return (doi.doi_type == DMU_OTN_ZAP_METADATA);
}