/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/metaslab.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/sunddi.h>
#include "zfs_namecheck.h"
static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
static void dsl_dir_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx);
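
/*
 * Evict callback, invoked by the DMU when the last hold on this
 * dsl_dir's bonus buffer is released.  By this point the directory
 * must be clean: not on any dirty list and with no outstanding
 * temporary reservations or pending writes in any open txg, which
 * the ASSERTs below verify.
 */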
/* ARGSUSED */
static void
dsl_dir_evict(dmu_buf_t *db, void *arg)
{
	dsl_dir_t *dd = arg;
	dsl_pool_t *dp = dd->dd_pool;
	int t;

	for (t = 0; t < TXG_SIZE; t++) {
		ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
		ASSERT(dd->dd_tempreserved[t] == 0);
		ASSERT(dd->dd_space_towrite[t] == 0);
	}

	if (dd->dd_parent)
		dsl_dir_close(dd->dd_parent, dd);

	spa_close(dd->dd_pool->dp_spa, dd);

	/*
	 * The props callback list should have been cleaned up by
	 * objset_evict().
	 */
	list_destroy(&dd->dd_prop_cbs);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
}
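
/*
 * Open a dsl_dir_t by object number, taking a hold on behalf of "tag".
 * If "tail" is non-NULL it supplies the directory's name within its
 * parent; otherwise the name is looked up in the parent's child ZAP.
 * On success a referenced dsl_dir_t is returned in *ddp and must later
 * be released with dsl_dir_close().
 */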
int
dsl_dir_open_obj(dsl_pool_t *dp, uint64_t ddobj,
    const char *tail, void *tag, dsl_dir_t **ddp)
{
	dmu_buf_t *dbuf;
	dsl_dir_t *dd;
	int err;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));

	err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
	if (err)
		return (err);
	dd = dmu_buf_get_user(dbuf);
#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbuf, &doi);
		ASSERT3U(doi.doi_type, ==, DMU_OT_DSL_DIR);
		ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));
	}
#endif
	if (dd == NULL) {
		dsl_dir_t *winner;

		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
		dd->dd_object = ddobj;
		dd->dd_dbuf = dbuf;
		dd->dd_pool = dp;
		dd->dd_phys = dbuf->db_data;
		mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);

		list_create(&dd->dd_prop_cbs, sizeof (dsl_prop_cb_record_t),
		    offsetof(dsl_prop_cb_record_t, cbr_node));

		dsl_dir_snap_cmtime_update(dd);

		if (dd->dd_phys->dd_parent_obj) {
			err = dsl_dir_open_obj(dp, dd->dd_phys->dd_parent_obj,
			    NULL, dd, &dd->dd_parent);
			if (err)
				goto errout;
			if (tail) {
#ifdef ZFS_DEBUG
				uint64_t foundobj;

				err = zap_lookup(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    tail, sizeof (foundobj), 1, &foundobj);
				ASSERT(err || foundobj == ddobj);
#endif
				(void) strcpy(dd->dd_myname, tail);
			} else {
				err = zap_value_search(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    ddobj, 0, dd->dd_myname);
			}
			if (err)
				goto errout;
		} else {
			(void) strcpy(dd->dd_myname, spa_name(dp->dp_spa));
		}

		if (dsl_dir_is_clone(dd)) {
			dmu_buf_t *origin_bonus;
			dsl_dataset_phys_t *origin_phys;

			/*
			 * We can't open the origin dataset, because
			 * that would require opening this dsl_dir.
			 * Just look at its phys directly instead.
			 */
			err = dmu_bonus_hold(dp->dp_meta_objset,
			    dd->dd_phys->dd_origin_obj, FTAG, &origin_bonus);
			if (err)
				goto errout;
			origin_phys = origin_bonus->db_data;
			dd->dd_origin_txg =
			    origin_phys->ds_creation_txg;
			dmu_buf_rele(origin_bonus, FTAG);
		}

		winner = dmu_buf_set_user_ie(dbuf, dd, &dd->dd_phys,
		    dsl_dir_evict);
		if (winner) {
			if (dd->dd_parent)
				dsl_dir_close(dd->dd_parent, dd);
			mutex_destroy(&dd->dd_lock);
			kmem_free(dd, sizeof (dsl_dir_t));
			dd = winner;
		} else {
			spa_open_ref(dp->dp_spa, dd);
		}
	}

	/*
	 * The dsl_dir_t has both open-to-close and instantiate-to-evict
	 * holds on the spa.  We need the open-to-close holds because
	 * otherwise the spa_refcnt wouldn't change when we open a
	 * dir which the spa also has open, so we could incorrectly
	 * think it was OK to unload/export/destroy the pool.  We need
	 * the instantiate-to-evict hold because the dsl_dir_t has a
	 * pointer to the dd_pool, which has a pointer to the spa_t.
	 */
	spa_open_ref(dp->dp_spa, tag);
	ASSERT3P(dd->dd_pool, ==, dp);
	ASSERT3U(dd->dd_object, ==, ddobj);
	ASSERT3P(dd->dd_dbuf, ==, dbuf);
	*ddp = dd;
	return (0);

errout:
	if (dd->dd_parent)
		dsl_dir_close(dd->dd_parent, dd);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
	dmu_buf_rele(dbuf, tag);
	return (err);
}
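
/*
 * Release a hold obtained with dsl_dir_open_obj() or dsl_dir_open(),
 * dropping both the spa reference and the dbuf hold taken for "tag".
 */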
void
dsl_dir_close(dsl_dir_t *dd, void *tag)
{
	dprintf_dd(dd, "%s\n", "");
	spa_close(dd->dd_pool->dp_spa, tag);
	dmu_buf_rele(dd->dd_dbuf, tag);
}
/* buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
	if (dd->dd_parent) {
		dsl_dir_name(dd->dd_parent, buf);
		(void) strcat(buf, "/");
	} else {
		buf[0] = '\0';
	}
	if (!MUTEX_HELD(&dd->dd_lock)) {
		/*
		 * recursive mutex so that we can use
		 * dprintf_dd() with dd_lock held
		 */
		mutex_enter(&dd->dd_lock);
		(void) strcat(buf, dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		(void) strcat(buf, dd->dd_myname);
	}
}
/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
int
dsl_dir_namelen(dsl_dir_t *dd)
{
	int result = 0;

	if (dd->dd_parent) {
		/* parent's name + 1 for the "/" */
		result = dsl_dir_namelen(dd->dd_parent) + 1;
	}

	if (!MUTEX_HELD(&dd->dd_lock)) {
		/* see dsl_dir_name */
		mutex_enter(&dd->dd_lock);
		result += strlen(dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		result += strlen(dd->dd_myname);
	}

	return (result);
}
static int
getcomponent(const char *path, char *component, const char **nextp)
{
	char *p;

	if ((path == NULL) || (path[0] == '\0'))
		return (ENOENT);
	/* This would be a good place to reserve some namespace... */
	p = strpbrk(path, "/@");
	if (p && (p[1] == '/' || p[1] == '@')) {
		/* two separators in a row */
		return (EINVAL);
	}
	if (p == NULL || p == path) {
		/*
		 * if the first thing is an @ or /, it had better be an
		 * @ and it had better not have any more ats or slashes,
		 * and it had better have something after the @.
		 */
		if (p != NULL &&
		    (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
			return (EINVAL);
		if (strlen(path) >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strcpy(component, path);
		p = NULL;
	} else if (p[0] == '/') {
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
		p++;
	} else if (p[0] == '@') {
		/*
		 * if the next separator is an @, there better not be
		 * any more slashes.
		 */
		if (strchr(path, '/'))
			return (EINVAL);
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
	} else {
		ASSERT(!"invalid p");
	}
	*nextp = p;
	return (0);
}
/*
 * same as dsl_open_dir, ignore the first component of name and use the
 * spa instead
 */
int
dsl_dir_open_spa(spa_t *spa, const char *name, void *tag,
    dsl_dir_t **ddp, const char **tailp)
{
	char buf[MAXNAMELEN];
	const char *next, *nextnext = NULL;
	int err;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	uint64_t ddobj;
	int openedspa = FALSE;

	dprintf("%s\n", name);

	err = getcomponent(name, buf, &next);
	if (err)
		return (err);
	if (spa == NULL) {
		err = spa_open(buf, &spa, FTAG);
		if (err) {
			dprintf("spa_open(%s) failed\n", buf);
			return (err);
		}
		openedspa = TRUE;

		/* XXX this assertion belongs in spa_open */
		ASSERT(!dsl_pool_sync_context(spa_get_dsl(spa)));
	}

	dp = spa_get_dsl(spa);

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
	if (err) {
		rw_exit(&dp->dp_config_rwlock);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	while (next != NULL) {
		dsl_dir_t *child_ds;
		err = getcomponent(next, buf, &nextnext);
		if (err)
			break;
		ASSERT(next[0] != '\0');
		if (next[0] == '@')
			break;
		dprintf("looking up %s in obj%lld\n",
		    buf, dd->dd_phys->dd_child_dir_zapobj);

		err = zap_lookup(dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj,
		    buf, sizeof (ddobj), 1, &ddobj);
		if (err) {
			if (err == ENOENT)
				err = 0;
			break;
		}

		err = dsl_dir_open_obj(dp, ddobj, buf, tag, &child_ds);
		if (err)
			break;
		dsl_dir_close(dd, tag);
		dd = child_ds;
		next = nextnext;
	}
	rw_exit(&dp->dp_config_rwlock);

	if (err) {
		dsl_dir_close(dd, tag);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	/*
	 * It's an error if there's more than one component left, or
	 * tailp==NULL and there's any component left.
	 */
	if (next != NULL &&
	    (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
		/* bad path name */
		dsl_dir_close(dd, tag);
		dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
		err = ENOENT;
	}
	if (tailp)
		*tailp = next;
	if (openedspa)
		spa_close(spa, FTAG);
	*ddp = dd;
	return (err);
}
/*
 * Return the dsl_dir_t, and possibly the last component which couldn't
 * be found in *tail.  Return NULL if the path is bogus, or if
 * tail==NULL and we couldn't parse the whole name.  (*tail)[0] == '@'
 * means that the last component is a snapshot.
 */
int
dsl_dir_open(const char *name, void *tag, dsl_dir_t **ddp, const char **tailp)
{
	return (dsl_dir_open_spa(NULL, name, tag, ddp, tailp));
}
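
/*
 * Allocate a new DSL directory object in the MOS and link it into its
 * parent's child ZAP (or into the pool directory object if it is the
 * root), returning the new object number.  Called from syncing context.
 */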
uint64_t
dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,
    dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t ddobj;
	dsl_dir_phys_t *ddphys;
	dmu_buf_t *dbuf;

	ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
	if (pds) {
		VERIFY(0 == zap_add(mos, pds->dd_phys->dd_child_dir_zapobj,
		    name, sizeof (uint64_t), 1, &ddobj, tx));
	} else {
		/* it's the root dir */
		VERIFY(0 == zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));
	}
	VERIFY(0 == dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	ddphys = dbuf->db_data;

	ddphys->dd_creation_time = gethrestime_sec();
	if (pds)
		ddphys->dd_parent_obj = pds->dd_object;
	ddphys->dd_props_zapobj = zap_create(mos,
	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
	ddphys->dd_child_dir_zapobj = zap_create(mos,
	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
	if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
		ddphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;
	dmu_buf_rele(dbuf, FTAG);

	return (ddobj);
}
/* ARGSUSED */
int
dsl_dir_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t count;

	/*
	 * There should be exactly two holds, both from
	 * dsl_dataset_destroy: one on the dd directory, and one on its
	 * head ds.  Otherwise, someone is trying to lookup something
	 * inside this dir while we want to destroy it.  The
	 * config_rwlock ensures that nobody else opens it after we
	 * check.
	 */
	if (dmu_buf_refcount(dd->dd_dbuf) > 2)
		return (EBUSY);

	err = zap_count(mos, dd->dd_phys->dd_child_dir_zapobj, &count);
	if (err)
		return (err);
	if (count != 0)
		return (EEXIST);

	return (0);
}
void
dsl_dir_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	dsl_prop_setarg_t psa;
	uint64_t value = 0;
	uint64_t obj;
	dd_used_t t;

	ASSERT(RW_WRITE_HELD(&dd->dd_pool->dp_config_rwlock));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	/* Remove our reservation. */
	dsl_prop_setarg_init_uint64(&psa, "reservation",
	    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
	    &value);
	psa.psa_effective_value = 0;	/* predict default value */

	dsl_dir_set_reservation_sync(ds, &psa, tx);

	ASSERT3U(dd->dd_phys->dd_used_bytes, ==, 0);
	ASSERT3U(dd->dd_phys->dd_reserved, ==, 0);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT3U(dd->dd_phys->dd_used_breakdown[t], ==, 0);

	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
	VERIFY(0 == dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx));
	VERIFY(0 == zap_remove(mos,
	    dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));

	obj = dd->dd_object;
	dsl_dir_close(dd, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));
}
boolean_t
dsl_dir_is_clone(dsl_dir_t *dd)
{
	return (dd->dd_phys->dd_origin_obj &&
	    (dd->dd_pool->dp_origin_snap == NULL ||
	    dd->dd_phys->dd_origin_obj !=
	    dd->dd_pool->dp_origin_snap->ds_object));
}
void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
	mutex_enter(&dd->dd_lock);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
	    dd->dd_phys->dd_used_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA, dd->dd_phys->dd_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
	    dd->dd_phys->dd_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
	    dd->dd_phys->dd_compressed_bytes == 0 ? 100 :
	    (dd->dd_phys->dd_uncompressed_bytes * 100 /
	    dd->dd_phys->dd_compressed_bytes));
	if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
		    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
		    dd->dd_phys->dd_used_breakdown[DD_USED_HEAD]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
		    dd->dd_phys->dd_used_breakdown[DD_USED_REFRSRV]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
		    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD] +
		    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD_RSRV]);
	}
	mutex_exit(&dd->dd_lock);

	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	if (dsl_dir_is_clone(dd)) {
		dsl_dataset_t *ds;
		char buf[MAXNAMELEN];

		VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
		    dd->dd_phys->dd_origin_obj, FTAG, &ds));
		dsl_dataset_name(ds, buf);
		dsl_dataset_rele(ds, FTAG);
		dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
	}
	rw_exit(&dd->dd_pool->dp_config_rwlock);
}
void
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;

	ASSERT(dd->dd_phys);

	if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(dd->dd_dbuf, dd);
	}
}
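
/*
 * Compute how much a change of "delta" bytes in our usage changes the
 * space charged to our parent.  While usage is below dd_reserved the
 * parent is already charged for the full reservation, so changes under
 * the reservation are absorbed.  For example (values in bytes,
 * illustrative only): with dd_reserved = 100, used = 90, delta = 5,
 * old_accounted = MAX(90, 100) = 100 and new_accounted =
 * MAX(95, 100) = 100, so the parent sees no change; with used = 98 and
 * delta = 5, new_accounted = MAX(103, 100) = 103 and the parent is
 * charged 3.
 */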
static int64_t
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
{
	uint64_t old_accounted = MAX(used, dd->dd_phys->dd_reserved);
	uint64_t new_accounted = MAX(used + delta, dd->dd_phys->dd_reserved);
	return (new_accounted - old_accounted);
}
void
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	ASSERT3U(dd->dd_tempreserved[tx->tx_txg&TXG_MASK], ==, 0);
	dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
	    dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024);
	dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0;
	mutex_exit(&dd->dd_lock);

	/* release the hold from dsl_dir_dirty */
	dmu_buf_rele(dd->dd_dbuf, dd);
}
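
/*
 * Sum the pending (not yet synced) space charged to this dir across
 * all open txgs.  Caller must hold dd_lock.
 */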
static uint64_t
dsl_dir_space_towrite(dsl_dir_t *dd)
{
	uint64_t space = 0;
	int i;

	ASSERT(MUTEX_HELD(&dd->dd_lock));

	for (i = 0; i < TXG_SIZE; i++) {
		space += dd->dd_space_towrite[i&TXG_MASK];
		ASSERT3U(dd->dd_space_towrite[i&TXG_MASK], >=, 0);
	}
	return (space);
}
/*
 * How much space would dd have available if ancestor had delta applied
 * to it?  If ondiskonly is set, we're only interested in what's
 * on-disk, not estimated pending changes.
 */
uint64_t
dsl_dir_space_available(dsl_dir_t *dd,
    dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
{
	uint64_t parentspace, myspace, quota, used;

	/*
	 * If there are no restrictions otherwise, assume we have
	 * unlimited space available.
	 */
	quota = UINT64_MAX;
	parentspace = UINT64_MAX;

	if (dd->dd_parent != NULL) {
		parentspace = dsl_dir_space_available(dd->dd_parent,
		    ancestor, delta, ondiskonly);
	}

	mutex_enter(&dd->dd_lock);
	if (dd->dd_phys->dd_quota != 0)
		quota = dd->dd_phys->dd_quota;
	used = dd->dd_phys->dd_used_bytes;
	if (!ondiskonly)
		used += dsl_dir_space_towrite(dd);

	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, FALSE);
		quota = MIN(quota, poolsize);
	}

	if (dd->dd_phys->dd_reserved > used && parentspace != UINT64_MAX) {
		/*
		 * We have some space reserved, in addition to what our
		 * parent gave us.
		 */
		parentspace += dd->dd_phys->dd_reserved - used;
	}

	if (dd == ancestor) {
		ASSERT(delta <= 0);
		ASSERT(used >= -delta);
		used += delta;
		if (parentspace != UINT64_MAX)
			parentspace -= delta;
	}

	if (used > quota) {
		/* over quota */
		myspace = 0;
	} else {
		/*
		 * the lesser of the space provided by our parent and
		 * the space left in our quota
		 */
		myspace = MIN(parentspace, quota - used);
	}

	mutex_exit(&dd->dd_lock);

	return (myspace);
}
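
/*
 * Reserve "asize" bytes against this dir's quota for the current txg
 * and then recurse upward, reserving the (possibly smaller)
 * parent_delta() amount in each ancestor.  Each successful level
 * appends a tempreserve record to tr_list so the caller can later
 * unwind the whole chain with dsl_dir_tempreserve_clear().
 */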
struct tempreserve {
	list_node_t tr_node;
	dsl_pool_t *tr_dp;
	dsl_dir_t *tr_ds;
	uint64_t tr_size;
};

static int
dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
    boolean_t ignorequota, boolean_t checkrefquota, list_t *tr_list,
    dmu_tx_t *tx, boolean_t first)
{
	uint64_t txg = tx->tx_txg;
	uint64_t est_inflight, used_on_disk, quota, parent_rsrv;
	uint64_t deferred = 0;
	struct tempreserve *tr;
	int retval = EDQUOT;
	int txgidx = txg & TXG_MASK;
	int i;
	uint64_t ref_rsrv = 0;

	ASSERT3U(txg, !=, 0);
	ASSERT3S(asize, >, 0);

	mutex_enter(&dd->dd_lock);

	/*
	 * Check against the dsl_dir's quota.  We don't add in the delta
	 * when checking for over-quota because they get one free hit.
	 */
	est_inflight = dsl_dir_space_towrite(dd);
	for (i = 0; i < TXG_SIZE; i++)
		est_inflight += dd->dd_tempreserved[i];
	used_on_disk = dd->dd_phys->dd_used_bytes;

	/*
	 * On the first iteration, fetch the dataset's used-on-disk and
	 * refreservation values. Also, if checkrefquota is set, test if
	 * allocating this space would exceed the dataset's refquota.
	 */
	if (first && tx->tx_objset) {
		int error;
		dsl_dataset_t *ds = tx->tx_objset->os_dsl_dataset;

		error = dsl_dataset_check_quota(ds, checkrefquota,
		    asize, est_inflight, &used_on_disk, &ref_rsrv);
		if (error) {
			mutex_exit(&dd->dd_lock);
			return (error);
		}
	}

	/*
	 * If this transaction will result in a net free of space,
	 * we want to let it through.
	 */
	if (ignorequota || netfree || dd->dd_phys->dd_quota == 0)
		quota = UINT64_MAX;
	else
		quota = dd->dd_phys->dd_quota;

	/*
	 * Adjust the quota against the actual pool size at the root
	 * minus any outstanding deferred frees.
	 * To ensure that it's possible to remove files from a full
	 * pool without inducing transient overcommits, we throttle
	 * netfree transactions against a quota that is slightly larger,
	 * but still within the pool's allocation slop.  In cases where
	 * we're very close to full, this will allow a steady trickle of
	 * removes to get through.
	 */
	if (dd->dd_parent == NULL) {
		spa_t *spa = dd->dd_pool->dp_spa;
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, netfree);
		deferred = metaslab_class_get_deferred(spa_normal_class(spa));
		if (poolsize - deferred < quota) {
			quota = poolsize - deferred;
			retval = ENOSPC;
		}
	}

	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (used_on_disk + est_inflight >= quota) {
		if (est_inflight > 0 || used_on_disk < quota ||
		    (retval == ENOSPC && used_on_disk < quota + deferred))
			retval = ERESTART;
		dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
		    "quota=%lluK tr=%lluK err=%d\n",
		    used_on_disk>>10, est_inflight>>10,
		    quota>>10, asize>>10, retval);
		mutex_exit(&dd->dd_lock);
		return (retval);
	}

	/* We need to up our estimated delta before dropping dd_lock */
	dd->dd_tempreserved[txgidx] += asize;

	parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
	    asize - ref_rsrv);
	mutex_exit(&dd->dd_lock);

	tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
	tr->tr_ds = dd;
	tr->tr_size = asize;
	list_insert_tail(tr_list, tr);

	/* see if it's OK with our parent */
	if (dd->dd_parent && parent_rsrv) {
		boolean_t ismos = (dd->dd_phys->dd_head_dataset_obj == 0);

		return (dsl_dir_tempreserve_impl(dd->dd_parent,
		    parent_rsrv, netfree, ismos, TRUE, tr_list, tx, FALSE));
	} else {
		return (0);
	}
}
/*
 * Reserve space in this dsl_dir, to be used in this tx's txg.
 * After the space has been dirtied (and dsl_dir_willuse_space()
 * has been called), the reservation should be canceled, using
 * dsl_dir_tempreserve_clear().
 */
int
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
    uint64_t fsize, uint64_t usize, void **tr_cookiep, dmu_tx_t *tx)
{
	int err;
	list_t *tr_list;

	if (asize == 0) {
		*tr_cookiep = NULL;
		return (0);
	}

	tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
	list_create(tr_list, sizeof (struct tempreserve),
	    offsetof(struct tempreserve, tr_node));
	ASSERT3S(asize, >, 0);
	ASSERT3S(fsize, >=, 0);

	err = arc_tempreserve_space(lsize, tx->tx_txg);
	if (err == 0) {
		struct tempreserve *tr;

		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
		tr->tr_size = lsize;
		list_insert_tail(tr_list, tr);

		err = dsl_pool_tempreserve_space(dd->dd_pool, asize, tx);
	} else {
		if (err == EAGAIN) {
			txg_delay(dd->dd_pool, tx->tx_txg, 1);
			err = ERESTART;
		}
		dsl_pool_memory_pressure(dd->dd_pool);
	}

	if (err == 0) {
		struct tempreserve *tr;

		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
		tr->tr_dp = dd->dd_pool;
		tr->tr_size = asize;
		list_insert_tail(tr_list, tr);

		err = dsl_dir_tempreserve_impl(dd, asize, fsize >= asize,
		    FALSE, asize > usize, tr_list, tx, TRUE);
	}

	if (err)
		dsl_dir_tempreserve_clear(tr_list, tx);
	else
		*tr_cookiep = tr_list;

	return (err);
}
/*
 * Clear a temporary reservation that we previously made with
 * dsl_dir_tempreserve_space().
 */
void
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
{
	int txgidx = tx->tx_txg & TXG_MASK;
	list_t *tr_list = tr_cookie;
	struct tempreserve *tr;

	ASSERT3U(tx->tx_txg, !=, 0);

	if (tr_cookie == NULL)
		return;

	while (tr = list_head(tr_list)) {
		if (tr->tr_dp) {
			dsl_pool_tempreserve_clear(tr->tr_dp, tr->tr_size, tx);
		} else if (tr->tr_ds) {
			mutex_enter(&tr->tr_ds->dd_lock);
			ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
			    tr->tr_size);
			tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
			mutex_exit(&tr->tr_ds->dd_lock);
		} else {
			arc_tempreserve_clear(tr->tr_size);
		}
		list_remove(tr_list, tr);
		kmem_free(tr, sizeof (struct tempreserve));
	}

	kmem_free(tr_list, sizeof (list_t));
}
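
/*
 * Record an estimate of pending writes for this txg in
 * dd_space_towrite and propagate the parent_delta() portion up the
 * ancestor chain.
 */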
static void
dsl_dir_willuse_space_impl(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	int64_t parent_space;
	uint64_t est_used;

	mutex_enter(&dd->dd_lock);
	if (space > 0)
		dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;

	est_used = dsl_dir_space_towrite(dd) + dd->dd_phys->dd_used_bytes;
	parent_space = parent_delta(dd, est_used, space);
	mutex_exit(&dd->dd_lock);

	/* Make sure that we clean up dd_space_to* */
	dsl_dir_dirty(dd, tx);

	/* XXX this is potentially expensive and unnecessary... */
	if (parent_space && dd->dd_parent)
		dsl_dir_willuse_space_impl(dd->dd_parent, parent_space, tx);
}
/*
 * Call in open context when we think we're going to write/free space,
 * eg. when dirtying data.  Be conservative (ie. OK to write less than
 * this or free more than this, but don't write more or free less).
 */
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	dsl_pool_willuse_space(dd->dd_pool, space, tx);
	dsl_dir_willuse_space_impl(dd, space, tx);
}
/* call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
    int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
	int64_t accounted_delta;
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(type < DD_USED_NUM);

	dsl_dir_dirty(dd, tx);

	if (needlock)
		mutex_enter(&dd->dd_lock);
	accounted_delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, used);
	ASSERT(used >= 0 || dd->dd_phys->dd_used_bytes >= -used);
	ASSERT(compressed >= 0 ||
	    dd->dd_phys->dd_compressed_bytes >= -compressed);
	ASSERT(uncompressed >= 0 ||
	    dd->dd_phys->dd_uncompressed_bytes >= -uncompressed);
	dd->dd_phys->dd_used_bytes += used;
	dd->dd_phys->dd_uncompressed_bytes += uncompressed;
	dd->dd_phys->dd_compressed_bytes += compressed;

	if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		ASSERT(used > 0 ||
		    dd->dd_phys->dd_used_breakdown[type] >= -used);
		dd->dd_phys->dd_used_breakdown[type] += used;
#ifdef DEBUG
		dd_used_t t;
		uint64_t u = 0;
		for (t = 0; t < DD_USED_NUM; t++)
			u += dd->dd_phys->dd_used_breakdown[t];
		ASSERT3U(u, ==, dd->dd_phys->dd_used_bytes);
#endif
	}
	if (needlock)
		mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    accounted_delta, compressed, uncompressed, tx);
		dsl_dir_transfer_space(dd->dd_parent,
		    used - accounted_delta,
		    DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
	}
}
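
/*
 * Move "delta" bytes of accounted usage from one DD_USED_* bucket to
 * another without changing the directory's total dd_used_bytes.
 */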
void
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
    dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(oldtype < DD_USED_NUM);
	ASSERT(newtype < DD_USED_NUM);

	if (delta == 0 || !(dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN))
		return;

	dsl_dir_dirty(dd, tx);
	if (needlock)
		mutex_enter(&dd->dd_lock);
	ASSERT(delta > 0 ?
	    dd->dd_phys->dd_used_breakdown[oldtype] >= delta :
	    dd->dd_phys->dd_used_breakdown[newtype] >= -delta);
	ASSERT(dd->dd_phys->dd_used_bytes >= ABS(delta));
	dd->dd_phys->dd_used_breakdown[oldtype] -= delta;
	dd->dd_phys->dd_used_breakdown[newtype] += delta;
	if (needlock)
		mutex_exit(&dd->dd_lock);
}
static int
dsl_dir_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	int err;
	uint64_t towrite;

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	if (psa->psa_effective_value == 0)
		return (0);

	mutex_enter(&dd->dd_lock);
	/*
	 * If we are doing the preliminary check in open context, and
	 * there are pending changes, then don't fail it, since the
	 * pending changes could under-estimate the amount of space to be
	 * freed up.
	 */
	towrite = dsl_dir_space_towrite(dd);
	if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
	    (psa->psa_effective_value < dd->dd_phys->dd_reserved ||
	    psa->psa_effective_value < dd->dd_phys->dd_used_bytes + towrite)) {
		err = ENOSPC;
	}
	mutex_exit(&dd->dd_lock);
	return (err);
}
extern dsl_syncfunc_t dsl_prop_set_sync;
static void
dsl_dir_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value = psa->psa_effective_value;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(dd, psa);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	dd->dd_phys->dd_quota = effective_value;
	mutex_exit(&dd->dd_lock);

	spa_history_log_internal(LOG_DS_QUOTA, dd->dd_pool->dp_spa,
	    tx, "%lld dataset = %llu ",
	    (longlong_t)effective_value, dd->dd_phys->dd_head_dataset_obj);
}
int
dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	dsl_prop_setarg_t psa;
	int err;

	dsl_prop_setarg_init_uint64(&psa, "quota", source, &quota);

	err = dsl_dataset_hold(ddname, FTAG, &ds);
	if (err)
		return (err);

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err) {
		dsl_dataset_rele(ds, FTAG);
		return (err);
	}

	ASSERT(ds->ds_dir == dd);

	/*
	 * If someone removes a file, then tries to set the quota, we want to
	 * make sure the file freeing takes effect.
	 */
	txg_wait_open(dd->dd_pool, 0);

	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_quota_check,
	    dsl_dir_set_quota_sync, ds, &psa, 0);

	dsl_dir_close(dd, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}
static int
dsl_dir_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value;
	uint64_t used, avail;
	int err;

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	effective_value = psa->psa_effective_value;

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_phys->dd_used_bytes;
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent) {
		avail = dsl_dir_space_available(dd->dd_parent,
		    NULL, 0, FALSE);
	} else {
		avail = dsl_pool_adjustedsize(dd->dd_pool, B_FALSE) - used;
	}

	if (MAX(used, effective_value) > MAX(used, dd->dd_phys->dd_reserved)) {
		uint64_t delta = MAX(used, effective_value) -
		    MAX(used, dd->dd_phys->dd_reserved);

		if (delta > avail)
			return (ENOSPC);
		if (dd->dd_phys->dd_quota > 0 &&
		    effective_value > dd->dd_phys->dd_quota)
			return (ENOSPC);
	}

	return (0);
}
static void
dsl_dir_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value = psa->psa_effective_value;
	uint64_t used;
	int64_t delta;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(dd, psa);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_phys->dd_used_bytes;
	delta = MAX(used, effective_value) -
	    MAX(used, dd->dd_phys->dd_reserved);
	dd->dd_phys->dd_reserved = effective_value;

	if (dd->dd_parent != NULL) {
		/* Roll up this additional usage into our ancestors */
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
		    delta, 0, 0, tx);
	}
	mutex_exit(&dd->dd_lock);

	spa_history_log_internal(LOG_DS_RESERVATION, dd->dd_pool->dp_spa,
	    tx, "%lld dataset = %llu",
	    (longlong_t)effective_value, dd->dd_phys->dd_head_dataset_obj);
}
int
dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
    uint64_t reservation)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	dsl_prop_setarg_t psa;
	int err;

	dsl_prop_setarg_init_uint64(&psa, "reservation", source, &reservation);

	err = dsl_dataset_hold(ddname, FTAG, &ds);
	if (err)
		return (err);

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err) {
		dsl_dataset_rele(ds, FTAG);
		return (err);
	}

	ASSERT(ds->ds_dir == dd);

	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_reservation_check,
	    dsl_dir_set_reservation_sync, ds, &psa, 0);

	dsl_dir_close(dd, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}
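
/*
 * Walk up from each directory until the two ancestor chains meet; used
 * by the rename and transfer checks below to scope space accounting to
 * the subtree that actually changes.
 */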
static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
	for (; ds1; ds1 = ds1->dd_parent) {
		dsl_dir_t *dd;
		for (dd = ds2; dd; dd = dd->dd_parent) {
			if (ds1 == dd)
				return (dd);
		}
	}
	return (NULL);
}
/*
 * If delta is applied to dd, how much of that delta would be applied to
 * ancestor?  Syncing context only.
 */
static int64_t
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
{
	if (dd == ancestor)
		return (delta);

	mutex_enter(&dd->dd_lock);
	delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, delta);
	mutex_exit(&dd->dd_lock);
	return (would_change(dd->dd_parent, delta, ancestor));
}
struct renamearg {
	dsl_dir_t *newparent;
	const char *mynewname;
};
static int
dsl_dir_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t val;

	/*
	 * There should only be one reference, from dmu_objset_rename().
	 * Fleeting holds are also possible (eg, from "zfs list" getting
	 * stats), but any that are present in open context will likely
	 * be gone by syncing context, so only fail from syncing
	 * context.
	 */
	if (dmu_tx_is_syncing(tx) && dmu_buf_refcount(dd->dd_dbuf) > 1)
		return (EBUSY);

	/* check for existing name */
	err = zap_lookup(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    ra->mynewname, 8, 1, &val);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	if (ra->newparent != dd->dd_parent) {
		/* is there enough space? */
		uint64_t myspace =
		    MAX(dd->dd_phys->dd_used_bytes, dd->dd_phys->dd_reserved);

		/* no rename into our descendant */
		if (closest_common_ancestor(dd, ra->newparent) == dd)
			return (EINVAL);

		if (err = dsl_dir_transfer_possible(dd->dd_parent,
		    ra->newparent, myspace))
			return (err);
	}

	return (0);
}
static void
dsl_dir_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;

	ASSERT(dmu_buf_refcount(dd->dd_dbuf) <= 2);

	if (ra->newparent != dd->dd_parent) {
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    -dd->dd_phys->dd_used_bytes,
		    -dd->dd_phys->dd_compressed_bytes,
		    -dd->dd_phys->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(ra->newparent, DD_USED_CHILD,
		    dd->dd_phys->dd_used_bytes,
		    dd->dd_phys->dd_compressed_bytes,
		    dd->dd_phys->dd_uncompressed_bytes, tx);

		if (dd->dd_phys->dd_reserved > dd->dd_phys->dd_used_bytes) {
			uint64_t unused_rsrv = dd->dd_phys->dd_reserved -
			    dd->dd_phys->dd_used_bytes;

			dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
			    -unused_rsrv, 0, 0, tx);
			dsl_dir_diduse_space(ra->newparent, DD_USED_CHILD_RSRV,
			    unused_rsrv, 0, 0, tx);
		}
	}

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	/* remove from old parent zapobj */
	err = zap_remove(mos, dd->dd_parent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, tx);
	ASSERT3U(err, ==, 0);

	(void) strcpy(dd->dd_myname, ra->mynewname);
	dsl_dir_close(dd->dd_parent, dd);
	dd->dd_phys->dd_parent_obj = ra->newparent->dd_object;
	VERIFY(0 == dsl_dir_open_obj(dd->dd_pool,
	    ra->newparent->dd_object, NULL, dd, &dd->dd_parent));

	/* add to new parent zapobj */
	err = zap_add(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, 8, 1, &dd->dd_object, tx);
	ASSERT3U(err, ==, 0);

	spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa,
	    tx, "dataset = %llu", dd->dd_phys->dd_head_dataset_obj);
}
int
dsl_dir_rename(dsl_dir_t *dd, const char *newname)
{
	struct renamearg ra;
	int err;

	/* new parent should exist */
	err = dsl_dir_open(newname, FTAG, &ra.newparent, &ra.mynewname);
	if (err)
		return (err);

	/* can't rename to different pool */
	if (dd->dd_pool != ra.newparent->dd_pool) {
		err = ENXIO;
		goto out;
	}

	/* new name should not already exist */
	if (ra.mynewname == NULL) {
		err = EEXIST;
		goto out;
	}

	err = dsl_sync_task_do(dd->dd_pool,
	    dsl_dir_rename_check, dsl_dir_rename_sync, dd, &ra, 3);

out:
	dsl_dir_close(ra.newparent, FTAG);
	return (err);
}
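
/*
 * Check whether "space" bytes can move from sdd to tdd: apply the
 * would-be free to their closest common ancestor and see whether tdd
 * would still have that much available.
 */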
int
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, uint64_t space)
{
	dsl_dir_t *ancestor;
	int64_t adelta;
	uint64_t avail;

	ancestor = closest_common_ancestor(sdd, tdd);
	adelta = would_change(sdd, -space, ancestor);
	avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
	if (avail < space)
		return (ENOSPC);

	return (0);
}
timestruc_t
dsl_dir_snap_cmtime(dsl_dir_t *dd)
{
	timestruc_t t;

	mutex_enter(&dd->dd_lock);
	t = dd->dd_snap_cmtime;
	mutex_exit(&dd->dd_lock);

	return (t);
}
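
/*
 * Record the current time as the directory's snapshot-change time
 * (dd_snap_cmtime).  Called above from dsl_dir_open_obj() and,
 * presumably, whenever the directory's set of snapshots changes.
 */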
void
dsl_dir_snap_cmtime_update(dsl_dir_t *dd)
{
	timestruc_t t;

	gethrestime(&t);
	mutex_enter(&dd->dd_lock);
	dd->dd_snap_cmtime = t;
	mutex_exit(&dd->dd_lock);
}