/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>
typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);
dmu_tx_create_dd(dsl_dir_t *dd)
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
dmu_tx_create(objset_t *os)
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);
	txg_verify(dp->dp_spa, txg);
dmu_tx_is_syncing(dmu_tx_t *tx)
	return (tx->tx_anyobj);
dmu_tx_private_ok(dmu_tx_t *tx)
	return (tx->tx_anyobj);
static dmu_tx_hold_t *
dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
    uint64_t arg1, uint64_t arg2)
	(void) refcount_add(&dn->dn_holds, tx);
	if (tx->tx_txg != 0) {
		mutex_enter(&dn->dn_mtx);
		/*
		 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
		 * problem, but there's no way for it to happen (for
		 */
		ASSERT(dn->dn_assigned_txg == 0);
		dn->dn_assigned_txg = tx->tx_txg;
		(void) refcount_add(&dn->dn_tx_holds, tx);
		mutex_exit(&dn->dn_mtx);

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	refcount_create(&txh->txh_space_towrite);
	refcount_create(&txh->txh_memory_tohold);
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
	list_insert_tail(&tx->tx_holds, txh);
static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, FTAG, &dn);

	txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
	dnode_rele(dn, FTAG);
dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx))
		(void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
/*
 * This function reads specified data from disk.  The specified data will
 * be needed to perform the transaction -- i.e., it will be read after
 * we do dmu_tx_assign().  There are two reasons that we read the data now
 * (before dmu_tx_assign()):
 *
 * 1. Reading it now has potentially better performance.  The transaction
 * has not yet been assigned, so the TXG is not held open, and also the
 * caller typically has fewer locks held when calling dmu_tx_hold_*() than
 * after the transaction has been assigned.  This reduces the lock (and txg)
 * hold times, thus reducing lock contention.
 *
 * 2. It is easier for callers (primarily the ZPL) to handle i/o errors
 * that are detected before they start making changes to the DMU state
 * (i.e. now).  Once the transaction has been assigned, and some DMU
 * state has been changed, it can be difficult to recover from an i/o
 * error (e.g. to undo the changes already made in memory at the DMU
 * layer).  Typically code to do so does not exist in the caller -- it
 * assumes that the data has already been cached and thus i/o errors are
 * not possible.
 *
 * It has been observed that the i/o initiated here can be a performance
 * problem, and it appears to be optional, because we don't look at the
 * data which is read.  However, removing this read would only serve to
 * move the work elsewhere (after the dmu_tx_assign()), where it may
 * have a greater impact on performance (in addition to the impact on
 * fault tolerance noted above).
 */
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
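
/*
 * Illustrative sketch only (not part of the original code): the usual
 * consumer pattern from dmu.h.  Because dmu_tx_hold_write() prefetches the
 * blocks it will need (via dmu_tx_check_ioerr()), an i/o error is reported
 * when the transaction is assigned, before any DMU state has been modified,
 * so the caller can simply abort.
 */
#if 0
static int
dmu_tx_write_example(objset_t *os, uint64_t object, uint64_t off, int len,
    const void *buf)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	dmu_tx_hold_write(tx, object, off, len);	/* may read blocks now */
	err = dmu_tx_assign(tx, TXG_WAIT);		/* i/o errors surface here */
	if (err != 0) {
		dmu_tx_abort(tx);			/* nothing was dirtied */
		return (err);
	}
	dmu_write(os, object, off, len, buf, tx);
	dmu_tx_commit(tx);
	return (0);
}
#endif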
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
	dnode_t *dn = txh->txh_dnode;

	(void) refcount_add_many(&txh->txh_space_towrite, len, FTAG);

	if (refcount_count(&txh->txh_space_towrite) > 2 * DMU_MAX_ACCESS)
		err = SET_ERROR(EFBIG);

	/*
	 * For i/o error checking, read the blocks that will be needed
	 * to perform the write: the first and last level-0 blocks (if
	 * they are not aligned, i.e. if they are partial-block writes),
	 * and all the level-1 blocks.
	 */
	if (dn->dn_maxblkid == 0) {
		if (off < dn->dn_datablksz &&
		    (off > 0 || len < dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				txh->txh_tx->tx_err = err;

		zio_t *zio = zio_root(dn->dn_objset->os_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);

		/* first level-0 block */
		uint64_t start = off >> dn->dn_datablkshift;
		if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
			err = dmu_tx_check_ioerr(zio, dn, 0, start);
				txh->txh_tx->tx_err = err;

		/* last level-0 block */
		uint64_t end = (off + len - 1) >> dn->dn_datablkshift;
		if (end != start && end <= dn->dn_maxblkid &&
		    P2PHASE(off + len, dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(zio, dn, 0, end);
				txh->txh_tx->tx_err = err;

		if (dn->dn_nlevels > 1) {
			int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			for (uint64_t i = (start >> shft) + 1;
			    i < end >> shft; i++) {
				err = dmu_tx_check_ioerr(zio, dn, 1, i);
					txh->txh_tx->tx_err = err;

			txh->txh_tx->tx_err = err;
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
	(void) refcount_add_many(&txh->txh_space_towrite, DNODE_SIZE, FTAG);
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
	ASSERT3U(len, <=, DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
dmu_tx_hold_remap_l1indirect(dmu_tx_t *tx, uint64_t object)
	ASSERT(tx->tx_txg == 0);
	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, 0, 0);

	dnode_t *dn = txh->txh_dnode;
	(void) refcount_add_many(&txh->txh_space_towrite,
	    1ULL << dn->dn_indblkshift, FTAG);
	dmu_tx_count_dnode(txh);
dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
	ASSERT3U(len, <=, DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len);
	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
dmu_tx_mark_netfree(dmu_tx_t *tx)
	tx->tx_netfree = B_TRUE;
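
/*
 * Illustrative sketch only: a transaction that is expected to free more
 * space than it consumes -- e.g. punching a hole in a file -- can be marked
 * "net free" so that refquota enforcement does not reject it.
 */
#if 0
static int
dmu_tx_free_example(objset_t *os, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	dmu_tx_hold_free(tx, object, off, len);
	dmu_tx_mark_netfree(tx);		/* net decrease in space used */
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	err = dmu_free_range(os, object, off, len, tx);
	dmu_tx_commit(tx);
	return (err);
}
#endif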
dmu_tx_hold_free_impl(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
	ASSERT(tx->tx_txg == 0);

	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off + len, 1);

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)

		zio_t *zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			if (err == ESRCH || i > end)
				(void) zio_wait(zio);

			(void) refcount_add_many(&txh->txh_memory_tohold,
			    1 << dn->dn_indblkshift, FTAG);

			err = dmu_tx_check_ioerr(zio, dn, 1, i);

		(void) zio_wait(zio);
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	(void) dmu_tx_hold_free_impl(txh, off, len);
dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len)
	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len);
	(void) dmu_tx_hold_free_impl(txh, off, len);
dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
	dmu_tx_t *tx = txh->txh_tx;

	ASSERT(tx->tx_txg == 0);

	dmu_tx_count_dnode(txh);

	/*
	 * Modifying an almost-full microzap is around the worst case (128KB)
	 *
	 * If it is a fat zap, the worst case would be 7*16KB=112KB:
	 * - 3 blocks overwritten: target leaf, ptrtbl block, header block
	 * - 4 new blocks written if adding:
	 *    - 2 blocks for possibly split leaves,
	 *    - 2 grown ptrtbl blocks
	 */
	(void) refcount_add_many(&txh->txh_space_towrite,
	    MZAP_MAX_BLKSZ, FTAG);

	ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 || name == NULL) {
		/*
		 * This is a microzap (only one block), or we don't know
		 * the name.  Check the first block for i/o errors.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		/*
		 * Access the name so that we'll check for i/o errors to
		 * the leaf blocks, etc.  We ignore ENOENT, as this name
		 * may not yet exist.
		 */
		err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
		if (err == EIO || err == ECKSUM || err == ENXIO) {
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	dmu_tx_hold_zap_impl(txh, name);
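
/*
 * Illustrative usage only (dzp and name are hypothetical caller-side
 * variables): a caller that will add or remove a directory entry holds
 * the directory's ZAP object by name, for example
 *
 *	dmu_tx_hold_zap(tx, dzp->z_id, B_TRUE, name);	adding "name"
 *	dmu_tx_hold_zap(tx, dzp->z_id, B_FALSE, name);	removing "name"
 *
 * Passing the name lets dmu_tx_hold_zap_impl() check the leaf blocks that
 * the lookup would touch for i/o errors.
 */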
dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name)
	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name);
	dmu_tx_hold_zap_impl(txh, name);
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	dmu_tx_count_dnode(txh);
dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn)
	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0);
	dmu_tx_count_dnode(txh);
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);
	(void) refcount_add_many(&txh->txh_space_towrite, space, FTAG);
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
	boolean_t match_object = B_FALSE;
	boolean_t match_offset = B_FALSE;

	dnode_t *dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {

	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
				if (blkid >= beginblk && blkid <= endblk)
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
				if (blkid == DMU_SPILL_BLKID)
				if (blkid == DMU_BONUS_BLKID)
				ASSERT(!"bad txh_type");

		if (match_object && match_offset) {

	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
hrtime_t zfs_delay_max_ns = MSEC2NSEC(100);
int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */
/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent.  This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate.  The scale of the curve is defined by zfs_delay_scale.  Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * [graph: transaction delay (linear scale, 0 to 10ms) versus amount of dirty
 * data (0% to 100% of zfs_dirty_data_max); the "(midpoint)" marker sits at
 * the 2ms row, where zfs_delay_scale takes effect, and the curve rises
 * sharply near the right edge]
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS.  The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * [graph: the same delay curve on a log scale (up to 100ms) versus dirty
 * data (0% to 100% of zfs_dirty_data_max), with zfs_delay_scale marking the
 * midpoint]
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly.  The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	hrtime_t wakeup, min_tx_time, now;

	if (dirty <= delay_min_bytes)

	/*
	 * The caller has already waited until we are under the max.
	 * We make them pass us the amount of dirty data so we don't
	 * have to handle the case of it being >= the max, which could
	 * cause a divide-by-zero if it's == the max.
	 */
	ASSERT3U(dirty, <, zfs_dirty_data_max);

	min_tx_time = zfs_delay_scale *
	    (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
	if (now > tx->tx_start + min_tx_time)
	min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, min_tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + min_tx_time,
	    dp->dp_last_wakeup + min_tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

	mutex_enter(&curthread->t_delay_lock);
	while (cv_timedwait_hires(&curthread->t_delay_cv,
	    &curthread->t_delay_lock, wakeup, zfs_delay_resolution_ns,
	    CALLOUT_FLAG_ABSOLUTE | CALLOUT_FLAG_ROUNDUP) > 0)
	mutex_exit(&curthread->t_delay_lock);

	hrtime_t delta = wakeup - gethrtime();
	ts.tv_sec = delta / NANOSEC;
	ts.tv_nsec = delta % NANOSEC;
	(void) nanosleep(&ts, NULL);
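
/*
 * Worked example of the min_tx_time curve above, as an illustrative sketch
 * (the constants are assumed defaults, not taken from this file): with
 * zfs_delay_scale = 500,000 (ns), zfs_delay_min_dirty_percent = 60, and
 * dirty data at 90% of zfs_dirty_data_max,
 *
 *	min_tx_time = 500000 * (90 - 60) / (100 - 90) = 1,500,000ns (1.5ms)
 *
 * capped at zfs_delay_max_ns (100ms above).
 */
#if 0
static hrtime_t
dmu_tx_delay_curve_example(uint64_t dirty, uint64_t dirty_max,
    uint64_t scale, int min_dirty_percent, hrtime_t max_ns)
{
	uint64_t delay_min_bytes = dirty_max * min_dirty_percent / 100;

	if (dirty <= delay_min_bytes)
		return (0);
	return (MIN((hrtime_t)(scale * (dirty - delay_min_bytes) /
	    (dirty_max - dirty)), max_ns));
}
#endif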
/*
 * This routine attempts to assign the transaction to a transaction group.
 * To do so, we must determine if there is sufficient free space on disk.
 *
 * If this is a "netfree" transaction (i.e. we called dmu_tx_mark_netfree()
 * on it), then it is assumed that there is sufficient free space,
 * unless there's insufficient slop space in the pool (see the comment
 * above spa_slop_shift in spa_misc.c).
 *
 * If it is not a "netfree" transaction, then if the data already on disk
 * is over the allowed usage (e.g. quota), this will fail with EDQUOT or
 * ENOSPC.  Otherwise, if the current rough estimate of pending changes,
 * plus the rough estimate of this transaction's changes, may exceed the
 * allowed usage, then this will fail with ERESTART, which will cause the
 * caller to wait for the pending changes to be written to disk (by waiting
 * for the next TXG to open), and then check the space usage again.
 *
 * The rough estimate of pending changes is comprised of the sum of:
 *
 *  - this transaction's holds' txh_space_towrite
 *
 *  - dd_tempreserved[], which is the sum of in-flight transactions'
 *    holds' txh_space_towrite (i.e. those transactions that have called
 *    dmu_tx_assign() but not yet called dmu_tx_commit()).
 *
 *  - dd_space_towrite[], which is the amount of dirtied dbufs.
 *
 * Note that all of these values are inflated by spa_get_worst_case_asize(),
 * which means that we may get ERESTART well before we are actually in danger
 * of running out of space, but this also mitigates any small inaccuracies
 * in the rough estimate (e.g. txh_space_towrite doesn't take into account
 * indirect blocks, and dd_space_towrite[] doesn't take into account changes
 * to the MOS).
 *
 * Note that due to this algorithm, it is possible to exceed the allowed
 * usage by one transaction.  Also, as we approach the allowed usage,
 * we will allow a very limited amount of changes into each TXG, thus
 * decreasing performance.
 */
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
	spa_t *spa = tx->tx_pool->dp_spa;

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    !(txg_how & TXG_WAIT))
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));

	if (!tx->tx_dirty_delayed &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		return (SET_ERROR(ERESTART));

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	uint64_t towrite = 0;
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		if (dn->dn_assigned_txg == tx->tx_txg - 1) {
			mutex_exit(&dn->dn_mtx);
			tx->tx_needassign_txh = txh;
			return (SET_ERROR(ERESTART));
		if (dn->dn_assigned_txg == 0)
			dn->dn_assigned_txg = tx->tx_txg;
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
		(void) refcount_add(&dn->dn_tx_holds, tx);
		mutex_exit(&dn->dn_mtx);

		towrite += refcount_count(&txh->txh_space_towrite);
		tohold += refcount_count(&txh->txh_memory_tohold);

	/* needed allocation: worst-case estimate of write space */
	uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite);
	/* calculate memory footprint estimate */
	uint64_t memory = towrite + tohold;

	if (tx->tx_dir != NULL && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx);
dmu_tx_unassign(dmu_tx_t *tx)
	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds);
	    txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		mutex_exit(&dn->dn_mtx);

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
/*
 * Assign tx to a transaction group; txg_how is a bitmask:
 *
 * If TXG_WAIT is set and the currently open txg is full, this function
 * will wait until there's a new txg.  This should be used when no locks
 * are being held.  With this bit set, this function will only fail if
 * we're truly out of space (or over quota).
 *
 * If TXG_WAIT is *not* set and we can't assign into the currently open
 * txg without blocking, this function will return immediately with
 * ERESTART.  This should be used whenever locks are being held.  On an
 * ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
 * and try again.
 *
 * If TXG_NOTHROTTLE is set, this indicates that this tx should not be
 * delayed due to the ZFS Write Throttle (see comments in dsl_pool.c for
 * details on the throttle).  This is used by the VFS operations, after
 * they have already called dmu_tx_wait() (though most likely on a
 * different tx).
 */
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
	ASSERT(tx->tx_txg == 0);
	ASSERT0(txg_how & ~(TXG_WAIT | TXG_NOTHROTTLE));
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	IMPLY((txg_how & TXG_WAIT), !dsl_pool_config_held(tx->tx_pool));

	if ((txg_how & TXG_NOTHROTTLE))
		tx->tx_dirty_delayed = B_TRUE;

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || !(txg_how & TXG_WAIT))

	txg_rele_to_quiesce(&tx->tx_txgh);
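
/*
 * Illustrative sketch only: the lock-holding caller pattern described in the
 * comment above.  The first attempt passes neither TXG_WAIT nor
 * TXG_NOTHROTTLE; on ERESTART the caller drops its locks, waits, aborts and
 * retries with TXG_NOTHROTTLE so the write throttle is not applied twice.
 * (zfs_do_op_example and the lock comments are hypothetical.)
 */
#if 0
static int
zfs_do_op_example(objset_t *os, uint64_t object)
{
	boolean_t waited = B_FALSE;
	dmu_tx_t *tx;
	int err;

top:
	/* ... take ZPL locks here ... */
	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, object);
	err = dmu_tx_assign(tx, waited ? TXG_NOTHROTTLE : 0);
	if (err != 0) {
		/* ... drop ZPL locks here ... */
		if (err == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		return (err);
	}
	/* ... modify the object ... */
	dmu_tx_commit(tx);
	/* ... drop ZPL locks here ... */
	return (0);
}
#endif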
dmu_tx_wait(dmu_tx_t *tx)
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	if (tx->tx_wait_dirty) {
		/*
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		 * space.
		 */
		mutex_enter(&dp->dp_lock);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		uint64_t dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);

		dmu_tx_delay(tx, dirty);

		tx->tx_wait_dirty = B_FALSE;

		/*
		 * Note: setting tx_dirty_delayed only has effect if the
		 * caller used TXG_WAIT.  Otherwise they are going to
		 * destroy this tx and try again.  The common case,
		 * zfs_write(), uses TXG_WAIT.
		 */
		tx->tx_dirty_delayed = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		/*
		 * If the pool is suspended we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.  If that's the case then tx_lasttried_txg
		 * would not have been set.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		/*
		 * A dnode is assigned to the quiescing txg.  Wait for its
		 * transaction to complete.
		 */
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;

		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
dmu_tx_destroy(dmu_tx_t *tx)
	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		refcount_destroy_many(&txh->txh_space_towrite,
		    refcount_count(&txh->txh_space_towrite));
		refcount_destroy_many(&txh->txh_memory_tohold,
		    refcount_count(&txh->txh_memory_tohold));
		kmem_free(txh, sizeof (dmu_tx_hold_t));

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
	kmem_free(tx, sizeof (dmu_tx_t));
dmu_tx_commit(dmu_tx_t *tx)
	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		mutex_exit(&dn->dn_mtx);

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);
dmu_tx_abort(dmu_tx_t *tx)
	ASSERT(tx->tx_txg == 0);

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);
dmu_tx_get_txg(dmu_tx_t *tx)
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
dmu_tx_pool(dmu_tx_t *tx)
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
/*
 * Call all the commit callbacks on a list, with a given error code.
 */
dmu_tx_do_callbacks(list_t *cb_list, int error)
	dmu_tx_callback_t *dcb;

	while ((dcb = list_head(cb_list)) != NULL) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
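
/*
 * Illustrative sketch only: a commit callback receives the caller-supplied
 * data pointer and an error code -- 0 once the txg has synced, or ECANCELED
 * (from dmu_tx_abort() above) if the transaction never made it to disk.
 */
#if 0
static void
example_commit_cb(void *arg, int error)
{
	/* release per-transaction state regardless of the outcome */
	kmem_free(arg, sizeof (uint64_t));
}

static void
example_register(dmu_tx_t *tx)
{
	uint64_t *ctx = kmem_alloc(sizeof (uint64_t), KM_SLEEP);

	/* called after dmu_tx_assign() succeeds, before dmu_tx_commit() */
	dmu_tx_callback_register(tx, example_commit_cb, ctx);
}
#endif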
/*
 * Interface to hold a bunch of attributes.
 * used for creating new files.
 * attrsize is the total size of all attributes
 * to be added during object creation
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * hold necessary attribute name for attribute registration.
 * should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
	if (!sa->sa_need_attr_registration)

	for (int i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
	dmu_tx_hold_t *txh = dmu_tx_hold_object_impl(tx,
	    tx->tx_objset, object, THT_SPILL, 0, 0);

	(void) refcount_add_many(&txh->txh_space_towrite,
	    SPA_OLD_MAXBLOCKSIZE, FTAG);
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)

	if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	} else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
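
/*
 * Illustrative sketch only: creating a new file typically combines the SA
 * hold for the new object's attributes with holds on the directory that
 * will reference it (dzp, name and attr_bytes are hypothetical caller-side
 * values).
 */
#if 0
	dmu_tx_t *tx = dmu_tx_create(os);
	dmu_tx_hold_sa_create(tx, attr_bytes);		/* new object's SA */
	dmu_tx_hold_zap(tx, dzp->z_id, B_TRUE, name);	/* new dir entry */
	err = dmu_tx_assign(tx, TXG_WAIT);
#endif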
/*
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *, attribute, add, size)
 *
 * variable_size is the total size of all variable sized attributes
 * passed to this function.  It is not the total size of all
 * variable size attributes that *may* exist on this object.
 */
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;

		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);