 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]

#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>	/* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h>	/* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>	/* for fzap_default_block_shift */
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_create_dd(dsl_dir_t *dd)
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);

dmu_tx_create(objset_t *os)
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);

dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);
	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);

dmu_tx_is_syncing(dmu_tx_t *tx)
	return (tx->tx_anyobj);

dmu_tx_private_ok(dmu_tx_t *tx)
	return (tx->tx_anyobj);
static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	refcount_create(&txh->txh_space_towrite);
	refcount_create(&txh->txh_space_tofree);
	refcount_create(&txh->txh_space_tooverwrite);
	refcount_create(&txh->txh_space_tounref);
	refcount_create(&txh->txh_memory_tohold);
	refcount_create(&txh->txh_fudge);
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
	list_insert_tail(&tx->tx_holds, txh);

dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);

dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);

dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	if (level >= dn->dn_nlevels || history[level] == blkid)
	history[level] = blkid;
	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);
	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		parent = db->db_parent;
	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));
		(void) refcount_add_many(&txh->txh_space_tooverwrite,
		(void) refcount_add_many(&txh->txh_space_towrite,
		(void) refcount_add_many(&txh->txh_space_tounref,
		    bp_get_dsize(os->os_spa, bp), FTAG);
		dmu_tx_count_twig(txh, dn, parent, level + 1,
		    blkid >> epbs, freeable, history);
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = highbit64(txh->txh_tx->tx_objset->os_recordsize) - 1;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);
			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
			delta = P2NPHASE(off, dn->dn_datablksz);
		min_ibs = max_ibs = dn->dn_indblkshift;
		if (dn->dn_maxblkid > 0) {
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
			 * The blocksize can increase up to the recordsize,
			 * or if it is already more than the recordsize,
			 * up to the next power of 2.
			min_bs = highbit64(dn->dn_datablksz - 1);
			max_bs = MAX(max_bs, highbit64(dn->dn_datablksz - 1));
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
		while (start <= dn->dn_maxblkid) {
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start,
			    FALSE, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);
				txh->txh_tx->tx_err = err;
			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs) {
					(void) refcount_add_many(
					    1ULL << max_ibs, FTAG);
			delta = dn->dn_datablksz;
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	(void) refcount_add_many(&txh->txh_space_towrite,
	    end - start + 1, FTAG);
	epbs = min_ibs - SPA_BLKPTRSHIFT;
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
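	/*
	 * Illustrative worked case (editorial addition; the concrete shift
	 * values are assumptions, not taken from this file): with
	 * min_bs = SPA_MINBLOCKSHIFT = 9 (512-byte blocks), 128-byte block
	 * pointers (SPA_BLKPTRSHIFT = 7) and a 4K indirect block
	 * (min_ibs = 12), epbs = 12 - 7 = 5, so the loop below charges
	 * roughly (64 - 9) / 5 + 1 = 12 worst-case indirect levels, each at
	 * (end - start + 1) << max_ibs bytes.
	 */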
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		ASSERT3U(end, >=, start);
		(void) refcount_add_many(&txh->txh_space_towrite,
		    (end - start + 1) << max_ibs, FTAG);
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			(void) refcount_add_many(&txh->txh_space_towrite,
			    1ULL << max_ibs, FTAG);
	if (refcount_count(&txh->txh_space_towrite) +
	    refcount_count(&txh->txh_space_tooverwrite) >
		err = SET_ERROR(EFBIG);
		txh->txh_tx->tx_err = err;

dmu_tx_count_dnode(dmu_tx_hold_t *txh)
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);
	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		(void) refcount_add_many(&txh->txh_space_tooverwrite,
		(void) refcount_add_many(&txh->txh_space_tounref, space, FTAG);
		(void) refcount_add_many(&txh->txh_space_towrite, space, FTAG);
		if (dn && dn->dn_dbuf->db_blkptr) {
			(void) refcount_add_many(&txh->txh_space_tounref,
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	uint64_t l0span = 0, nl1blks = 0;
	if (dn->dn_nlevels == 0)
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			rw_exit(&dn->dn_struct_rwlock);
	blkid = off >> dn->dn_datablkshift;
	nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;
	if (blkid > dn->dn_maxblkid) {
		rw_exit(&dn->dn_struct_rwlock);
	if (blkid + nblks > dn->dn_maxblkid)
		nblks = dn->dn_maxblkid - blkid + 1;
	l0span = nblks;	/* save for later use to calc level > 1 overhead */
	if (dn->dn_nlevels == 1) {
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			unref += BP_GET_ASIZE(bp);
	lastblk = blkid + nblks - 1;
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int err, i, blkoff, tochk;
		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			txh->txh_tx->tx_err = err;
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);
		err = dbuf_hold_impl(dn, 1, blkid >> epbs,
		    FALSE, FALSE, FTAG, &dbuf);
			txh->txh_tx->tx_err = err;
		(void) refcount_add_many(&txh->txh_memory_tohold,
		    dbuf->db.db_size, FTAG);
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
		bp = dbuf->db.db_data;
		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			unref += BP_GET_ASIZE(bp);
		dbuf_rele(dbuf, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels and a
	 * worst-possible distribution of l1-blocks over the region to free.
		uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
		 * Here we don't use DN_MAX_LEVEL, but calculate it with the
		 * given datablkshift and indblkshift. This makes the
		 * difference between 19 and 8 on large files.
		int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
		while (level++ < maxlevel) {
			(void) refcount_add_many(&txh->txh_memory_tohold,
			    MAX(MIN(blkcnt, nl1blks), 1) << dn->dn_indblkshift,
			blkcnt = 1 + (blkcnt >> epbs);
	/* account for new level 1 indirect blocks that might show up */
		(void) refcount_add_many(&txh->txh_fudge,
		    skipped << dn->dn_indblkshift, FTAG);
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		(void) refcount_add_many(&txh->txh_memory_tohold,
		    skipped << dn->dn_indblkshift, FTAG);
	(void) refcount_add_many(&txh->txh_space_tofree, space, FTAG);
	(void) refcount_add_many(&txh->txh_space_tounref, unref, FTAG);
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
dmu_tx_mark_netfree(dmu_tx_t *tx)
	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_FREE, 0, 0);
	 * Pretend that this operation will free 1GB of space.  This
	 * should be large enough to cancel out the largest write.
	 * We don't want to use something like UINT64_MAX, because that would
	 * cause overflows when doing math with these values (e.g. in
	 * dmu_tx_try_assign()).
	(void) refcount_add_many(&txh->txh_space_tofree,
	    1024 * 1024 * 1024, FTAG);
	(void) refcount_add_many(&txh->txh_space_tounref,
	    1024 * 1024 * 1024, FTAG);
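/*
 * Hedged usage sketch (editorial addition; the calling context is
 * hypothetical): a delete path that expects to free more space than it
 * dirties might set up its transaction roughly as
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	dmu_tx_mark_netfree(tx);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *
 * so that refquotas do not block the free from being assigned.
 */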
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
	ASSERT(tx->tx_txg == 0);
	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	dmu_tx_count_dnode(txh);
	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off+len, 1);
	 * Check level-1 blocks.
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;
		ASSERT(dn->dn_indblkshift != 0);
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		if (dn->dn_datablkshift == 0)
		zio_t *zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			if (err == ESRCH || i > end)
			err = dmu_tx_check_ioerr(zio, dn, 1, i);
	dmu_tx_count_free(txh, off, len);
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
	ASSERT(tx->tx_txg == 0);
	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	dmu_tx_count_dnode(txh);
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
	ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);
	if (dn->dn_maxblkid == 0 && !add) {
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth)) {
			(void) refcount_add_many(&txh->txh_space_tooverwrite,
			    MZAP_MAX_BLKSZ, FTAG);
			(void) refcount_add_many(&txh->txh_space_towrite,
			    MZAP_MAX_BLKSZ, FTAG);
		if (!BP_IS_HOLE(bp)) {
			(void) refcount_add_many(&txh->txh_space_tounref,
			    MZAP_MAX_BLKSZ, FTAG);
	if (dn->dn_maxblkid > 0 && name) {
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.  We can make
	 * modifications at up to 3 locations:
	 *  - header block at the beginning of the object
	 *  - target leaf block
	 *  - end of the object, where we might need to write:
	 *    - a new leaf block if the target block needs to be split
	 *    - the new pointer table, if it is growing
	 *    - the new cookie table, if it is growing
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dsl_dataset_phys_t *ds_phys =
	    dsl_dataset_phys(dn->dn_objset->os_dsl_dataset);
	for (int lvl = 1; lvl < dn->dn_nlevels; lvl++) {
		uint64_t num_indirects = 1 + (dn->dn_maxblkid >> (epbs * lvl));
		uint64_t spc = MIN(3, num_indirects) << dn->dn_indblkshift;
		if (ds_phys->ds_prev_snap_obj != 0) {
			(void) refcount_add_many(&txh->txh_space_towrite,
			(void) refcount_add_many(&txh->txh_space_tooverwrite,
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
	ASSERT(tx->tx_txg == 0);
	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	dmu_tx_count_dnode(txh);

dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
	ASSERT(tx->tx_txg == 0);
	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);
	(void) refcount_add_many(&txh->txh_space_towrite, space, FTAG);

dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	ASSERT(tx->tx_txg != 0);
	/* if (tx->tx_anyobj == TRUE) */
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
	int match_object = FALSE, match_offset = FALSE;
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);
	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;
			/* XXX txh_arg2 better not be zero... */
			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);
			switch (txh->txh_type) {
				if (blkid >= beginblk && blkid <= endblk)
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
				if (blkid == DMU_SPILL_BLKID)
				if (blkid == DMU_BONUS_BLKID)
				ASSERT(!"bad txh_type");
		if (match_object && match_offset) {
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);

 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
hrtime_t zfs_delay_max_ns = MSEC2NSEC(100);
int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent. This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate. The scale of the curve is defined by zfs_delay_scale. Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * [ASCII-art graph elided: delay (0 to 10ms, 2ms at the midpoint) versus
 *  dirty data (0% to 100% of zfs_dirty_data_max); zfs_delay_scale sets the
 *  delay at the midpoint of the curve.]
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * [ASCII-art graph elided: the same delay curve on a log scale, from 0 up to
 *  100ms, with zfs_delay_scale again marking the midpoint.]
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly. The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
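/*
 * Worked instance of the formula above (editorial addition): at the point
 * where the dirty data sits exactly halfway between delay_min_bytes and
 * zfs_dirty_data_max, (dirty - min) equals (max - dirty), so min_time
 * reduces to zfs_delay_scale itself.  With the 500us midpoint delay
 * mentioned above, a steady stream of delayed transactions is therefore
 * throttled to roughly 1 / 500us = 2000 transactions per second; the
 * actual numbers depend on the zfs_delay_scale and zfs_dirty_data_max
 * tunables.
 */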
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	hrtime_t wakeup, min_tx_time, now;
	if (dirty <= delay_min_bytes)
	 * The caller has already waited until we are under the max.
	 * We make them pass us the amount of dirty data so we don't
	 * have to handle the case of it being >= the max, which could
	 * cause a divide-by-zero if it's == the max.
	ASSERT3U(dirty, <, zfs_dirty_data_max);
	min_tx_time = zfs_delay_scale *
	    (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
	if (now > tx->tx_start + min_tx_time)
	min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);
	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, min_tx_time);
	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + min_tx_time,
	    dp->dp_last_wakeup + min_tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);
	mutex_enter(&curthread->t_delay_lock);
	while (cv_timedwait_hires(&curthread->t_delay_cv,
	    &curthread->t_delay_lock, wakeup, zfs_delay_resolution_ns,
	    CALLOUT_FLAG_ABSOLUTE | CALLOUT_FLAG_ROUNDUP) > 0)
	mutex_exit(&curthread->t_delay_lock);
	hrtime_t delta = wakeup - gethrtime();
	ts.tv_sec = delta / NANOSEC;
	ts.tv_nsec = delta % NANOSEC;
	(void) nanosleep(&ts, NULL);
dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;
	ASSERT0(tx->tx_txg);
		return (tx->tx_err);
	if (spa_suspended(spa)) {
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (SET_ERROR(EIO));
		return (SET_ERROR(ERESTART));
	if (!tx->tx_waited &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		return (SET_ERROR(ERESTART));
	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (SET_ERROR(ERESTART));
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		towrite += refcount_count(&txh->txh_space_towrite);
		tofree += refcount_count(&txh->txh_space_tofree);
		tooverwrite += refcount_count(&txh->txh_space_tooverwrite);
		tounref += refcount_count(&txh->txh_space_tounref);
		tohold += refcount_count(&txh->txh_memory_tohold);
		fudge += refcount_count(&txh->txh_fudge);
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);

dmu_tx_unassign(dmu_tx_t *tx)
	if (tx->tx_txg == 0)
	txg_rele_to_quiesce(&tx->tx_txgh);
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		mutex_exit(&dn->dn_mtx);
	txg_rele_to_sync(&tx->tx_txgh);
	tx->tx_lasttried_txg = tx->tx_txg;
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	TXG_WAITED.  Like TXG_NOWAIT, but indicates that dmu_tx_wait()
 *	has already been called on behalf of this operation (though
 *	most likely on a different tx).
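/*
 * Hedged example of the calling convention described above (editorial
 * addition; the locking and error handling are illustrative only):
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	error = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (error == ERESTART) {
 *		... drop locks ...
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		... reacquire locks and retry ...
 *	} else if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	... make the on-disk changes covered by the holds ...
 *	dmu_tx_commit(tx);
 */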
dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT ||
	    txg_how == TXG_WAITED);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));
	/* If we might wait, we must not hold the config lock. */
	ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool));
	if (txg_how == TXG_WAITED)
		tx->tx_waited = B_TRUE;
	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);
		if (err != ERESTART || txg_how != TXG_WAIT)
	txg_rele_to_quiesce(&tx->tx_txgh);

dmu_tx_wait(dmu_tx_t *tx)
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;
	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));
	if (tx->tx_wait_dirty) {
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		mutex_enter(&dp->dp_lock);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		uint64_t dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);
		dmu_tx_delay(tx, dirty);
		tx->tx_wait_dirty = B_FALSE;
		 * Note: setting tx_waited only has effect if the caller
		 * used TXG_WAIT.  Otherwise they are going to destroy
		 * this tx and try again.  The common case, zfs_write(),
		tx->tx_waited = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		 * If the pool is suspended we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.  If that's the case then tx_lasttried_txg
		 * would not have been set.
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		 * A dnode is assigned to the quiescing txg.  Wait for its
		 * transaction to complete.
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;
		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);

dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
	if (tx->tx_dir == NULL || delta == 0)
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
dmu_tx_destroy(dmu_tx_t *tx)
	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;
		list_remove(&tx->tx_holds, txh);
		refcount_destroy_many(&txh->txh_space_towrite,
		    refcount_count(&txh->txh_space_towrite));
		refcount_destroy_many(&txh->txh_space_tofree,
		    refcount_count(&txh->txh_space_tofree));
		refcount_destroy_many(&txh->txh_space_tooverwrite,
		    refcount_count(&txh->txh_space_tooverwrite));
		refcount_destroy_many(&txh->txh_space_tounref,
		    refcount_count(&txh->txh_space_tounref));
		refcount_destroy_many(&txh->txh_memory_tohold,
		    refcount_count(&txh->txh_memory_tohold));
		refcount_destroy_many(&txh->txh_fudge,
		    refcount_count(&txh->txh_fudge));
		kmem_free(txh, sizeof (dmu_tx_hold_t));
	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
	kmem_free(tx, sizeof (dmu_tx_t));

dmu_tx_commit(dmu_tx_t *tx)
	ASSERT(tx->tx_txg != 0);
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		mutex_exit(&dn->dn_mtx);
	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);
	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);
	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
dmu_tx_abort(dmu_tx_t *tx)
	ASSERT(tx->tx_txg == 0);
	 * Call any registered callbacks with an error code.
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

dmu_tx_get_txg(dmu_tx_t *tx)
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);

dmu_tx_pool(dmu_tx_t *tx)
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);

dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
	dmu_tx_callback_t *dcb;
	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);
	dcb->dcb_func = func;
	dcb->dcb_data = data;
	list_insert_tail(&tx->tx_callbacks, dcb);

 * Call all the commit callbacks on a list, with a given error code.
dmu_tx_do_callbacks(list_t *cb_list, int error)
	dmu_tx_callback_t *dcb;
	while ((dcb = list_head(cb_list)) != NULL) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
 * Interface to hold a bunch of attributes; used for creating new files.
 * attrsize is the total size of all attributes to be added during object
 * creation.
 *
 * For updating/adding a single attribute, dmu_tx_hold_sa() should be used.

 * Hold the necessary attribute name for attribute registration.  This
 * should be a very rare case where it is needed.  If it does happen, it
 * would only happen on the first write to the file system.
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
	if (!sa->sa_need_attr_registration)
	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);

dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	dn = txh->txh_dnode;
	/* If blkptr doesn't exist then add space to towrite */
	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		(void) refcount_add_many(&txh->txh_space_towrite,
		    SPA_OLD_MAXBLOCKSIZE, FTAG);
		bp = &dn->dn_phys->dn_spill;
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth)) {
			(void) refcount_add_many(&txh->txh_space_tooverwrite,
			    SPA_OLD_MAXBLOCKSIZE, FTAG);
			(void) refcount_add_many(&txh->txh_space_towrite,
			    SPA_OLD_MAXBLOCKSIZE, FTAG);
		if (!BP_IS_HOLE(bp)) {
			(void) refcount_add_many(&txh->txh_space_tounref,
			    SPA_OLD_MAXBLOCKSIZE, FTAG);

dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
	sa_os_t *sa = tx->tx_objset->os_sa;
	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
	if (tx->tx_objset->os_sa->sa_master_obj == 0)
	if (tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	dmu_tx_sa_registration_hold(sa, tx);
	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,

 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *, attribute, add, size)
 *
 * variable_size is the total size of all variable sized attributes
 * passed to this function.  It is not the total size of all
 * variable size attributes that *may* exist on this object.
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
	sa_os_t *sa = tx->tx_objset->os_sa;
	ASSERT(hdl != NULL);
	object = sa_handle_object(hdl);
	dmu_tx_hold_bonus(tx, object);
	if (tx->tx_objset->os_sa->sa_master_obj == 0)
	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	dmu_tx_sa_registration_hold(sa, tx);
	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);