/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
        dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
        tx->tx_dir = dd;
        if (dd != NULL)
                tx->tx_pool = dd->dd_pool;
        list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
            offsetof(dmu_tx_hold_t, txh_node));
        list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
            offsetof(dmu_tx_callback_t, dcb_node));
#ifdef ZFS_DEBUG
        refcount_create(&tx->tx_space_written);
        refcount_create(&tx->tx_space_freed);
#endif
        return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
        dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
        tx->tx_objset = os;
        tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
        return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
        dmu_tx_t *tx = dmu_tx_create_dd(NULL);

        ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
        tx->tx_pool = dp;
        tx->tx_txg = txg;
        tx->tx_anyobj = TRUE;

        return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
        return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
        return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
        dmu_tx_hold_t *txh;
        dnode_t *dn = NULL;
        int err;

        if (object != DMU_NEW_OBJECT) {
                err = dnode_hold(os, object, tx, &dn);
                if (err) {
                        tx->tx_err = err;
                        return (NULL);
                }

                if (err == 0 && tx->tx_txg != 0) {
                        mutex_enter(&dn->dn_mtx);
                        /*
                         * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
                         * problem, but there's no way for it to happen (for
                         * now, at least).
                         */
                        ASSERT(dn->dn_assigned_txg == 0);
                        dn->dn_assigned_txg = tx->tx_txg;
                        (void) refcount_add(&dn->dn_tx_holds, tx);
                        mutex_exit(&dn->dn_mtx);
                }
        }

        txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
        txh->txh_tx = tx;
        txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
        txh->txh_type = type;
        txh->txh_arg1 = arg1;
        txh->txh_arg2 = arg2;
#endif
        list_insert_tail(&tx->tx_holds, txh);

        return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
        /*
         * If we're syncing, they can manipulate any object anyhow, and
         * the hold on the dnode_t can cause problems.
         */
        if (!dmu_tx_is_syncing(tx)) {
                (void) dmu_tx_hold_object_impl(tx, os,
                    object, THT_NEWOBJECT, 0, 0);
        }
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
        int err;
        dmu_buf_impl_t *db;

        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        db = dbuf_hold_level(dn, level, blkid, FTAG);
        rw_exit(&dn->dn_struct_rwlock);
        if (db == NULL)
                return (EIO);
        err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
        dbuf_rele(db, FTAG);
        return (err);
}

static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
        objset_t *os = dn->dn_objset;
        dsl_dataset_t *ds = os->os_dsl_dataset;
        int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
        dmu_buf_impl_t *parent = NULL;
        blkptr_t *bp = NULL;
        uint64_t space;

        if (level >= dn->dn_nlevels || history[level] == blkid)
                return;

        history[level] = blkid;

        space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

        if (db == NULL || db == dn->dn_dbuf) {
                ASSERT(level != 0);
                db = NULL;
        } else {
                ASSERT(DB_DNODE(db) == dn);
                ASSERT(db->db_level == level);
                ASSERT(db->db.db_size == space);
                ASSERT(db->db_blkid == blkid);
                bp = db->db_blkptr;
                parent = db->db_parent;
        }

        freeable = (bp && (freeable ||
            dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

        if (freeable)
                txh->txh_space_tooverwrite += space;
        else
                txh->txh_space_towrite += space;
        if (bp)
                txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

        dmu_tx_count_twig(txh, dn, parent, level + 1,
            blkid >> epbs, freeable, history);
}

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
        dnode_t *dn = txh->txh_dnode;
        uint64_t start, end, i;
        int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
        int err = 0;

        if (len == 0)
                return;

        min_bs = SPA_MINBLOCKSHIFT;
        max_bs = SPA_MAXBLOCKSHIFT;
        min_ibs = DN_MIN_INDBLKSHIFT;
        max_ibs = DN_MAX_INDBLKSHIFT;

        if (dn) {
                uint64_t history[DN_MAX_LEVELS];
                int nlvls = dn->dn_nlevels;
                int delta;

                /*
                 * For i/o error checking, read the first and last level-0
                 * blocks (if they are not aligned), and all the level-1 blocks.
                 */
                if (dn->dn_maxblkid == 0) {
                        delta = dn->dn_datablksz;
                        start = (off < dn->dn_datablksz) ? 0 : 1;
                        end = (off+len <= dn->dn_datablksz) ? 0 : 1;
                        if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
                                err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
                                if (err)
                                        goto out;
                                delta -= off;
                        }
                } else {
                        zio_t *zio = zio_root(dn->dn_objset->os_spa,
                            NULL, NULL, ZIO_FLAG_CANFAIL);

                        /* first level-0 block */
                        start = off >> dn->dn_datablkshift;
                        if (P2PHASE(off, dn->dn_datablksz) ||
                            len < dn->dn_datablksz) {
                                err = dmu_tx_check_ioerr(zio, dn, 0, start);
                                if (err)
                                        goto out;
                        }

                        /* last level-0 block */
                        end = (off+len-1) >> dn->dn_datablkshift;
                        if (end != start && end <= dn->dn_maxblkid &&
                            P2PHASE(off+len, dn->dn_datablksz)) {
                                err = dmu_tx_check_ioerr(zio, dn, 0, end);
                                if (err)
                                        goto out;
                        }

                        /* level-1 blocks */
                        if (nlvls > 1) {
                                int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
                                for (i = (start>>shft)+1; i < end>>shft; i++) {
                                        err = dmu_tx_check_ioerr(zio, dn, 1, i);
                                        if (err)
                                                goto out;
                                }
                        }

                        err = zio_wait(zio);
                        if (err)
                                goto out;
                        delta = P2NPHASE(off, dn->dn_datablksz);
                }

                min_ibs = max_ibs = dn->dn_indblkshift;
                if (dn->dn_maxblkid > 0) {
                        /*
                         * The blocksize can't change,
                         * so we can make a more precise estimate.
                         */
                        ASSERT(dn->dn_datablkshift != 0);
                        min_bs = max_bs = dn->dn_datablkshift;
                }

                /*
                 * If this write is not off the end of the file
                 * we need to account for overwrites/unref.
                 */
                if (start <= dn->dn_maxblkid) {
                        for (int l = 0; l < DN_MAX_LEVELS; l++)
                                history[l] = -1ULL;
                }
                while (start <= dn->dn_maxblkid) {
                        dmu_buf_impl_t *db;

                        rw_enter(&dn->dn_struct_rwlock, RW_READER);
                        err = dbuf_hold_impl(dn, 0, start, FALSE, FTAG, &db);
                        rw_exit(&dn->dn_struct_rwlock);

                        if (err) {
                                txh->txh_tx->tx_err = err;
                                return;
                        }

                        dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
                            history);
                        dbuf_rele(db, FTAG);
                        if (++start > end) {
                                /*
                                 * Account for new indirects appearing
                                 * before this IO gets assigned into a txg.
                                 */
                                bits = 64 - min_bs;
                                epbs = min_ibs - SPA_BLKPTRSHIFT;
                                for (bits -= epbs * (nlvls - 1);
                                    bits >= 0; bits -= epbs)
                                        txh->txh_fudge += 1ULL << max_ibs;
                                goto out;
                        }
                        off += delta;
                        if (len >= delta)
                                len -= delta;
                        delta = dn->dn_datablksz;
                }
        }

        /*
         * 'end' is the last thing we will access, not one past.
         * This way we won't overflow when accessing the last byte.
         */
        start = P2ALIGN(off, 1ULL << max_bs);
        end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
        txh->txh_space_towrite += end - start + 1;

        start >>= min_bs;
        end >>= min_bs;

        epbs = min_ibs - SPA_BLKPTRSHIFT;

        /*
         * The object contains at most 2^(64 - min_bs) blocks,
         * and each indirect level maps 2^epbs.
         */
        for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
                start >>= epbs;
                end >>= epbs;
                ASSERT3U(end, >=, start);
                txh->txh_space_towrite += (end - start + 1) << max_ibs;
                if (start != 0) {
                        /*
                         * We also need a new blkid=0 indirect block
                         * to reference any existing file data.
                         */
                        txh->txh_space_towrite += 1ULL << max_ibs;
                }
        }

out:
        if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
            2 * DMU_MAX_ACCESS)
                err = EFBIG;

        if (err)
                txh->txh_tx->tx_err = err;
}
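
/*
 * Illustration: when the dnode's geometry is not yet known, min_bs/max_bs
 * above span SPA_MINBLOCKSHIFT..SPA_MAXBLOCKSHIFT, so even a small write
 * is charged conservatively -- e.g. a 4K write to a brand-new object is
 * charged a full max-size data block plus a max-size indirect block for
 * every level the object could conceivably grow to.
 */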

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
        dnode_t *dn = txh->txh_dnode;
        dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
        uint64_t space = mdn->dn_datablksz +
            ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

        if (dn && dn->dn_dbuf->db_blkptr &&
            dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
            dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
                txh->txh_space_tooverwrite += space;
                txh->txh_space_tounref += space;
        } else {
                txh->txh_space_towrite += space;
                if (dn && dn->dn_dbuf->db_blkptr)
                        txh->txh_space_tounref += space;
        }
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
        dmu_tx_hold_t *txh;

        ASSERT(tx->tx_txg == 0);
        ASSERT(len < DMU_MAX_ACCESS);
        ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            object, THT_WRITE, off, len);
        if (txh == NULL)
                return;

        dmu_tx_count_write(txh, off, len);
        dmu_tx_count_dnode(txh);
}
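
/*
 * Usage sketch (illustrative; "object", "off" and "len" are the caller's):
 * a write hold must be declared before the tx is assigned, e.g.
 *
 *      tx = dmu_tx_create(os);
 *      dmu_tx_hold_write(tx, object, off, len);
 *      error = dmu_tx_assign(tx, TXG_WAIT);    (then write and commit)
 */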

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
        uint64_t blkid, nblks, lastblk;
        uint64_t space = 0, unref = 0, skipped = 0;
        dnode_t *dn = txh->txh_dnode;
        dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
        spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
        int epbs;
        uint64_t l0span = 0, nl1blks = 0;

        if (dn->dn_nlevels == 0)
                return;

        /*
         * The struct_rwlock protects us against dn_nlevels
         * changing, in case (against all odds) we manage to dirty &
         * sync out the changes after we check for being dirty.
         * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
         */
        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
        if (dn->dn_maxblkid == 0) {
                if (off == 0 && len >= dn->dn_datablksz) {
                        blkid = 0;
                        nblks = 1;
                } else {
                        rw_exit(&dn->dn_struct_rwlock);
                        return;
                }
        } else {
                blkid = off >> dn->dn_datablkshift;
                nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

                if (blkid >= dn->dn_maxblkid) {
                        rw_exit(&dn->dn_struct_rwlock);
                        return;
                }
                if (blkid + nblks > dn->dn_maxblkid)
                        nblks = dn->dn_maxblkid - blkid;
        }

        l0span = nblks; /* save for later use to calc level > 1 overhead */
        if (dn->dn_nlevels == 1) {
                int i;
                for (i = 0; i < nblks; i++) {
                        blkptr_t *bp = dn->dn_phys->dn_blkptr;
                        ASSERT3U(blkid + i, <, dn->dn_nblkptr);
                        bp += blkid + i;
                        if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
                                dprintf_bp(bp, "can free old%s", "");
                                space += bp_get_dsize(spa, bp);
                        }
                        unref += BP_GET_ASIZE(bp);
                }
                nl1blks = 1;
                nblks = 0;
        }

        lastblk = blkid + nblks - 1;
        while (nblks) {
                dmu_buf_impl_t *dbuf;
                uint64_t ibyte, new_blkid;
                int epb = 1 << epbs;
                int err, i, blkoff, tochk;
                blkptr_t *bp;

                ibyte = blkid << dn->dn_datablkshift;
                err = dnode_next_offset(dn,
                    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
                new_blkid = ibyte >> dn->dn_datablkshift;
                if (err == ESRCH) {
                        skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
                        break;
                }
                if (err) {
                        txh->txh_tx->tx_err = err;
                        break;
                }
                if (new_blkid > lastblk) {
                        skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
                        break;
                }

                if (new_blkid > blkid) {
                        ASSERT((new_blkid >> epbs) > (blkid >> epbs));
                        skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
                        nblks -= new_blkid - blkid;
                        blkid = new_blkid;
                }
                blkoff = P2PHASE(blkid, epb);
                tochk = MIN(epb - blkoff, nblks);

                err = dbuf_hold_impl(dn, 1, blkid >> epbs, FALSE, FTAG, &dbuf);
                if (err) {
                        txh->txh_tx->tx_err = err;
                        break;
                }

                txh->txh_memory_tohold += dbuf->db.db_size;

                /*
                 * We don't check memory_tohold against DMU_MAX_ACCESS because
                 * memory_tohold is an over-estimation (especially the >L1
                 * indirect blocks), so it could fail.  Callers should have
                 * already verified that they will not be holding too much
                 * memory.
                 */

                err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
                if (err != 0) {
                        txh->txh_tx->tx_err = err;
                        dbuf_rele(dbuf, FTAG);
                        break;
                }

                bp = dbuf->db.db_data;
                bp += blkoff;

                for (i = 0; i < tochk; i++) {
                        if (dsl_dataset_block_freeable(ds, &bp[i],
                            bp[i].blk_birth)) {
                                dprintf_bp(&bp[i], "can free old%s", "");
                                space += bp_get_dsize(spa, &bp[i]);
                        }
                        unref += BP_GET_ASIZE(bp);
                }
                dbuf_rele(dbuf, FTAG);

                ++nl1blks;
                blkid += tochk;
                nblks -= tochk;
        }
        rw_exit(&dn->dn_struct_rwlock);

        /*
         * Add in memory requirements of higher-level indirects.
         * This assumes a worst-possible scenario for dn_nlevels and a
         * worst-possible distribution of l1-blocks over the region to free.
         */
        {
                uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
                int level = 2;
                /*
                 * Here we don't use DN_MAX_LEVEL, but calculate it with the
                 * given datablkshift and indblkshift.  This makes the
                 * difference between 19 and 8 on large files.
                 */
                int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
                    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
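                /*
                 * For example: with 128K data blocks (datablkshift = 17)
                 * and 16K indirect blocks (indblkshift = 14, so
                 * epbs = 14 - SPA_BLKPTRSHIFT = 7), this gives
                 * maxlevel = 2 + (64 - 17) / 7 = 8.
                 */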

                while (level++ < maxlevel) {
                        txh->txh_memory_tohold += MAX(MIN(blkcnt, nl1blks), 1)
                            << dn->dn_indblkshift;
                        blkcnt = 1 + (blkcnt >> epbs);
                }
        }

        /* account for new level 1 indirect blocks that might show up */
        if (skipped > 0) {
                txh->txh_fudge += skipped << dn->dn_indblkshift;
                skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
                txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
        }
        txh->txh_space_tofree += space;
        txh->txh_space_tounref += unref;
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
        dmu_tx_hold_t *txh;
        dnode_t *dn;
        uint64_t start, end, i;
        int err, shift;
        zio_t *zio;

        ASSERT(tx->tx_txg == 0);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            object, THT_FREE, off, len);
        if (txh == NULL)
                return;
        dn = txh->txh_dnode;

        /* first block */
        if (off != 0)
                dmu_tx_count_write(txh, off, 1);
        /* last block */
        if (len != DMU_OBJECT_END)
                dmu_tx_count_write(txh, off+len, 1);

        dmu_tx_count_dnode(txh);

        if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
                return;
        if (len == DMU_OBJECT_END)
                len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

        /*
         * For i/o error checking, read the first and last level-0
         * blocks, and all the level-1 blocks.  The above count_write's
         * have already taken care of the level-0 blocks.
         */
        if (dn->dn_nlevels > 1) {
                shift = dn->dn_datablkshift + dn->dn_indblkshift -
                    SPA_BLKPTRSHIFT;
                start = off >> shift;
                end = dn->dn_datablkshift ? ((off+len) >> shift) : 0;

                zio = zio_root(tx->tx_pool->dp_spa,
                    NULL, NULL, ZIO_FLAG_CANFAIL);
                for (i = start; i <= end; i++) {
                        uint64_t ibyte = i << shift;
                        err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
                        i = ibyte >> shift;
                        if (err == ESRCH)
                                break;
                        if (err) {
                                tx->tx_err = err;
                                return;
                        }

                        err = dmu_tx_check_ioerr(zio, dn, 1, i);
                        if (err) {
                                tx->tx_err = err;
                                return;
                        }
                }
                err = zio_wait(zio);
                if (err) {
                        tx->tx_err = err;
                        return;
                }
        }

        dmu_tx_count_free(txh, off, len);
}
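
/*
 * Example (illustrative): truncating an object to zero length is
 * expressed as freeing everything from offset 0 to the end:
 *
 *      dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 */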

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
        dmu_tx_hold_t *txh;
        dnode_t *dn;
        uint64_t nblocks;
        int epbs, err;

        ASSERT(tx->tx_txg == 0);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            object, THT_ZAP, add, (uintptr_t)name);
        if (txh == NULL)
                return;
        dn = txh->txh_dnode;

        dmu_tx_count_dnode(txh);

        if (dn == NULL) {
                /*
                 * We will be able to fit a new object's entries into one leaf
                 * block.  So there will be at most 2 blocks total,
                 * including the header block.
                 */
                dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
                return;
        }

        ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

        if (dn->dn_maxblkid == 0 && !add) {
                blkptr_t *bp;

                /*
                 * If there is only one block (i.e. this is a micro-zap)
                 * and we are not adding anything, the accounting is simple.
                 */
                err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
                if (err) {
                        tx->tx_err = err;
                        return;
                }

                /*
                 * Use max block size here, since we don't know how much
                 * the size will change between now and the dbuf dirty call.
                 */
                bp = &dn->dn_phys->dn_blkptr[0];
                if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
                    bp, bp->blk_birth))
                        txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
                else
                        txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
                if (!BP_IS_HOLE(bp))
                        txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
                return;
        }

        if (dn->dn_maxblkid > 0 && name) {
                /*
                 * access the name in this fat-zap so that we'll check
                 * for i/o errors to the leaf blocks, etc.
                 */
                err = zap_lookup(dn->dn_objset, dn->dn_object, name,
                    8, 0, NULL);
                if (err == EIO) {
                        tx->tx_err = err;
                        return;
                }
        }

        err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
            &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

        /*
         * If the modified blocks are scattered to the four winds,
         * we'll have to modify an indirect twig for each.
         */
        epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
        for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
                if (dn->dn_objset->os_dsl_dataset->ds_phys->ds_prev_snap_obj)
                        txh->txh_space_towrite += 3 << dn->dn_indblkshift;
                else
                        txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}
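
/*
 * Example (illustrative; "dir_object" and "name" are hypothetical): a
 * caller adding a directory entry holds the directory's ZAP by name:
 *
 *      dmu_tx_hold_zap(tx, dir_object, B_TRUE, name);
 */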

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
        dmu_tx_hold_t *txh;

        ASSERT(tx->tx_txg == 0);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            object, THT_BONUS, 0, 0);
        if (txh)
                dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
        dmu_tx_hold_t *txh;

        ASSERT(tx->tx_txg == 0);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            DMU_NEW_OBJECT, THT_SPACE, space, 0);

        txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
        dmu_tx_hold_t *txh;
        int holds = 0;

        /*
         * By asserting that the tx is assigned, we're counting the
         * number of dn_tx_holds, which is the same as the number of
         * dn_holds.  Otherwise, we'd be counting dn_holds, but
         * dn_tx_holds could be 0.
         */
        ASSERT(tx->tx_txg != 0);

        /* if (tx->tx_anyobj == TRUE) */
        /*      return (0); */

        for (txh = list_head(&tx->tx_holds); txh;
            txh = list_next(&tx->tx_holds, txh)) {
                if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
                        holds++;
        }

        return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
        dmu_tx_hold_t *txh;
        int match_object = FALSE, match_offset = FALSE;
        dnode_t *dn;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        ASSERT(tx->tx_txg != 0);
        ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
        ASSERT3U(dn->dn_object, ==, db->db.db_object);

        if (tx->tx_anyobj) {
                DB_DNODE_EXIT(db);
                return;
        }

        /* XXX No checking on the meta dnode for now */
        if (db->db.db_object == DMU_META_DNODE_OBJECT) {
                DB_DNODE_EXIT(db);
                return;
        }

        for (txh = list_head(&tx->tx_holds); txh;
            txh = list_next(&tx->tx_holds, txh)) {
                ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
                if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
                        match_object = TRUE;
                if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
                        int datablkshift = dn->dn_datablkshift ?
                            dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
                        int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
                        int shift = datablkshift + epbs * db->db_level;
                        uint64_t beginblk = shift >= 64 ? 0 :
                            (txh->txh_arg1 >> shift);
                        uint64_t endblk = shift >= 64 ? 0 :
                            ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
                        uint64_t blkid = db->db_blkid;

                        /* XXX txh_arg2 better not be zero... */

                        dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
                            txh->txh_type, beginblk, endblk);

                        switch (txh->txh_type) {
                        case THT_WRITE:
                                if (blkid >= beginblk && blkid <= endblk)
                                        match_offset = TRUE;
                                /*
                                 * We will let this hold work for the bonus
                                 * or spill buffer so that we don't need to
                                 * hold it when creating a new object.
                                 */
                                if (blkid == DMU_BONUS_BLKID ||
                                    blkid == DMU_SPILL_BLKID)
                                        match_offset = TRUE;
                                /*
                                 * They might have to increase nlevels,
                                 * thus dirtying the new TLIBs.  Or they
                                 * might have to change the block size,
                                 * thus dirtying the new lvl=0 blk=0.
                                 */
                                if (blkid == 0)
                                        match_offset = TRUE;
                                break;
                        case THT_FREE:
                                /*
                                 * We will dirty all the level 1 blocks in
                                 * the free range and perhaps the first and
                                 * last level 0 block.
                                 */
                                if (blkid >= beginblk && (blkid <= endblk ||
                                    txh->txh_arg2 == DMU_OBJECT_END))
                                        match_offset = TRUE;
                                break;
                        case THT_SPILL:
                                if (blkid == DMU_SPILL_BLKID)
                                        match_offset = TRUE;
                                break;
                        case THT_BONUS:
                                if (blkid == DMU_BONUS_BLKID)
                                        match_offset = TRUE;
                                break;
                        case THT_ZAP:
                                match_offset = TRUE;
                                break;
                        case THT_NEWOBJECT:
                                match_object = TRUE;
                                break;
                        default:
                                ASSERT(!"bad txh_type");
                        }
                }
                if (match_object && match_offset) {
                        DB_DNODE_EXIT(db);
                        return;
                }
        }
        DB_DNODE_EXIT(db);
        panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
            (u_longlong_t)db->db.db_object, db->db_level,
            (u_longlong_t)db->db_blkid);
}
#endif

static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
        dmu_tx_hold_t *txh;
        spa_t *spa = tx->tx_pool->dp_spa;
        uint64_t memory, asize, fsize, usize;
        uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

        ASSERT0(tx->tx_txg);

        if (tx->tx_err)
                return (tx->tx_err);

        if (spa_suspended(spa)) {
                /*
                 * If the user has indicated a blocking failure mode
                 * then return ERESTART which will block in dmu_tx_wait().
                 * Otherwise, return EIO so that an error can get
                 * propagated back to the VOP calls.
                 *
                 * Note that we always honor the txg_how flag regardless
                 * of the failuremode setting.
                 */
                if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
                    txg_how != TXG_WAIT)
                        return (EIO);

                return (ERESTART);
        }

        tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
        tx->tx_needassign_txh = NULL;

        /*
         * NB: No error returns are allowed after txg_hold_open, but
         * before processing the dnode holds, due to the
         * dmu_tx_unassign() logic.
         */

        towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
        for (txh = list_head(&tx->tx_holds); txh;
            txh = list_next(&tx->tx_holds, txh)) {
                dnode_t *dn = txh->txh_dnode;
                if (dn != NULL) {
                        mutex_enter(&dn->dn_mtx);
                        if (dn->dn_assigned_txg == tx->tx_txg - 1) {
                                mutex_exit(&dn->dn_mtx);
                                tx->tx_needassign_txh = txh;
                                return (ERESTART);
                        }
                        if (dn->dn_assigned_txg == 0)
                                dn->dn_assigned_txg = tx->tx_txg;
                        ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
                        (void) refcount_add(&dn->dn_tx_holds, tx);
                        mutex_exit(&dn->dn_mtx);
                }
                towrite += txh->txh_space_towrite;
                tofree += txh->txh_space_tofree;
                tooverwrite += txh->txh_space_tooverwrite;
                tounref += txh->txh_space_tounref;
                tohold += txh->txh_memory_tohold;
                fudge += txh->txh_fudge;
        }

        /*
         * NB: This check must be after we've held the dnodes, so that
         * the dmu_tx_unassign() logic will work properly
         */
        if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg)
                return (ERESTART);

        /*
         * If a snapshot has been taken since we made our estimates,
         * assume that we won't be able to free or overwrite anything.
         */
        if (tx->tx_objset &&
            dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
            tx->tx_lastsnap_txg) {
                towrite += tooverwrite;
                tooverwrite = tofree = 0;
        }

        /* needed allocation: worst-case estimate of write space */
        asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
        /* freed space estimate: worst-case overwrite + free estimate */
        fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
        /* convert unrefd space to worst-case estimate */
        usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
        /* calculate memory footprint estimate */
        memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
        /*
         * Add in 'tohold' to account for our dirty holds on this memory
         * XXX - the "fudge" factor is to account for skipped blocks that
         * we missed because dnode_next_offset() misses in-core-only blocks.
         */
        tx->tx_space_towrite = asize +
            spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
        tx->tx_space_tofree = tofree;
        tx->tx_space_tooverwrite = tooverwrite;
        tx->tx_space_tounref = tounref;
#endif

        if (tx->tx_dir && asize != 0) {
                int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
                    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
                if (err)
                        return (err);
        }

        return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
        dmu_tx_hold_t *txh;

        if (tx->tx_txg == 0)
                return;

        txg_rele_to_quiesce(&tx->tx_txgh);

        for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
            txh = list_next(&tx->tx_holds, txh)) {
                dnode_t *dn = txh->txh_dnode;

                if (dn == NULL)
                        continue;
                mutex_enter(&dn->dn_mtx);
                ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

                if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
                        dn->dn_assigned_txg = 0;
                        cv_broadcast(&dn->dn_notxholds);
                }
                mutex_exit(&dn->dn_mtx);
        }

        txg_rele_to_sync(&tx->tx_txgh);

        tx->tx_lasttried_txg = tx->tx_txg;
        tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1) TXG_WAIT.  If the current open txg is full, waits until there's
 *     a new one.  This should be used when you're not holding locks.
 *     It will only fail if we're truly out of space (or over quota).
 *
 * (2) TXG_NOWAIT.  If we can't assign into the current open txg without
 *     blocking, returns immediately with ERESTART.  This should be used
 *     whenever you're holding locks.  On an ERESTART error, the caller
 *     should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3) A specific txg.  Use this if you need to ensure that multiple
 *     transactions all sync in the same txg.  Like TXG_NOWAIT, it
 *     returns ERESTART if it can't assign you into the requested txg.
 */
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
        int err;

        ASSERT(tx->tx_txg == 0);
        ASSERT(txg_how != 0);
        ASSERT(!dsl_pool_sync_context(tx->tx_pool));

        while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
                dmu_tx_unassign(tx);

                if (err != ERESTART || txg_how != TXG_WAIT)
                        return (err);

                dmu_tx_wait(tx);
        }

        txg_rele_to_quiesce(&tx->tx_txgh);

        return (0);
}
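
/*
 * Usage sketch (illustrative) of the TXG_NOWAIT retry pattern described
 * above, for callers holding locks:
 *
 *      tx = dmu_tx_create(os);
 *      dmu_tx_hold_write(tx, object, off, len);
 *      error = dmu_tx_assign(tx, TXG_NOWAIT);
 *      if (error == ERESTART) {
 *              ... drop locks ...
 *              dmu_tx_wait(tx);
 *              dmu_tx_abort(tx);
 *              ... reacquire locks and retry ...
 *      } else if (error != 0) {
 *              dmu_tx_abort(tx);
 *              return (error);
 *      }
 *      ... modify the data, e.g. dmu_write(os, object, off, len, buf, tx) ...
 *      dmu_tx_commit(tx);
 */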

void
dmu_tx_wait(dmu_tx_t *tx)
{
        spa_t *spa = tx->tx_pool->dp_spa;

        ASSERT(tx->tx_txg == 0);

        /*
         * It's possible that the pool has become active after this thread
         * has tried to obtain a tx.  If that's the case then its
         * tx_lasttried_txg would not have been assigned.
         */
        if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
                txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1);
        } else if (tx->tx_needassign_txh) {
                dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

                mutex_enter(&dn->dn_mtx);
                while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
                        cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
                mutex_exit(&dn->dn_mtx);
                tx->tx_needassign_txh = NULL;
        } else {
                txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
        }
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
        if (tx->tx_dir == NULL || delta == 0)
                return;

        if (delta > 0) {
                ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
                    tx->tx_space_towrite);
                (void) refcount_add_many(&tx->tx_space_written, delta, NULL);
        } else {
                (void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
        }
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
        dmu_tx_hold_t *txh;

        ASSERT(tx->tx_txg != 0);

        while (txh = list_head(&tx->tx_holds)) {
                dnode_t *dn = txh->txh_dnode;

                list_remove(&tx->tx_holds, txh);
                kmem_free(txh, sizeof (dmu_tx_hold_t));
                if (dn == NULL)
                        continue;
                mutex_enter(&dn->dn_mtx);
                ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

                if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
                        dn->dn_assigned_txg = 0;
                        cv_broadcast(&dn->dn_notxholds);
                }
                mutex_exit(&dn->dn_mtx);
                dnode_rele(dn, tx);
        }

        if (tx->tx_tempreserve_cookie)
                dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

        if (!list_is_empty(&tx->tx_callbacks))
                txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

        if (tx->tx_anyobj == FALSE)
                txg_rele_to_sync(&tx->tx_txgh);

        list_destroy(&tx->tx_callbacks);
        list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
        dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
            tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
            tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
        refcount_destroy_many(&tx->tx_space_written,
            refcount_count(&tx->tx_space_written));
        refcount_destroy_many(&tx->tx_space_freed,
            refcount_count(&tx->tx_space_freed));
#endif
        kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
        dmu_tx_hold_t *txh;

        ASSERT(tx->tx_txg == 0);

        while (txh = list_head(&tx->tx_holds)) {
                dnode_t *dn = txh->txh_dnode;

                list_remove(&tx->tx_holds, txh);
                kmem_free(txh, sizeof (dmu_tx_hold_t));
                if (dn != NULL)
                        dnode_rele(dn, tx);
        }

        /*
         * Call any registered callbacks with an error code.
         */
        if (!list_is_empty(&tx->tx_callbacks))
                dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

        list_destroy(&tx->tx_callbacks);
        list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
        refcount_destroy_many(&tx->tx_space_written,
            refcount_count(&tx->tx_space_written));
        refcount_destroy_many(&tx->tx_space_freed,
            refcount_count(&tx->tx_space_freed));
#endif
        kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
        ASSERT(tx->tx_txg != 0);
        return (tx->tx_txg);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
        dmu_tx_callback_t *dcb;

        dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

        dcb->dcb_func = func;
        dcb->dcb_data = data;

        list_insert_tail(&tx->tx_callbacks, dcb);
}
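
/*
 * Example (illustrative; my_commit_cb and my_arg are hypothetical): the
 * registered callback runs once the tx's txg has synced (error == 0),
 * or with error == ECANCELED if the tx is aborted:
 *
 *      dmu_tx_callback_register(tx, my_commit_cb, my_arg);
 */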

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
        dmu_tx_callback_t *dcb;

        while (dcb = list_head(cb_list)) {
                list_remove(cb_list, dcb);
                dcb->dcb_func(dcb->dcb_data, error);
                kmem_free(dcb, sizeof (dmu_tx_callback_t));
        }
}

/*
 * Interface to hold a bunch of attributes; used for creating new files.
 * attrsize is the total size of all attributes to be added during
 * object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * It should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
        int i;

        if (!sa->sa_need_attr_registration)
                return;

        for (i = 0; i != sa->sa_num_attrs; i++) {
                if (!sa->sa_attr_table[i].sa_registered) {
                        if (sa->sa_reg_attr_obj)
                                dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
                                    B_TRUE, sa->sa_attr_table[i].sa_name);
                        else
                                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
                                    B_TRUE, sa->sa_attr_table[i].sa_name);
                }
        }
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
        dnode_t *dn;
        dmu_tx_hold_t *txh;

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
            THT_SPILL, 0, 0);

        dn = txh->txh_dnode;

        if (dn == NULL)
                return;

        /* If blkptr doesn't exist then add space to towrite */
        if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
                txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
        } else {
                blkptr_t *bp;

                bp = &dn->dn_phys->dn_spill;
                if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
                    bp, bp->blk_birth))
                        txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
                else
                        txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
                if (!BP_IS_HOLE(bp))
                        txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
        }
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
        sa_os_t *sa = tx->tx_objset->os_sa;

        dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

        if (tx->tx_objset->os_sa->sa_master_obj == 0)
                return;

        if (tx->tx_objset->os_sa->sa_layout_attr_obj)
                dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
        else {
                dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
                dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
        }

        dmu_tx_sa_registration_hold(sa, tx);

        if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
                return;

        (void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
            THT_SPILL, 0, 0);
}

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow indicates that the total size of the attributes may grow,
 * in which case the layout may change and the spill block (if any)
 * must also be held.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
        uint64_t object;
        sa_os_t *sa = tx->tx_objset->os_sa;

        ASSERT(hdl != NULL);

        object = sa_handle_object(hdl);

        dmu_tx_hold_bonus(tx, object);

        if (tx->tx_objset->os_sa->sa_master_obj == 0)
                return;

        if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
            tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
                dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
                dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
        }

        dmu_tx_sa_registration_hold(sa, tx);

        if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
                dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

        if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
                ASSERT(tx->tx_txg == 0);
                dmu_tx_hold_spill(tx, object);
        } else {
                dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
                dnode_t *dn;

                DB_DNODE_ENTER(db);
                dn = DB_DNODE(db);
                if (dn->dn_have_spill) {
                        ASSERT(tx->tx_txg == 0);
                        dmu_tx_hold_spill(tx, object);
                }
                DB_DNODE_EXIT(db);
        }
}