/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/* Portions Copyright 2010 Robert Milkowski */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/arc.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/abd.h>
/*
 * The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system
 * calls that change the file system. Each itx has enough information to
 * be able to replay them after a system crash, power loss, or
 * equivalent failure mode. These are stored in memory until either:
 *
 *   1. they are committed to the pool by the DMU transaction group
 *      (txg), at which point they can be discarded; or
 *   2. they are committed to the on-disk ZIL for the dataset being
 *      modified (e.g. due to an fsync, O_DSYNC, or other synchronous
 *      requirement).
 *
 * In the event of a crash or power loss, the itxs contained by each
 * dataset's on-disk ZIL will be replayed when that dataset is first
 * instantiated (e.g. if the dataset is a normal filesystem, when it is
 * first mounted).
 *
 * As hinted at above, there is one ZIL per dataset (both the in-memory
 * representation, and the on-disk representation). The on-disk format
 * consists of 3 parts:
 *
 *	- a single, per-dataset, ZIL header; which points to a chain of
 *	- zero or more ZIL blocks; each of which contains
 *	- zero or more ZIL records
 *
 * A ZIL record holds the information necessary to replay a single
 * system call transaction. A ZIL block can hold many ZIL records, and
 * the blocks are chained together, similarly to a singly linked list.
 *
 * Each ZIL block contains a block pointer (blkptr_t) to the next ZIL
 * block in the chain, and the ZIL header points to the first block in
 * the chain.
 *
 * Note, there is not a fixed place in the pool to hold these ZIL
 * blocks; they are dynamically allocated and freed as needed from the
 * blocks available on the pool, though they can be preferentially
 * allocated from a dedicated "log" vdev.
 */
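/*
 * Illustrative sketch (a comment only, not part of the build): replay
 * conceptually walks the chain described above, starting at the
 * header's zh_log block pointer and applying every record it finds.
 * The real, fully validated traversal is zil_parse() below; this
 * simplified loop only shows the shape of the walk:
 *
 *	blkptr_t bp = zh->zh_log;
 *	while (!BP_IS_HOLE(&bp)) {
 *		(read the ZIL block that bp points to)
 *		(apply each lr_t record stored in that block)
 *		bp = (that block's zil_chain_t)->zc_next_blk;
 *	}
 */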
/*
 * This controls the amount of time that a ZIL block (lwb) will remain
 * "open" when it isn't "full", and it has a thread waiting for it to be
 * committed to stable storage. Please refer to the zil_commit_waiter()
 * function (and the comments within it) for more details.
 */
int zfs_commit_timeout_pct = 5;

/*
 * Disable intent logging replay. This global ZIL switch affects all pools.
 */
int zil_replay_disable = 0;

/*
 * Tunable parameter for debugging or performance analysis. Setting
 * zfs_nocacheflush will cause corruption on power loss if a volatile
 * out-of-order write cache is enabled.
 */
boolean_t zfs_nocacheflush = B_FALSE;

/*
 * Limit SLOG write size per commit executed with synchronous priority.
 * Any writes above that will be executed with lower (asynchronous) priority
 * to limit potential SLOG device abuse by a single active ZIL writer.
 */
uint64_t zil_slog_bulk = 768 * 1024;
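/*
 * Usage note (a hedged sketch, not taken from this file): on illumos,
 * tunables like those above are typically set from /etc/system; the
 * values below are examples only, not recommendations:
 *
 *	set zfs:zfs_commit_timeout_pct = 10
 *	set zfs:zil_replay_disable = 1
 *	set zfs:zil_slog_bulk = 0x100000
 */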
static kmem_cache_t *zil_lwb_cache;
static kmem_cache_t *zil_zcw_cache;

static void zil_async_to_sync(zilog_t *zilog, uint64_t foid);
#define	LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
    sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))
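/*
 * Worked example (assuming the ZILOG2 layout set up in zil_alloc_lwb()
 * below): for a freshly opened 4K block, BP_GET_LSIZE == lwb_sz == 4096
 * and lwb_nused == sizeof (zil_chain_t), so both sides of the comparison
 * equal 4096 - sizeof (zil_chain_t) and LWB_EMPTY() is true. Committing
 * the first itx grows lwb_nused and breaks the equality.
 */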
static int
zil_bp_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;

	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
		return (-1);
	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
		return (1);

	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
		return (-1);
	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
		return (1);

	return (0);
}
void
zil_bp_tree_init(zilog_t *zilog)
{
	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}
void
zil_bp_tree_fini(zilog_t *zilog)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	zil_bp_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_bp_node_t));

	avl_destroy(t);
}
static int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	const dva_t *dva;
	zil_bp_node_t *zn;
	avl_index_t where;

	if (BP_IS_EMBEDDED(bp))
		return (0);

	dva = BP_IDENTITY(bp);

	if (avl_find(t, dva, &where) != NULL)
		return (SET_ERROR(EEXIST));

	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}
static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}
static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}
/*
 * Read a log block and make sure it's valid.
 */
static int
zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
    char **end)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_phys_t zb;
	int error;

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential. The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
			zil_chain_t *zilc = abuf->b_data;
			char *lr = (char *)(zilc + 1);
			uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
				error = SET_ERROR(ECKSUM);
			} else {
				ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE);
				bcopy(lr, dst, len);
				*end = (char *)dst + len;
				*nbp = zilc->zc_next_blk;
			}
		} else {
			char *lr = abuf->b_data;
			uint64_t size = BP_GET_LSIZE(bp);
			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;

			if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
				error = SET_ERROR(ECKSUM);
			} else {
				ASSERT3U(zilc->zc_nused, <=,
				    SPA_OLD_MAXBLOCKSIZE);
				bcopy(lr, dst, zilc->zc_nused);
				*end = (char *)dst + zilc->zc_nused;
				*nbp = zilc->zc_next_blk;
			}
		}

		arc_buf_destroy(abuf, &abuf);
	}

	return (error);
}
/*
 * Read a TX_WRITE log data block.
 */
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
	enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
	const blkptr_t *bp = &lr->lr_blkptr;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_phys_t zb;
	int error;

	if (BP_IS_HOLE(bp)) {
		if (wbuf != NULL)
			bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
		return (0);
	}

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		if (wbuf != NULL)
			bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
		arc_buf_destroy(abuf, &abuf);
	}

	return (error);
}
/*
 * Parse the intent log, and call parse_func for each valid record within.
 */
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
	const zil_header_t *zh = zilog->zl_header;
	boolean_t claimed = !!zh->zh_claim_txg;
	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
	uint64_t max_blk_seq = 0;
	uint64_t max_lr_seq = 0;
	uint64_t blk_count = 0;
	uint64_t lr_count = 0;
	blkptr_t blk, next_blk;
	char *lrbuf, *lrp;
	int error = 0;

	/*
	 * Old logs didn't record the maximum zh_claim_lr_seq.
	 */
	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		claim_lr_seq = UINT64_MAX;

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity. We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE);
	zil_bp_tree_init(zilog);

	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
		int reclen;
		char *end;

		if (blk_seq > claim_blk_seq)
			break;
		if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
			break;
		ASSERT3U(max_blk_seq, <, blk_seq);
		max_blk_seq = blk_seq;
		blk_count++;

		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
			break;

		error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
		if (error != 0)
			break;

		for (lrp = lrbuf; lrp < end; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			if (lr->lrc_seq > claim_lr_seq)
				goto done;
			if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
				goto done;
			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
			max_lr_seq = lr->lrc_seq;
			lr_count++;
		}
	}
done:
	zilog->zl_parse_error = error;
	zilog->zl_parse_blk_seq = max_blk_seq;
	zilog->zl_parse_lr_seq = max_lr_seq;
	zilog->zl_parse_blk_count = blk_count;
	zilog->zl_parse_lr_count = lr_count;

	ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
	    (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq));

	zil_bp_tree_fini(zilog);
	zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE);

	return (error);
}
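/*
 * Usage sketch (hedged illustration, not code from this file): a caller
 * supplies one callback per block and one per record. The hypothetical
 * counting callbacks below would simply tally what the chain contains;
 * zil_claim() and zil_check_log_chain() below use this same pattern
 * with the zil_claim_log_block()/zil_claim_log_record() callbacks.
 *
 *	static int
 *	count_blk(zilog_t *zg, blkptr_t *bp, void *arg, uint64_t txg)
 *	{
 *		(*(uint64_t *)arg)++;
 *		return (0);
 *	}
 *
 *	static int
 *	count_lr(zilog_t *zg, lr_t *lr, void *arg, uint64_t txg)
 *	{
 *		return (0);
 *	}
 *
 *	uint64_t nblks = 0;
 *	(void) zil_parse(zilog, count_blk, count_lr, &nblks, first_txg);
 */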
static int
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	/*
	 * Claim log block if not already committed and not already claimed.
	 * If tx == NULL, just verify that the block is claimable.
	 */
	if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg ||
	    zil_bp_tree_add(zilog, bp) != 0)
		return (0);

	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}
static int
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	int error;

	if (lrc->lrc_txtype != TX_WRITE)
		return (0);

	/*
	 * If the block is not readable, don't claim it. This can happen
	 * in normal operation when a log block is written to disk before
	 * some of the dmu_sync() blocks it points to. In this case, the
	 * transaction cannot have been committed to anyone (we would have
	 * waited for all writes to be stable first), so it is semantically
	 * correct to declare this the end of the log.
	 */
	if (lr->lr_blkptr.blk_birth >= first_txg &&
	    (error = zil_read_log_data(zilog, lr, NULL)) != 0)
		return (error);

	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}
static int
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}
static int
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	blkptr_t *bp = &lr->lr_blkptr;

	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
	    bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
	    !BP_IS_HOLE(bp))
		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}
static int
zil_lwb_vdev_compare(const void *x1, const void *x2)
{
	const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	if (v1 < v2)
		return (-1);
	if (v1 > v2)
		return (1);

	return (0);
}
static lwb_t *
zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg)
{
	lwb_t *lwb;

	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
	lwb->lwb_zilog = zilog;
	lwb->lwb_blk = *bp;
	lwb->lwb_slog = slog;
	lwb->lwb_state = LWB_STATE_CLOSED;
	lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
	lwb->lwb_max_txg = txg;
	lwb->lwb_write_zio = NULL;
	lwb->lwb_root_zio = NULL;
	lwb->lwb_tx = NULL;
	lwb->lwb_issued_timestamp = 0;
	if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
		lwb->lwb_nused = sizeof (zil_chain_t);
		lwb->lwb_sz = BP_GET_LSIZE(bp);
	} else {
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
	}

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, lwb);
	mutex_exit(&zilog->zl_lock);

	ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
	ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
	ASSERT(list_is_empty(&lwb->lwb_waiters));

	return (lwb);
}
static void
zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
{
	ASSERT(MUTEX_HELD(&zilog->zl_lock));
	ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
	ASSERT(list_is_empty(&lwb->lwb_waiters));

	if (lwb->lwb_state == LWB_STATE_OPENED) {
		avl_tree_t *t = &lwb->lwb_vdev_tree;
		void *cookie = NULL;
		zil_vdev_node_t *zv;

		while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
			kmem_free(zv, sizeof (*zv));

		ASSERT3P(lwb->lwb_root_zio, !=, NULL);
		ASSERT3P(lwb->lwb_write_zio, !=, NULL);

		zio_cancel(lwb->lwb_root_zio);
		zio_cancel(lwb->lwb_write_zio);

		lwb->lwb_root_zio = NULL;
		lwb->lwb_write_zio = NULL;
	} else {
		ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
	}

	ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
	ASSERT3P(lwb->lwb_write_zio, ==, NULL);
	ASSERT3P(lwb->lwb_root_zio, ==, NULL);

	/*
	 * Clear the zilog's field to indicate this lwb is no longer
	 * valid, and prevent use-after-free errors.
	 */
	if (zilog->zl_last_lwb_opened == lwb)
		zilog->zl_last_lwb_opened = NULL;

	kmem_cache_free(zil_lwb_cache, lwb);
}
/*
 * Called when we create in-memory log transactions so that we know
 * to cleanup the itxs at the end of spa_sync().
 */
void
zilog_dirty(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;
	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);

	ASSERT(spa_writeable(zilog->zl_spa));

	if (ds->ds_is_snapshot)
		panic("dirtying snapshot!");

	if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, zilog);

		zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
	}
}
/*
 * Determine if the zil is dirty in the specified txg. Callers wanting to
 * ensure that the dirty state does not change must hold the itxg_lock for
 * the specified txg. Holding the lock will ensure that the zil cannot be
 * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
 * state.
 */
boolean_t
zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
		return (B_TRUE);
	return (B_FALSE);
}
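/*
 * Worked example (assuming TXG_SIZE is 4 and TXG_MASK is 3, as defined
 * in the txg headers): per-txg state such as zl_itxg[] lives in a ring
 * of TXG_SIZE slots indexed by "txg & TXG_MASK", so txg 1000 maps to
 * slot 0, txg 1001 to slot 1, and txg 1004 wraps back to slot 0. That
 * wrap is why code below always compares itxg_txg against the txg of
 * interest before trusting a slot's contents.
 */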
/*
 * Determine if the zil is dirty. The zil is considered dirty if it has
 * any pending itx records that have not been cleaned by zil_clean().
 */
boolean_t
zilog_is_dirty(zilog_t *zilog)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	for (int t = 0; t < TXG_SIZE; t++) {
		if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
			return (B_TRUE);
	}
	return (B_FALSE);
}
/*
 * Create an on-disk intent log.
 */
static lwb_t *
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb = NULL;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;
	boolean_t slog = FALSE;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * Allocate an initial log block if:
	 *    - there isn't one already
	 *    - the existing block is the wrong endianness
	 */
	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		if (!BP_IS_HOLE(&blk)) {
			zio_free_zil(zilog->zl_spa, txg, &blk);
			BP_ZERO(&blk);
		}

		error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
		    ZIL_MIN_BLKSZ, &slog);

		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write block (lwb) for the first log block.
	 */
	if (error == 0)
		lwb = zil_alloc_lwb(zilog, &blk, slog, txg);

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);

	return (lwb);
}
/*
 * In one tx, free all log blocks and clear the log header. If keep_first
 * is set, then we're replaying a log with no content. We want to keep the
 * first block, however, so that the first synchronous transaction doesn't
 * require a txg_wait_synced() in zil_create(). We don't need to
 * txg_wait_synced() here either when keep_first is set, because both
 * zil_create() and zil_destroy() will wait for any in-progress destroys
 * to complete.
 */
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	zilog->zl_old_header = *zh;		/* debugging aid */

	if (BP_IS_HOLE(&zh->zh_log))
		return;

	tx = dmu_tx_create(zilog->zl_os);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		VERIFY(!keep_first);
		while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
			list_remove(&zilog->zl_lwb_list, lwb);
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
			zil_free_lwb(zilog, lwb);
		}
	} else if (!keep_first) {
		zil_destroy_sync(zilog, tx);
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);
}
void
zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	ASSERT(list_is_empty(&zilog->zl_lwb_list));
	(void) zil_parse(zilog, zil_free_log_block,
	    zil_free_log_record, tx, zilog->zl_header->zh_claim_txg);
}
int
zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_own_obj(dp, ds->ds_object,
	    DMU_OST_ANY, B_FALSE, FTAG, &os);
	if (error != 0) {
		/*
		 * EBUSY indicates that the objset is inconsistent, in which
		 * case it can not have a ZIL.
		 */
		if (error != EBUSY) {
			cmn_err(CE_WARN, "can't open objset for %llu, error %u",
			    (unsigned long long)ds->ds_object, error);
		}
		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);

	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
		if (!BP_IS_HOLE(&zh->zh_log))
			zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
		BP_ZERO(&zh->zh_log);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
		dmu_objset_disown(os, FTAG);
		return (0);
	}

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number. This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		(void) zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg);
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
			zh->zh_flags |= ZIL_REPLAY_NEEDED;
		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_disown(os, FTAG);
	return (0);
}
/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
int
zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
{
	zilog_t *zilog;
	objset_t *os;
	blkptr_t *bp;
	int error;

	error = dmu_objset_from_ds(ds, &os);
	if (error != 0) {
		cmn_err(CE_WARN, "can't open objset %llu, error %d",
		    (unsigned long long)ds->ds_object, error);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	bp = (blkptr_t *)&zilog->zl_header->zh_log;

	/*
	 * Check the first block and determine if it's on a log device
	 * which may have been removed or faulted prior to loading this
	 * pool. If so, there's no point in checking the rest of the log
	 * as its content should have already been synced to the pool.
	 */
	if (!BP_IS_HOLE(bp)) {
		vdev_t *vd;
		boolean_t valid = B_TRUE;

		spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
		vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
		if (vd->vdev_islog && vdev_is_dead(vd))
			valid = vdev_log_state_valid(vd);
		spa_config_exit(os->os_spa, SCL_STATE, FTAG);

		if (!valid)
			return (0);
	}

	/*
	 * Because tx == NULL, zil_claim_log_block() will not actually claim
	 * any blocks, but just determine whether it is possible to do so.
	 * In addition to checking the log chain, zil_claim_log_block()
	 * will invoke zio_claim() with a done func of spa_claim_notify(),
	 * which will update spa_max_claim_txg. See spa_load() for details.
	 */
	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
	    zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa));

	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}
/*
 * When an itx is "skipped", this function is used to properly mark the
 * waiter as "done", and signal any thread(s) waiting on it. An itx can
 * be skipped (and not committed to an lwb) for a variety of reasons,
 * one of them being that the itx was committed via spa_sync(), prior to
 * it being committed to an lwb; this can happen if a thread calling
 * zil_commit() is racing with spa_sync().
 */
static void
zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
{
	mutex_enter(&zcw->zcw_lock);
	ASSERT3B(zcw->zcw_done, ==, B_FALSE);
	zcw->zcw_done = B_TRUE;
	cv_broadcast(&zcw->zcw_cv);
	mutex_exit(&zcw->zcw_lock);
}
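/*
 * For context, a hedged sketch of the consumer side of this handshake
 * (the real version, with a timeout, lives in zil_commit_waiter()): a
 * thread blocked in zil_commit() sleeps on zcw_cv until zcw_done flips,
 * using the standard condition-variable pattern:
 *
 *	mutex_enter(&zcw->zcw_lock);
 *	while (!zcw->zcw_done)
 *		cv_wait(&zcw->zcw_cv, &zcw->zcw_lock);
 *	mutex_exit(&zcw->zcw_lock);
 */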
/*
 * This function is used when the given waiter is to be linked into an
 * lwb's "lwb_waiters" list; i.e. when the itx is committed to the lwb.
 * At this point, the waiter will no longer be referenced by the itx,
 * and instead, will be referenced by the lwb.
 */
static void
zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
{
	mutex_enter(&zcw->zcw_lock);
	ASSERT(!list_link_active(&zcw->zcw_node));
	ASSERT3P(zcw->zcw_lwb, ==, NULL);
	ASSERT3P(lwb, !=, NULL);
	ASSERT(lwb->lwb_state == LWB_STATE_OPENED ||
	    lwb->lwb_state == LWB_STATE_ISSUED);

	list_insert_tail(&lwb->lwb_waiters, zcw);
	zcw->zcw_lwb = lwb;
	mutex_exit(&zcw->zcw_lock);
}
/*
 * This function is used when zio_alloc_zil() fails to allocate a ZIL
 * block, and the given waiter must be linked to the "nolwb waiters"
 * list inside of zil_process_commit_list().
 */
static void
zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb)
{
	mutex_enter(&zcw->zcw_lock);
	ASSERT(!list_link_active(&zcw->zcw_node));
	ASSERT3P(zcw->zcw_lwb, ==, NULL);
	list_insert_tail(nolwb, zcw);
	mutex_exit(&zcw->zcw_lock);
}
void
zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
{
	avl_tree_t *t = &lwb->lwb_vdev_tree;
	avl_index_t where;
	zil_vdev_node_t *zv, zvsearch;
	int ndvas = BP_GET_NDVAS(bp);
	int i;

	if (zfs_nocacheflush)
		return;

	mutex_enter(&lwb->lwb_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&lwb->lwb_vdev_lock);
}
void
zil_lwb_add_txg(lwb_t *lwb, uint64_t txg)
{
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
}
/*
 * This function is called after all VDEVs associated with a given lwb
 * write have completed their DKIOCFLUSHWRITECACHE command; or as soon
 * as the lwb write completes, if "zfs_nocacheflush" is set.
 *
 * The intention is for this function to be called as soon as the
 * contents of an lwb are considered "stable" on disk, and will survive
 * any sudden loss of power. At this point, any threads waiting for the
 * lwb to reach this state are signalled, and the "waiter" structures
 * are marked "done".
 */
static void
zil_lwb_flush_vdevs_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;
	dmu_tx_t *tx = lwb->lwb_tx;
	zil_commit_waiter_t *zcw;

	spa_config_exit(zilog->zl_spa, SCL_STATE, lwb);

	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);

	mutex_enter(&zilog->zl_lock);

	/*
	 * Ensure the lwb buffer pointer is cleared before releasing the
	 * txg. If we have had an allocation failure and the txg is
	 * waiting to sync then we want zil_sync() to remove the lwb so
	 * that it's not picked up as the next new one in
	 * zil_process_commit_list(). zil_sync() will only remove the
	 * lwb if lwb_buf is null.
	 */
	lwb->lwb_buf = NULL;
	lwb->lwb_tx = NULL;

	ASSERT3U(lwb->lwb_issued_timestamp, >, 0);
	zilog->zl_last_lwb_latency = gethrtime() - lwb->lwb_issued_timestamp;

	lwb->lwb_root_zio = NULL;
	lwb->lwb_state = LWB_STATE_DONE;

	if (zilog->zl_last_lwb_opened == lwb) {
		/*
		 * Remember the highest committed log sequence number
		 * for ztest. We only update this value when all the log
		 * writes succeeded, because ztest wants to ASSERT that
		 * it got the whole log chain.
		 */
		zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
	}

	while ((zcw = list_head(&lwb->lwb_waiters)) != NULL) {
		mutex_enter(&zcw->zcw_lock);

		ASSERT(list_link_active(&zcw->zcw_node));
		list_remove(&lwb->lwb_waiters, zcw);

		ASSERT3P(zcw->zcw_lwb, ==, lwb);
		zcw->zcw_lwb = NULL;

		zcw->zcw_zio_error = zio->io_error;

		ASSERT3B(zcw->zcw_done, ==, B_FALSE);
		zcw->zcw_done = B_TRUE;
		cv_broadcast(&zcw->zcw_cv);

		mutex_exit(&zcw->zcw_lock);
	}

	mutex_exit(&zilog->zl_lock);

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	dmu_tx_commit(tx);
}
/*
 * This is called when an lwb write completes. This means, this specific
 * lwb was written to disk, and all dependent lwb have also been
 * written to disk.
 *
 * At this point, a DKIOCFLUSHWRITECACHE command hasn't been issued to
 * the VDEVs involved in writing out this specific lwb. The lwb will be
 * "done" once zil_lwb_flush_vdevs_done() is called, which occurs in the
 * zio completion callback for the lwb's root zio.
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *lwb = zio->io_private;
	spa_t *spa = zio->io_spa;
	zilog_t *zilog = lwb->lwb_zilog;
	avl_tree_t *t = &lwb->lwb_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;

	ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0);

	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
	ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
	ASSERT(!BP_IS_GANG(zio->io_bp));
	ASSERT(!BP_IS_HOLE(zio->io_bp));
	ASSERT(BP_GET_FILL(zio->io_bp) == 0);

	abd_put(zio->io_abd);

	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED);

	mutex_enter(&zilog->zl_lock);
	lwb->lwb_write_zio = NULL;
	mutex_exit(&zilog->zl_lock);

	if (avl_numnodes(t) == 0)
		return;

	/*
	 * If there was an IO error, we're not going to call zio_flush()
	 * on these vdevs, so we simply empty the tree and free the
	 * nodes. We avoid calling zio_flush() since there isn't any
	 * good reason for doing so, after the lwb block failed to be
	 * written out.
	 */
	if (zio->io_error != 0) {
		while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
			kmem_free(zv, sizeof (*zv));
		return;
	}

	while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
		vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
		if (vd != NULL)
			zio_flush(lwb->lwb_root_zio, vd);
		kmem_free(zv, sizeof (*zv));
	}
}
/*
 * This function's purpose is to "open" an lwb such that it is ready to
 * accept new itxs being committed to it. To do this, the lwb's zio
 * structures are created, and linked to the lwb. This function is
 * idempotent; if the passed in lwb has already been opened, this
 * function is essentially a no-op.
 */
static void
zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
{
	zbookmark_phys_t zb;
	zio_priority_t prio;

	ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
	ASSERT3P(lwb, !=, NULL);
	EQUIV(lwb->lwb_root_zio == NULL, lwb->lwb_state == LWB_STATE_CLOSED);
	EQUIV(lwb->lwb_root_zio != NULL, lwb->lwb_state == LWB_STATE_OPENED);

	SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);

	if (lwb->lwb_root_zio == NULL) {
		abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf,
		    BP_GET_LSIZE(&lwb->lwb_blk));

		if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk)
			prio = ZIO_PRIORITY_SYNC_WRITE;
		else
			prio = ZIO_PRIORITY_ASYNC_WRITE;

		lwb->lwb_root_zio = zio_root(zilog->zl_spa,
		    zil_lwb_flush_vdevs_done, lwb, ZIO_FLAG_CANFAIL);
		ASSERT3P(lwb->lwb_root_zio, !=, NULL);

		lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio,
		    zilog->zl_spa, 0, &lwb->lwb_blk, lwb_abd,
		    BP_GET_LSIZE(&lwb->lwb_blk), zil_lwb_write_done, lwb,
		    prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb);
		ASSERT3P(lwb->lwb_write_zio, !=, NULL);

		lwb->lwb_state = LWB_STATE_OPENED;

		mutex_enter(&zilog->zl_lock);

		/*
		 * The zilog's "zl_last_lwb_opened" field is used to
		 * build the lwb/zio dependency chain, which is used to
		 * preserve the ordering of lwb completions that is
		 * required by the semantics of the ZIL. Each new lwb
		 * zio becomes a parent of the "previous" lwb zio, such
		 * that the new lwb's zio cannot complete until the
		 * "previous" lwb's zio completes.
		 *
		 * This is required by the semantics of zil_commit();
		 * the commit waiters attached to the lwbs will be woken
		 * in the lwb zio's completion callback, so this zio
		 * dependency graph ensures the waiters are woken in the
		 * correct order (the same order the lwbs were created).
		 */
		lwb_t *last_lwb_opened = zilog->zl_last_lwb_opened;
		if (last_lwb_opened != NULL &&
		    last_lwb_opened->lwb_state != LWB_STATE_DONE) {
			ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED ||
			    last_lwb_opened->lwb_state == LWB_STATE_ISSUED);
			ASSERT3P(last_lwb_opened->lwb_root_zio, !=, NULL);
			zio_add_child(lwb->lwb_root_zio,
			    last_lwb_opened->lwb_root_zio);
		}
		zilog->zl_last_lwb_opened = lwb;

		mutex_exit(&zilog->zl_lock);
	}

	ASSERT3P(lwb->lwb_root_zio, !=, NULL);
	ASSERT3P(lwb->lwb_write_zio, !=, NULL);
	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
}
/*
 * Define a limited set of intent log block sizes.
 *
 * These must be a multiple of 4KB. Note only the amount used (again
 * aligned to 4KB) actually gets written. However, we can't always just
 * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted.
 */
uint64_t zil_block_buckets[] = {
	4096,			/* non TX_WRITE */
	8192 + 4096,		/* data base */
	32 * 1024 + 4096,	/* NFS writes */
	UINT64_MAX,
};
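/*
 * Worked example of the bucket selection done in zil_lwb_write_issue()
 * below: with zl_cur_used of roughly 20K, the needed size is
 * 20K + sizeof (zil_chain_t), so the scan skips the 4K and 12K buckets
 * and stops at the 36K bucket; a need beyond 36K hits the UINT64_MAX
 * sentinel and falls back to SPA_OLD_MAXBLOCKSIZE. The chosen size is
 * then maxed against recent history (zl_prev_blks[]) to damp
 * oscillation between small and large blocks.
 */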
/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb = NULL;
	zil_chain_t *zilc;
	spa_t *spa = zilog->zl_spa;
	blkptr_t *bp;
	dmu_tx_t *tx;
	uint64_t txg;
	uint64_t zil_blksz, wsz;
	int i, error;
	boolean_t slog;

	ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
	ASSERT3P(lwb->lwb_root_zio, !=, NULL);
	ASSERT3P(lwb->lwb_write_zio, !=, NULL);
	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);

	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
		zilc = (zil_chain_t *)lwb->lwb_buf;
		bp = &zilc->zc_next_blk;
	} else {
		zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
		bp = &zilc->zc_next_blk;
	}

	ASSERT(lwb->lwb_nused <= lwb->lwb_sz);

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
	 * We dirty the dataset to ensure that zil_sync() will be called
	 * to clean up in the event of allocation failure or I/O failure.
	 */

	tx = dmu_tx_create(zilog->zl_os);

	/*
	 * Since we are not going to create any new dirty data and we can even
	 * help with clearing the existing dirty data, we should not be subject
	 * to the dirty data based delays.
	 * We (ab)use TXG_WAITED to bypass the delay mechanism.
	 * One side effect from using TXG_WAITED is that dmu_tx_assign() can
	 * fail if the pool is suspended. Those are dramatic circumstances,
	 * so we return NULL to signal that the normal ZIL processing is not
	 * possible and txg_wait_synced() should be used to ensure that the
	 * data is on disk.
	 */
	error = dmu_tx_assign(tx, TXG_WAITED);
	if (error != 0) {
		ASSERT3S(error, ==, EIO);
		dmu_tx_abort(tx);
		return (NULL);
	}
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	lwb->lwb_tx = tx;

	/*
	 * Log blocks are pre-allocated. Here we select the size of the next
	 * block, based on size used in the last block.
	 * - first find the smallest bucket that will fit the block from a
	 *   limited set of block sizes. This is because it's faster to write
	 *   blocks allocated from the same metaslab as they are adjacent or
	 *   close.
	 * - next find the maximum from the new suggested size and an array of
	 *   previous sizes. This lessens a picket fence effect of wrongly
	 *   guessing the size if we have a stream of say 2k, 64k, 2k, 64k
	 *   requests.
	 *
	 * Note we only write what is used, but we can't just allocate
	 * the maximum block size because we can exhaust the available
	 * pool log space.
	 */
	zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
	for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
		continue;
	zil_blksz = zil_block_buckets[i];
	if (zil_blksz == UINT64_MAX)
		zil_blksz = SPA_OLD_MAXBLOCKSIZE;
	zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
	for (i = 0; i < ZIL_PREV_BLKS; i++)
		zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
	zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);

	BP_ZERO(bp);

	/* pass the old blkptr in order to spread log blocks across devs */
	error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz, &slog);
	if (error == 0) {
		ASSERT3U(bp->blk_birth, ==, txg);
		bp->blk_cksum = lwb->lwb_blk.blk_cksum;
		bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;

		/*
		 * Allocate a new log write block (lwb).
		 */
		nlwb = zil_alloc_lwb(zilog, bp, slog, txg);
	}

	if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
		/* For Slim ZIL only write what is used. */
		wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
		ASSERT3U(wsz, <=, lwb->lwb_sz);
		zio_shrink(lwb->lwb_write_zio, wsz);
	} else {
		wsz = lwb->lwb_sz;
	}

	zilc->zc_nused = lwb->lwb_nused;
	zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;

	/*
	 * clear unused data for security
	 */
	bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);

	spa_config_enter(zilog->zl_spa, SCL_STATE, lwb, RW_READER);

	zil_lwb_add_block(lwb, &lwb->lwb_blk);
	lwb->lwb_issued_timestamp = gethrtime();
	lwb->lwb_state = LWB_STATE_ISSUED;

	zio_nowait(lwb->lwb_root_zio);
	zio_nowait(lwb->lwb_write_zio);

	/*
	 * If there was an allocation failure then nlwb will be null which
	 * forces a txg_wait_synced().
	 */
	return (nlwb);
}
static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrcb, *lrc;
	lr_write_t *lrwb, *lrw;
	char *lr_buf;
	uint64_t dlen, dnow, lwb_sp, reclen, txg;

	ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
	ASSERT3P(lwb, !=, NULL);
	ASSERT3P(lwb->lwb_buf, !=, NULL);

	zil_lwb_write_open(zilog, lwb);

	lrc = &itx->itx_lr;
	lrw = (lr_write_t *)lrc;

	/*
	 * A commit itx doesn't represent any on-disk state; instead
	 * it's simply used as a place holder on the commit list, and
	 * provides a mechanism for attaching a "commit waiter" onto the
	 * correct lwb (such that the waiter can be signalled upon
	 * completion of that lwb). Thus, we don't process this itx's
	 * log record if it's a commit itx (these itx's don't have log
	 * records), and instead link the itx's waiter onto the lwb's
	 * list of waiters.
	 *
	 * For more details, see the comment above zil_commit().
	 */
	if (lrc->lrc_txtype == TX_COMMIT) {
		zil_commit_waiter_link_lwb(itx->itx_private, lwb);
		itx->itx_private = NULL;
		return (lwb);
	}

	if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) {
		dlen = P2ROUNDUP_TYPED(
		    lrw->lr_length, sizeof (uint64_t), uint64_t);
	} else {
		dlen = 0;
	}
	reclen = lrc->lrc_reclen;
	zilog->zl_cur_used += (reclen + dlen);
	txg = lrc->lrc_txg;

	ASSERT3U(zilog->zl_cur_used, <, UINT64_MAX - (reclen + dlen));

cont:
	/*
	 * If this record won't fit in the current log block, start a new one.
	 * For WR_NEED_COPY optimize layout for minimal number of chunks.
	 */
	lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
	if (reclen > lwb_sp || (reclen + dlen > lwb_sp &&
	    lwb_sp < ZIL_MAX_WASTE_SPACE && (dlen % ZIL_MAX_LOG_DATA == 0 ||
	    lwb_sp < reclen + dlen % ZIL_MAX_LOG_DATA))) {
		lwb = zil_lwb_write_issue(zilog, lwb);
		if (lwb == NULL)
			return (NULL);
		zil_lwb_write_open(zilog, lwb);
		ASSERT(LWB_EMPTY(lwb));
		lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
		ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp);
	}

	dnow = MIN(dlen, lwb_sp - reclen);
	lr_buf = lwb->lwb_buf + lwb->lwb_nused;
	bcopy(lrc, lr_buf, reclen);
	lrcb = (lr_t *)lr_buf;		/* Like lrc, but inside lwb. */
	lrwb = (lr_write_t *)lrcb;	/* Like lrw, but inside lwb. */

	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);
		if (itx->itx_wr_state != WR_COPIED) {
			char *dbuf;
			int error;

			if (itx->itx_wr_state == WR_NEED_COPY) {
				dbuf = lr_buf + reclen;
				lrcb->lrc_reclen += dnow;
				if (lrwb->lr_length > dnow)
					lrwb->lr_length = dnow;
				lrw->lr_offset += dnow;
				lrw->lr_length -= dnow;
			} else {
				ASSERT(itx->itx_wr_state == WR_INDIRECT);
				dbuf = NULL;
			}

			/*
			 * We pass in the "lwb_write_zio" rather than
			 * "lwb_root_zio" so that the "lwb_write_zio"
			 * becomes the parent of any zio's created by
			 * the "zl_get_data" callback. The vdevs are
			 * flushed after the "lwb_write_zio" completes,
			 * so we want to make sure that completion
			 * callback waits for these additional zio's,
			 * such that the vdevs used by those zio's will
			 * be included in the lwb's vdev tree, and those
			 * vdevs will be properly flushed. If we passed
			 * in "lwb_root_zio" here, then these additional
			 * vdevs may not be flushed; e.g. if these zio's
			 * completed after "lwb_write_zio" completed.
			 */
			error = zilog->zl_get_data(itx->itx_private,
			    lrwb, dbuf, lwb, lwb->lwb_write_zio);

			if (error == EIO) {
				txg_wait_synced(zilog->zl_dmu_pool, txg);
				return (lwb);
			}
			if (error != 0) {
				ASSERT(error == ENOENT || error == EEXIST ||
				    error == EALREADY);
				return (lwb);
			}
		}
	}

	/*
	 * We're actually making an entry, so update lrc_seq to be the
	 * log record sequence number. Note that this is generally not
	 * equal to the itx sequence number because not all transactions
	 * are synchronous, and sometimes spa_sync() gets there first.
	 */
	lrcb->lrc_seq = ++zilog->zl_lr_seq;
	lwb->lwb_nused += reclen + dnow;

	zil_lwb_add_txg(lwb, txg);

	ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
	ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));

	dlen -= dnow;
	if (dlen > 0) {
		zilog->zl_cur_used += reclen;
		goto cont;
	}

	return (lwb);
}
itx_t *
zil_itx_create(uint64_t txtype, size_t lrsize)
{
	itx_t *itx;

	lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);

	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_lr.lrc_seq = 0;	/* defensive */
	itx->itx_sync = B_TRUE;		/* default is synchronous */

	return (itx);
}
void
zil_itx_destroy(itx_t *itx)
{
	kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen);
}
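/*
 * Usage sketch (hedged; the type-specific fields are illustrative of
 * what real callers such as the ZPL logging functions fill in): a
 * caller allocates an itx sized for its record type, fills in the
 * fields following the common lr_t header, and hands it to
 * zil_itx_assign() under an open transaction:
 *
 *	itx_t *itx = zil_itx_create(TX_SETATTR, sizeof (lr_setattr_t));
 *	lr_setattr_t *lr = (lr_setattr_t *)&itx->itx_lr;
 *	(fill in lr->lr_foid and the other lr_setattr_t fields)
 *	zil_itx_assign(zilog, itx, tx);
 *
 * Ownership passes to the ZIL; the itx is freed via zil_itx_destroy()
 * once it has been committed to an lwb or cleaned by zil_clean().
 */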
/*
 * Free up the sync and async itxs. The itxs_t has already been detached
 * so no locks are needed.
 */
static void
zil_itxg_clean(itxs_t *itxs)
{
	itx_t *itx;
	list_t *list;
	avl_tree_t *t;
	void *cookie = NULL;
	itx_async_node_t *ian;

	list = &itxs->i_sync_list;
	while ((itx = list_head(list)) != NULL) {
		/*
		 * In the general case, commit itxs will not be found
		 * here, as they'll be committed to an lwb via
		 * zil_lwb_commit(), and free'd in that function. Having
		 * said that, it is still possible for commit itxs to be
		 * found here, due to the following race:
		 *
		 *	- a thread calls zil_commit() which assigns the
		 *	  commit itx to a per-txg i_sync_list
		 *	- zil_itxg_clean() is called (e.g. via spa_sync())
		 *	  while the waiter is still on the i_sync_list
		 *
		 * There's nothing to prevent syncing the txg while the
		 * waiter is on the i_sync_list. This normally doesn't
		 * happen because spa_sync() is slower than zil_commit(),
		 * but if zil_commit() calls txg_wait_synced() (e.g.
		 * because zil_create() or zil_commit_writer_stall() is
		 * called) we will hit this case.
		 */
		if (itx->itx_lr.lrc_txtype == TX_COMMIT)
			zil_commit_waiter_skip(itx->itx_private);

		list_remove(list, itx);
		zil_itx_destroy(itx);
	}

	t = &itxs->i_async_tree;
	while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
		list = &ian->ia_list;
		while ((itx = list_head(list)) != NULL) {
			list_remove(list, itx);
			/* commit itxs should never be on the async lists. */
			ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
			zil_itx_destroy(itx);
		}
		list_destroy(list);
		kmem_free(ian, sizeof (itx_async_node_t));
	}
	avl_destroy(t);

	kmem_free(itxs, sizeof (itxs_t));
}
static int
zil_aitx_compare(const void *x1, const void *x2)
{
	const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
	const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;

	if (o1 < o2)
		return (-1);
	if (o1 > o2)
		return (1);

	return (0);
}
/*
 * Remove all async itx with the given oid.
 */
static void
zil_remove_async(zilog_t *zilog, uint64_t oid)
{
	uint64_t otxg, txg;
	itx_async_node_t *ian;
	avl_tree_t *t;
	avl_index_t where;
	list_t clean_list;
	itx_t *itx;

	list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
		otxg = ZILTEST_TXG;
	else
		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;

	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];

		mutex_enter(&itxg->itxg_lock);
		if (itxg->itxg_txg != txg) {
			mutex_exit(&itxg->itxg_lock);
			continue;
		}

		/*
		 * Locate the object node and append its list.
		 */
		t = &itxg->itxg_itxs->i_async_tree;
		ian = avl_find(t, &oid, &where);
		if (ian != NULL)
			list_move_tail(&clean_list, &ian->ia_list);
		mutex_exit(&itxg->itxg_lock);
	}
	while ((itx = list_head(&clean_list)) != NULL) {
		list_remove(&clean_list, itx);
		/* commit itxs should never be on the async lists. */
		ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
		zil_itx_destroy(itx);
	}
	list_destroy(&clean_list);
}
void
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t txg;
	itxg_t *itxg;
	itxs_t *itxs, *clean = NULL;

	/*
	 * Object ids can be re-instantiated in the next txg so
	 * remove any async transactions to avoid future leaks.
	 * This can happen if a fsync occurs on the re-instantiated
	 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
	 * the new file data and flushes a write record for the old object.
	 */
	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE)
		zil_remove_async(zilog, itx->itx_oid);

	/*
	 * Ensure the data of a renamed file is committed before the rename.
	 */
	if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
		zil_async_to_sync(zilog, itx->itx_oid);

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
		txg = ZILTEST_TXG;
	else
		txg = dmu_tx_get_txg(tx);

	itxg = &zilog->zl_itxg[txg & TXG_MASK];
	mutex_enter(&itxg->itxg_lock);
	itxs = itxg->itxg_itxs;
	if (itxg->itxg_txg != txg) {
		if (itxs != NULL) {
			/*
			 * The zil_clean callback hasn't got around to cleaning
			 * this itxg. Save the itxs for release below.
			 * This should be rare.
			 */
			zfs_dbgmsg("zil_itx_assign: missed itx cleanup for "
			    "txg %llu", itxg->itxg_txg);
			clean = itxg->itxg_itxs;
		}
		itxg->itxg_txg = txg;
		itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP);

		list_create(&itxs->i_sync_list, sizeof (itx_t),
		    offsetof(itx_t, itx_node));
		avl_create(&itxs->i_async_tree, zil_aitx_compare,
		    sizeof (itx_async_node_t),
		    offsetof(itx_async_node_t, ia_node));
	}
	if (itx->itx_sync) {
		list_insert_tail(&itxs->i_sync_list, itx);
	} else {
		avl_tree_t *t = &itxs->i_async_tree;
		uint64_t foid = ((lr_ooo_t *)&itx->itx_lr)->lr_foid;
		itx_async_node_t *ian;
		avl_index_t where;

		ian = avl_find(t, &foid, &where);
		if (ian == NULL) {
			ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP);
			list_create(&ian->ia_list, sizeof (itx_t),
			    offsetof(itx_t, itx_node));
			ian->ia_foid = foid;
			avl_insert(t, ian, where);
		}
		list_insert_tail(&ian->ia_list, itx);
	}

	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);

	/*
	 * We don't want to dirty the ZIL using ZILTEST_TXG, because
	 * zil_clean() will never be called using ZILTEST_TXG. Thus, we
	 * need to be careful to always dirty the ZIL using the "real"
	 * TXG (not itxg_txg) even when the SPA is frozen.
	 */
	zilog_dirty(zilog, dmu_tx_get_txg(tx));
	mutex_exit(&itxg->itxg_lock);

	/* Release the old itxs now we've dropped the lock */
	if (clean != NULL)
		zil_itxg_clean(clean);
}
/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them. We should only do this after
 * we have written out the uberblocks (i.e. txg has been committed) so that
 * we don't inadvertently clean out in-memory log records that would be
 * required by zil_commit().
 */
void
zil_clean(zilog_t *zilog, uint64_t synced_txg)
{
	itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
	itxs_t *clean_me;

	ASSERT3U(synced_txg, <, ZILTEST_TXG);

	mutex_enter(&itxg->itxg_lock);
	if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
		mutex_exit(&itxg->itxg_lock);
		return;
	}
	ASSERT3U(itxg->itxg_txg, <=, synced_txg);
	ASSERT3U(itxg->itxg_txg, !=, 0);
	clean_me = itxg->itxg_itxs;
	itxg->itxg_itxs = NULL;
	itxg->itxg_txg = 0;
	mutex_exit(&itxg->itxg_lock);
	/*
	 * Preferably start a task queue to free up the old itxs but
	 * if taskq_dispatch can't allocate resources to do that then
	 * free it in-line. This should be rare. Note, using TQ_SLEEP
	 * created a bad performance problem.
	 */
	ASSERT3P(zilog->zl_dmu_pool, !=, NULL);
	ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL);
	if (taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq,
	    (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == NULL)
		zil_itxg_clean(clean_me);
}
/*
 * This function will traverse the queue of itxs that need to be
 * committed, and move them onto the ZIL's zl_itx_commit_list.
 */
static void
zil_get_commit_list(zilog_t *zilog)
{
	uint64_t otxg, txg;
	list_t *commit_list = &zilog->zl_itx_commit_list;

	ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
		otxg = ZILTEST_TXG;
	else
		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;

	/*
	 * This is inherently racy, since there is nothing to prevent
	 * the last synced txg from changing. That's okay since we'll
	 * only commit things in the future.
	 */
	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];

		mutex_enter(&itxg->itxg_lock);
		if (itxg->itxg_txg != txg) {
			mutex_exit(&itxg->itxg_lock);
			continue;
		}

		/*
		 * If we're adding itx records to the zl_itx_commit_list,
		 * then the zil better be dirty in this "txg". We can assert
		 * that here since we're holding the itxg_lock which will
		 * prevent spa_sync from cleaning it. Once we add the itxs
		 * to the zl_itx_commit_list we must commit it to disk even
		 * if it's unnecessary (i.e. the txg was synced).
		 */
		ASSERT(zilog_is_dirty_in_txg(zilog, txg) ||
		    spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
		list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list);

		mutex_exit(&itxg->itxg_lock);
	}
}
/*
 * Move the async itxs for a specified object to commit into sync lists.
 */
static void
zil_async_to_sync(zilog_t *zilog, uint64_t foid)
{
	uint64_t otxg, txg;
	itx_async_node_t *ian;
	avl_tree_t *t;
	avl_index_t where;

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
		otxg = ZILTEST_TXG;
	else
		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;

	/*
	 * This is inherently racy, since there is nothing to prevent
	 * the last synced txg from changing.
	 */
	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];

		mutex_enter(&itxg->itxg_lock);
		if (itxg->itxg_txg != txg) {
			mutex_exit(&itxg->itxg_lock);
			continue;
		}

		/*
		 * If a foid is specified then find that node and append its
		 * list. Otherwise walk the tree appending all the lists
		 * to the sync list. We add to the end rather than the
		 * beginning to ensure the create has happened.
		 */
		t = &itxg->itxg_itxs->i_async_tree;
		if (foid != 0) {
			ian = avl_find(t, &foid, &where);
			if (ian != NULL) {
				list_move_tail(&itxg->itxg_itxs->i_sync_list,
				    &ian->ia_list);
			}
		} else {
			void *cookie = NULL;

			while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
				list_move_tail(&itxg->itxg_itxs->i_sync_list,
				    &ian->ia_list);
				list_destroy(&ian->ia_list);
				kmem_free(ian, sizeof (itx_async_node_t));
			}
		}
		mutex_exit(&itxg->itxg_lock);
	}
}
/*
 * This function will prune commit itxs that are at the head of the
 * commit list (it won't prune past the first non-commit itx), and
 * either: a) attach them to the last lwb that's still pending
 * completion, or b) skip them altogether.
 *
 * This is used as a performance optimization to prevent commit itxs
 * from generating new lwbs when it's unnecessary to do so.
 */
static void
zil_prune_commit_list(zilog_t *zilog)
{
	itx_t *itx;

	ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));

	while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
		lr_t *lrc = &itx->itx_lr;
		if (lrc->lrc_txtype != TX_COMMIT)
			break;

		mutex_enter(&zilog->zl_lock);

		lwb_t *last_lwb = zilog->zl_last_lwb_opened;
		if (last_lwb == NULL || last_lwb->lwb_state == LWB_STATE_DONE) {
			/*
			 * All of the itxs this waiter was waiting on
			 * must have already completed (or there were
			 * never any itx's for it to wait on), so it's
			 * safe to skip this waiter and mark it done.
			 */
			zil_commit_waiter_skip(itx->itx_private);
		} else {
			zil_commit_waiter_link_lwb(itx->itx_private, last_lwb);
			itx->itx_private = NULL;
		}

		mutex_exit(&zilog->zl_lock);

		list_remove(&zilog->zl_itx_commit_list, itx);
		zil_itx_destroy(itx);
	}

	IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
}
static void
zil_commit_writer_stall(zilog_t *zilog)
{
	/*
	 * When zio_alloc_zil() fails to allocate the next lwb block on
	 * disk, we must call txg_wait_synced() to ensure all of the
	 * lwbs in the zilog's zl_lwb_list are synced and then freed (in
	 * zil_sync()), such that any subsequent ZIL writer (i.e. a call
	 * to zil_process_commit_list()) will have to call zil_create(),
	 * and start a new ZIL chain.
	 *
	 * Since zil_alloc_zil() failed, the lwb that was previously
	 * issued does not have a pointer to the "next" lwb on disk.
	 * Thus, if another ZIL writer thread was to allocate the "next"
	 * on-disk lwb, that block could be leaked in the event of a
	 * crash (because the previous lwb on-disk would not point to
	 * it).
	 *
	 * We must hold the zilog's zl_writer_lock while we do this, to
	 * ensure no new threads enter zil_process_commit_list() until
	 * all lwb's in the zl_lwb_list have been synced and freed
	 * (which is achieved via the txg_wait_synced() call).
	 */
	ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
	txg_wait_synced(zilog->zl_dmu_pool, 0);
	ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL);
}
1913 * This function will traverse the commit list, creating new lwbs as
1914 * needed, and committing the itxs from the commit list to these newly
1915 * created lwbs. Additionally, as a new lwb is created, the previous
1916 * lwb will be issued to the zio layer to be written to disk.
1919 zil_process_commit_list(zilog_t
*zilog
)
1921 spa_t
*spa
= zilog
->zl_spa
;
1922 list_t nolwb_waiters
;
1926 ASSERT(MUTEX_HELD(&zilog
->zl_writer_lock
));
1929 * Return if there's nothing to commit before we dirty the fs by
1930 * calling zil_create().
1932 if (list_head(&zilog
->zl_itx_commit_list
) == NULL
)
1935 list_create(&nolwb_waiters
, sizeof (zil_commit_waiter_t
),
1936 offsetof(zil_commit_waiter_t
, zcw_node
));
1938 lwb
= list_tail(&zilog
->zl_lwb_list
);
1940 lwb
= zil_create(zilog
);
1942 ASSERT3S(lwb
->lwb_state
, !=, LWB_STATE_ISSUED
);
1943 ASSERT3S(lwb
->lwb_state
, !=, LWB_STATE_DONE
);
1946 while (itx
= list_head(&zilog
->zl_itx_commit_list
)) {
1947 lr_t
*lrc
= &itx
->itx_lr
;
1948 uint64_t txg
= lrc
->lrc_txg
;
1950 ASSERT3U(txg
, !=, 0);
1952 if (lrc
->lrc_txtype
== TX_COMMIT
) {
1953 DTRACE_PROBE2(zil__process__commit__itx
,
1954 zilog_t
*, zilog
, itx_t
*, itx
);
1956 DTRACE_PROBE2(zil__process__normal__itx
,
1957 zilog_t
*, zilog
, itx_t
*, itx
);
1961 * This is inherently racy and may result in us writing
1962 * out a log block for a txg that was just synced. This
1963 * is ok since we'll end cleaning up that log block the
1964 * next time we call zil_sync().
1966 boolean_t synced
= txg
<= spa_last_synced_txg(spa
);
1967 boolean_t frozen
= txg
> spa_freeze_txg(spa
);
1969 if (!synced
|| frozen
) {
1971 lwb
= zil_lwb_commit(zilog
, itx
, lwb
);
1972 } else if (lrc
->lrc_txtype
== TX_COMMIT
) {
1973 ASSERT3P(lwb
, ==, NULL
);
1974 zil_commit_waiter_link_nolwb(
1975 itx
->itx_private
, &nolwb_waiters
);
1977 } else if (lrc
->lrc_txtype
== TX_COMMIT
) {
1978 ASSERT3B(synced
, ==, B_TRUE
);
1979 ASSERT3B(frozen
, ==, B_FALSE
);
1982 * If this is a commit itx, then there will be a
1983 * thread that is either: already waiting for
1984 * it, or soon will be waiting.
1986 * This itx has already been committed to disk
1987 * via spa_sync() so we don't bother committing
1988 * it to an lwb. As a result, we cannot use the
1989 * lwb zio callback to signal the waiter and
1990 * mark it as done, so we must do that here.
1992 zil_commit_waiter_skip(itx
->itx_private
);
1995 list_remove(&zilog
->zl_itx_commit_list
, itx
);
1996 zil_itx_destroy(itx
);
1998 DTRACE_PROBE1(zil__cw2
, zilog_t
*, zilog
);

	if (lwb == NULL) {
		/*
		 * This indicates zio_alloc_zil() failed to allocate the
		 * "next" lwb on-disk. When this happens, we must stall
		 * the ZIL write pipeline; see the comment within
		 * zil_commit_writer_stall() for more details.
		 */
		zil_commit_writer_stall(zilog);

		/*
		 * Additionally, we have to signal and mark the "nolwb"
		 * waiters as "done" here, since without an lwb, we
		 * can't do this via zil_lwb_flush_vdevs_done() like
		 * normal.
		 */
		zil_commit_waiter_t *zcw;
		while (zcw = list_head(&nolwb_waiters)) {
			zil_commit_waiter_skip(zcw);
			list_remove(&nolwb_waiters, zcw);
		}
	} else {
		ASSERT(list_is_empty(&nolwb_waiters));
		ASSERT3P(lwb, !=, NULL);
		ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
		ASSERT3S(lwb->lwb_state, !=, LWB_STATE_DONE);

		/*
		 * At this point, the ZIL block pointed at by the "lwb"
		 * variable is in one of the following states: "closed"
		 * or "open".
		 *
		 * If it's "closed", then no itxs have been committed to
		 * it, so there's no point in issuing its zio (i.e. it's
		 * "empty").
		 *
		 * If it's in the "open" state, then it contains one or
		 * more itxs that eventually need to be committed to
		 * stable storage. In this case we intentionally do not
		 * issue the lwb's zio to disk yet, and instead rely on
		 * one of the following two mechanisms for issuing the
		 * zio:
		 *
		 * 1. Ideally, there will be more ZIL activity occurring
		 * on the system, such that this function will be
		 * immediately called again (not necessarily by the same
		 * thread) and this lwb's zio will be issued via
		 * zil_lwb_commit(). This way, the lwb is guaranteed to
		 * be "full" when it is issued to disk, and we'll make
		 * use of the lwb's size the best we can.
		 *
		 * 2. If there isn't sufficient ZIL activity occurring on
		 * the system, such that this lwb's zio isn't issued via
		 * zil_lwb_commit(), zil_commit_waiter() will issue the
		 * lwb's zio. If this occurs, the lwb is not guaranteed
		 * to be "full" by the time its zio is issued, which
		 * means the size of the lwb was "too large" given the
		 * amount of ZIL activity occurring on the system at that
		 * time.
		 *
		 * We do this for a couple of reasons:
		 *
		 * 1. To try and reduce the number of IOPs needed to
		 * write the same number of itxs. If an lwb has space
		 * available in its buffer for more itxs, and more itxs
		 * will be committed relatively soon (relative to the
		 * latency of performing a write), then it's beneficial
		 * to wait for these "next" itxs. This way, more itxs
		 * can be committed to stable storage with fewer writes.
		 *
		 * 2. To try and use the largest lwb block size that the
		 * incoming rate of itxs can support. Again, this is to
		 * try and pack as many itxs into as few lwbs as
		 * possible, without significantly impacting the latency
		 * of each individual itx.
		 */
	}
}
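
/*
 * An editorial sketch of the two mechanisms described above (assumed
 * workloads, not behavior promised by this code): under a steady
 * stream of synchronous writes, each call to zil_process_commit_list()
 * finds the previous lwb "full" and issues it via zil_lwb_commit()
 * (mechanism 1); a lone, occasional fsync() instead parks in
 * zil_commit_waiter(), reaches its timeout, and issues the partially
 * filled lwb itself (mechanism 2).
 */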

/*
 * This function is responsible for ensuring the passed in commit waiter
 * (and associated commit itx) is committed to an lwb. If the waiter is
 * not already committed to an lwb, all itxs in the zilog's queue of
 * itxs will be processed. The assumption is the passed in waiter's
 * commit itx will be found in the queue just like the other non-commit
 * itxs, such that when the entire queue is processed, the waiter will
 * have been committed to an lwb.
 *
 * The lwb associated with the passed in waiter is not guaranteed to
 * have been issued by the time this function completes. If the lwb is
 * not issued, we rely on future calls to zil_commit_writer() to issue
 * the lwb, or the timeout mechanism found in zil_commit_waiter().
 */
static void
zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
	ASSERT(!MUTEX_HELD(&zilog->zl_lock));
	ASSERT(spa_writeable(zilog->zl_spa));
	ASSERT0(zilog->zl_suspend);

	mutex_enter(&zilog->zl_writer_lock);

	if (zcw->zcw_lwb != NULL || zcw->zcw_done) {
		/*
		 * It's possible that, while we were waiting to acquire
		 * the "zl_writer_lock", another thread committed this
		 * waiter to an lwb. If that occurs, we bail out early,
		 * without processing any of the zilog's queue of itxs.
		 *
		 * On certain workloads and system configurations, the
		 * "zl_writer_lock" can become highly contended. In an
		 * attempt to reduce this contention, we immediately drop
		 * the lock if the waiter has already been processed.
		 *
		 * We've measured this optimization to reduce CPU spent
		 * contending on this lock by up to 5%, using a system
		 * with 32 CPUs, low latency storage (~50 usec writes),
		 * and 1024 threads performing sync writes.
		 */
		goto out;
	}

	zil_get_commit_list(zilog);
	zil_prune_commit_list(zilog);
	zil_process_commit_list(zilog);

out:
	mutex_exit(&zilog->zl_writer_lock);
}

static void
zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
	ASSERT(!MUTEX_HELD(&zilog->zl_writer_lock));
	ASSERT(MUTEX_HELD(&zcw->zcw_lock));
	ASSERT3B(zcw->zcw_done, ==, B_FALSE);

	lwb_t *lwb = zcw->zcw_lwb;
	ASSERT3P(lwb, !=, NULL);
	ASSERT3S(lwb->lwb_state, !=, LWB_STATE_CLOSED);

	/*
	 * If the lwb has already been issued by another thread, we can
	 * immediately return since there's no work to be done (the
	 * point of this function is to issue the lwb). Additionally, we
	 * do this prior to acquiring the zl_writer_lock, to avoid
	 * acquiring it when it's not necessary to do so.
	 */
	if (lwb->lwb_state == LWB_STATE_ISSUED ||
	    lwb->lwb_state == LWB_STATE_DONE)
		return;

	/*
	 * In order to call zil_lwb_write_issue() we must hold the
	 * zilog's "zl_writer_lock". We can't simply acquire that lock,
	 * since we're already holding the commit waiter's "zcw_lock",
	 * and those two locks are acquired in the opposite order
	 * elsewhere.
	 */
	mutex_exit(&zcw->zcw_lock);
	mutex_enter(&zilog->zl_writer_lock);
	mutex_enter(&zcw->zcw_lock);

	/*
	 * Since we just dropped and re-acquired the commit waiter's
	 * lock, we have to re-check to see if the waiter was marked
	 * "done" during that process. If the waiter was marked "done",
	 * the "lwb" pointer is no longer valid (it can be free'd after
	 * the waiter is marked "done"), so without this check we could
	 * wind up with a use-after-free error below.
	 */
	if (zcw->zcw_done)
		goto out;

	ASSERT3P(lwb, ==, zcw->zcw_lwb);

	/*
	 * We've already checked this above, but since we hadn't
	 * acquired the zilog's zl_writer_lock, we have to perform this
	 * check a second time while holding the lock. We can't call
	 * zil_lwb_write_issue() if the lwb had already been issued.
	 */
	if (lwb->lwb_state == LWB_STATE_ISSUED ||
	    lwb->lwb_state == LWB_STATE_DONE)
		goto out;

	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);

	/*
	 * As described in the comments above zil_commit_waiter() and
	 * zil_process_commit_list(), we need to issue this lwb's zio
	 * since we've reached the commit waiter's timeout and it still
	 * hasn't been issued.
	 */
	lwb_t *nlwb = zil_lwb_write_issue(zilog, lwb);

	ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);

	/*
	 * Since the lwb's zio hadn't been issued by the time this thread
	 * reached its timeout, we reset the zilog's "zl_cur_used" field
	 * to influence the zil block size selection algorithm.
	 *
	 * By having to issue the lwb's zio here, it means the size of the
	 * lwb was too large, given the incoming throughput of itxs. By
	 * setting "zl_cur_used" to zero, we communicate this fact to the
	 * block size selection algorithm, so it can take this information
	 * into account, and potentially select a smaller size for the
	 * next lwb block that is allocated.
	 */
	zilog->zl_cur_used = 0;

	if (nlwb == NULL) {
		/*
		 * When zil_lwb_write_issue() returns NULL, this
		 * indicates zio_alloc_zil() failed to allocate the
		 * "next" lwb on-disk. When this occurs, the ZIL write
		 * pipeline must be stalled; see the comment within the
		 * zil_commit_writer_stall() function for more details.
		 *
		 * We must drop the commit waiter's lock prior to
		 * calling zil_commit_writer_stall() or else we can wind
		 * up with the following deadlock:
		 *
		 * - This thread is waiting for the txg to sync while
		 *   holding the waiter's lock; txg_wait_synced() is
		 *   used within zil_commit_writer_stall().
		 *
		 * - The txg can't sync because it is waiting for this
		 *   lwb's zio callback to call dmu_tx_commit().
		 *
		 * - The lwb's zio callback can't call dmu_tx_commit()
		 *   because it's blocked trying to acquire the waiter's
		 *   lock, which occurs prior to calling dmu_tx_commit().
		 */
		mutex_exit(&zcw->zcw_lock);
		zil_commit_writer_stall(zilog);
		mutex_enter(&zcw->zcw_lock);
	}

out:
	mutex_exit(&zilog->zl_writer_lock);
	ASSERT(MUTEX_HELD(&zcw->zcw_lock));
}

/*
 * This function is responsible for performing the following two tasks:
 *
 * 1. its primary responsibility is to block until the given "commit
 *    waiter" is considered "done".
 *
 * 2. its secondary responsibility is to issue the zio for the lwb that
 *    the given "commit waiter" is waiting on, if this function has
 *    waited "long enough" and the lwb is still in the "open" state.
 *
 * Given a sufficient amount of itxs being generated and written using
 * the ZIL, the lwb's zio will be issued via the zil_lwb_commit()
 * function. If this does not occur, this secondary responsibility will
 * ensure the lwb is issued even if there is no other synchronous
 * activity on the system.
 *
 * For more details, see zil_process_commit_list(); more specifically,
 * the comment at the bottom of that function.
 */
static void
zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
	ASSERT(!MUTEX_HELD(&zilog->zl_lock));
	ASSERT(!MUTEX_HELD(&zilog->zl_writer_lock));
	ASSERT(spa_writeable(zilog->zl_spa));
	ASSERT0(zilog->zl_suspend);

	mutex_enter(&zcw->zcw_lock);

	/*
	 * The timeout is scaled based on the lwb latency to avoid
	 * significantly impacting the latency of each individual itx.
	 * For more details, see the comment at the bottom of the
	 * zil_process_commit_list() function.
	 */
	int pct = MAX(zfs_commit_timeout_pct, 1);
	hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100;
	hrtime_t wakeup = gethrtime() + sleep;
	boolean_t timedout = B_FALSE;
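
	/*
	 * Worked example (with assumed numbers, for illustration only):
	 * if zfs_commit_timeout_pct is 5 and the last lwb took 1 ms
	 * (1000000 ns) to complete, then sleep = (1000000 * 5) / 100 =
	 * 50000 ns; i.e. this waiter allows at most ~50 usec of
	 * batching before issuing the lwb itself.
	 */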

	while (!zcw->zcw_done) {
		ASSERT(MUTEX_HELD(&zcw->zcw_lock));

		lwb_t *lwb = zcw->zcw_lwb;

		/*
		 * Usually, the waiter will have a non-NULL lwb field here,
		 * but it's possible for it to be NULL as a result of
		 * zil_commit() racing with spa_sync().
		 *
		 * When zil_clean() is called, it's possible for the itxg
		 * list (which may be cleaned via a taskq) to contain
		 * commit itxs. When this occurs, the commit waiters linked
		 * off of these commit itxs will not be committed to an
		 * lwb. Additionally, these commit waiters will not be
		 * marked done until zil_commit_waiter_skip() is called via
		 * zil_itxg_clean().
		 *
		 * Thus, it's possible for this commit waiter (i.e. the
		 * "zcw" variable) to be found in this "in between" state;
		 * where its "zcw_lwb" field is NULL, and it hasn't yet
		 * been skipped, so its "zcw_done" field is still B_FALSE.
		 */
		IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_CLOSED);

		if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) {
			ASSERT3B(timedout, ==, B_FALSE);

			/*
			 * If the lwb hasn't been issued yet, then we
			 * need to wait with a timeout, in case this
			 * function needs to issue the lwb after the
			 * timeout is reached; responsibility (2) from
			 * the comment above this function.
			 */
			clock_t timeleft = cv_timedwait_hires(&zcw->zcw_cv,
			    &zcw->zcw_lock, wakeup, USEC2NSEC(1),
			    CALLOUT_FLAG_ABSOLUTE);

			if (timeleft >= 0 || zcw->zcw_done)
				continue;

			timedout = B_TRUE;
			zil_commit_waiter_timeout(zilog, zcw);

			if (!zcw->zcw_done) {
				/*
				 * If the commit waiter has already been
				 * marked "done", it's possible for the
				 * waiter's lwb structure to have already
				 * been freed. Thus, we can only reliably
				 * make these assertions if the waiter
				 * isn't done.
				 */
				ASSERT3P(lwb, ==, zcw->zcw_lwb);
				ASSERT3S(lwb->lwb_state, !=,
				    LWB_STATE_OPENED);
			}
		} else {
			/*
			 * If the lwb isn't open, then it must have already
			 * been issued. In that case, there's no need to
			 * use a timeout when waiting for the lwb to
			 * complete.
			 *
			 * Additionally, if the lwb is NULL, the waiter
			 * will soon be signalled and marked done via
			 * zil_clean() and zil_itxg_clean(), so no timeout
			 * is required.
			 */
			IMPLY(lwb != NULL,
			    lwb->lwb_state == LWB_STATE_ISSUED ||
			    lwb->lwb_state == LWB_STATE_DONE);
			cv_wait(&zcw->zcw_cv, &zcw->zcw_lock);
		}
	}

	mutex_exit(&zcw->zcw_lock);
}

static zil_commit_waiter_t *
zil_alloc_commit_waiter()
{
	zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP);

	cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL);
	list_link_init(&zcw->zcw_node);
	zcw->zcw_lwb = NULL;
	zcw->zcw_done = B_FALSE;
	zcw->zcw_zio_error = 0;

	return (zcw);
}

static void
zil_free_commit_waiter(zil_commit_waiter_t *zcw)
{
	ASSERT(!list_link_active(&zcw->zcw_node));
	ASSERT3P(zcw->zcw_lwb, ==, NULL);
	ASSERT3B(zcw->zcw_done, ==, B_TRUE);
	mutex_destroy(&zcw->zcw_lock);
	cv_destroy(&zcw->zcw_cv);
	kmem_cache_free(zil_zcw_cache, zcw);
}
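
/*
 * Lifecycle sketch of a commit waiter; this mirrors the exact sequence
 * used by zil_commit() below:
 *
 *	zil_commit_waiter_t *zcw = zil_alloc_commit_waiter();
 *	zil_commit_itx_assign(zilog, zcw);
 *	zil_commit_writer(zilog, zcw);
 *	zil_commit_waiter(zilog, zcw);
 *	zil_free_commit_waiter(zcw);
 */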

/*
 * This function is used to create a TX_COMMIT itx and assign it. This
 * way, it will be linked into the ZIL's list of synchronous itxs, and
 * then later committed to an lwb (or skipped) when
 * zil_process_commit_list() is called.
 */
static void
zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
	dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));

	itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t));
	itx->itx_sync = B_TRUE;
	itx->itx_private = zcw;

	zil_itx_assign(zilog, itx, tx);

	dmu_tx_commit(tx);
}

/*
 * Commit ZFS Intent Log transactions (itxs) to stable storage.
 *
 * When writing ZIL transactions to the on-disk representation of the
 * ZIL, the itxs are committed to a Log Write Block (lwb). Multiple
 * itxs can be committed to a single lwb. Once a lwb is written and
 * committed to stable storage (i.e. the lwb is written, and vdevs have
 * been flushed), each itx that was committed to that lwb is also
 * considered to be committed to stable storage.
 *
 * When an itx is committed to an lwb, the log record (lr_t) contained
 * by the itx is copied into the lwb's zio buffer, and once this buffer
 * is written to disk, it becomes an on-disk ZIL block.
 *
 * As itxs are generated, they're inserted into the ZIL's queue of
 * uncommitted itxs. The semantics of zil_commit() are such that it will
 * block until all itxs that were in the queue when it was called, are
 * committed to stable storage.
 *
 * If "foid" is zero, this means all "synchronous" and "asynchronous"
 * itxs, for all objects in the dataset, will be committed to stable
 * storage prior to zil_commit() returning. If "foid" is non-zero, all
 * "synchronous" itxs for all objects, but only "asynchronous" itxs
 * that correspond to the foid passed in, will be committed to stable
 * storage prior to zil_commit() returning.
 *
 * Generally speaking, when zil_commit() is called, the consumer doesn't
 * actually care about _all_ of the uncommitted itxs. Instead, they're
 * simply trying to wait for a specific itx to be committed to disk,
 * but the interface(s) for interacting with the ZIL don't allow such
 * fine-grained communication. A better interface would allow a consumer
 * to create and assign an itx, and then pass a reference to this itx to
 * zil_commit(); such that zil_commit() would return as soon as that
 * specific itx was committed to disk (instead of waiting for _all_
 * itxs to be committed).
 *
 * When a thread calls zil_commit() a special "commit itx" will be
 * generated, along with a corresponding "waiter" for this commit itx.
 * zil_commit() will wait on this waiter's CV, such that when the waiter
 * is marked done, and signalled, zil_commit() will return.
 *
 * This commit itx is inserted into the queue of uncommitted itxs. This
 * provides an easy mechanism for determining which itxs were in the
 * queue prior to zil_commit() having been called, and which itxs were
 * added after zil_commit() was called.
 *
 * The commit itx is special; it doesn't have any on-disk representation.
 * When a commit itx is "committed" to an lwb, the waiter associated
 * with it is linked onto the lwb's list of waiters. Then, when that lwb
 * completes, each waiter on the lwb's list is marked done and signalled
 * -- allowing the thread waiting on the waiter to return from zil_commit().
 *
 * It's important to point out a few critical factors that allow us
 * to make use of the commit itxs, commit waiters, per-lwb lists of
 * commit waiters, and zio completion callbacks like we're doing:
 *
 * 1. The list of waiters for each lwb is traversed, and each commit
 *    waiter is marked "done" and signalled, in the zio completion
 *    callback of the lwb's zio[*].
 *
 *    [*] Actually, the waiters are signalled in the zio completion
 *        callback of the root zio for the DKIOCFLUSHWRITECACHE commands
 *        that are sent to the vdevs upon completion of the lwb zio.
 *
 * 2. When the itxs are inserted into the ZIL's queue of uncommitted
 *    itxs, the order in which they are inserted is preserved[*]; as
 *    itxs are added to the queue, they are added to the tail of
 *    in-memory linked lists.
 *
 *    When committing the itxs to lwbs (to be written to disk), they
 *    are committed in the same order in which the itxs were added to
 *    the uncommitted queue's linked list(s); i.e. the linked list of
 *    itxs to commit is traversed from head to tail, and each itx is
 *    committed to an lwb in that order.
 *
 *    [*] To clarify:
 *
 *    - the order of "sync" itxs is preserved w.r.t. other
 *      "sync" itxs, regardless of the corresponding objects.
 *    - the order of "async" itxs is preserved w.r.t. other
 *      "async" itxs corresponding to the same object.
 *    - the order of "async" itxs is *not* preserved w.r.t. other
 *      "async" itxs corresponding to different objects.
 *    - the order of "sync" itxs w.r.t. "async" itxs (or vice
 *      versa) is *not* preserved, even for itxs that correspond
 *      to the same object.
 *
 *    For more details, see: zil_itx_assign(), zil_async_to_sync(),
 *    zil_get_commit_list(), and zil_process_commit_list().
 *
 * 3. The lwbs represent a linked list of blocks on disk. Thus, any
 *    lwb cannot be considered committed to stable storage, until its
 *    "previous" lwb is also committed to stable storage. This fact,
 *    coupled with the fact described above, means that itxs are
 *    committed in (roughly) the order in which they were generated.
 *    This is essential because itxs are dependent on prior itxs.
 *    Thus, we *must not* deem an itx as being committed to stable
 *    storage, until *all* prior itxs have also been committed to
 *    stable storage.
 *
 *    To enforce this ordering of lwb zio's, while still leveraging as
 *    much of the underlying storage performance as possible, we rely
 *    on two fundamental concepts:
 *
 *    1. The creation and issuance of lwb zio's is protected by
 *       the zilog's "zl_writer_lock", which ensures only a single
 *       thread is creating and/or issuing lwb's at a time
 *    2. The "previous" lwb is a child of the "current" lwb
 *       (leveraging the zio parent-child dependency graph)
 *
 *    By relying on this parent-child zio relationship, we can have
 *    many lwb zio's concurrently issued to the underlying storage,
 *    but the order in which they complete will be the same order in
 *    which they were created.
 */
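
/*
 * Illustrative caller sketch (hypothetical and condensed from typical
 * consumers; not code from this file): the fsync() path boils down to
 *
 *	zil_commit(zilog, zp->z_id);
 *
 * and, on return, every itx for that object that was in the queue when
 * zil_commit() was called is on stable storage (or spa_sync() has made
 * it so).
 */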

void
zil_commit(zilog_t *zilog, uint64_t foid)
{
	/*
	 * We should never attempt to call zil_commit on a snapshot for
	 * a couple of reasons:
	 *
	 * 1. A snapshot may never be modified, thus it cannot have any
	 *    in-flight itxs that would have modified the dataset.
	 *
	 * 2. By design, when zil_commit() is called, a commit itx will
	 *    be assigned to this zilog; as a result, the zilog will be
	 *    dirtied. We must not dirty the zilog of a snapshot; there's
	 *    checks in the code that enforce this invariant, and will
	 *    cause a panic if it's not upheld.
	 */
	ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE);

	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
		return;

	if (!spa_writeable(zilog->zl_spa)) {
		/*
		 * If the SPA is not writable, there should never be any
		 * pending itxs waiting to be committed to disk. If that
		 * weren't true, we'd skip writing those itxs out, and
		 * would break the semantics of zil_commit(); thus, we're
		 * verifying that truth before we return to the caller.
		 */
		ASSERT(list_is_empty(&zilog->zl_lwb_list));
		ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
		for (int i = 0; i < TXG_SIZE; i++)
			ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL);
		return;
	}

	/*
	 * If the ZIL is suspended, we don't want to dirty it by calling
	 * zil_commit_itx_assign() below, nor can we write out lwbs as
	 * would be done in zil_commit_writer(). Thus, we simply rely on
	 * txg_wait_synced() to maintain the necessary semantics, and
	 * avoid calling those functions altogether.
	 */
	if (zilog->zl_suspend > 0) {
		txg_wait_synced(zilog->zl_dmu_pool, 0);
		return;
	}

	/*
	 * Move the "async" itxs for the specified foid to the "sync"
	 * queues, such that they will be later committed (or skipped)
	 * to an lwb when zil_process_commit_list() is called.
	 *
	 * Since these "async" itxs must be committed prior to this
	 * call to zil_commit returning, we must perform this operation
	 * before we call zil_commit_itx_assign().
	 */
	zil_async_to_sync(zilog, foid);

	/*
	 * We allocate a new "waiter" structure which will initially be
	 * linked to the commit itx using the itx's "itx_private" field.
	 * Since the commit itx doesn't represent any on-disk state,
	 * when it's committed to an lwb, rather than copying its
	 * lr_t into the lwb's buffer, the commit itx's "waiter" will be
	 * added to the lwb's list of waiters. Then, when the lwb is
	 * committed to stable storage, each waiter in the lwb's list of
	 * waiters will be marked "done", and signalled.
	 *
	 * We must create the waiter and assign the commit itx prior to
	 * calling zil_commit_writer(), or else our specific commit itx
	 * is not guaranteed to be committed to an lwb prior to calling
	 * zil_commit_waiter().
	 */
	zil_commit_waiter_t *zcw = zil_alloc_commit_waiter();
	zil_commit_itx_assign(zilog, zcw);

	zil_commit_writer(zilog, zcw);
	zil_commit_waiter(zilog, zcw);

	if (zcw->zcw_zio_error != 0) {
		/*
		 * If there was an error writing out the ZIL blocks that
		 * this thread is waiting on, then we fall back to
		 * relying on spa_sync() to write out the data this
		 * thread is waiting on. Obviously this has performance
		 * implications, but the expectation is for this to be
		 * an exceptional case, and shouldn't occur often.
		 */
		DTRACE_PROBE2(zil__commit__io__error,
		    zilog_t *, zilog, zil_commit_waiter_t *, zcw);
		txg_wait_synced(zilog->zl_dmu_pool, 0);
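
		/*
		 * A hypothetical way to observe how often this fallback
		 * fires, using the probe above (sketch; SDT probes
		 * translate "__" to "-"):
		 *
		 *	dtrace -n 'sdt:::zil-commit-io-error
		 *	    { @[probefunc] = count(); }'
		 */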
	}

	zil_free_commit_waiter(zcw);
}

/*
 * Called in syncing context to free committed log blocks and update log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	zil_header_t *zh = zil_header_in_syncing_context(zilog);
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = zilog->zl_spa;
	uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
	lwb_t *lwb;

	/*
	 * We don't zero out zl_destroy_txg, so make sure we don't try
	 * to destroy it twice.
	 */
	if (spa_sync_pass(spa) != 1)
		return;

	mutex_enter(&zilog->zl_lock);

	ASSERT(zilog->zl_stop_sync == 0);

	if (*replayed_seq != 0) {
		ASSERT(zh->zh_replay_seq < *replayed_seq);
		zh->zh_replay_seq = *replayed_seq;
		*replayed_seq = 0;
	}

	if (zilog->zl_destroy_txg == txg) {
		blkptr_t blk = zh->zh_log;

		ASSERT(list_head(&zilog->zl_lwb_list) == NULL);

		bzero(zh, sizeof (zil_header_t));
		bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));

		if (zilog->zl_keep_first) {
			/*
			 * If this block was part of a log chain that couldn't
			 * be claimed because a device was missing during
			 * zil_claim(), but that device later returns,
			 * then this block could erroneously appear valid.
			 * To guard against this, assign a new GUID to the new
			 * log chain so it doesn't matter what blk points to.
			 */
			zil_init_log_chain(zilog, &blk);
			zh->zh_log = blk;
		}
	}

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		zh->zh_log = lwb->lwb_blk;
		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
			break;
		list_remove(&zilog->zl_lwb_list, lwb);
		zio_free(spa, txg, &lwb->lwb_blk);
		zil_free_lwb(zilog, lwb);

		/*
		 * If we don't have anything left in the lwb list then
		 * we've had an allocation failure and we need to zero
		 * out the zil_header blkptr so that we don't end
		 * up freeing the same block twice.
		 */
		if (list_head(&zilog->zl_lwb_list) == NULL)
			BP_ZERO(&zh->zh_log);
	}

	mutex_exit(&zilog->zl_lock);
}

/* ARGSUSED */
static int
zil_lwb_cons(void *vbuf, void *unused, int kmflag)
{
	lwb_t *lwb = vbuf;
	list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t),
	    offsetof(zil_commit_waiter_t, zcw_node));
	avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare,
	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
	mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
	return (0);
}

/* ARGSUSED */
static void
zil_lwb_dest(void *vbuf, void *unused)
{
	lwb_t *lwb = vbuf;
	mutex_destroy(&lwb->lwb_vdev_lock);
	avl_destroy(&lwb->lwb_vdev_tree);
	list_destroy(&lwb->lwb_waiters);
}

void
zil_init(void)
{
	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
	    sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0);

	zil_zcw_cache = kmem_cache_create("zil_zcw_cache",
	    sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
	kmem_cache_destroy(zil_zcw_cache);
	kmem_cache_destroy(zil_lwb_cache);
}

void
zil_set_sync(zilog_t *zilog, uint64_t sync)
{
	zilog->zl_sync = sync;
}

void
zil_set_logbias(zilog_t *zilog, uint64_t logbias)
{
	zilog->zl_logbias = logbias;
}

zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
	zilog_t *zilog;

	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

	zilog->zl_header = zh_phys;
	zilog->zl_os = os;
	zilog->zl_spa = dmu_objset_spa(os);
	zilog->zl_dmu_pool = dmu_objset_pool(os);
	zilog->zl_destroy_txg = TXG_INITIAL - 1;
	zilog->zl_logbias = dmu_objset_logbias(os);
	zilog->zl_sync = dmu_objset_syncprop(os);
	zilog->zl_dirty_max_txg = 0;
	zilog->zl_last_lwb_opened = NULL;
	zilog->zl_last_lwb_latency = 0;

	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zilog->zl_writer_lock, NULL, MUTEX_DEFAULT, NULL);

	for (int i = 0; i < TXG_SIZE; i++) {
		mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
		    MUTEX_DEFAULT, NULL);
	}

	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
	    offsetof(lwb_t, lwb_node));

	list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
	    offsetof(itx_t, itx_node));

	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);

	return (zilog);
}

void
zil_free(zilog_t *zilog)
{
	zilog->zl_stop_sync = 1;

	ASSERT0(zilog->zl_suspend);
	ASSERT0(zilog->zl_suspending);

	ASSERT(list_is_empty(&zilog->zl_lwb_list));
	list_destroy(&zilog->zl_lwb_list);

	ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
	list_destroy(&zilog->zl_itx_commit_list);

	for (int i = 0; i < TXG_SIZE; i++) {
		/*
		 * It's possible for an itx to be generated that doesn't
		 * dirty a txg (e.g. ztest TX_TRUNCATE). So there's no
		 * zil_clean() callback to remove the entry. We remove
		 * those here.
		 *
		 * Also free up the ziltest itxs.
		 */
		if (zilog->zl_itxg[i].itxg_itxs)
			zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
		mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
	}

	mutex_destroy(&zilog->zl_writer_lock);
	mutex_destroy(&zilog->zl_lock);

	cv_destroy(&zilog->zl_cv_suspend);

	kmem_free(zilog, sizeof (zilog_t));
}

/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
	zilog_t *zilog = dmu_objset_zil(os);

	ASSERT3P(zilog->zl_get_data, ==, NULL);
	ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
	ASSERT(list_is_empty(&zilog->zl_lwb_list));

	zilog->zl_get_data = get_data;

	return (zilog);
}

/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
	lwb_t *lwb;
	uint64_t txg;

	if (!dmu_objset_is_snapshot(zilog->zl_os)) {
		zil_commit(zilog, 0);
	} else {
		ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL);
		ASSERT0(zilog->zl_dirty_max_txg);
		ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE);
	}

	mutex_enter(&zilog->zl_lock);
	lwb = list_tail(&zilog->zl_lwb_list);
	if (lwb == NULL)
		txg = zilog->zl_dirty_max_txg;
	else
		txg = MAX(zilog->zl_dirty_max_txg, lwb->lwb_max_txg);
	mutex_exit(&zilog->zl_lock);

	/*
	 * We need to use txg_wait_synced() to wait long enough for the
	 * ZIL to be clean, and to wait for all pending lwbs to be
	 * written out.
	 */
	if (txg != 0)
		txg_wait_synced(zilog->zl_dmu_pool, txg);

	if (zilog_is_dirty(zilog))
		zfs_dbgmsg("zil (%p) is dirty, txg %llu", zilog, txg);
	VERIFY(!zilog_is_dirty(zilog));

	zilog->zl_get_data = NULL;

	/*
	 * We should have only one lwb left on the list; remove it now.
	 */
	mutex_enter(&zilog->zl_lock);
	lwb = list_head(&zilog->zl_lwb_list);
	if (lwb != NULL) {
		ASSERT3P(lwb, ==, list_tail(&zilog->zl_lwb_list));
		ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
		list_remove(&zilog->zl_lwb_list, lwb);
		zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		zil_free_lwb(zilog, lwb);
	}
	mutex_exit(&zilog->zl_lock);
}

static char *suspend_tag = "zil suspending";

/*
 * Suspend an intent log. While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * On old version pools, we suspend the log briefly when taking a
 * snapshot so that it will have an empty intent log.
 *
 * Long holds are not really intended to be used the way we do here --
 * held for such a short time. A concurrent caller of dsl_dataset_long_held()
 * could fail. Therefore we take pains to only put a long hold if it is
 * actually necessary. Fortunately, it will only be necessary if the
 * objset is currently mounted (or the ZVOL equivalent). In that case it
 * will already have a long hold, so we are not really making things any worse.
 *
 * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
 * zvol_state_t), and use their mechanism to prevent their hold from being
 * dropped (e.g. VFS_HOLD()). However, that would be even more pain for
 * very little gain.
 *
 * If cookiep == NULL, this does both the suspend & resume.
 * Otherwise, it returns with the dataset "long held", and the cookie
 * should be passed into zil_resume().
 */
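
/*
 * Hypothetical usage sketch (not code from this file) of the cookie
 * variant described above:
 *
 *	void *cookie;
 *	int error = zil_suspend(osname, &cookie);
 *	if (error == 0) {
 *		...	(log is empty; dataset is "long held")
 *		zil_resume(cookie);
 *	}
 */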

int
zil_suspend(const char *osname, void **cookiep)
{
	objset_t *os;
	zilog_t *zilog;
	const zil_header_t *zh;
	int error;

	error = dmu_objset_hold(osname, suspend_tag, &os);
	if (error != 0)
		return (error);
	zilog = dmu_objset_zil(os);

	mutex_enter(&zilog->zl_lock);
	zh = zilog->zl_header;

	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {		/* unplayed log */
		mutex_exit(&zilog->zl_lock);
		dmu_objset_rele(os, suspend_tag);
		return (SET_ERROR(EBUSY));
	}

	/*
	 * Don't put a long hold in the cases where we can avoid it. This
	 * is when there is no cookie so we are doing a suspend & resume
	 * (i.e. called from zil_vdev_offline()), and there's nothing to do
	 * for the suspend because it's already suspended, or there's no ZIL.
	 */
	if (cookiep == NULL && !zilog->zl_suspending &&
	    (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
		mutex_exit(&zilog->zl_lock);
		dmu_objset_rele(os, suspend_tag);
		return (0);
	}

	dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
	dsl_pool_rele(dmu_objset_pool(os), suspend_tag);

	zilog->zl_suspend++;

	if (zilog->zl_suspend > 1) {
		/*
		 * Someone else is already suspending it.
		 * Just wait for them to finish.
		 */
		while (zilog->zl_suspending)
			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
		mutex_exit(&zilog->zl_lock);

		if (cookiep == NULL)
			zil_resume(os);
		else
			*cookiep = os;
		return (0);
	}

	/*
	 * If there is no pointer to an on-disk block, this ZIL must not
	 * be active (e.g. filesystem not mounted), so there's nothing
	 * to clean up.
	 */
	if (BP_IS_HOLE(&zh->zh_log)) {
		ASSERT(cookiep != NULL); /* fast path already handled */

		*cookiep = os;
		mutex_exit(&zilog->zl_lock);
		return (0);
	}

	zilog->zl_suspending = B_TRUE;
	mutex_exit(&zilog->zl_lock);

	zil_commit(zilog, 0);

	zil_destroy(zilog, B_FALSE);

	mutex_enter(&zilog->zl_lock);
	zilog->zl_suspending = B_FALSE;
	cv_broadcast(&zilog->zl_cv_suspend);
	mutex_exit(&zilog->zl_lock);

	if (cookiep == NULL)
		zil_resume(os);
	else
		*cookiep = os;
	return (0);
}

void
zil_resume(void *cookie)
{
	objset_t *os = cookie;
	zilog_t *zilog = dmu_objset_zil(os);

	mutex_enter(&zilog->zl_lock);
	ASSERT(zilog->zl_suspend != 0);
	zilog->zl_suspend--;
	mutex_exit(&zilog->zl_lock);
	dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
	dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
}

typedef struct zil_replay_arg {
	zil_replay_func_t **zr_replay;
	void *zr_arg;
	boolean_t zr_byteswap;
	char *zr_lr;
} zil_replay_arg_t;

static int
zil_replay_error(zilog_t *zilog, lr_t *lr, int error)
{
	char name[ZFS_MAX_DATASET_NAME_LEN];

	zilog->zl_replaying_seq--;	/* didn't actually replay this one */

	dmu_objset_name(zilog->zl_os, name);

	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
	    "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
	    (u_longlong_t)lr->lrc_seq,
	    (u_longlong_t)(lr->lrc_txtype & ~TX_CI),
	    (lr->lrc_txtype & TX_CI) ? "CI" : "");

	return (error);
}

static int
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
	zil_replay_arg_t *zr = zra;
	const zil_header_t *zh = zilog->zl_header;
	uint64_t reclen = lr->lrc_reclen;
	uint64_t txtype = lr->lrc_txtype;
	int error = 0;

	zilog->zl_replaying_seq = lr->lrc_seq;

	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
		return (0);

	if (lr->lrc_txg < claim_txg)		/* already committed */
		return (0);

	/* Strip case-insensitive bit, still present in log record */
	txtype &= ~TX_CI;

	if (txtype == 0 || txtype >= TX_MAX_TYPE)
		return (zil_replay_error(zilog, lr, EINVAL));

	/*
	 * If this record type can be logged out of order, the object
	 * (lr_foid) may no longer exist. That's legitimate, not an error.
	 */
	if (TX_OOO(txtype)) {
		error = dmu_object_info(zilog->zl_os,
		    ((lr_ooo_t *)lr)->lr_foid, NULL);
		if (error == ENOENT || error == EEXIST)
			return (0);
	}

	/*
	 * Make a copy of the data so we can revise and extend it.
	 */
	bcopy(lr, zr->zr_lr, reclen);

	/*
	 * If this is a TX_WRITE with a blkptr, suck in the data.
	 */
	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
		error = zil_read_log_data(zilog, (lr_write_t *)lr,
		    zr->zr_lr + reclen);
		if (error != 0)
			return (zil_replay_error(zilog, lr, error));
	}

	/*
	 * The log block containing this lr may have been byteswapped
	 * so that we can easily examine common fields like lrc_txtype.
	 * However, the log is a mix of different record types, and only the
	 * replay vectors know how to byteswap their records. Therefore, if
	 * the lr was byteswapped, undo it before invoking the replay vector.
	 */
	if (zr->zr_byteswap)
		byteswap_uint64_array(zr->zr_lr, reclen);

	/*
	 * We must now do two things atomically: replay this log record,
	 * and update the log header sequence number to reflect the fact that
	 * we did so. At the end of each replay function the sequence number
	 * is updated if we are in replay mode.
	 */
	error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
	if (error != 0) {
		/*
		 * The DMU's dnode layer doesn't see removes until the txg
		 * commits, so a subsequent claim can spuriously fail with
		 * EEXIST. So if we receive any error we try syncing out
		 * any removes then retry the transaction. Note that we
		 * specify B_FALSE for byteswap now, so we don't do it twice.
		 */
		txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
		error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
		if (error != 0)
			return (zil_replay_error(zilog, lr, error));
	}
	return (0);
}

/* ARGSUSED */
static int
zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zilog->zl_replay_blks++;

	return (0);
}

/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
{
	zilog_t *zilog = dmu_objset_zil(os);
	const zil_header_t *zh = zilog->zl_header;
	zil_replay_arg_t zr;

	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
		zil_destroy(zilog, B_TRUE);
		return;
	}

	zr.zr_replay = replay_func;
	zr.zr_arg = arg;
	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
	zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

	/*
	 * Wait for in-progress removes to sync before starting replay.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zilog->zl_replay = B_TRUE;
	zilog->zl_replay_time = ddi_get_lbolt();
	ASSERT(zilog->zl_replay_blks == 0);
	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
	    zh->zh_claim_txg);
	kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);

	zil_destroy(zilog, B_FALSE);
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
	zilog->zl_replay = B_FALSE;
}

boolean_t
zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
{
	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
		return (B_TRUE);

	if (zilog->zl_replay) {
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
		    zilog->zl_replaying_seq;
		return (B_TRUE);
	}

	return (B_FALSE);
}
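
/*
 * Hypothetical caller sketch (condensed from how the zfs_log_*()
 * functions typically use this; not code from this file): log-writing
 * code consults this to avoid generating new itxs for operations that
 * are themselves being replayed:
 *
 *	if (zil_replaying(zilog, tx))
 *		return;
 */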

/* ARGSUSED */
int
zil_vdev_offline(const char *osname, void *arg)
{
	int error;

	error = zil_suspend(osname, NULL);
	if (error != 0)
		return (SET_ERROR(EEXIST));
	return (0);
}