/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.45 2008/07/31 04:42:04 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"
static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static int hammer_flusher_flush(hammer_mount_t hmp, int *nomorep);
static int hammer_flusher_flush_inode(hammer_inode_t ip, void *data);

RB_GENERATE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
            hammer_ino_rb_compare);
/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
        TAILQ_ENTRY(hammer_flusher_info) entry;
        struct hammer_mount *hmp;
        thread_t        td;
        int             runstate;
        hammer_flush_group_t flg;
        struct hammer_transaction trans;        /* per-slave transaction */
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;
/*
 * Sync all inodes pending on the flusher.
 *
 * All flush groups will be flushed.  This does not queue dirty inodes
 * to the flush groups, it just flushes out what has already been queued!
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
        int seq;

        seq = hammer_flusher_async(hmp, NULL);
        hammer_flusher_wait(hmp, seq);
}
/*
 * Sync all flush groups through to close_flg - return immediately.
 * If close_flg is NULL all flush groups are synced.
 *
 * Returns the sequence number of the last closed flush group,
 * which may be close_flg.  When syncing to the end if there
 * are no flush groups pending we still cycle the flusher, and
 * must allocate a sequence number to placemark the spot even
 * though no flush group will ever be associated with it.
 */
int
hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
{
        hammer_flush_group_t flg;
        int seq;

        if (close_flg && close_flg->closed)
                return(close_flg->seq);

        /*
         * Close flush groups until we hit the end of the list
         * or until we have closed close_flg.
         */
        while ((flg = hmp->next_flush_group) != NULL) {
                KKASSERT(flg->closed == 0 && flg->running == 0);
                flg->closed = 1;
                hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
                if (flg == close_flg)
                        break;
        }

        if (hmp->flusher.td) {
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
                if (flg) {
                        seq = flg->seq;
                } else {
                        seq = hmp->flusher.next;
                        ++hmp->flusher.next;
                }
        } else {
                seq = hmp->flusher.done;
        }
        return(seq);
}
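
/*
 * Example usage (hypothetical caller, for illustration): a path that
 * wants everything queued so far to reach the media closes all pending
 * flush groups and then blocks on the returned sequence number:
 *
 *      seq = hammer_flusher_async(hmp, NULL);
 *      hammer_flusher_wait(hmp, seq);
 *
 * which is exactly what hammer_flusher_sync() above does.
 */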
/*
 * Flush the current/next flushable flg.  This function is typically called
 * in a loop along with hammer_flusher_wait(hmp, returned_seq) to iterate
 * flush groups until specific conditions are met.
 *
 * If a flush is currently in progress its seq is returned.
 *
 * If no flush is currently in progress the next available flush group
 * will be flushed and its seq returned.
 *
 * If no flush groups are present a dummy seq will be allocated and
 * returned and the flusher will be activated (e.g. to flush the
 * undo/redo and the volume header).
 */
int
hammer_flusher_async_one(hammer_mount_t hmp)
{
        hammer_flush_group_t flg;
        int seq;

        if (hmp->flusher.td) {
                flg = TAILQ_FIRST(&hmp->flush_group_list);
                seq = hammer_flusher_async(hmp, flg);
        } else {
                seq = hmp->flusher.done;
        }
        return(seq);
}
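
/*
 * Example of the loop described above (hypothetical caller, for
 * illustration only; the predicate is made up):
 *
 *      while (still_need_space(hmp)) {
 *              seq = hammer_flusher_async_one(hmp);
 *              hammer_flusher_wait(hmp, seq);
 *      }
 */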
/*
 * Wait for the flusher to finish flushing the specified sequence
 * number.  The flush is already running and will signal us on
 * each completion.
 */
void
hammer_flusher_wait(hammer_mount_t hmp, int seq)
{
        while (seq - hmp->flusher.done > 0)
                tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
}
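
/*
 * Note that the loop above compares sequence numbers by signed difference
 * (seq - done > 0) rather than by magnitude, so the test keeps working
 * when the sequence counter wraps.  For example, with done == 0x7ffffffe
 * and seq == 0x80000001 the difference is 3 and we still sleep, even
 * though seq is numerically smaller once it wraps negative.
 */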
/*
 * Returns non-zero if the flusher is currently running.  Used for
 * time-domain multiplexing of frontend operations in order to avoid
 * starving the backend flusher.
 */
int
hammer_flusher_running(hammer_mount_t hmp)
{
        int seq = hmp->flusher.next - 1;

        if (seq - hmp->flusher.done > 0)
                return(1);
        return(0);
}
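
/*
 * A sketch of the time-domain multiplexing mentioned above (hypothetical
 * frontend code, for illustration only): a heavy frontend operation can
 * yield whenever a backend flush cycle is active:
 *
 *      if (hammer_flusher_running(hmp))
 *              hammer_flusher_wait_next(hmp);
 */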
void
hammer_flusher_wait_next(hammer_mount_t hmp)
{
        int seq;

        seq = hammer_flusher_async_one(hmp);
        hammer_flusher_wait(hmp, seq);
}
void
hammer_flusher_create(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        hmp->flusher.signal = 0;
        hmp->flusher.done = 0;
        hmp->flusher.next = 1;
        hammer_ref(&hmp->flusher.finalize_lock);
        TAILQ_INIT(&hmp->flusher.run_list);
        TAILQ_INIT(&hmp->flusher.ready_list);

        lwkt_create(hammer_flusher_master_thread, hmp,
                    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                info = kmalloc(sizeof(*info), hmp->m_misc, M_WAITOK|M_ZERO);
                info->hmp = hmp;
                TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
                lwkt_create(hammer_flusher_slave_thread, info,
                            &info->td, NULL, 0, -1, "hammer-S%d", i);
        }
}
void
hammer_flusher_destroy(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;

        /*
         * Kill the master
         */
        hmp->flusher.exiting = 1;
        while (hmp->flusher.td) {
                ++hmp->flusher.signal;
                wakeup(&hmp->flusher.signal);
                tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
        }

        /*
         * Kill the slaves
         */
        while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
                KKASSERT(info->runstate == 0);
                TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
                info->runstate = -1;
                wakeup(&info->runstate);
                while (info->td)
                        tsleep(&info->td, 0, "hmrwwc", 0);
                kfree(info, hmp->m_misc);
        }
}
/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
        hammer_mount_t hmp;
        int seq;
        int nomore;

        hmp = arg;

        lwkt_gettoken(&hmp->fs_token);

        for (;;) {
                /*
                 * Flush all sequence numbers up to but not including .next,
                 * or until an open flush group is encountered.
                 */
                for (;;) {
                        while (hmp->flusher.group_lock)
                                tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
                        hammer_flusher_clean_loose_ios(hmp);

                        seq = hammer_flusher_flush(hmp, &nomore);
                        hmp->flusher.done = seq;
                        wakeup(&hmp->flusher.done);

                        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                                break;
                        if (nomore)
                                break;
                }

                /*
                 * Wait for activity.
                 */
                if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
                        break;
                while (hmp->flusher.signal == 0)
                        tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
                hmp->flusher.signal = 0;
        }

        /*
         * And we are done.
         */
        hmp->flusher.td = NULL;
        wakeup(&hmp->flusher.exiting);
        lwkt_reltoken(&hmp->fs_token);
        lwkt_exit();
}
/*
 * Flush the next sequence number until an open flush group is encountered
 * or we reach (next).  Not all sequence numbers will have flush groups
 * associated with them.  These require that the UNDO/REDO FIFO still be
 * flushed since it can take at least one additional run to synchronize
 * the FIFO, and more to also synchronize the reserve structures.
 */
static int
hammer_flusher_flush(hammer_mount_t hmp, int *nomorep)
{
        hammer_flusher_info_t info;
        hammer_flush_group_t flg;
        hammer_reserve_t resv;
        int count;
        int seq;

        /*
         * Just in case there's a flush race on mount.  Seq number
         * does not change.
         */
        if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL) {
                *nomorep = 1;
                return (hmp->flusher.done);
        }
        *nomorep = 0;

        /*
         * Flush the next sequence number.  Sequence numbers can exist
         * without an assigned flush group, indicating that just a FIFO flush
         * should occur.
         */
        seq = hmp->flusher.done + 1;
        flg = TAILQ_FIRST(&hmp->flush_group_list);
        if (flg == NULL) {
                if (seq == hmp->flusher.next) {
                        *nomorep = 1;
                        return (hmp->flusher.done);
                }
        } else if (seq == flg->seq) {
                if (flg->closed) {
                        KKASSERT(flg->running == 0);
                        flg->running = 1;
                        if (hmp->fill_flush_group == flg) {
                                hmp->fill_flush_group =
                                        TAILQ_NEXT(flg, flush_entry);
                        }
                } else {
                        *nomorep = 1;
                        return (hmp->flusher.done);
                }
        } else {
                /*
                 * Sequence number problems can only happen if a critical
                 * filesystem error occurred which forced the filesystem into
                 * read-only mode.
                 */
                KKASSERT(flg->seq - seq > 0 || hmp->ronly >= 2);
                flg = NULL;
        }

        /*
         * We only do one flg but we may have to loop/retry.
         *
         * Due to various races it is possible to come across a flush
         * group which has not yet been closed.
         */
        count = 0;
        while (flg && flg->running) {
                ++count;
                if (hammer_debug_general & 0x0001) {
                        hdkprintf("%d ttl=%d recs=%d\n",
                                  flg->seq, flg->total_count, flg->refs);
                }
                if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                        break;
                hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

                /*
                 * If the previous flush cycle just about exhausted our
                 * UNDO space we may have to do a dummy cycle to move the
                 * first_offset up before actually digging into a new cycle,
                 * or the new cycle will not have sufficient undo space.
                 */
                if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
                        hammer_flusher_finalize(&hmp->flusher.trans, 0);

                KKASSERT(hmp->next_flush_group != flg);

                /*
                 * Place the flg in the flusher structure and start the
                 * slaves running.  The slaves will compete for inodes
                 * to flush.
                 *
                 * Make a per-thread copy of the transaction.
                 */
                while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
                        TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
                        info->flg = flg;
                        info->runstate = 1;
                        info->trans = hmp->flusher.trans;
                        TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
                        wakeup(&info->runstate);
                }

                /*
                 * Wait for all slaves to finish running
                 */
                while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
                        tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

                /*
                 * Do the final finalization, clean up
                 */
                hammer_flusher_finalize(&hmp->flusher.trans, 1);
                hmp->flusher.tid = hmp->flusher.trans.tid;

                hammer_done_transaction(&hmp->flusher.trans);

                /*
                 * Loop up on the same flg.  If the flg is done clean it up
                 * and break out.  We only flush one flg.
                 */
                if (RB_EMPTY(&flg->flush_tree)) {
                        KKASSERT(flg->refs == 0);
                        TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
                        kfree(flg, hmp->m_misc);
                        break;
                }
                KKASSERT(TAILQ_FIRST(&hmp->flush_group_list) == flg);
        }

        /*
         * We may have pure meta-data to flush, or we may have to finish
         * cycling the UNDO FIFO, even if there were no flush groups.
         */
        if (count == 0 && hammer_flusher_haswork(hmp)) {
                hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
                hammer_flusher_finalize(&hmp->flusher.trans, 1);
                hammer_done_transaction(&hmp->flusher.trans);
        }

        /*
         * Clean up any freed big-blocks (typically zone-2).
         * resv->flush_group is typically set several flush groups ahead
         * of the free to ensure that the freed block is not reused until
         * it can no longer be reused.
         */
        while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
                if (resv->flg_no - seq > 0)
                        break;
                hammer_reserve_clrdelay(hmp, resv);
        }
        return (seq);
}
/*
 * The slave flusher thread pulls work off the master flush list until no
 * work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
        hammer_flush_group_t flg;
        hammer_flusher_info_t info;
        hammer_mount_t hmp;

        info = arg;
        hmp = info->hmp;
        lwkt_gettoken(&hmp->fs_token);

        for (;;) {
                while (info->runstate == 0)
                        tsleep(&info->runstate, 0, "hmrssw", 0);
                if (info->runstate < 0)
                        break;
                flg = info->flg;

                RB_SCAN(hammer_fls_rb_tree, &flg->flush_tree, NULL,
                        hammer_flusher_flush_inode, info);

                info->runstate = 0;
                info->flg = NULL;
                TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
                TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
                wakeup(&hmp->flusher.ready_list);
        }

        info->td = NULL;
        wakeup(&info->td);
        lwkt_reltoken(&hmp->fs_token);
        lwkt_exit();
}
void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
        hammer_buffer_t buffer;
        hammer_io_t io;

        /*
         * loose ends - buffers without bp's aren't tracked by the kernel
         * and can build up, so clean them out.  This can occur when an
         * IO completes on a buffer with no references left.
         *
         * The io_token is needed to protect the list.
         */
        if ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
                lwkt_gettoken(&hmp->io_token);
                while ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
                        KKASSERT(io->mod_root == &hmp->lose_root);
                        RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
                        io->mod_root = NULL;
                        hammer_ref(&io->lock);
                        buffer = HAMMER_ITOB(io);
                        hammer_rel_buffer(buffer, 0);
                }
                lwkt_reltoken(&hmp->io_token);
        }
}
/*
 * Flush a single inode that is part of a flush group.
 *
 * Flusher errors are extremely serious, even ENOSPC shouldn't occur because
 * the front-end should have reserved sufficient space on the media.  Any
 * error other than EWOULDBLOCK will force the mount to be read-only.
 */
static int
hammer_flusher_flush_inode(hammer_inode_t ip, void *data)
{
        hammer_flusher_info_t info = data;
        hammer_mount_t hmp = info->hmp;
        hammer_transaction_t trans = &info->trans;
        int error;

        /*
         * Several slaves are operating on the same flush group concurrently.
         * The SLAVEFLUSH flag prevents them from tripping over each other.
         *
         * NOTE: It is possible for a EWOULDBLOCK'd ip returned by one slave
         *       to be resynced by another, but normally such inodes are not
         *       revisited until the master loop gets to them.
         */
        if (ip->flags & HAMMER_INODE_SLAVEFLUSH)
                return(0);
        ip->flags |= HAMMER_INODE_SLAVEFLUSH;
        ++hammer_stats_inode_flushes;

        hammer_flusher_clean_loose_ios(hmp);
        error = hammer_sync_inode(trans, ip);

        /*
         * EWOULDBLOCK can happen under normal operation, all other errors
         * are considered extremely serious.  We must set WOULDBLOCK
         * mechanics to deal with the mess left over from the abort of the
         * previous flush.
         */
        if (error) {
                ip->flags |= HAMMER_INODE_WOULDBLOCK;
                if (error == EWOULDBLOCK)
                        error = 0;
        }
        hammer_flush_inode_done(ip, error);

        while (hmp->flusher.finalize_want)
                tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
        if (hammer_flusher_undo_exhausted(trans, 1)) {
                hkprintf("Warning: UNDO area too small!\n");
                hammer_flusher_finalize(trans, 1);
        } else if (hammer_flusher_meta_limit(trans->hmp)) {
                hammer_flusher_finalize(trans, 0);
        }
        return (0);
}
/*
 * Return non-zero if the UNDO area has less than (QUARTER / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *       will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *       running in parallel with a flush and cannot be allowed to drop
 *       available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *       to give the flush plenty of runway to work in.
 */
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
        if (hammer_undo_space(trans) <
            hammer_undo_max(trans->hmp) * quarter / 4) {
                return(1);
        } else {
                return(0);
        }
}
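
/*
 * Worked example (illustration only; the 512MB figure is assumed, not a
 * HAMMER default): if hammer_undo_max() reports a 512MB UNDO area, the
 * test above, hammer_undo_space(trans) < hammer_undo_max(hmp) * quarter / 4,
 * trips below 128MB of free UNDO space for the quarter == 1 callers and
 * below 384MB for the quarter == 3 caller at the start of a flush cycle.
 */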
/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
 */
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t cundomap, dundomap;
        hammer_mount_t hmp;
        hammer_io_t io;
        hammer_off_t save_undo_next_offset;
        int i;

        hmp = trans->hmp;
        root_volume = trans->rootvol;

        /*
         * Exclusively lock the flusher.  This guarantees that all dirty
         * buffers will be idled (have a mod-count of 0).
         */
        ++hmp->flusher.finalize_want;
        hammer_lock_ex(&hmp->flusher.finalize_lock);

        /*
         * If this isn't the final sync several threads may have hit the
         * meta-limit at the same time and raced.  Only sync if we really
         * have to, after acquiring the lock.
         */
        if (final == 0 && !hammer_flusher_meta_limit(hmp))
                goto done;

        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                goto done;

        /*
         * Flush data buffers.  This can occur asynchronously and at any
         * time.  We must interlock against the frontend direct-data write
         * but do not have to acquire the sync-lock yet.
         *
         * These data buffers have already been collected prior to the
         * related inode(s) getting queued to the flush group.
         */
        while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->data_root)) != NULL) {
                if (io->ioerror)
                        break;
                hammer_ref(&io->lock);
                hammer_io_write_interlock(io);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io, 0);
                hammer_io_done_interlock(io);
                hammer_rel_buffer(HAMMER_ITOB(io), 0);
                hammer_io_limit_backlog(hmp);
        }

        /*
         * The sync-lock is required for the remaining sequence.  This lock
         * prevents meta-data from being modified.
         */
        hammer_sync_lock_ex(trans);

        /*
         * If we have been asked to finalize the volume header sync the
         * cached blockmap to the on-disk blockmap.  Generate an UNDO
         * record for the update.
         */
        if (final) {
                cundomap = &hmp->blockmap[0];
                dundomap = &root_volume->ondisk->vol0_blockmap[0];
                if (root_volume->io.modified) {
                        hammer_modify_volume(trans, root_volume,
                                             dundomap, sizeof(hmp->blockmap));
                        for (i = 0; i < HAMMER_MAX_ZONES; ++i)
                                hammer_crc_set_blockmap(&cundomap[i]);
                        bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
                        hammer_modify_volume_done(root_volume);
                }
        }

        /*
         * Flush UNDOs.  This can occur concurrently with the data flush
         * because data writes never overwrite.
         *
         * This also waits for I/Os to complete and flushes the cache on
         * the target disk.
         *
         * Record the UNDO append point as this can continue to change
         * after we have flushed the UNDOs.
         */
        cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
        hammer_lock_ex(&hmp->undo_lock);
        save_undo_next_offset = cundomap->next_offset;
        hammer_unlock(&hmp->undo_lock);
        hammer_flusher_flush_undos(hmp, HAMMER_FLUSH_UNDOS_FORCED);

        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                goto failed;

        /*
         * HAMMER VERSION < 4:
         *      Update the on-disk volume header with new UNDO FIFO end
         *      position (do not generate new UNDO records for this change).
         *      We have to do this for the UNDO FIFO whether (final) is
         *      set or not in order for the UNDOs to be recognized on
         *      recovery.
         *
         * HAMMER VERSION >= 4:
         *      The UNDO FIFO data written above will be recognized on
         *      recovery without us having to sync the volume header.
         *
         * Also update the on-disk next_tid field.  This does not require
         * an UNDO.  However, because our TID is generated before we get
         * the sync lock another sync may have beat us to the punch.
         *
         * This also has the side effect of updating first_offset based on
         * a prior finalization when the first finalization of the next flush
         * cycle occurs, removing any undo info from the prior finalization
         * from consideration.
         *
         * The volume header will be flushed out synchronously.
         */
        dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
        cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

        if (dundomap->first_offset != cundomap->first_offset ||
            dundomap->next_offset != save_undo_next_offset) {
                hammer_modify_volume_noundo(NULL, root_volume);
                dundomap->first_offset = cundomap->first_offset;
                dundomap->next_offset = save_undo_next_offset;
                hammer_crc_set_blockmap(dundomap);
                hammer_modify_volume_done(root_volume);
        }

        /*
         * vol0_next_tid is used for TID selection and is updated without
         * an UNDO so we do not reuse a TID that may have been rolled-back.
         *
         * vol0_last_tid is the highest fully-synchronized TID.  It is
         * set-up when the UNDO fifo is fully synced, later on (not here).
         *
         * The root volume can be open for modification by other threads
         * generating UNDO or REDO records.  For example, reblocking,
         * pruning, REDO mode fast-fsyncs, so the write interlock is
         * mandatory.
         */
        if (root_volume->io.modified) {
                hammer_modify_volume_noundo(NULL, root_volume);
                if (root_volume->ondisk->vol0_next_tid < trans->tid)
                        root_volume->ondisk->vol0_next_tid = trans->tid;
                hammer_crc_set_volume(root_volume->ondisk);
                hammer_modify_volume_done(root_volume);
                hammer_io_write_interlock(&root_volume->io);
                hammer_io_flush(&root_volume->io, 0);
                hammer_io_done_interlock(&root_volume->io);
        }

        /*
         * Wait for I/Os to complete.
         *
         * For HAMMER VERSION 4+ filesystems we do not have to wait for
         * the I/O to complete as the new UNDO FIFO entries are recognized
         * even without the volume header update.  This allows the volume
         * header to be flushed along with meta-data, significantly reducing
         * flush overheads.
         */
        hammer_flusher_clean_loose_ios(hmp);
        if (hmp->version < HAMMER_VOL_VERSION_FOUR)
                hammer_io_wait_all(hmp, "hmrfl3", 1);

        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                goto failed;

        /*
         * Flush meta-data.  The meta-data will be undone if we crash
         * so we can safely flush it asynchronously.  There is no need
         * to wait for I/O to complete (or issue a synchronous disk flush).
         *
         * In fact, even if we did wait the meta-data will still be undone
         * by a crash up until the next flush cycle due to the first_offset
         * in the volume header for the UNDO FIFO not being adjusted until
         * the following flush cycle.
         *
         * No io interlock is needed, bioops callbacks will not mess with
         * meta-data buffers.
         */
        while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->meta_root)) != NULL) {
                if (io->ioerror)
                        break;
                KKASSERT(io->modify_refs == 0);
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io, 0);
                hammer_rel_buffer(HAMMER_ITOB(io), 0);
                hammer_io_limit_backlog(hmp);
        }

        /*
         * If this is the final finalization for the flush group set
         * up for the next sequence by setting a new first_offset in
         * our cached blockmap and clearing the undo history.
         *
         * Even though we have updated our cached first_offset, the on-disk
         * first_offset still governs available-undo-space calculations.
         *
         * We synchronize to save_undo_next_offset rather than
         * cundomap->next_offset because that is what we flushed out
         * above.
         *
         * NOTE! UNDOs can only be added with the sync_lock held
         *       so we can clear the undo history without racing.
         *       REDOs can be added at any time which is why we
         *       have to be careful and use save_undo_next_offset
         *       when setting the new first_offset.
         */
        if (final) {
                cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
                if (cundomap->first_offset != save_undo_next_offset) {
                        cundomap->first_offset = save_undo_next_offset;
                        hmp->hflags |= HMNT_UNDO_DIRTY;
                } else if (cundomap->first_offset != cundomap->next_offset) {
                        hmp->hflags |= HMNT_UNDO_DIRTY;
                } else {
                        hmp->hflags &= ~HMNT_UNDO_DIRTY;
                }
                hammer_clear_undo_history(hmp);

                /*
                 * Flush tid sequencing.  flush_tid1 is fully synchronized,
                 * meaning a crash will not roll it back.  flush_tid2 has
                 * been written out asynchronously and a crash will roll
                 * it back.  flush_tid1 is used for all mirroring masters.
                 */
                if (hmp->flush_tid1 != hmp->flush_tid2) {
                        hmp->flush_tid1 = hmp->flush_tid2;
                        wakeup(&hmp->flush_tid1);
                }
                hmp->flush_tid2 = trans->tid;

                /*
                 * Clear the REDO SYNC flag.  This flag is used to ensure
                 * that the recovery span in the UNDO/REDO FIFO contains
                 * at least one REDO SYNC record.
                 */
                hmp->flags &= ~HAMMER_MOUNT_REDO_SYNC;
        }

        /*
         * Cleanup.  Report any critical errors.
         */
failed:
        hammer_sync_unlock(trans);

        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
                hvkprintf(root_volume,
                        "Critical write error during flush, "
                        "refusing to sync UNDO FIFO\n");
        }

done:
        hammer_unlock(&hmp->flusher.finalize_lock);

        if (--hmp->flusher.finalize_want == 0)
                wakeup(&hmp->flusher.finalize_want);
        hammer_stats_commits += final;
}
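
/*
 * For reference, the callers in this file select (final) as follows:
 * hammer_flusher_flush() calls hammer_flusher_finalize(trans, 0) to
 * recover UNDO space before starting a cycle and finalize(trans, 1) at
 * the end of each flush group, while hammer_flusher_flush_inode() calls
 * finalize(trans, 1) when the UNDO area is critically low and
 * finalize(trans, 0) when only the meta-data limit has been hit.
 */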
/*
 * Flush UNDOs.
 */
void
hammer_flusher_flush_undos(hammer_mount_t hmp, int mode)
{
        hammer_io_t io;
        int count;

        count = 0;
        while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->undo_root)) != NULL) {
                if (io->ioerror)
                        break;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_write_interlock(io);
                hammer_io_flush(io, hammer_undo_reclaim(io));
                hammer_io_done_interlock(io);
                hammer_rel_buffer(HAMMER_ITOB(io), 0);
                hammer_io_limit_backlog(hmp);
                ++count;
        }
        hammer_flusher_clean_loose_ios(hmp);
        if (mode == HAMMER_FLUSH_UNDOS_FORCED ||
            (mode == HAMMER_FLUSH_UNDOS_AUTO && count)) {
                hammer_io_wait_all(hmp, "hmrfl1", 1);
        } else {
                hammer_io_wait_all(hmp, "hmrfl2", 0);
        }
}
/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Since we cannot allow such buffers to flush until we have dealt with
 * the UNDOs, we risk deadlocking the kernel's buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
        if (hmp->locked_dirty_space + hmp->io_running_space >
            hammer_limit_dirtybufspace) {
                return(1);
        }
        return(0);
}
/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * This version is used by background operations (mirror, prune, reblock)
 * to leave room for foreground operations.
 */
int
hammer_flusher_meta_halflimit(hammer_mount_t hmp)
{
        if (hmp->locked_dirty_space + hmp->io_running_space >
            hammer_limit_dirtybufspace / 2) {
                return(1);
        }
        return(0);
}
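
/*
 * Illustrative sketch (not code from this file): a background operation
 * such as pruning or reblocking typically combines the half-limit check
 * with the 2/4 undo-space check described earlier and pauses behind one
 * flush cycle when either trips:
 *
 *      if (hammer_flusher_meta_halflimit(trans->hmp) ||
 *          hammer_flusher_undo_exhausted(trans, 2)) {
 *              seq = hammer_flusher_async_one(trans->hmp);
 *              hammer_flusher_wait(trans->hmp, seq);
 *      }
 */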
/*
 * Return non-zero if the flusher still has something to flush.
 */
int
hammer_flusher_haswork(hammer_mount_t hmp)
{
        if (hmp->ronly)
                return(0);
        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
                return(0);
        if (TAILQ_FIRST(&hmp->flush_group_list) ||      /* dirty inodes */
            RB_ROOT(&hmp->volu_root) ||                 /* dirty buffers */
            RB_ROOT(&hmp->undo_root) ||
            RB_ROOT(&hmp->data_root) ||
            RB_ROOT(&hmp->meta_root) ||
            (hmp->hflags & HMNT_UNDO_DIRTY)) {          /* UNDO FIFO sync */
                return(1);
        }
        return(0);
}
void
hammer_flush_dirty(hammer_mount_t hmp, int max_count)
{
        int count = 0;
        int dummy;

        while (hammer_flusher_haswork(hmp)) {
                hammer_flusher_sync(hmp);
                ++count;
                if (count >= 5) {
                        if (count == 5)
                                hkprintf("flushing.");
                        else
                                kprintf(".");
                        tsleep(&dummy, 0, "hmrufl", hz);
                }
                if (max_count != -1 && count == max_count) {
                        kprintf("giving up");
                        break;
                }
        }
        if (count >= max_count)