/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.45 2008/07/31 04:42:04 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"
static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
					hammer_transaction_t trans);

RB_GENERATE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
	    hammer_ino_rb_compare);
/*
 * Inodes are sorted and assigned to slave threads in groups of 128.
 * We want a flush group size large enough such that the slave threads
 * are not likely to interfere with each other when accessing the B-Tree,
 * but not so large that we lose concurrency.
 */
#define HAMMER_FLUSH_GROUP_SIZE	128
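
/*
 * Illustrative sketch (excluded from compilation): how a sorted work list
 * is carved into HAMMER_FLUSH_GROUP_SIZE batches, one batch per idle
 * slave.  This models the assignment loop in hammer_flusher_flush()
 * below; the dispatch_batch() helper and nitems parameter are
 * hypothetical.
 */
#if 0
static void
example_partition(hammer_inode_t *inodes, int nitems)
{
	int base;
	int count;

	for (base = 0; base < nitems; base += count) {
		count = nitems - base;
		if (count > HAMMER_FLUSH_GROUP_SIZE)
			count = HAMMER_FLUSH_GROUP_SIZE;
		dispatch_batch(&inodes[base], count);	/* hypothetical */
	}
}
#endif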
/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
	TAILQ_ENTRY(hammer_flusher_info) entry;
	struct hammer_mount *hmp;
	thread_t	td;
	int		runstate;
	int		count;
	hammer_flush_group_t flg;
	hammer_inode_t	work_array[HAMMER_FLUSH_GROUP_SIZE];
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;
/*
 * Sync all inodes pending on the flusher.
 *
 * All flush groups will be flushed.  This does not queue dirty inodes
 * to the flush groups, it just flushes out what has already been queued!
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async(hmp, NULL);
	hammer_flusher_wait(hmp, seq);
}
/*
 * Sync all inodes pending on the flusher - return immediately.
 *
 * All flush groups will be flushed.
 */
int
hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
{
	hammer_flush_group_t flg;
	int seq = hmp->flusher.next;

	TAILQ_FOREACH(flg, &hmp->flush_group_list, flush_entry) {
		if (flg->running == 0)
			++seq;
		flg->closed = 1;
		if (flg == close_flg)
			break;
	}
	if (hmp->flusher.td) {
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}
int
hammer_flusher_async_one(hammer_mount_t hmp)
{
	int seq;

	if (hmp->flusher.td) {
		seq = hmp->flusher.next;
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}
/*
 * Wait for the flusher to get to the specified sequence number.
 * Signal the flusher as often as necessary to keep it going.
 */
void
hammer_flusher_wait(hammer_mount_t hmp, int seq)
{
	while ((int)(seq - hmp->flusher.done) > 0) {
		if (hmp->flusher.act != seq) {
			if (hmp->flusher.signal++ == 0)
				wakeup(&hmp->flusher.signal);
		}
		tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
	}
}
void
hammer_flusher_wait_next(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async_one(hmp);
	hammer_flusher_wait(hmp, seq);
}
void
hammer_flusher_create(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	int i;

	hmp->flusher.signal = 0;
	hmp->flusher.act = 0;
	hmp->flusher.done = 0;
	hmp->flusher.next = 1;
	hammer_ref(&hmp->flusher.finalize_lock);
	TAILQ_INIT(&hmp->flusher.run_list);
	TAILQ_INIT(&hmp->flusher.ready_list);

	lwkt_create(hammer_flusher_master_thread, hmp,
		    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
	for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
		info = kmalloc(sizeof(*info), hmp->m_misc, M_WAITOK|M_ZERO);
		info->hmp = hmp;
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		lwkt_create(hammer_flusher_slave_thread, info,
			    &info->td, NULL, 0, -1, "hammer-S%d", i);
	}
}
void
hammer_flusher_destroy(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;

	/*
	 * Kill the master
	 */
	hmp->flusher.exiting = 1;
	while (hmp->flusher.td) {
		++hmp->flusher.signal;
		wakeup(&hmp->flusher.signal);
		tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
	}

	/*
	 * Kill the slaves
	 */
	while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
		KKASSERT(info->runstate == 0);
		TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
		info->runstate = -1;
		wakeup(&info->runstate);
		while (info->td)
			tsleep(&info->td, 0, "hmrwwc", 0);
		kfree(info, hmp->m_misc);
	}
}
/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
	hammer_flush_group_t flg;
	hammer_mount_t hmp;

	hmp = arg;

	for (;;) {
		/*
		 * Do at least one flush cycle.  We may have to update the
		 * UNDO FIFO even if no inodes are queued.
		 */
		for (;;) {
			while (hmp->flusher.group_lock)
				tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
			hmp->flusher.act = hmp->flusher.next;
			++hmp->flusher.next;
			hammer_flusher_clean_loose_ios(hmp);
			hammer_flusher_flush(hmp);
			hmp->flusher.done = hmp->flusher.act;
			wakeup(&hmp->flusher.done);
			flg = TAILQ_FIRST(&hmp->flush_group_list);
			if (flg == NULL || flg->closed == 0)
				break;
			if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
				break;
		}

		/*
		 * Wait for activity.
		 */
		if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
			break;
		while (hmp->flusher.signal == 0)
			tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);

		/*
		 * Flush for each count on signal but only allow one extra
		 * flush request to build up.
		 */
		if (--hmp->flusher.signal != 0)
			hmp->flusher.signal = 1;
	}

	/*
	 * And we are done.
	 */
	hmp->flusher.td = NULL;
	wakeup(&hmp->flusher.exiting);
	lwkt_exit();
}
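
/*
 * Sketch of the signal coalescing above (standalone model, excluded from
 * compilation): N async requests arriving while a flush is running
 * collapse into at most one extra cycle instead of N.
 */
#if 0
	int signal = 0;

	/* producers: three requests while the flusher is busy */
	if (signal++ == 0) ;	/* wakeup fires on the 0->1 edge only */
	if (signal++ == 0) ;	/* no wakeup, already pending */
	if (signal++ == 0) ;	/* no wakeup */

	/* consumer: consume one count, clamp the backlog to one request */
	if (--signal != 0)
		signal = 1;	/* one more cycle will run, not two */
#endif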
/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	hammer_flush_group_t flg;
	hammer_reserve_t resv;
	hammer_inode_t ip;
	hammer_inode_t next_ip;
	int count;

	/*
	 * Just in-case there's a flush race on mount
	 */
	if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL)
		return;

	/*
	 * We only do one flg but we may have to loop/retry.
	 */
	count = 0;
	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
		++count;
		if (hammer_debug_general & 0x0001) {
			kprintf("hammer_flush %d ttl=%d recs=%d\n",
				hmp->flusher.act,
				flg->total_count, flg->refs);
		}
		if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
			break;
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

		/*
		 * If the previous flush cycle just about exhausted our
		 * UNDO space we may have to do a dummy cycle to move the
		 * first_offset up before actually digging into a new cycle,
		 * or the new cycle will not have sufficient undo space.
		 */
		if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
			hammer_flusher_finalize(&hmp->flusher.trans, 0);

		/*
		 * Ok, we are running this flush group now (this prevents new
		 * additions to it).
		 */
		flg->running = 1;
		if (hmp->next_flush_group == flg)
			hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);

		/*
		 * Iterate the inodes in the flg's flush_tree and assign
		 * them to slaves.
		 */
		info = TAILQ_FIRST(&hmp->flusher.ready_list);
		next_ip = RB_FIRST(hammer_fls_rb_tree, &flg->flush_tree);

		while ((ip = next_ip) != NULL) {
			next_ip = RB_NEXT(hammer_fls_rb_tree,
					  &flg->flush_tree, ip);

			if (++hmp->check_yield > hammer_yield_check) {
				hmp->check_yield = 0;
				lwkt_user_yield();
			}

			/*
			 * Add ip to the slave's work array.  The slave is
			 * not currently running.
			 */
			info->work_array[info->count++] = ip;
			if (info->count != HAMMER_FLUSH_GROUP_SIZE)
				continue;

			/*
			 * Get the slave running
			 */
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			wakeup(&info->runstate);

			/*
			 * Get a new slave.  We may have to wait for one to
			 * finish running.
			 */
			while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) == NULL) {
				tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
			}
		}

		/*
		 * Run the current slave if necessary
		 */
		if (info->count) {
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			wakeup(&info->runstate);
		}

		/*
		 * Wait for all slaves to finish running
		 */
		while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
			tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

		/*
		 * Do the final finalization, clean up
		 */
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hmp->flusher.tid = hmp->flusher.trans.tid;

		hammer_done_transaction(&hmp->flusher.trans);

		/*
		 * Loop up on the same flg.  If the flg is done clean it up
		 * and break out.  We only flush one flg.
		 */
		if (RB_EMPTY(&flg->flush_tree)) {
			KKASSERT(flg->refs == 0);
			TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
			kfree(flg, hmp->m_misc);
			break;
		}
	}

	/*
	 * We may have pure meta-data to flush, or we may have to finish
	 * cycling the UNDO FIFO, even if there were no flush groups.
	 */
	if (count == 0 && hammer_flusher_haswork(hmp)) {
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hammer_done_transaction(&hmp->flusher.trans);
	}

	/*
	 * Clean up any freed big-blocks (typically zone-2).
	 * resv->flush_group is typically set several flush groups ahead
	 * of the free to ensure that the freed block is not reused until
	 * it can no longer be reused.
	 */
	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
		if (resv->flush_group != hmp->flusher.act)
			break;
		hammer_reserve_clrdelay(hmp, resv);
	}
}
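
/*
 * Design note: the master/slave handoff above uses two queues.  A slave
 * sits on exactly one of ready_list (idle, sleeping on info->runstate) or
 * run_list (working).  Condensed sketch of the protocol (excluded from
 * compilation; both halves appear in full in the surrounding functions):
 */
#if 0
	/* master: hand a filled work_array to an idle slave */
	TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
	TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
	info->runstate = 1;
	wakeup(&info->runstate);

	/* slave: return itself to the idle pool when the batch is done */
	info->runstate = 0;
	TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
	TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
	wakeup(&hmp->flusher.ready_list);
#endif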
/*
 * The slave flusher thread pulls work off the master flush list until no
 * work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
	hammer_flush_group_t flg;
	hammer_flusher_info_t info;
	hammer_mount_t hmp;
	hammer_inode_t ip;
	int i;

	info = arg;
	hmp = info->hmp;

	for (;;) {
		while (info->runstate == 0)
			tsleep(&info->runstate, 0, "hmrssw", 0);
		if (info->runstate < 0)
			break;
		flg = info->flg;

		for (i = 0; i < info->count; ++i) {
			ip = info->work_array[i];
			hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
			++hammer_stats_inode_flushes;
		}
		info->count = 0;
		info->runstate = 0;
		TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		wakeup(&hmp->flusher.ready_list);
	}
	info->td = NULL;
	wakeup(&info->td);
	lwkt_exit();
}
void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
	hammer_buffer_t buffer;
	hammer_io_t io;

	/*
	 * loose ends - buffers without bp's aren't tracked by the kernel
	 * and can build up, so clean them out.  This can occur when an
	 * IO completes on a buffer with no references left.
	 */
	if ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
		crit_enter();	/* biodone() race */
		while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
			KKASSERT(io->mod_list == &hmp->lose_list);
			TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
			io->mod_list = NULL;
			hammer_ref(&io->lock);
			buffer = (void *)io;
			hammer_rel_buffer(buffer, 0);
		}
		crit_exit();
	}
}
/*
 * Flush a single inode that is part of a flush group.
 *
 * Flusher errors are extremely serious, even ENOSPC shouldn't occur because
 * the front-end should have reserved sufficient space on the media.  Any
 * error other than EWOULDBLOCK will force the mount to be read-only.
 */
static void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
	hammer_mount_t hmp = ip->hmp;
	int error;

	hammer_flusher_clean_loose_ios(hmp);
	error = hammer_sync_inode(trans, ip);

	/*
	 * EWOULDBLOCK can happen under normal operation, all other errors
	 * are considered extremely serious.  We must set WOULDBLOCK
	 * mechanics to deal with the mess left over from the abort of the
	 * previous flush.
	 */
	if (error) {
		ip->flags |= HAMMER_INODE_WOULDBLOCK;
		if (error == EWOULDBLOCK)
			error = 0;
	}
	hammer_flush_inode_done(ip, error);
	while (hmp->flusher.finalize_want)
		tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
	if (hammer_flusher_undo_exhausted(trans, 1)) {
		kprintf("HAMMER: Warning: UNDO area too small!\n");
		hammer_flusher_finalize(trans, 1);
	} else if (hammer_flusher_meta_limit(trans->hmp)) {
		hammer_flusher_finalize(trans, 0);
	}
}
/*
 * Return non-zero if the UNDO area has less than (quarter / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *	 will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *	 running in parallel with a flush and cannot be allowed to drop
 *	 available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *	 to give the flush plenty of runway to work in.
 */
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
	if (hammer_undo_space(trans) <
	    hammer_undo_max(trans->hmp) * quarter / 4) {
		return(1);
	} else {
		return(0);
	}
}
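
/*
 * Worked example (hypothetical sizes, excluded from compilation): with a
 * 1024 MB UNDO FIFO, quarter=1 trips once less than 256 MB remains (the
 * emergency level), while quarter=3 already trips below 768 MB (the
 * start-of-flush check that forces an early finalization).
 */
#if 0
	/* 700 MB of a 1024 MB FIFO still free */
	KKASSERT(700 < 1024 * 3 / 4);		/* quarter=3: exhausted */
	KKASSERT(!(700 < 1024 * 1 / 4));	/* quarter=1: not yet */
#endif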
/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
 */
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t cundomap, dundomap;
	hammer_mount_t hmp;
	hammer_io_t io;
	hammer_off_t save_undo_next_offset;
	int count;
	int i;

	hmp = trans->hmp;
	root_volume = trans->rootvol;

	/*
	 * Exclusively lock the flusher.  This guarantees that all dirty
	 * buffers will be idled (have a mod-count of 0).
	 */
	++hmp->flusher.finalize_want;
	hammer_lock_ex(&hmp->flusher.finalize_lock);

	/*
	 * If this isn't the final sync several threads may have hit the
	 * meta-limit at the same time and raced.  Only sync if we really
	 * have to, after acquiring the lock.
	 */
	if (final == 0 && !hammer_flusher_meta_limit(hmp))
		goto done;

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto done;

	/*
	 * Flush data buffers.  This can occur asynchronously and at any
	 * time.  We must interlock against the frontend direct-data write
	 * but do not have to acquire the sync-lock yet.
	 *
	 * These data buffers have already been collected prior to the
	 * related inode(s) getting queued to the flush group.
	 */
	count = 0;
	while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
		if (io->ioerror)
			break;
		hammer_ref(&io->lock);
		hammer_io_write_interlock(io);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io, 0);
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		++count;
	}

	/*
	 * The sync-lock is required for the remaining sequence.  This lock
	 * prevents meta-data from being modified.
	 */
	hammer_sync_lock_ex(trans);

	/*
	 * If we have been asked to finalize the volume header sync the
	 * cached blockmap to the on-disk blockmap.  Generate an UNDO
	 * record for the update.
	 */
	if (final) {
		cundomap = &hmp->blockmap[0];
		dundomap = &root_volume->ondisk->vol0_blockmap[0];
		if (root_volume->io.modified) {
			hammer_modify_volume(trans, root_volume,
					     dundomap, sizeof(hmp->blockmap));
			for (i = 0; i < HAMMER_MAX_ZONES; ++i)
				hammer_crc_set_blockmap(&cundomap[i]);
			bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
			hammer_modify_volume_done(root_volume);
		}
	}

	/*
	 * Flush UNDOs.  This can occur concurrently with the data flush
	 * because data writes never overwrite.
	 *
	 * This also waits for I/Os to complete and flushes the cache on
	 * the target disk.
	 *
	 * Record the UNDO append point as this can continue to change
	 * after we have flushed the UNDOs.
	 */
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	hammer_lock_ex(&hmp->undo_lock);
	save_undo_next_offset = cundomap->next_offset;
	hammer_unlock(&hmp->undo_lock);
	hammer_flusher_flush_undos(hmp, HAMMER_FLUSH_UNDOS_FORCED);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;

	/*
	 * HAMMER VERSION < 4:
	 *	Update the on-disk volume header with new UNDO FIFO end
	 *	position (do not generate new UNDO records for this change).
	 *	We have to do this for the UNDO FIFO whether (final) is
	 *	set or not in order for the UNDOs to be recognized on
	 *	recovery.
	 *
	 * HAMMER VERSION >= 4:
	 *	The UNDO FIFO data written above will be recognized on
	 *	recovery without us having to sync the volume header.
	 *
	 * Also update the on-disk next_tid field.  This does not require
	 * an UNDO.  However, because our TID is generated before we get
	 * the sync lock another sync may have beat us to the punch.
	 *
	 * This also has the side effect of updating first_offset based on
	 * a prior finalization when the first finalization of the next flush
	 * cycle occurs, removing any undo info from the prior finalization
	 * from consideration.
	 *
	 * The volume header will be flushed out synchronously.
	 */
	dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (dundomap->first_offset != cundomap->first_offset ||
	    dundomap->next_offset != save_undo_next_offset) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		dundomap->first_offset = cundomap->first_offset;
		dundomap->next_offset = save_undo_next_offset;
		hammer_crc_set_blockmap(dundomap);
		hammer_modify_volume_done(root_volume);
	}

	/*
	 * vol0_next_tid is used for TID selection and is updated without
	 * an UNDO so we do not reuse a TID that may have been rolled-back.
	 *
	 * vol0_last_tid is the highest fully-synchronized TID.  It is
	 * set-up when the UNDO fifo is fully synced, later on (not here).
	 *
	 * The root volume can be open for modification by other threads
	 * generating UNDO or REDO records.  For example, reblocking,
	 * pruning, REDO mode fast-fsyncs, so the write interlock is
	 * mandatory.
	 */
	if (root_volume->io.modified) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		if (root_volume->ondisk->vol0_next_tid < trans->tid)
			root_volume->ondisk->vol0_next_tid = trans->tid;
		hammer_crc_set_volume(root_volume->ondisk);
		hammer_modify_volume_done(root_volume);
		hammer_io_write_interlock(&root_volume->io);
		hammer_io_flush(&root_volume->io, 0);
		hammer_io_done_interlock(&root_volume->io);
	}

	/*
	 * Wait for I/Os to complete.
	 *
	 * For HAMMER VERSION 4+ filesystems we do not have to wait for
	 * the I/O to complete as the new UNDO FIFO entries are recognized
	 * even without the volume header update.  This allows the volume
	 * header to be flushed along with meta-data, significantly reducing
	 * flush overheads.
	 */
	hammer_flusher_clean_loose_ios(hmp);
	if (hmp->version < HAMMER_VOL_VERSION_FOUR)
		hammer_io_wait_all(hmp, "hmrfl3", 1);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;

	/*
	 * Flush meta-data.  The meta-data will be undone if we crash
	 * so we can safely flush it asynchronously.  There is no need
	 * to wait for I/O to complete (or issue a synchronous disk flush).
	 *
	 * In fact, even if we did wait the meta-data will still be undone
	 * by a crash up until the next flush cycle due to the first_offset
	 * in the volume header for the UNDO FIFO not being adjusted until
	 * the following flush cycle.
	 */
	count = 0;
	while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
		if (io->ioerror)
			break;
		KKASSERT(io->modify_refs == 0);
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io, 0);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		++count;
	}

	/*
	 * If this is the final finalization for the flush group set
	 * up for the next sequence by setting a new first_offset in
	 * our cached blockmap and clearing the undo history.
	 *
	 * Even though we have updated our cached first_offset, the on-disk
	 * first_offset still governs available-undo-space calculations.
	 *
	 * We synchronize to save_undo_next_offset rather than
	 * cundomap->next_offset because that is what we flushed out
	 * above.
	 *
	 * NOTE! UNDOs can only be added with the sync_lock held
	 *	 so we can clear the undo history without racing.
	 *	 REDOs can be added at any time which is why we
	 *	 have to be careful and use save_undo_next_offset
	 *	 when setting the new first_offset.
	 */
	if (final) {
		cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
		if (cundomap->first_offset != save_undo_next_offset) {
			cundomap->first_offset = save_undo_next_offset;
			hmp->hflags |= HMNT_UNDO_DIRTY;
		} else if (cundomap->first_offset != cundomap->next_offset) {
			hmp->hflags |= HMNT_UNDO_DIRTY;
		} else {
			hmp->hflags &= ~HMNT_UNDO_DIRTY;
		}
		hammer_clear_undo_history(hmp);

		/*
		 * Flush tid sequencing.  flush_tid1 is fully synchronized,
		 * meaning a crash will not roll it back.  flush_tid2 has
		 * been written out asynchronously and a crash will roll
		 * it back.  flush_tid1 is used for all mirroring masters.
		 */
		if (hmp->flush_tid1 != hmp->flush_tid2) {
			hmp->flush_tid1 = hmp->flush_tid2;
			wakeup(&hmp->flush_tid1);
		}
		hmp->flush_tid2 = trans->tid;

		/*
		 * Clear the REDO SYNC flag.  This flag is used to ensure
		 * that the recovery span in the UNDO/REDO FIFO contains
		 * at least one REDO SYNC record.
		 */
		hmp->flags &= ~HAMMER_MOUNT_REDO_SYNC;
	}

	/*
	 * Cleanup.  Report any critical errors.
	 */
failed:
	hammer_sync_unlock(trans);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		kprintf("HAMMER(%s): Critical write error during flush, "
			"refusing to sync UNDO FIFO\n",
			root_volume->ondisk->vol_name);
	}

done:
	hammer_unlock(&hmp->flusher.finalize_lock);

	if (--hmp->flusher.finalize_want == 0)
		wakeup(&hmp->flusher.finalize_want);
	hammer_stats_commits += final;
}
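
/*
 * Summary of the ordering enforced above, which carries the whole
 * crash-consistency argument (pseudo-steps with hypothetical names,
 * excluded from compilation):
 */
#if 0
	flush_data_buffers();		/* never overwrite, safe anytime */
	flush_undos_and_wait();		/* wait + disk cache flush */
	sync_volume_header();		/* pre-V4: makes the UNDOs reachable */
	flush_meta_data_async();	/* covered by the UNDOs just synced */
	advance_cached_first_offset();	/* on-disk copy follows next cycle */
#endif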
/*
 * Flush UNDOs.
 */
void
hammer_flusher_flush_undos(hammer_mount_t hmp, int mode)
{
	hammer_io_t io;
	int count;

	count = 0;
	while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
		if (io->ioerror)
			break;
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_write_interlock(io);
		hammer_io_flush(io, hammer_undo_reclaim(io));
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		++count;
	}
	hammer_flusher_clean_loose_ios(hmp);
	if (mode == HAMMER_FLUSH_UNDOS_FORCED ||
	    (mode == HAMMER_FLUSH_UNDOS_AUTO && count)) {
		hammer_io_wait_all(hmp, "hmrfl1", 1);
	} else {
		hammer_io_wait_all(hmp, "hmrfl2", 0);
	}
}
/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Since we cannot allow such buffers to flush until we have dealt with
 * the UNDOs, we risk deadlocking the kernel's buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace) {
		return(1);
	}
	return(0);
}
/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * This version is used by background operations (mirror, prune, reblock)
 * to leave room for foreground operations.
 */
int
hammer_flusher_meta_halflimit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace / 2) {
		return(1);
	}
	return(0);
}
/*
 * Return non-zero if the flusher still has something to flush.
 */
int
hammer_flusher_haswork(hammer_mount_t hmp)
{
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		return(0);
	if (TAILQ_FIRST(&hmp->flush_group_list) ||	/* dirty inodes */
	    TAILQ_FIRST(&hmp->volu_list) ||		/* dirty buffers */
	    TAILQ_FIRST(&hmp->undo_list) ||
	    TAILQ_FIRST(&hmp->data_list) ||
	    TAILQ_FIRST(&hmp->meta_list) ||
	    (hmp->hflags & HMNT_UNDO_DIRTY)		/* UNDO FIFO sync */
	) {
		return(1);
	}
	return(0);
}