/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.40.2.3 2008/07/18 00:21:09 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
					hammer_transaction_t trans);

/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
	TAILQ_ENTRY(hammer_flusher_info) entry;
	struct hammer_mount *hmp;
	thread_t	td;
	int		runstate;
	int		count;
	hammer_flush_group_t flg;
	hammer_inode_t	work_array[HAMMER_FLUSH_GROUP_SIZE];
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;

/*
 * Sync all inodes pending on the flusher.
 *
 * All flush groups will be flushed.  This does not queue dirty inodes
 * to the flush groups, it just flushes out what has already been queued!
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async(hmp, NULL);
	while ((int)(seq - hmp->flusher.done) > 0)
		tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
}

/*
 * Sync all inodes pending on the flusher - return immediately.
 *
 * All flush groups will be flushed.
 */
int
hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
{
	hammer_flush_group_t flg;
	int seq = hmp->flusher.next;

	TAILQ_FOREACH(flg, &hmp->flush_group_list, flush_entry) {
		if (flg->running == 0)
			++seq;
		flg->closed = 1;
		if (flg == close_flg)
			break;
	}
	if (hmp->flusher.td) {
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}
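
/*
 * Request a single flush cycle without closing any flush groups.  Returns
 * the sequence number of the cycle, which the caller can hand to
 * hammer_flusher_wait().
 */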
int
hammer_flusher_async_one(hammer_mount_t hmp)
{
	int seq;

	if (hmp->flusher.td) {
		seq = hmp->flusher.next;
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}
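
/*
 * Wait for the flusher to finish flushing the specified sequence number.
 * The master thread wakes us up as each flush cycle completes.
 */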
void
hammer_flusher_wait(hammer_mount_t hmp, int seq)
{
	while ((int)(seq - hmp->flusher.done) > 0) {
		tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
	}
}
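
/*
 * Create the flusher: one master thread plus HAMMER_MAX_FLUSHERS slave
 * threads.  The slaves are initially parked on the ready list.
 */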
void
hammer_flusher_create(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	int i;

	hmp->flusher.signal = 0;
	hmp->flusher.act = 0;
	hmp->flusher.done = 0;
	hmp->flusher.next = 1;
	hammer_ref(&hmp->flusher.finalize_lock);
	TAILQ_INIT(&hmp->flusher.run_list);
	TAILQ_INIT(&hmp->flusher.ready_list);

	lwkt_create(hammer_flusher_master_thread, hmp,
		    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
	for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
		info = kmalloc(sizeof(*info), M_HAMMER, M_WAITOK|M_ZERO);
		info->hmp = hmp;
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		lwkt_create(hammer_flusher_slave_thread, info,
			    &info->td, NULL, 0, -1, "hammer-S%d", i);
	}
}
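
/*
 * Tear the flusher down: signal the master thread until it exits, then
 * wake up and reap each slave parked on the ready list.
 */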
void
hammer_flusher_destroy(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;

	/*
	 * Kill the master
	 */
	hmp->flusher.exiting = 1;
	while (hmp->flusher.td) {
		++hmp->flusher.signal;
		wakeup(&hmp->flusher.signal);
		tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
	}

	/*
	 * Kill the slaves.  Each info is removed from the ready list once,
	 * before the slave is signaled; the slave does not touch the list
	 * on its way out.
	 */
	while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
		KKASSERT(info->runstate == 0);
		TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
		info->runstate = -1;
		wakeup(&info->runstate);
		while (info->td)
			tsleep(&info->td, 0, "hmrwwc", 0);
		kfree(info, M_HAMMER);
	}
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
	hammer_flush_group_t flg;
	hammer_mount_t hmp;

	hmp = arg;

	for (;;) {
		/*
		 * Do at least one flush cycle.  We may have to update the
		 * UNDO FIFO even if no inodes are queued.
		 */
		for (;;) {
			while (hmp->flusher.group_lock)
				tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
			hmp->flusher.act = hmp->flusher.next;
			++hmp->flusher.next;
			hammer_flusher_clean_loose_ios(hmp);
			hammer_flusher_flush(hmp);
			hmp->flusher.done = hmp->flusher.act;
			wakeup(&hmp->flusher.done);
			flg = TAILQ_FIRST(&hmp->flush_group_list);
			if (flg == NULL || flg->closed == 0)
				break;
			if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
				break;
		}

		/*
		 * Wait for activity.
		 */
		if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
			break;
		while (hmp->flusher.signal == 0)
			tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
		hmp->flusher.signal = 0;
	}

	/*
	 * And we are done.
	 */
	hmp->flusher.td = NULL;
	wakeup(&hmp->flusher.exiting);
	lwkt_exit();
}

/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	hammer_flush_group_t flg;
	hammer_reserve_t resv;
	hammer_inode_t ip;
	hammer_inode_t next_ip;
	int slave_index;
	int count;

	/*
	 * Just in case there's a flush race on mount.
	 */
	if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL)
		return;

	/*
	 * We only do one flg but we may have to loop/retry.
	 */
	count = 0;
	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
		++count;
		if (hammer_debug_general & 0x0001) {
			kprintf("hammer_flush %d ttl=%d recs=%d\n",
				hmp->flusher.act,
				flg->total_count, flg->refs);
		}
		if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
			break;
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

		/*
		 * If the previous flush cycle just about exhausted our
		 * UNDO space we may have to do a dummy cycle to move the
		 * first_offset up before actually digging into a new cycle,
		 * or the new cycle will not have sufficient undo space.
		 */
		if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
			hammer_flusher_finalize(&hmp->flusher.trans, 0);

		/*
		 * Ok, we are running this flush group now (this prevents new
		 * additions to it).
		 */
		flg->running = 1;
		if (hmp->next_flush_group == flg)
			hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);

		/*
		 * Iterate the inodes in the flg's flush_list and assign
		 * them to slaves.
		 */
		slave_index = 0;
		info = TAILQ_FIRST(&hmp->flusher.ready_list);
		next_ip = TAILQ_FIRST(&flg->flush_list);

		while ((ip = next_ip) != NULL) {
			next_ip = TAILQ_NEXT(ip, flush_entry);

			/*
			 * Add ip to the slave's work array.  The slave is
			 * not currently running.
			 */
			info->work_array[info->count++] = ip;
			if (info->count != HAMMER_FLUSH_GROUP_SIZE)
				continue;

			/*
			 * Get the slave running
			 */
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			wakeup(&info->runstate);

			/*
			 * Get a new slave.  We may have to wait for one to
			 * finish running.
			 */
			while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) == NULL) {
				tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
			}
		}

		/*
		 * Run the current slave if necessary
		 */
		if (info->count) {
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			wakeup(&info->runstate);
		}

		/*
		 * Wait for all slaves to finish running
		 */
		while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
			tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

		/*
		 * Do the final finalization, clean up
		 */
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hmp->flusher.tid = hmp->flusher.trans.tid;

		hammer_done_transaction(&hmp->flusher.trans);

		/*
		 * Loop up on the same flg.  If the flg is done clean it up
		 * and break out.  We only flush one flg.
		 */
		if (TAILQ_FIRST(&flg->flush_list) == NULL) {
			KKASSERT(TAILQ_EMPTY(&flg->flush_list));
			KKASSERT(flg->refs == 0);
			TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
			kfree(flg, M_HAMMER);
			break;
		}
	}

	/*
	 * We may have pure meta-data to flush, or we may have to finish
	 * cycling the UNDO FIFO, even if there were no flush groups.
	 */
	if (count == 0 && hammer_flusher_haswork(hmp)) {
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hammer_done_transaction(&hmp->flusher.trans);
	}

	/*
	 * Clean up any freed big-blocks (typically zone-2).
	 * resv->flush_group is typically set several flush groups ahead
	 * of the free to ensure that the freed block is not reused until
	 * it can no longer be reused.
	 */
	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
		if (resv->flush_group != hmp->flusher.act)
			break;
		hammer_reserve_clrdelay(hmp, resv);
	}
}

/*
 * The slave flusher thread pulls work off the master flush_list until no
 * work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
	hammer_flush_group_t flg;
	hammer_flusher_info_t info;
	hammer_mount_t hmp;
	hammer_inode_t ip;
	int i;

	info = arg;
	hmp = info->hmp;

	for (;;) {
		while (info->runstate == 0)
			tsleep(&info->runstate, 0, "hmrssw", 0);
		if (info->runstate < 0)
			break;
		flg = info->flg;

		for (i = 0; i < info->count; ++i) {
			ip = info->work_array[i];
			hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
			++hammer_stats_inode_flushes;
		}
		info->count = 0;
		info->runstate = 0;
		TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		wakeup(&hmp->flusher.ready_list);
	}
	info->td = NULL;
	wakeup(&info->td);
	lwkt_exit();
}

void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
	hammer_buffer_t buffer;
	hammer_io_t io;

	/*
	 * loose ends - buffers without bp's aren't tracked by the kernel
	 * and can build up, so clean them out.  This can occur when an
	 * IO completes on a buffer with no references left.
	 */
	if ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
		crit_enter();	/* biodone() race */
		while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
			KKASSERT(io->mod_list == &hmp->lose_list);
			TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
			io->mod_list = NULL;
			if (io->lock.refs == 0)
				++hammer_count_refedbufs;
			hammer_ref(&io->lock);
			buffer = (void *)io;
			hammer_rel_buffer(buffer, 0);
		}
		crit_exit();
	}
}

/*
 * Flush a single inode that is part of a flush group.
 *
 * Flusher errors are extremely serious, even ENOSPC shouldn't occur because
 * the front-end should have reserved sufficient space on the media.  Any
 * error other than EWOULDBLOCK will force the mount to be read-only.
 */
static
void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
	hammer_mount_t hmp = ip->hmp;
	int error;

	hammer_flusher_clean_loose_ios(hmp);
	error = hammer_sync_inode(trans, ip);

	/*
	 * EWOULDBLOCK can happen under normal operation, all other errors
	 * are considered extremely serious.  We must set WOULDBLOCK
	 * mechanics to deal with the mess left over from the abort of the
	 * previous flush.
	 */
	if (error) {
		ip->flags |= HAMMER_INODE_WOULDBLOCK;
		if (error == EWOULDBLOCK)
			error = 0;
	}
	hammer_flush_inode_done(ip, error);
	while (hmp->flusher.finalize_want)
		tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
	if (hammer_flusher_undo_exhausted(trans, 1)) {
		kprintf("HAMMER: Warning: UNDO area too small!\n");
		hammer_flusher_finalize(trans, 1);
	} else if (hammer_flusher_meta_limit(trans->hmp)) {
		hammer_flusher_finalize(trans, 0);
	}
}

/*
 * Return non-zero if the UNDO area has less than (quarter / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *	 will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *	 running in parallel with a flush and cannot be allowed to drop
 *	 available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *	 to give the flush plenty of runway to work in.
 */
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
	if (hammer_undo_space(trans) <
	    hammer_undo_max(trans->hmp) * quarter / 4) {
		return(1);
	} else {
		return(0);
	}
}

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
 */
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t cundomap, dundomap;
	hammer_mount_t hmp;
	hammer_io_t io;
	int count;
	int i;

	hmp = trans->hmp;
	root_volume = trans->rootvol;

	/*
	 * Exclusively lock the flusher.  This guarantees that all dirty
	 * buffers will be idled (have a mod-count of 0).
	 */
	++hmp->flusher.finalize_want;
	hammer_lock_ex(&hmp->flusher.finalize_lock);

	/*
	 * If this isn't the final sync several threads may have hit the
	 * meta-limit at the same time and raced.  Only sync if we really
	 * have to, after acquiring the lock.
	 */
	if (final == 0 && !hammer_flusher_meta_limit(hmp))
		goto done;

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto done;

	/*
	 * Flush data buffers.  This can occur asynchronously and at any
	 * time.  We must interlock against the frontend direct-data write
	 * but do not have to acquire the sync-lock yet.
	 */
	count = 0;
	while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
		if (io->ioerror)
			break;
		if (io->lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&io->lock);
		hammer_io_write_interlock(io);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		++count;
	}

	/*
	 * The sync-lock is required for the remaining sequence.  This lock
	 * prevents meta-data from being modified.
	 */
	hammer_sync_lock_ex(trans);

	/*
	 * If we have been asked to finalize the volume header sync the
	 * cached blockmap to the on-disk blockmap.  Generate an UNDO
	 * record for the update.
	 */
	if (final) {
		cundomap = &hmp->blockmap[0];
		dundomap = &root_volume->ondisk->vol0_blockmap[0];
		if (root_volume->io.modified) {
			hammer_modify_volume(trans, root_volume,
					     dundomap, sizeof(hmp->blockmap));
			for (i = 0; i < HAMMER_MAX_ZONES; ++i)
				hammer_crc_set_blockmap(&cundomap[i]);
			bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
			hammer_modify_volume_done(root_volume);
		}
	}

	/*
	 * Flush UNDOs
	 */
	count = 0;
	while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
		if (io->ioerror)
			break;
		KKASSERT(io->modify_refs == 0);
		if (io->lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		++count;
	}

	/*
	 * Wait for I/Os to complete
	 */
	hammer_flusher_clean_loose_ios(hmp);
	hammer_io_wait_all(hmp, "hmrfl1");

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;

	/*
	 * Update the on-disk volume header with new UNDO FIFO end position
	 * (do not generate new UNDO records for this change).  We have to
	 * do this for the UNDO FIFO whether (final) is set or not.
	 *
	 * Also update the on-disk next_tid field.  This does not require
	 * an UNDO.  However, because our TID is generated before we get
	 * the sync lock another sync may have beat us to the punch.
	 *
	 * This also has the side effect of updating first_offset based on
	 * a prior finalization when the first finalization of the next flush
	 * cycle occurs, removing any undo info from the prior finalization
	 * from consideration.
	 *
	 * The volume header will be flushed out synchronously.
	 */
	dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (dundomap->first_offset != cundomap->first_offset ||
	    dundomap->next_offset != cundomap->next_offset) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		dundomap->first_offset = cundomap->first_offset;
		dundomap->next_offset = cundomap->next_offset;
		hammer_crc_set_blockmap(dundomap);
		hammer_modify_volume_done(root_volume);
	}

	if (root_volume->io.modified) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		if (root_volume->ondisk->vol0_next_tid < trans->tid)
			root_volume->ondisk->vol0_next_tid = trans->tid;
		hammer_crc_set_volume(root_volume->ondisk);
		hammer_modify_volume_done(root_volume);
		hammer_io_flush(&root_volume->io);
	}

	/*
	 * Wait for I/Os to complete
	 */
	hammer_flusher_clean_loose_ios(hmp);
	hammer_io_wait_all(hmp, "hmrfl2");

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;

	/*
	 * Flush meta-data.  The meta-data will be undone if we crash
	 * so we can safely flush it asynchronously.
	 *
	 * Repeated catchups will wind up flushing this update's meta-data
	 * and the UNDO buffers for the next update simultaneously.  This
	 * is ok.
	 */
	count = 0;
	while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
		if (io->ioerror)
			break;
		KKASSERT(io->modify_refs == 0);
		if (io->lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		++count;
	}

	/*
	 * If this is the final finalization for the flush group set
	 * up for the next sequence by setting a new first_offset in
	 * our cached blockmap and clearing the undo history.
	 *
	 * Even though we have updated our cached first_offset, the on-disk
	 * first_offset still governs available-undo-space calculations.
	 */
	if (final) {
		cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
		if (cundomap->first_offset == cundomap->next_offset) {
			hmp->hflags &= ~HMNT_UNDO_DIRTY;
		} else {
			cundomap->first_offset = cundomap->next_offset;
			hmp->hflags |= HMNT_UNDO_DIRTY;
		}
		hammer_clear_undo_history(hmp);
	}

	/*
	 * Cleanup.  Report any critical errors.
	 */
failed:
	hammer_sync_unlock(trans);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		kprintf("HAMMER(%s): Critical write error during flush, "
			"refusing to sync UNDO FIFO\n",
			root_volume->ondisk->vol_name);
	}

done:
	hammer_unlock(&hmp->flusher.finalize_lock);
	if (--hmp->flusher.finalize_want == 0)
		wakeup(&hmp->flusher.finalize_want);
	hammer_stats_commits += final;
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Since we cannot allow such buffers to flush until we have dealt with
 * the UNDOs, we risk deadlocking the kernel's buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * This version is used by background operations (mirror, prune, reblock)
 * to leave room for foreground operations.
 */
int
hammer_flusher_meta_halflimit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace / 2) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if the flusher still has something to flush.
 */
int
hammer_flusher_haswork(hammer_mount_t hmp)
{
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		return(0);
	if (TAILQ_FIRST(&hmp->flush_group_list) ||	/* dirty inodes */
	    TAILQ_FIRST(&hmp->volu_list) ||		/* dirty buffers */
	    TAILQ_FIRST(&hmp->undo_list) ||
	    TAILQ_FIRST(&hmp->data_list) ||
	    TAILQ_FIRST(&hmp->meta_list) ||
	    (hmp->hflags & HMNT_UNDO_DIRTY)		/* UNDO FIFO sync */
	) {
		return(1);
	}
	return(0);
}