/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.40 2008/07/14 03:20:49 dillon Exp $
 */

/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
                                        hammer_transaction_t trans);

/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
        TAILQ_ENTRY(hammer_flusher_info) entry;
        struct hammer_mount *hmp;
        thread_t        td;             /* slave thread */
        int             runstate;       /* 0=idle, 1=running, -1=exiting */
        int             count;          /* inodes queued in work_array */
        hammer_flush_group_t flg;
        hammer_inode_t  work_array[HAMMER_FLUSH_GROUP_SIZE];
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;

/*
 * Sync all inodes pending on the flusher.
 *
 * All flush groups will be flushed.  This does not queue dirty inodes
 * to the flush groups, it just flushes out what has already been queued!
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
        int seq;

        seq = hammer_flusher_async(hmp, NULL);
        while ((int)(seq - hmp->flusher.done) > 0)
                tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
}

/*
 * Sync all inodes pending on the flusher - return immediately.
 *
 * All flush groups will be flushed.
 */
int
hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
{
        hammer_flush_group_t flg;
        int seq = hmp->flusher.next;

        TAILQ_FOREACH(flg, &hmp->flush_group_list, flush_entry) {
                if (flg->running == 0)
                        ++seq;
                flg->closed = 1;
                if (flg == close_flg)
                        break;
        }
        if (hmp->flusher.td) {
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
        } else {
                seq = hmp->flusher.done;
        }
        return(seq);
}

int
hammer_flusher_async_one(hammer_mount_t hmp)
{
        int seq;

        if (hmp->flusher.td) {
                seq = hmp->flusher.next;
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
        } else {
                seq = hmp->flusher.done;
        }
        return(seq);
}

void
hammer_flusher_wait(hammer_mount_t hmp, int seq)
{
        while ((int)(seq - hmp->flusher.done) > 0)
                tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
}

void
hammer_flusher_create(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        hmp->flusher.signal = 0;
        hmp->flusher.act = 0;
        hmp->flusher.done = 0;
        hmp->flusher.next = 1;
        hammer_ref(&hmp->flusher.finalize_lock);
        TAILQ_INIT(&hmp->flusher.run_list);
        TAILQ_INIT(&hmp->flusher.ready_list);

        lwkt_create(hammer_flusher_master_thread, hmp,
                    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                info = kmalloc(sizeof(*info), M_HAMMER, M_WAITOK|M_ZERO);
                info->hmp = hmp;
                TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
                lwkt_create(hammer_flusher_slave_thread, info,
                            &info->td, NULL, 0, -1, "hammer-S%d", i);
        }
}
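
/*
 * Thread layout: hammer_flusher_create() starts one master thread
 * ("hammer-M") plus HAMMER_MAX_FLUSHERS slave threads ("hammer-S0",
 * "hammer-S1", ...), all of which are torn down by
 * hammer_flusher_destroy() at unmount time.  Slaves park themselves on
 * flusher.ready_list until the master hands them work.
 */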

void
hammer_flusher_destroy(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;

        /*
         * Kill the master thread
         */
        hmp->flusher.exiting = 1;
        while (hmp->flusher.td) {
                ++hmp->flusher.signal;
                wakeup(&hmp->flusher.signal);
                tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
        }

        /*
         * Kill the slave threads
         */
        while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
                KKASSERT(info->runstate == 0);
                TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
                info->runstate = -1;
                wakeup(&info->runstate);
                while (info->td)
                        tsleep(&info->td, 0, "hmrwwc", 0);
                kfree(info, M_HAMMER);
        }
}
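
/*
 * Shutdown handshake: the master exits once flusher.exiting is set and
 * the flush group list is empty, clearing flusher.td and waking
 * &flusher.exiting; each slave exits when its runstate goes negative and
 * clears its own info->td.  hammer_flusher_destroy() simply loops until
 * both conditions are observed before freeing the per-slave structures.
 */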

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
        hammer_flush_group_t flg;
        hammer_mount_t hmp;

        hmp = arg;

        for (;;) {
                /*
                 * Do at least one flush cycle.  We may have to update the
                 * UNDO FIFO even if no inodes are queued.
                 */
                for (;;) {
                        while (hmp->flusher.group_lock)
                                tsleep(&hmp->flusher.group_lock, 0,
                                       "hmrhld", 0);
                        hmp->flusher.act = hmp->flusher.next;
                        ++hmp->flusher.next;
                        hammer_flusher_clean_loose_ios(hmp);
                        hammer_flusher_flush(hmp);
                        hmp->flusher.done = hmp->flusher.act;
                        wakeup(&hmp->flusher.done);
                        flg = TAILQ_FIRST(&hmp->flush_group_list);
                        if (flg == NULL || flg->closed == 0)
                                break;
                }

                /*
                 * Wait for activity.
                 */
                if (hmp->flusher.exiting &&
                    TAILQ_EMPTY(&hmp->flush_group_list))
                        break;
                while (hmp->flusher.signal == 0)
                        tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
                hmp->flusher.signal = 0;
        }

        /*
         * And we are done.
         */
        hmp->flusher.td = NULL;
        wakeup(&hmp->flusher.exiting);
        lwkt_exit();
}
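
/*
 * Note that the inner loop above keeps flushing as long as the flush
 * group at the head of hmp->flush_group_list has been closed
 * (flg->closed); the master only goes back to sleep on flusher.signal
 * once no closed group remains or it has been asked to exit.
 */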

/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        hammer_flush_group_t flg;
        hammer_reserve_t resv;
        hammer_inode_t ip;
        hammer_inode_t next_ip;
        int count;

        /*
         * Just in-case there's a flush race on mount
         */
        if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL)
                return;

        /*
         * We only do one flg but we may have to loop/retry.
         */
        count = 0;
        while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
                ++count;
                if (hammer_debug_general & 0x0001) {
                        kprintf("hammer_flush %d ttl=%d recs=%d\n",
                                hmp->flusher.act,
                                flg->total_count, flg->refs);
                }
                hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
                if (hammer_debug_general & 0x0001)
                        ;       /* additional debug output elided */

                /*
                 * If the previous flush cycle just about exhausted our
                 * UNDO space we may have to do a dummy cycle to move the
                 * first_offset up before actually digging into a new cycle,
                 * or the new cycle will not have sufficient undo space.
                 */
                if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
                        hammer_flusher_finalize(&hmp->flusher.trans, 0);

                /*
                 * Iterate the inodes in the flg's flush_list and assign
                 * them to slaves.
                 */
                info = TAILQ_FIRST(&hmp->flusher.ready_list);
                next_ip = TAILQ_FIRST(&flg->flush_list);

                while ((ip = next_ip) != NULL) {
                        next_ip = TAILQ_NEXT(ip, flush_entry);

                        /*
                         * Add ip to the slave's work array.  The slave is
                         * not currently running.
                         */
                        info->work_array[info->count++] = ip;
                        if (info->count != HAMMER_FLUSH_GROUP_SIZE)
                                continue;

                        /*
                         * Get the slave running
                         */
                        TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
                        TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
                        info->flg = flg;
                        info->runstate = 1;
                        wakeup(&info->runstate);

                        /*
                         * Get a new slave.  We may have to wait for one to
                         * become available.
                         */
                        while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) == NULL) {
                                tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
                        }
                }

                /*
                 * Run the current slave if necessary
                 */
                if (info->count) {
                        TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
                        TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
                        info->flg = flg;
                        info->runstate = 1;
                        wakeup(&info->runstate);
                }

                /*
                 * Wait for all slaves to finish running
                 */
                while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
                        tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

                /*
                 * Do the final finalization, clean up
                 */
                hammer_flusher_finalize(&hmp->flusher.trans, 1);
                hmp->flusher.tid = hmp->flusher.trans.tid;

                hammer_done_transaction(&hmp->flusher.trans);

                /*
                 * Loop up on the same flg.  If the flg is done clean it up
                 * and break out.  We only flush one flg.
                 */
                if (TAILQ_FIRST(&flg->flush_list) == NULL) {
                        KKASSERT(TAILQ_EMPTY(&flg->flush_list));
                        KKASSERT(flg->refs == 0);
                        TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
                        kfree(flg, M_HAMMER);
                        break;
                }
        }

        /*
         * We may have pure meta-data to flush, or we may have to finish
         * cycling the UNDO FIFO, even if there were no flush groups.
         */
        if (count == 0 && hammer_flusher_haswork(hmp)) {
                hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
                hammer_flusher_finalize(&hmp->flusher.trans, 1);
                hammer_done_transaction(&hmp->flusher.trans);
        }

        /*
         * Clean up any freed big-blocks (typically zone-2).
         * resv->flush_group is typically set several flush groups ahead
         * of the free to ensure that the freed block is not reused until
         * it is safe to do so.
         */
        while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
                if (resv->flush_group != hmp->flusher.act)
                        break;
                hammer_reserve_clrdelay(hmp, resv);
        }
}
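
/*
 * Work distribution summary: inodes from the current flush group are
 * packed into a slave's work_array in batches of up to
 * HAMMER_FLUSH_GROUP_SIZE.  A full batch (or the final, partially filled
 * one) moves the slave from ready_list to run_list and wakes it; the
 * master then sleeps on ready_list until run_list drains before running
 * the finalization.
 */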

/*
 * The slave flusher thread pulls work off the master flush_list until no
 * work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
        hammer_flush_group_t flg;
        hammer_flusher_info_t info;
        hammer_mount_t hmp;
        hammer_inode_t ip;
        int i;

        info = arg;
        hmp = info->hmp;

        for (;;) {
                while (info->runstate == 0)
                        tsleep(&info->runstate, 0, "hmrssw", 0);
                if (info->runstate < 0)
                        break;
                flg = info->flg;

                for (i = 0; i < info->count; ++i) {
                        ip = info->work_array[i];
                        hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
                }
                info->count = 0;
                info->runstate = 0;
                TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
                TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
                wakeup(&hmp->flusher.ready_list);
        }
        info->td = NULL;
        wakeup(&info->td);
        lwkt_exit();
}
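
/*
 * When a slave finishes its batch it moves itself from run_list back to
 * ready_list and wakes &flusher.ready_list, which is the same event the
 * master sleeps on both when it needs another free slave and when it is
 * waiting for the whole run_list to drain.
 */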

/*
 * Clean up "loose" I/O structures hanging off the mount.
 */
void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
        hammer_buffer_t buffer;
        hammer_io_t io;

        /*
         * loose ends - buffers without bp's aren't tracked by the kernel
         * and can build up, so clean them out.  This can occur when an
         * IO completes on a buffer with no references left.
         */
        if ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                crit_enter();   /* biodone() race */
                while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                        KKASSERT(io->mod_list == &hmp->lose_list);
                        TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
                        io->mod_list = NULL;
                        if (io->lock.refs == 0)
                                ++hammer_count_refedbufs;
                        hammer_ref(&io->lock);
                        buffer = (void *)io;
                        hammer_rel_buffer(buffer, 0);
                }
                crit_exit();
        }
}

/*
 * Flush a single inode that is part of a flush group.
 *
 * NOTE!  The sync code can return EWOULDBLOCK if the flush operation
 * would otherwise blow out the buffer cache.  hammer_flush_inode_done()
 * will re-queue the inode for the next flush sequence and force the
 * flusher to run again if this occurs.
 */
static void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
        hammer_mount_t hmp = ip->hmp;
        int error;

        hammer_flusher_clean_loose_ios(hmp);
        error = hammer_sync_inode(trans, ip);
        if (error != EWOULDBLOCK)
                ip->error = error;
        hammer_flush_inode_done(ip);
        while (hmp->flusher.finalize_want)
                tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
        if (hammer_flusher_undo_exhausted(trans, 1)) {
                kprintf("HAMMER: Warning: UNDO area too small!\n");
                hammer_flusher_finalize(trans, 1);
        } else if (hammer_flusher_meta_limit(trans->hmp)) {
                hammer_flusher_finalize(trans, 0);
        }
}

/*
 * Return non-zero if the UNDO area has less than (QUARTER / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *       will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *       running in parallel with a flush and cannot be allowed to drop
 *       available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *       to give the flush plenty of runway to work in.
 */
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
        if (hammer_undo_space(trans) <
            hammer_undo_max(trans->hmp) * quarter / 4) {
                return(1);
        } else {
                return(0);
        }
}
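
/*
 * Worked example of the threshold above (illustrative figures only):
 * with hammer_undo_max() reporting 512MB of total UNDO FIFO space, a
 * quarter=3 call reports exhaustion once less than 384MB (3/4 of the
 * total) remains free, while the emergency quarter=1 check only trips
 * below 128MB.
 */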

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
 */
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t cundomap, dundomap;
        hammer_mount_t hmp;
        hammer_io_t io;
        int i;

        hmp = trans->hmp;
        root_volume = trans->rootvol;

        /*
         * Exclusively lock the flusher.  This guarantees that all dirty
         * buffers will be idled (have a mod-count of 0).
         */
        ++hmp->flusher.finalize_want;
        hammer_lock_ex(&hmp->flusher.finalize_lock);

        /*
         * If this isn't the final sync several threads may have hit the
         * meta-limit at the same time and raced.  Only sync if we really
         * have to, after acquiring the lock.
         */
        if (final == 0 && !hammer_flusher_meta_limit(hmp))
                goto done;

        /*
         * Flush data buffers.  This can occur asynchronously and at any
         * time.  We must interlock against the frontend direct-data write
         * but do not have to acquire the sync-lock yet.
         */
        while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                hammer_io_write_interlock(io);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_io_done_interlock(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
        }

        /*
         * The sync-lock is required for the remaining sequence.  This lock
         * prevents meta-data from being modified.
         */
        hammer_sync_lock_ex(trans);

        /*
         * If we have been asked to finalize the volume header sync the
         * cached blockmap to the on-disk blockmap.  Generate an UNDO
         * record for the update.
         */
        if (final) {
                cundomap = &hmp->blockmap[0];
                dundomap = &root_volume->ondisk->vol0_blockmap[0];
                if (root_volume->io.modified) {
                        hammer_modify_volume(trans, root_volume,
                                             dundomap, sizeof(hmp->blockmap));
                        for (i = 0; i < HAMMER_MAX_ZONES; ++i)
                                hammer_crc_set_blockmap(&cundomap[i]);
                        bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
                        hammer_modify_volume_done(root_volume);
                }
        }

        /*
         * Flush the UNDO buffers.
         */
        while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
        }

        /*
         * Wait for I/Os to complete
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl1");

        /*
         * Update the on-disk volume header with new UNDO FIFO end position
         * (do not generate new UNDO records for this change).  We have to
         * do this for the UNDO FIFO whether (final) is set or not.
         *
         * Also update the on-disk next_tid field.  This does not require
         * an UNDO.  However, because our TID is generated before we get
         * the sync lock another sync may have beat us to the punch.
         *
         * This also has the side effect of updating first_offset based on
         * a prior finalization when the first finalization of the next flush
         * cycle occurs, removing any undo info from the prior finalization
         * from consideration.
         *
         * The volume header will be flushed out synchronously.
         */
        dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
        cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

        if (dundomap->first_offset != cundomap->first_offset ||
            dundomap->next_offset != cundomap->next_offset) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                dundomap->first_offset = cundomap->first_offset;
                dundomap->next_offset = cundomap->next_offset;
                hammer_crc_set_blockmap(dundomap);
                hammer_modify_volume_done(root_volume);
        }

        if (root_volume->io.modified) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                if (root_volume->ondisk->vol0_next_tid < trans->tid)
                        root_volume->ondisk->vol0_next_tid = trans->tid;
                hammer_crc_set_volume(root_volume->ondisk);
                hammer_modify_volume_done(root_volume);
                hammer_io_flush(&root_volume->io);
        }

        /*
         * Wait for I/Os to complete
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl2");

        /*
         * Flush meta-data.  The meta-data will be undone if we crash
         * so we can safely flush it asynchronously.
         *
         * Repeated catchups will wind up flushing this update's meta-data
         * and the UNDO buffers for the next update simultaneously.
         */
        while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
        }

        /*
         * If this is the final finalization for the flush group set
         * up for the next sequence by setting a new first_offset in
         * our cached blockmap and clearing the undo history.
         *
         * Even though we have updated our cached first_offset, the on-disk
         * first_offset still governs available-undo-space calculations.
         */
        if (final) {
                cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
                if (cundomap->first_offset == cundomap->next_offset) {
                        hmp->hflags &= ~HMNT_UNDO_DIRTY;
                } else {
                        cundomap->first_offset = cundomap->next_offset;
                        hmp->hflags |= HMNT_UNDO_DIRTY;
                }
                hammer_clear_undo_history(hmp);
        }

        hammer_sync_unlock(trans);

done:
        hammer_unlock(&hmp->flusher.finalize_lock);
        if (--hmp->flusher.finalize_want == 0)
                wakeup(&hmp->flusher.finalize_want);
}
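
/*
 * Ordering recap for the finalization above: (1) dirty data buffers are
 * flushed, (2) the UNDO buffers are flushed and waited on, (3) the
 * volume header (UNDO FIFO end position, vol0_next_tid) is updated and
 * flushed synchronously, and only then (4) the meta-data buffers are
 * allowed out asynchronously, since a crash at that point can be rolled
 * back from the already-durable UNDO FIFO.
 */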

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Since we cannot allow such buffers to flush until we have dealt with
 * the UNDOs, we risk deadlocking the kernel's buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
        if (hmp->locked_dirty_space + hmp->io_running_space >
            hammer_limit_dirtybufspace) {
                return(1);
        }
        return(0);
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * This version is used by background operations (mirror, prune, reblock)
 * to leave room for foreground operations.
 */
int
hammer_flusher_meta_halflimit(hammer_mount_t hmp)
{
        if (hmp->locked_dirty_space + hmp->io_running_space >
            hammer_limit_dirtybufspace / 2) {
                return(1);
        }
        return(0);
}

/*
 * Return non-zero if the flusher still has something to flush.
 */
int
hammer_flusher_haswork(hammer_mount_t hmp)
{
        if (TAILQ_FIRST(&hmp->flush_group_list) ||      /* dirty inodes */
            TAILQ_FIRST(&hmp->volu_list) ||             /* dirty buffers */
            TAILQ_FIRST(&hmp->undo_list) ||
            TAILQ_FIRST(&hmp->data_list) ||
            TAILQ_FIRST(&hmp->meta_list) ||
            (hmp->hflags & HMNT_UNDO_DIRTY)             /* UNDO FIFO sync */
        ) {
                return(1);
        }
        return(0);
}
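
/*
 * hammer_flusher_haswork() is what allows hammer_flusher_flush() to run a
 * "dummy" cycle: even with no flush groups queued it will still start a
 * transaction and finalize when dirty buffers or a dirty UNDO FIFO are
 * pending.
 */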