/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.35 2008/07/11 01:22:29 dillon Exp $
 */

/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
                                        hammer_transaction_t trans);

/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
        struct hammer_mount *hmp;
        thread_t        td;
        int             startit;
        hammer_inode_t  work_array[HAMMER_FLUSH_GROUP_SIZE];
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;
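
/*
 * Roughly: each slave thread gets one hammer_flusher_info.  hmp points
 * back at the mount, td is the slave's LWKT thread, startit is the
 * wakeup/termination flag the slave polls, and work_array batches the
 * inodes pulled off the flush list for one pass.
 */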

/*
 * Sync all inodes pending on the flusher.  This routine may have to be
 * called twice to get them all as some may be queued to a later flush group.
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
        int seq;

        if (hmp->flusher.td) {
                seq = hmp->flusher.next;
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
                while ((int)(seq - hmp->flusher.done) > 0)
                        tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
        }
}

/*
 * Sync all inodes pending on the flusher - return immediately.
 */
void
hammer_flusher_async(hammer_mount_t hmp)
{
        if (hmp->flusher.td) {
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
        }
}
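
/*
 * Usage sketch (hypothetical caller, not part of this file): a VFS sync
 * entry point would typically block on the flusher for a waiting sync
 * and just kick it otherwise, e.g.:
 *
 *      if (waitfor == MNT_WAIT)
 *              hammer_flusher_sync(hmp);
 *      else
 *              hammer_flusher_async(hmp);
 */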

void
hammer_flusher_create(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        hmp->flusher.signal = 0;
        hmp->flusher.act = 0;
        hmp->flusher.done = 0;
        hmp->flusher.next = 1;
        hmp->flusher.count = 0;
        hammer_ref(&hmp->flusher.finalize_lock);

        lwkt_create(hammer_flusher_master_thread, hmp,
                    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                info = kmalloc(sizeof(*info), M_HAMMER, M_WAITOK|M_ZERO);
                info->hmp = hmp;
                ++hmp->flusher.count;
                hmp->flusher.info[i] = info;
                lwkt_create(hammer_flusher_slave_thread, info,
                            &info->td, NULL, 0, -1, "hammer-S%d", i);
        }
}
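
/*
 * One master thread ("hammer-M") sequences flush groups; the slave
 * threads ("hammer-S0" .. "hammer-S%d") each flush batches of inodes
 * out of the active flush group in parallel.
 */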

void
hammer_flusher_destroy(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        hmp->flusher.exiting = 1;
        while (hmp->flusher.td) {
                ++hmp->flusher.signal;
                wakeup(&hmp->flusher.signal);
                tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
        }
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                if ((info = hmp->flusher.info[i]) != NULL) {
                        KKASSERT(info->startit == 0);
                        info->startit = -1;
                        wakeup(&info->startit);
                        while (info->td)
                                tsleep(&info->td, 0, "hmrwwc", 0);
                        hmp->flusher.info[i] = NULL;
                        kfree(info, M_HAMMER);
                        --hmp->flusher.count;
                }
        }
        KKASSERT(hmp->flusher.count == 0);
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
        hammer_mount_t hmp = arg;

        for (;;) {
                while (hmp->flusher.group_lock)
                        tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
                hmp->flusher.act = hmp->flusher.next;
                ++hmp->flusher.next;
                hammer_flusher_clean_loose_ios(hmp);
                hammer_flusher_flush(hmp);
                hmp->flusher.done = hmp->flusher.act;
                wakeup(&hmp->flusher.done);

                if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_list))
                        break;

                /*
                 * This is a hack until we can dispose of frontend buffer
                 * cache buffers on the frontend.
                 */
                while (hmp->flusher.signal == 0)
                        tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
                hmp->flusher.signal = 0;
        }

        hmp->flusher.td = NULL;
        wakeup(&hmp->flusher.exiting);
        lwkt_exit();
}
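
/*
 * Sequencing sketch: callers bump flusher.signal and note flusher.next
 * as their target sequence; the master copies next into act, runs one
 * flush cycle, publishes the completed sequence in flusher.done, and
 * wakes anyone sleeping in hammer_flusher_sync().
 */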

/*
 * The slave flusher thread pulls work off the master flush_list until
 * no work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
        hammer_flusher_info_t info;
        hammer_mount_t hmp;
        hammer_inode_t ip;
        int c;
        int i;
        int n;

        info = arg;
        hmp = info->hmp;

        for (;;) {
                while (info->startit == 0)
                        tsleep(&info->startit, 0, "hmrssw", 0);
                if (info->startit < 0)
                        break;
                info->startit = 0;

                /*
                 * Try to pull out around ~64 inodes at a time to flush.
                 * The idea is to try to avoid deadlocks between the slaves.
                 */
                n = c = 0;
                while ((ip = TAILQ_FIRST(&hmp->flush_list)) != NULL) {
                        if (ip->flush_group != hmp->flusher.act)
                                break;
                        TAILQ_REMOVE(&hmp->flush_list, ip, flush_entry);
                        info->work_array[n++] = ip;
                        c += ip->rsv_recs;
                        if (n < HAMMER_FLUSH_GROUP_SIZE &&
                            c < HAMMER_FLUSH_GROUP_SIZE * 8) {
                                continue;
                        }
                        for (i = 0; i < n; ++i) {
                                hammer_flusher_flush_inode(info->work_array[i],
                                                           &hmp->flusher.trans);
                        }
                        n = c = 0;
                }
                for (i = 0; i < n; ++i) {
                        hammer_flusher_flush_inode(info->work_array[i],
                                                   &hmp->flusher.trans);
                }
                if (--hmp->flusher.running == 0)
                        wakeup(&hmp->flusher.running);
        }
        info->td = NULL;
        wakeup(&info->td);
        lwkt_exit();
}
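
/*
 * Batching note: a slave accumulates at most HAMMER_FLUSH_GROUP_SIZE
 * inodes (the "~64" referenced above) in work_array before flushing
 * the batch.  Keeping batches small helps prevent one slave from
 * hoarding the whole flush group and deadlocking against the others.
 */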

void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
        hammer_buffer_t buffer;
        hammer_io_t io;

        /*
         * loose ends - buffers without bp's aren't tracked by the kernel
         * and can build up, so clean them out.  This can occur when an
         * IO completes on a buffer with no references left.
         */
        if ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                crit_enter();   /* biodone() race */
                while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                        KKASSERT(io->mod_list == &hmp->lose_list);
                        TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
                        io->mod_list = NULL;
                        if (io->lock.refs == 0)
                                ++hammer_count_refedbufs;
                        hammer_ref(&io->lock);
                        buffer = (void *)io;
                        hammer_rel_buffer(buffer, 0);
                }
                crit_exit();
        }
}

/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        hammer_reserve_t resv;
        int i;
        int n;

        hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

        /*
         * If the previous flush cycle just about exhausted our UNDO space
         * we may have to do a dummy cycle to move the first_offset up
         * before actually digging into a new cycle, or the new cycle will
         * not have sufficient undo space.
         */
        if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
                hammer_flusher_finalize(&hmp->flusher.trans, 0);

        /*
         * Start work threads.
         */
        n = hmp->count_iqueued / HAMMER_FLUSH_GROUP_SIZE;
        if (TAILQ_FIRST(&hmp->flush_list)) {
                for (i = 0; i <= n; ++i) {
                        if (i == HAMMER_MAX_FLUSHERS ||
                            hmp->flusher.info[i] == NULL) {
                                break;
                        }
                        info = hmp->flusher.info[i];
                        if (info->startit == 0) {
                                ++hmp->flusher.running;
                                info->startit = 1;
                                wakeup(&info->startit);
                        }
                }
        }
        while (hmp->flusher.running)
                tsleep(&hmp->flusher.running, 0, "hmrfcc", 0);

        hammer_flusher_finalize(&hmp->flusher.trans, 1);
        hmp->flusher.tid = hmp->flusher.trans.tid;

        /*
         * Clean up any freed big-blocks (typically zone-2).
         * resv->flush_group is typically set several flush groups ahead
         * of the free to ensure that the freed block is not reused until
         * it can no longer be reused.
         */
        while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
                if (resv->flush_group != hmp->flusher.act)
                        break;
                hammer_reserve_clrdelay(hmp, resv);
        }
        hammer_done_transaction(&hmp->flusher.trans);
}
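
/*
 * Work-thread fan-out: roughly one slave is woken per
 * HAMMER_FLUSH_GROUP_SIZE queued inodes (n = count_iqueued /
 * HAMMER_FLUSH_GROUP_SIZE, at least one), capped by HAMMER_MAX_FLUSHERS
 * and by how many slave threads were actually created.
 */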

/*
 * Flush a single inode that is part of a flush group.
 *
 * NOTE!  The sync code can return EWOULDBLOCK if the flush operation
 * would otherwise blow out the buffer cache.  hammer_flush_inode_done()
 * will re-queue the inode for the next flush sequence and force the
 * flusher to run again if this occurs.
 */
static void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
        hammer_mount_t hmp = ip->hmp;
        int error;

        hammer_flusher_clean_loose_ios(hmp);
        error = hammer_sync_inode(ip);
        if (error != EWOULDBLOCK)
                ip->error = error;
        hammer_flush_inode_done(ip);
        while (hmp->flusher.finalize_want)
                tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
        if (hammer_flusher_undo_exhausted(trans, 1)) {
                kprintf("HAMMER: Warning: UNDO area too small!\n");
                hammer_flusher_finalize(trans, 1);
        } else if (hammer_flusher_meta_limit(trans->hmp)) {
                hammer_flusher_finalize(trans, 0);
        }
}

/*
 * Return non-zero if the UNDO area has less than (quarter / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *       will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *       running in parallel with a flush and cannot be allowed to drop
 *       available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *       to give the flush plenty of runway to work in.
 */
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
        if (hammer_undo_space(trans) <
            hammer_undo_max(trans->hmp) * quarter / 4) {
                return(1);
        } else {
                return(0);
        }
}
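
/*
 * Example (illustrative numbers): with a 1GB UNDO FIFO, quarter == 1
 * trips once less than 256MB of undo space remains free, while
 * quarter == 3 already trips below 768MB, giving a new flush cycle
 * plenty of headroom before it starts.
 */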

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
 */
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t cundomap, dundomap;
        hammer_mount_t hmp;
        hammer_io_t io;
        int i;

        hmp = trans->hmp;
        root_volume = trans->rootvol;

        /*
         * Exclusively lock the flusher.  This guarantees that all dirty
         * buffers will be idled (have a mod-count of 0).
         */
        ++hmp->flusher.finalize_want;
        hammer_lock_ex(&hmp->flusher.finalize_lock);

        /*
         * If this isn't the final sync several threads may have hit the
         * meta-limit at the same time and raced.  Only sync if we really
         * have to, after acquiring the lock.
         */
        if (final == 0 && !hammer_flusher_meta_limit(hmp))
                goto done;

        /*
         * Flush data buffers.  This can occur asynchronously and at any
         * time.  We must interlock against the frontend direct-data write
         * but do not have to acquire the sync-lock yet.
         */
        while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                hammer_io_write_interlock(io);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_io_done_interlock(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
        }

        /*
         * The sync-lock is required for the remaining sequence.  This lock
         * prevents meta-data from being modified.
         */
        hammer_sync_lock_ex(trans);

        /*
         * If we have been asked to finalize the volume header sync the
         * cached blockmap to the on-disk blockmap.  Generate an UNDO
         * record for the update.
         */
        if (final) {
                cundomap = &hmp->blockmap[0];
                dundomap = &root_volume->ondisk->vol0_blockmap[0];
                if (root_volume->io.modified) {
                        hammer_modify_volume(trans, root_volume,
                                             dundomap, sizeof(hmp->blockmap));
                        for (i = 0; i < HAMMER_MAX_ZONES; ++i)
                                hammer_crc_set_blockmap(&cundomap[i]);
                        bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
                        hammer_modify_volume_done(root_volume);
                }
        }

        while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
        }

        /*
         * Wait for I/Os to complete
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl1");

        /*
         * Update the on-disk volume header with new UNDO FIFO end position
         * (do not generate new UNDO records for this change).  We have to
         * do this for the UNDO FIFO whether (final) is set or not.
         *
         * Also update the on-disk next_tid field.  This does not require
         * an UNDO.  However, because our TID is generated before we get
         * the sync lock another sync may have beat us to the punch.
         *
         * This also has the side effect of updating first_offset based on
         * a prior finalization when the first finalization of the next flush
         * cycle occurs, removing any undo info from the prior finalization
         * from consideration.
         *
         * The volume header will be flushed out synchronously.
         */
        dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
        cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

        if (dundomap->first_offset != cundomap->first_offset ||
            dundomap->next_offset != cundomap->next_offset) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                dundomap->first_offset = cundomap->first_offset;
                dundomap->next_offset = cundomap->next_offset;
                hammer_crc_set_blockmap(dundomap);
                hammer_modify_volume_done(root_volume);
        }

        if (root_volume->io.modified) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                if (root_volume->ondisk->vol0_next_tid < trans->tid)
                        root_volume->ondisk->vol0_next_tid = trans->tid;
                hammer_crc_set_volume(root_volume->ondisk);
                hammer_modify_volume_done(root_volume);
                hammer_io_flush(&root_volume->io);
        }

        /*
         * Wait for I/Os to complete
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl2");

        /*
         * Flush meta-data.  The meta-data will be undone if we crash
         * so we can safely flush it asynchronously.
         *
         * Repeated catchups will wind up flushing this update's meta-data
         * and the UNDO buffers for the next update simultaneously.  This
         * is not a problem.
         */
        while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
        }

        /*
         * If this is the final finalization for the flush group set
         * up for the next sequence by setting a new first_offset in
         * our cached blockmap and clearing the undo history.
         *
         * Even though we have updated our cached first_offset, the on-disk
         * first_offset still governs available-undo-space calculations.
         */
        if (final) {
                cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
                cundomap->first_offset = cundomap->next_offset;
                hammer_clear_undo_history(hmp);
        }

        hammer_sync_unlock(trans);

done:
        hammer_unlock(&hmp->flusher.finalize_lock);
        if (--hmp->flusher.finalize_want == 0)
                wakeup(&hmp->flusher.finalize_want);
}
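
/*
 * Ordering recap: this amounts to write-ahead logging.  The UNDO
 * records are flushed and waited on ("hmrfl1") before the volume header
 * publishes the new FIFO position ("hmrfl2"), and only then is the
 * meta-data pushed out asynchronously, so a crash at any point can be
 * backed out via the UNDOs.
 */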

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Since we cannot allow such buffers to flush until we have dealt with
 * the UNDOs, we risk deadlocking the kernel's buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
        if (hmp->locked_dirty_space + hmp->io_running_space >
            hammer_limit_dirtybufspace) {
                return(1);
        }
        return(0);
}