/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.23 2008/06/10 08:51:01 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"
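
/*
 * Overview (editorial summary of the code below): one master thread
 * sequences numbered flush groups while a pool of up to
 * HAMMER_MAX_FLUSHERS slave threads flushes individual inodes in
 * parallel.  The frontend queues inodes onto hmp->flush_list and wakes
 * the master; the master distributes the work to the slaves and then
 * finalizes the UNDO FIFO and volume header so the group commits
 * atomically.
 */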

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
					hammer_transaction_t trans);
static int hammer_must_finalize_undo(hammer_mount_t hmp);
static void hammer_flusher_finalize(hammer_transaction_t trans, int final);

#define HAMMER_FLUSHER_IMMEDIATE	16

/*
 * Synchronously flush: queue a new flush cycle and sleep until the
 * flusher reports that sequence number complete.
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
	int seq;

	if (hmp->flusher.td) {
		seq = hmp->flusher.next;
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
		while ((int)(seq - hmp->flusher.done) > 0)
			tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
	}
}

/*
 * Queue a flush cycle but do not wait for it to complete.
 */
void
hammer_flusher_async(hammer_mount_t hmp)
{
	if (hmp->flusher.td) {
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
	}
}

/*
 * Create the flusher threads for a mount: one master sequencer and a
 * pool of slave work threads.
 */
void
hammer_flusher_create(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	int i;

	hmp->flusher.signal = 0;
	hmp->flusher.act = 0;
	hmp->flusher.done = 0;
	hmp->flusher.next = 1;
	hmp->flusher.count = 0;
	hammer_ref(&hmp->flusher.finalize_lock);

	lwkt_create(hammer_flusher_master_thread, hmp,
		    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
	for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
		info = kmalloc(sizeof(*info), M_HAMMER, M_WAITOK|M_ZERO);
		info->hmp = hmp;
		TAILQ_INIT(&info->work_list);
		++hmp->flusher.count;
		hmp->flusher.info[i] = info;
		lwkt_create(hammer_flusher_slave_thread, info,
			    &info->td, NULL, 0, -1, "hammer-S%d", i);
	}
}
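
/*
 * Usage note (a sketch of the expected lifecycle, not verified against
 * the VFS code): the mount path calls hammer_flusher_create() once the
 * mount structure is initialized, and unmount performs a final
 * hammer_flusher_sync() before calling hammer_flusher_destroy().
 */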

/*
 * Tear down the flusher threads.  The master is shut down first, then
 * each slave is told to exit and its info structure is freed.
 */
void
hammer_flusher_destroy(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	int i;

	/*
	 * Shut down the master thread.
	 */
	hmp->flusher.exiting = 1;
	while (hmp->flusher.td) {
		++hmp->flusher.signal;
		wakeup(&hmp->flusher.signal);
		tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
	}

	/*
	 * Shut down the slave threads.
	 */
	for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
		if ((info = hmp->flusher.info[i]) != NULL) {
			KKASSERT(info->running == 0);
			info->running = -1;
			wakeup(&info->running);
			while (info->td)
				tsleep(&info->td, 0, "hmrwwc", 0);
			hmp->flusher.info[i] = NULL;
			kfree(info, M_HAMMER);
			--hmp->flusher.count;
		}
	}
	KKASSERT(hmp->flusher.count == 0);
}

/*
 * The master flusher thread sequences flush groups.  Each pass
 * activates the next group, runs the flush, and publishes the
 * completed sequence number.
 */
static void
hammer_flusher_master_thread(void *arg)
{
	hammer_mount_t hmp = arg;

	for (;;) {
		while (hmp->flusher.group_lock)
			tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
		hmp->flusher.act = hmp->flusher.next;
		++hmp->flusher.next;
		hammer_flusher_clean_loose_ios(hmp);
		hammer_flusher_flush(hmp);
		hammer_flusher_clean_loose_ios(hmp);
		hmp->flusher.done = hmp->flusher.act;
		wakeup(&hmp->flusher.done);

		/*
		 * Wait for activity.
		 */
		if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_list))
			break;

		/*
		 * This is a hack until we can dispose of frontend buffer
		 * cache buffers on the frontend.
		 */
		while (hmp->flusher.signal == 0)
			tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
		hmp->flusher.signal = 0;
	}

	/*
	 * And we are done.
	 */
	hmp->flusher.td = NULL;
	wakeup(&hmp->flusher.exiting);
	lwkt_exit();
}

/*
 * Slave flusher threads pull inodes off their work_list and flush them
 * until the list is empty.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
	hammer_flusher_info_t info;
	hammer_mount_t hmp;
	hammer_inode_t ip;

	info = arg;
	hmp = info->hmp;

	for (;;) {
		while (info->running == 0)
			tsleep(&info->running, 0, "hmrssw", 0);
		if (info->running < 0)
			break;
		while ((ip = TAILQ_FIRST(&info->work_list)) != NULL) {
			TAILQ_REMOVE(&info->work_list, ip, flush_entry);
			hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
		}
		info->running = 0;
		if (--hmp->flusher.running == 0)
			wakeup(&hmp->flusher.running);
	}
	info->td = NULL;
	wakeup(&info->td);
	lwkt_exit();
}
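
/*
 * Handshake summary (derived from the code above): the master sets
 * info->running to 1 and bumps hmp->flusher.running for each slave it
 * activates.  Each slave clears its own flag when its work_list drains,
 * and the last slave to decrement hmp->flusher.running wakes the
 * master.  A running value of -1, set by hammer_flusher_destroy(),
 * tells the slave to exit.
 */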

static void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
	hammer_buffer_t buffer;
	hammer_io_t io;

	/*
	 * loose ends - buffers without bp's aren't tracked by the kernel
	 * and can build up, so clean them out.  This can occur when an
	 * IO completes on a buffer with no references left.
	 */
	while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
		KKASSERT(io->mod_list == &hmp->lose_list);
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		hammer_ref(&io->lock);
		buffer = (void *)io;
		hammer_rel_buffer(buffer, 0);
	}
}

/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	hammer_inode_t ip;
	hammer_reserve_t resv;
	int i;

	hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

	/*
	 * Distribute the inodes belonging to the active flush group
	 * round-robin across the slave work threads, waking each slave
	 * the first time work is queued to it.
	 */
	i = 0;
	while ((ip = TAILQ_FIRST(&hmp->flush_list)) != NULL) {
		if (ip->flush_group != hmp->flusher.act)
			break;
		TAILQ_REMOVE(&hmp->flush_list, ip, flush_entry);
		info = hmp->flusher.info[i];
		TAILQ_INSERT_TAIL(&info->work_list, ip, flush_entry);
		if (info->running == 0) {
			++hmp->flusher.running;
			info->running = 1;
			wakeup(&info->running);
		}
		/*hammer_flusher_flush_inode(ip, &trans);*/
		++i;
		if (i == HAMMER_MAX_FLUSHERS || hmp->flusher.info[i] == NULL)
			i = 0;
	}
	while (hmp->flusher.running)
		tsleep(&hmp->flusher.running, 0, "hmrfcc", 0);

	hammer_flusher_finalize(&hmp->flusher.trans, 1);
	hmp->flusher.tid = hmp->flusher.trans.tid;

	/*
	 * Clean up any freed big-blocks (typically zone-2).
	 * resv->flush_group is typically set several flush groups ahead
	 * of the free to ensure that the freed block is not reused until
	 * older flush groups can no longer reference it.
	 */
	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
		if (resv->flush_group != hmp->flusher.act)
			break;
		TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
		hammer_blockmap_reserve_complete(hmp, resv);
	}

	hammer_done_transaction(&hmp->flusher.trans);
}
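
/*
 * Example of the delayed-reuse rule above (illustrative numbers): if a
 * big-block is freed while flush group 5 is active and its reservation
 * is tagged with flush_group 7, the reservation stays on delay_list
 * through groups 5 and 6 and is only completed when group 7 becomes
 * active, so no new data can land in the block while an older flush
 * group might still reference it.
 */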

/*
 * Flush a single inode that is part of a flush group.
 */
static void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
	hammer_mount_t hmp = ip->hmp;

	hammer_lock_sh(&hmp->flusher.finalize_lock);
	ip->error = hammer_sync_inode(ip);
	hammer_flush_inode_done(ip);
	hammer_unlock(&hmp->flusher.finalize_lock);

	/*
	 * Perform a final finalization if the UNDO area is about to
	 * overflow, or an intermediate (non-final) finalization if too
	 * many dirty buffers have accumulated.  Only one thread runs the
	 * finalization at a time; the others wait on finalize_want.
	 */
	while (hmp->flusher.finalize_want)
		tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
	if (hammer_must_finalize_undo(hmp)) {
		hmp->flusher.finalize_want = 1;
		hammer_lock_ex(&hmp->flusher.finalize_lock);
		kprintf("HAMMER: Warning: UNDO area too small!\n");
		hammer_flusher_finalize(trans, 1);
		hammer_unlock(&hmp->flusher.finalize_lock);
		hmp->flusher.finalize_want = 0;
		wakeup(&hmp->flusher.finalize_want);
	} else if (trans->hmp->locked_dirty_count +
		   trans->hmp->io_running_count > hammer_limit_dirtybufs) {
		hmp->flusher.finalize_want = 1;
		hammer_lock_ex(&hmp->flusher.finalize_lock);
		hammer_flusher_finalize(trans, 0);
		hammer_unlock(&hmp->flusher.finalize_lock);
		hmp->flusher.finalize_want = 0;
		wakeup(&hmp->flusher.finalize_want);
	}
}

/*
 * If the UNDO area gets over half full we have to flush it.  We can't
 * afford the UNDO area becoming completely full as that would break
 * the crash recovery atomicity.
 */
static int
hammer_must_finalize_undo(hammer_mount_t hmp)
{
	if (hammer_undo_space(hmp) < hammer_undo_max(hmp) / 2)
		return(1);
	else
		return(0);
}
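
/*
 * Worked example (hypothetical sizes): if hammer_undo_max() reports a
 * 1 GB UNDO area, a finalization is forced as soon as the free space
 * returned by hammer_undo_space() drops below 512 MB.  The
 * finalization flushes the accumulated UNDOs and resets the FIFO,
 * reclaiming the space.
 */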

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 */
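
/*
 * The write ordering below is what provides crash-recovery atomicity:
 *
 *	1. flush data buffers (safe at any time)
 *	2. flush UNDO buffers and wait for completion
 *	3. update and flush the volume header, wait for completion
 *	4. flush meta-data asynchronously
 *
 * A crash before step 3 completes leaves the old header pointing at
 * the old UNDO range; once step 3 is on disk, the UNDOs needed to back
 * out any partially written meta-data from step 4 are already stable.
 */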
static void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t cundomap, dundomap;
	hammer_mount_t hmp;
	hammer_io_t io;
	int i;

	hmp = trans->hmp;
	root_volume = trans->rootvol;

	/*
	 * Flush data buffers.  This can occur asynchronously and at any
	 * time.  We must interlock against the frontend direct-data write
	 * but do not have to acquire the sync-lock yet.
	 */
	while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
		hammer_ref(&io->lock);
		hammer_io_write_interlock(io);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
	}

	/*
	 * The sync-lock is required for the remaining sequence.  This lock
	 * prevents meta-data from being modified.
	 */
	hammer_sync_lock_ex(trans);

	/*
	 * If we have been asked to finalize the volume header sync the
	 * cached blockmap to the on-disk blockmap.  Generate an UNDO
	 * record for the update.
	 */
	if (final) {
		cundomap = &hmp->blockmap[0];
		dundomap = &root_volume->ondisk->vol0_blockmap[0];
		if (root_volume->io.modified) {
			hammer_modify_volume(trans, root_volume,
					     dundomap, sizeof(hmp->blockmap));
			for (i = 0; i < HAMMER_MAX_ZONES; ++i)
				hammer_crc_set_blockmap(&cundomap[i]);
			bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
			hammer_modify_volume_done(root_volume);
		}
	}

	/*
	 * Flush the UNDO buffers accumulated during this flush cycle.
	 */
	while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
		KKASSERT(io->modify_refs == 0);
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
	}

	/*
	 * Wait for I/Os to complete
	 */
	crit_enter();
	while (hmp->io_running_count)
		tsleep(&hmp->io_running_count, 0, "hmrfl1", 0);
	crit_exit();

	/*
	 * Update the on-disk volume header with new UNDO FIFO end position
	 * (do not generate new UNDO records for this change).  We have to
	 * do this for the UNDO FIFO whether (final) is set or not.
	 *
	 * Also update the on-disk next_tid field.  This does not require
	 * an UNDO.  However, because our TID is generated before we get
	 * the sync lock another sync may have beat us to the punch.
	 *
	 * The volume header will be flushed out synchronously.
	 */
	dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (dundomap->first_offset != cundomap->first_offset ||
	    dundomap->next_offset != cundomap->next_offset) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		dundomap->first_offset = cundomap->first_offset;
		dundomap->next_offset = cundomap->next_offset;
		hammer_crc_set_blockmap(dundomap);
		hammer_crc_set_volume(root_volume->ondisk);
		if (root_volume->ondisk->vol0_next_tid < trans->tid)
			root_volume->ondisk->vol0_next_tid = trans->tid;
		hammer_modify_volume_done(root_volume);
	}

	if (root_volume->io.modified)
		hammer_io_flush(&root_volume->io);
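
	/*
	 * Reading note: first_offset and next_offset delimit the live
	 * portion of the UNDO FIFO; recovery conceptually replays
	 * [first_offset, next_offset).  Setting first_offset equal to
	 * next_offset, as the final section below does, marks the FIFO
	 * empty.  This interpretation follows from the update logic
	 * above and the reset at the end of this function.
	 */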

	/*
	 * Wait for I/Os to complete
	 */
	crit_enter();
	while (hmp->io_running_count)
		tsleep(&hmp->io_running_count, 0, "hmrfl2", 0);
	crit_exit();

	/*
	 * Flush meta-data.  The meta-data will be undone if we crash
	 * so we can safely flush it asynchronously.
	 *
	 * Repeated catchups will wind up flushing this update's meta-data
	 * and the UNDO buffers for the next update simultaneously.  This
	 * is ok.
	 */
	while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
		KKASSERT(io->modify_refs == 0);
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
	}

	/*
	 * If this is the final finalization for the flush group, set up
	 * for the next sequence by setting a new first_offset in our
	 * cached blockmap and clearing the undo history.  Making
	 * first_offset equal to next_offset leaves the UNDO FIFO empty.
	 */
	if (final) {
		cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
		cundomap->first_offset = cundomap->next_offset;
		hammer_clear_undo_history(hmp);
	}

	hammer_sync_unlock(trans);
}