HAMMER 61A/Many: Stabilization
sys/vfs/hammer/hammer_flusher.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.34 2008/07/10 21:23:58 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"
static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
                                        hammer_transaction_t trans);
/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
        struct hammer_mount *hmp;
        thread_t        td;
        int             startit;
        hammer_inode_t  work_array[HAMMER_FLUSH_GROUP_SIZE];
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;
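
/*
 * The startit field is the master/slave handshake: 0 means the slave is
 * idle, 1 tells it to pull inodes from the active flush group, and -1
 * (set by hammer_flusher_destroy()) tells it to exit.  work_array holds
 * the inodes a slave has claimed for one flush pass.
 */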
/*
 * Sync all inodes pending on the flusher.  This routine may have to be
 * called twice to get them all as some may be queued to a later flush group.
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
        int seq;

        if (hmp->flusher.td) {
                seq = hmp->flusher.next;
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
                while ((int)(seq - hmp->flusher.done) > 0)
                        tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
        }
}
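
/*
 * Flush groups are identified by a monotonically increasing sequence
 * number: flusher.next is the next group to be started, flusher.act is
 * the group currently being flushed, and flusher.done is the last group
 * completed.  hammer_flusher_sync() captures flusher.next and waits for
 * flusher.done to catch up; the signed-difference test above is written
 * to tolerate sequence-number wrap.
 */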
/*
 * Sync all inodes pending on the flusher - return immediately.
 */
void
hammer_flusher_async(hammer_mount_t hmp)
{
        if (hmp->flusher.td) {
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
        }
}
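
/*
 * Create the flusher threads for a mount: one master thread ("hammer-M")
 * which sequences the flush groups, and HAMMER_MAX_FLUSHERS slave threads
 * ("hammer-S%d") which do the per-inode work.
 */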
void
hammer_flusher_create(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        hmp->flusher.signal = 0;
        hmp->flusher.act = 0;
        hmp->flusher.done = 0;
        hmp->flusher.next = 1;
        hmp->flusher.count = 0;
        hammer_ref(&hmp->flusher.finalize_lock);

        lwkt_create(hammer_flusher_master_thread, hmp,
                    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                info = kmalloc(sizeof(*info), M_HAMMER, M_WAITOK|M_ZERO);
                info->hmp = hmp;
                ++hmp->flusher.count;
                hmp->flusher.info[i] = info;
                lwkt_create(hammer_flusher_slave_thread, info,
                            &info->td, NULL, 0, -1, "hammer-S%d", i);
        }
}
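
/*
 * Tear the flusher down on unmount: ask the master thread to exit and
 * wait for it, then wake each slave with startit = -1 and wait for its
 * thread pointer to clear before freeing its info structure.
 */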
void
hammer_flusher_destroy(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        /*
         * Kill the master
         */
        hmp->flusher.exiting = 1;
        while (hmp->flusher.td) {
                ++hmp->flusher.signal;
                wakeup(&hmp->flusher.signal);
                tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
        }

        /*
         * Kill the slaves
         */
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                if ((info = hmp->flusher.info[i]) != NULL) {
                        KKASSERT(info->startit == 0);
                        info->startit = -1;
                        wakeup(&info->startit);
                        while (info->td) {
                                tsleep(&info->td, 0, "hmrwwc", 0);
                        }
                        hmp->flusher.info[i] = NULL;
                        kfree(info, M_HAMMER);
                        --hmp->flusher.count;
                }
        }
        KKASSERT(hmp->flusher.count == 0);
}
/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
        hammer_mount_t hmp = arg;

        for (;;) {
                while (hmp->flusher.group_lock)
                        tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
                hmp->flusher.act = hmp->flusher.next;
                ++hmp->flusher.next;
                hammer_flusher_clean_loose_ios(hmp);
                hammer_flusher_flush(hmp);
                hmp->flusher.done = hmp->flusher.act;
                wakeup(&hmp->flusher.done);

                /*
                 * Wait for activity.
                 */
                if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_list))
                        break;

                /*
                 * This is a hack until we can dispose of frontend buffer
                 * cache buffers on the frontend.
                 */
                while (hmp->flusher.signal == 0)
                        tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
                hmp->flusher.signal = 0;
        }

        /*
         * And we are done.
         */
        hmp->flusher.td = NULL;
        wakeup(&hmp->flusher.exiting);
        lwkt_exit();
}
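
/*
 * flusher.signal acts as a doorbell: hammer_flusher_sync() and
 * hammer_flusher_async() increment it and wake the master only on the
 * 0->1 transition, and the master zeroes it after each pass, so any
 * number of requests issued while a flush is in progress collapse into
 * a single follow-up flush.
 */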
/*
 * The slave flusher thread pulls work off the master flush_list until no
 * work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
        hammer_flusher_info_t info;
        hammer_mount_t hmp;
        hammer_inode_t ip;
        int c;
        int i;
        int n;

        info = arg;
        hmp = info->hmp;

        for (;;) {
                while (info->startit == 0)
                        tsleep(&info->startit, 0, "hmrssw", 0);
                if (info->startit < 0)
                        break;
                info->startit = 0;

                /*
                 * Try to pull out roughly 64 inodes at a time to flush.
                 * The idea is to try to avoid deadlocks between the slaves.
                 */
                n = c = 0;
                while ((ip = TAILQ_FIRST(&hmp->flush_list)) != NULL) {
                        if (ip->flush_group != hmp->flusher.act)
                                break;
                        TAILQ_REMOVE(&hmp->flush_list, ip, flush_entry);
                        info->work_array[n++] = ip;
                        c += ip->rsv_recs;
                        if (n < HAMMER_FLUSH_GROUP_SIZE &&
                            c < HAMMER_FLUSH_GROUP_SIZE * 8) {
                                continue;
                        }
                        for (i = 0; i < n; ++i) {
                                hammer_flusher_flush_inode(info->work_array[i],
                                                        &hmp->flusher.trans);
                        }
                        n = c = 0;
                }
                for (i = 0; i < n; ++i) {
                        hammer_flusher_flush_inode(info->work_array[i],
                                                &hmp->flusher.trans);
                }
                if (--hmp->flusher.running == 0)
                        wakeup(&hmp->flusher.running);
        }
        info->td = NULL;
        wakeup(&info->td);
        lwkt_exit();
}
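
/*
 * Reclaim "loose" buffers (buffers without bp's) that have accumulated
 * on hmp->lose_list.  Called from the master flush loop, the per-inode
 * flush path, and finalization to keep the list from growing without
 * bound.
 */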
void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
        hammer_buffer_t buffer;
        hammer_io_t io;

        /*
         * loose ends - buffers without bp's aren't tracked by the kernel
         * and can build up, so clean them out.  This can occur when an
         * IO completes on a buffer with no references left.
         */
        if ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                crit_enter();   /* biodone() race */
                while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                        KKASSERT(io->mod_list == &hmp->lose_list);
                        TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
                        io->mod_list = NULL;
                        if (io->lock.refs == 0)
                                ++hammer_count_refedbufs;
                        hammer_ref(&io->lock);
                        buffer = (void *)io;
                        hammer_rel_buffer(buffer, 0);
                }
                crit_exit();
        }
}
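
/*
 * One flush cycle, as performed by hammer_flusher_flush() below: start a
 * flusher transaction, pre-finalize if the previous cycle nearly ran the
 * UNDO FIFO dry, dispatch the slave threads against the active flush
 * group, wait for them, run the final hammer_flusher_finalize(), and
 * then release any delayed big-block reservations tied to this group.
 */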
/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        hammer_reserve_t resv;
        int i;
        int n;

        hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

        /*
         * If the previous flush cycle just about exhausted our UNDO space
         * we may have to do a dummy cycle to move the first_offset up
         * before actually digging into a new cycle, or the new cycle will
         * not have sufficient undo space.
         */
        if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
                hammer_flusher_finalize(&hmp->flusher.trans, 0);

        /*
         * Start work threads.
         */
        i = 0;
        n = hmp->count_iqueued / HAMMER_FLUSH_GROUP_SIZE;
        if (TAILQ_FIRST(&hmp->flush_list)) {
                for (i = 0; i <= n; ++i) {
                        if (i == HAMMER_MAX_FLUSHERS ||
                            hmp->flusher.info[i] == NULL) {
                                break;
                        }
                        info = hmp->flusher.info[i];
                        if (info->startit == 0) {
                                ++hmp->flusher.running;
                                info->startit = 1;
                                wakeup(&info->startit);
                        }
                }
        }
        while (hmp->flusher.running)
                tsleep(&hmp->flusher.running, 0, "hmrfcc", 0);

        hammer_flusher_finalize(&hmp->flusher.trans, 1);
        hmp->flusher.tid = hmp->flusher.trans.tid;

        /*
         * Clean up any freed big-blocks (typically zone-2).
         * resv->flush_group is typically set several flush groups ahead
         * of the free to ensure that the freed block is not reused until
         * it is safe to do so.
         */
        while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
                if (resv->flush_group != hmp->flusher.act)
                        break;
                hammer_reserve_clrdelay(hmp, resv);
        }
        hammer_done_transaction(&hmp->flusher.trans);
}
/*
 * Flush a single inode that is part of a flush group.
 *
 * NOTE!  The sync code can return EWOULDBLOCK if the flush operation
 * would otherwise blow out the buffer cache.  hammer_flush_inode_done()
 * will re-queue the inode for the next flush sequence and force the
 * flusher to run again if this occurs.
 */
static
void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
        hammer_mount_t hmp = ip->hmp;
        int error;

        hammer_flusher_clean_loose_ios(hmp);
        hammer_lock_sh(&hmp->flusher.finalize_lock);
        error = hammer_sync_inode(ip);
        if (error != EWOULDBLOCK)
                ip->error = error;
        hammer_flush_inode_done(ip);
        hammer_unlock(&hmp->flusher.finalize_lock);
        while (hmp->flusher.finalize_want)
                tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
        if (hammer_flusher_undo_exhausted(trans, 1)) {
                kprintf("HAMMER: Warning: UNDO area too small!\n");
                hammer_flusher_finalize(trans, 1);
        } else if (hammer_flusher_meta_limit(trans->hmp)) {
                hammer_flusher_finalize(trans, 0);
        }
}
/*
 * Return non-zero if the UNDO area has less than (QUARTER / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *       will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *       running in parallel with a flush and cannot be allowed to drop
 *       available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *       to give the flush plenty of runway to work in.
 */
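/*
 * For example, hammer_flusher_flush() calls this with quarter == 3, so
 * the test below fires once free UNDO space drops below 3/4 of
 * hammer_undo_max(); hammer_flusher_flush_inode() uses quarter == 1,
 * the emergency threshold.
 */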
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
        if (hammer_undo_space(trans) <
            hammer_undo_max(trans->hmp) * quarter / 4) {
                kprintf("%c", '0' + quarter);
                return(1);
        } else {
                return(0);
        }
}
/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
 */
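/*
 * The write ordering below is what makes crash recovery work: data
 * buffers, then UNDO buffers, then (after waiting for that I/O) the
 * volume header, then another wait, and only then the meta-data
 * buffers, which are covered by the UNDOs already on disk.
 */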
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t cundomap, dundomap;
        hammer_mount_t hmp;
        hammer_io_t io;
        int count;
        int i;

        hmp = trans->hmp;
        root_volume = trans->rootvol;

        /*
         * Exclusively lock the flusher.  This guarantees that all dirty
         * buffers will be idled (have a mod-count of 0).
         */
        ++hmp->flusher.finalize_want;
        hammer_lock_ex(&hmp->flusher.finalize_lock);

        /*
         * If this isn't the final sync several threads may have hit the
         * meta-limit at the same time and raced.  Only sync if we really
         * have to, after acquiring the lock.
         */
        if (final == 0 && !hammer_flusher_meta_limit(hmp))
                goto done;
        /*
         * Flush data buffers.  This can occur asynchronously and at any
         * time.  We must interlock against the frontend direct-data write
         * but do not have to acquire the sync-lock yet.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                hammer_io_write_interlock(io);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_io_done_interlock(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * The sync-lock is required for the remaining sequence.  This lock
         * prevents meta-data from being modified.
         */
        hammer_sync_lock_ex(trans);
        /*
         * If we have been asked to finalize the volume header sync the
         * cached blockmap to the on-disk blockmap.  Generate an UNDO
         * record for the update.
         */
        if (final) {
                cundomap = &hmp->blockmap[0];
                dundomap = &root_volume->ondisk->vol0_blockmap[0];
                if (root_volume->io.modified) {
                        hammer_modify_volume(trans, root_volume,
                                             dundomap, sizeof(hmp->blockmap));
                        for (i = 0; i < HAMMER_MAX_ZONES; ++i)
                                hammer_crc_set_blockmap(&cundomap[i]);
                        bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
                        hammer_modify_volume_done(root_volume);
                }
        }
        /*
         * Flush UNDOs
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * Wait for I/Os to complete
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl1");
        /*
         * Update the on-disk volume header with new UNDO FIFO end position
         * (do not generate new UNDO records for this change).  We have to
         * do this for the UNDO FIFO whether (final) is set or not.
         *
         * Also update the on-disk next_tid field.  This does not require
         * an UNDO.  However, because our TID is generated before we get
         * the sync lock another sync may have beat us to the punch.
         *
         * This also has the side effect of updating first_offset based on
         * a prior finalization when the first finalization of the next flush
         * cycle occurs, removing any undo info from the prior finalization
         * from consideration.
         *
         * The volume header will be flushed out synchronously.
         */
        dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
        cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

        if (dundomap->first_offset != cundomap->first_offset ||
            dundomap->next_offset != cundomap->next_offset) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                dundomap->first_offset = cundomap->first_offset;
                dundomap->next_offset = cundomap->next_offset;
                hammer_crc_set_blockmap(dundomap);
                hammer_modify_volume_done(root_volume);
        }

        if (root_volume->io.modified) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                if (root_volume->ondisk->vol0_next_tid < trans->tid)
                        root_volume->ondisk->vol0_next_tid = trans->tid;
                hammer_crc_set_volume(root_volume->ondisk);
                hammer_modify_volume_done(root_volume);
                hammer_io_flush(&root_volume->io);
        }

        /*
         * Wait for I/Os to complete
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl2");
        /*
         * Flush meta-data.  The meta-data will be undone if we crash
         * so we can safely flush it asynchronously.
         *
         * Repeated catchups will wind up flushing this update's meta-data
         * and the UNDO buffers for the next update simultaneously.  This
         * is ok.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }
        /*
         * If this is the final finalization for the flush group set
         * up for the next sequence by setting a new first_offset in
         * our cached blockmap and clearing the undo history.
         *
         * Even though we have updated our cached first_offset, the on-disk
         * first_offset still governs available-undo-space calculations.
         */
        if (final) {
                cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
                cundomap->first_offset = cundomap->next_offset;
                hammer_clear_undo_history(hmp);
        }

        hammer_sync_unlock(trans);

done:
        hammer_unlock(&hmp->flusher.finalize_lock);
        if (--hmp->flusher.finalize_want == 0)
                wakeup(&hmp->flusher.finalize_want);
}
/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Since such buffers cannot be allowed to flush until the UNDOs have been
 * dealt with, letting too many accumulate risks deadlocking the kernel's
 * buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
        if (hmp->locked_dirty_space + hmp->io_running_space >
            hammer_limit_dirtybufspace) {
                return(1);
        }
        return(0);
}
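
/*
 * hammer_flusher_meta_limit() is consulted from two places in this file:
 * hammer_flusher_flush_inode() uses it to trigger a non-final
 * finalization when dirty meta-data builds up mid-flush, and
 * hammer_flusher_finalize() re-checks it after acquiring the finalize
 * lock so racing callers do not each force a sync.
 */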