HAMMER 38E/Many: Undo/Synchronization and crash recovery
sys/vfs/hammer/hammer_flusher.c

/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.5 2008/04/26 08:02:17 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_thread(void *arg);
static void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
static void hammer_flusher_flush(hammer_mount_t hmp);
static int hammer_must_finalize_undo(hammer_volume_t root_volume);
static void hammer_flusher_finalize(hammer_mount_t hmp,
			hammer_volume_t root_volume, hammer_off_t start_offset);

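/*
 * Request a flush cycle and wait for it to complete.  flusher_seq is a
 * ticket counter bumped by callers; flusher_act records the last
 * sequence the flusher thread has completed.  The signed comparison
 * below stays correct even if the counters wrap.
 */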
void
hammer_flusher_sync(hammer_mount_t hmp)
{
	int seq;

	if (hmp->flusher_td) {
		seq = ++hmp->flusher_seq;
		wakeup(&hmp->flusher_seq);
		while ((int)(seq - hmp->flusher_act) > 0)
			tsleep(&hmp->flusher_act, 0, "hmrfls", 0);
	}
}

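/*
 * Request a flush cycle without waiting for it to complete.
 */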
void
hammer_flusher_async(hammer_mount_t hmp)
{
	if (hmp->flusher_td) {
		++hmp->flusher_seq;
		wakeup(&hmp->flusher_seq);
	}
}

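/*
 * Create the flusher LWKT thread for this mount (presumably called
 * when the filesystem is mounted).
 */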
void
hammer_flusher_create(hammer_mount_t hmp)
{
	lwkt_create(hammer_flusher_thread, hmp, &hmp->flusher_td, NULL,
		    0, -1, "hammer");
}

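/*
 * Tear down the flusher: flag it to exit, wake it up, and wait for
 * the thread to clear flusher_td on its way out.
 */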
void
hammer_flusher_destroy(hammer_mount_t hmp)
{
	if (hmp->flusher_td) {
		hmp->flusher_exiting = 1;
		++hmp->flusher_seq;
		wakeup(&hmp->flusher_seq);
		while (hmp->flusher_td)
			tsleep(&hmp->flusher_exiting, 0, "hmrwex", 0);
	}
}

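/*
 * The flusher thread itself: latch the requested sequence, run a
 * flush cycle, acknowledge the sequence via flusher_act, then sleep
 * until new requests arrive (or exit if asked to).
 */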
static void
hammer_flusher_thread(void *arg)
{
	hammer_mount_t hmp = arg;
	int seq;

	for (;;) {
		seq = hmp->flusher_seq;
		hammer_flusher_clean_loose_ios(hmp);
		hammer_flusher_flush(hmp);
		hammer_flusher_clean_loose_ios(hmp);
		hmp->flusher_act = seq;
		wakeup(&hmp->flusher_act);
		if (hmp->flusher_exiting)
			break;
		while (hmp->flusher_seq == hmp->flusher_act)
			tsleep(&hmp->flusher_seq, 0, "hmrflt", 0);
	}
	hmp->flusher_td = NULL;
	wakeup(&hmp->flusher_exiting);
	lwkt_exit();
}

static void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
	hammer_buffer_t buffer;
	hammer_io_t io;

	/*
	 * loose ends - buffers without bp's aren't tracked by the kernel
	 * and can build up, so clean them out.  This can occur when an
	 * IO completes on a buffer with no references left.
	 */
	while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
		KKASSERT(io->mod_list == &hmp->lose_list);
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		hammer_ref(&io->lock);
		buffer = (void *)io;
		hammer_rel_buffer(buffer, 0);
	}
}

/*
 * Flush the inodes queued on the flush list, finalizing the UNDO FIFO
 * whenever too much dirty meta-data accumulates.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t rootmap;
	hammer_inode_t ip;
	hammer_off_t start_offset;
	int error;

	root_volume = hammer_get_root_volume(hmp, &error);
	rootmap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	start_offset = rootmap->next_offset;

	if (hammer_debug_general & 0x00010000)
		kprintf("x");

	while ((ip = TAILQ_FIRST(&hmp->flush_list)) != NULL) {
		TAILQ_REMOVE(&hmp->flush_list, ip, flush_entry);

		/*
		 * We inherit the inode ref from the flush list
		 */
		ip->error = hammer_sync_inode(ip, (ip->vp ? 0 : 1));
		hammer_flush_inode_done(ip);
		if (hmp->locked_dirty_count > 64 ||
		    hammer_must_finalize_undo(root_volume)) {
			hammer_flusher_finalize(hmp, root_volume, start_offset);
			start_offset = rootmap->next_offset;
		}
	}
	hammer_flusher_finalize(hmp, root_volume, start_offset);
	hammer_rel_volume(root_volume, 0);
}

/*
 * If the UNDO area gets over half full we have to flush it.  We can't
 * afford the UNDO area becoming completely full as that would break
 * the crash recovery atomicity.
 */
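/*
 * Worked example (sizes are hypothetical): with a 64MB UNDO area,
 * max_bytes is 64MB, so the test below forces a finalization once
 * more than 32MB of undo lies between first_offset and next_offset.
 * The else-branch handles a next_offset that has wrapped around the
 * circular FIFO below first_offset.
 */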
static
int
hammer_must_finalize_undo(hammer_volume_t root_volume)
{
	hammer_blockmap_t rootmap;
	int bytes;
	int max_bytes;

	rootmap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (rootmap->first_offset <= rootmap->next_offset) {
		bytes = (int)(rootmap->next_offset - rootmap->first_offset);
	} else {
		bytes = (int)(rootmap->alloc_offset - rootmap->first_offset +
			      rootmap->next_offset);
	}
	max_bytes = (int)(rootmap->alloc_offset & HAMMER_OFF_SHORT_MASK);
	if (bytes > max_bytes / 2)
		kprintf("*");
	return (bytes > max_bytes / 2);
}

/*
 * To finalize the flush we finish flushing all undo and data buffers
 * still present, then we update the volume header and flush it,
 * then we flush out the meta-data (that can now be undone).
 *
 * Note that as long as the undo fifo's start and end points do not
 * match, we always must at least update the volume header.
 */
static
void
hammer_flusher_finalize(hammer_mount_t hmp, hammer_volume_t root_volume,
			hammer_off_t start_offset)
{
	hammer_blockmap_t rootmap;
	hammer_io_t io;

	/*
	 * Flush undo bufs
	 */
	while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
		KKASSERT(io->modify_refs == 0);
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_rel_buffer((hammer_buffer_t)io, 1);
	}

	/*
	 * Flush data bufs
	 */
	while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
		KKASSERT(io->modify_refs == 0);
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_rel_buffer((hammer_buffer_t)io, 1);
	}

	/*
	 * Wait for I/O to complete
	 */
	crit_enter();
	while (hmp->io_running_count) {
		kprintf("WAIT1 %d\n", hmp->io_running_count);
		tsleep(&hmp->io_running_count, 0, "hmrfl1", 0);
	}
	crit_exit();

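	/*
	 * All undo and data generated by this cycle is now on stable
	 * storage.  Advancing first_offset to this cycle's starting
	 * next_offset (below) makes the active undo range cover exactly
	 * the undo generated here, so a crash during the meta-data
	 * flush can presumably be rolled back on recovery, while the
	 * prior cycle's undo is retired.
	 */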
	/*
	 * Update the volume header
	 */
	rootmap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	if (rootmap->first_offset != start_offset) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		rootmap->first_offset = start_offset;
		hammer_modify_volume_done(root_volume);
		hammer_io_flush(&root_volume->io);
	}

	/*
	 * Wait for I/O to complete
	 */
	crit_enter();
	while (hmp->io_running_count) {
		tsleep(&hmp->io_running_count, 0, "hmrfl2", 0);
	}
	crit_exit();

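	/*
	 * Only after the volume header (and thus the new undo range) is
	 * on-disk is it safe to let the modified meta-data out.
	 */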
	/*
	 * Flush meta-data
	 */
	while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
		KKASSERT(io->modify_refs == 0);
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_rel_buffer((hammer_buffer_t)io, 1);
	}
}