/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.10 2008/05/03 05:28:55 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"
static void hammer_flusher_thread(void *arg);
static void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
static void hammer_flusher_flush(hammer_mount_t hmp);
static int  hammer_must_finalize_undo(hammer_mount_t hmp);
static void hammer_flusher_finalize(hammer_mount_t hmp,
                        hammer_volume_t root_volume, hammer_off_t start_offset);
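
/*
 * Synchronously flush: pick up the next flush sequence number, signal the
 * flusher thread, and wait until it reports that sequence as done.
 */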
void
hammer_flusher_sync(hammer_mount_t hmp)
{
        int seq;

        if (hmp->flusher_td) {
                seq = hmp->flusher_next;
                if (hmp->flusher_signal == 0) {
                        hmp->flusher_signal = 1;
                        wakeup(&hmp->flusher_signal);
                }
                while ((int)(seq - hmp->flusher_done) > 0)
                        tsleep(&hmp->flusher_done, 0, "hmrfls", 0);
        }
}
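
/*
 * Signal the flusher to start a new flush, but do not wait for it to
 * complete.
 */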
void
hammer_flusher_async(hammer_mount_t hmp)
{
        if (hmp->flusher_td) {
                if (hmp->flusher_signal == 0) {
                        hmp->flusher_signal = 1;
                        wakeup(&hmp->flusher_signal);
                }
        }
}
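
/*
 * Initialize the flusher sequence state and spawn the flusher kernel
 * thread for this mount.
 */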
void
hammer_flusher_create(hammer_mount_t hmp)
{
        hmp->flusher_signal = 0;
        hmp->flusher_done = 0;
        hmp->flusher_next = 1;
        lwkt_create(hammer_flusher_thread, hmp, &hmp->flusher_td, NULL,
                    0, -1, "hammer");   /* tdflags, cpu, and name assumed */
}
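
/*
 * Tell the flusher thread to exit and wait for it to tear itself down;
 * the thread clears hmp->flusher_td on its way out.
 */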
void
hammer_flusher_destroy(hammer_mount_t hmp)
{
        if (hmp->flusher_td) {
                hmp->flusher_exiting = 1;
                while (hmp->flusher_td) {
                        hmp->flusher_signal = 1;
                        wakeup(&hmp->flusher_signal);
                        tsleep(&hmp->flusher_exiting, 0, "hmrwex", 0);
                }
        }
}
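
/*
 * Flusher thread main loop: latch the next sequence number into
 * flusher_act, clean up loose I/Os, flush the current flush group,
 * report the sequence as done, then sleep until signaled again.
 */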
static void
hammer_flusher_thread(void *arg)
{
        hammer_mount_t hmp = arg;

        for (;;) {
                while (hmp->flusher_lock)
                        tsleep(&hmp->flusher_lock, 0, "hmrhld", 0);
                hmp->flusher_act = hmp->flusher_next;
                ++hmp->flusher_next;
                hammer_flusher_clean_loose_ios(hmp);
                hammer_flusher_flush(hmp);
                hammer_flusher_clean_loose_ios(hmp);
                hmp->flusher_done = hmp->flusher_act;

                wakeup(&hmp->flusher_done);
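
                /*
                 * Wait for activity.  Exit once an exit has been
                 * requested and nothing is left to flush.
                 */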
                if (hmp->flusher_exiting && TAILQ_EMPTY(&hmp->flush_list))
                        break;

                while (hmp->flusher_signal == 0 &&
                       TAILQ_EMPTY(&hmp->flush_list)) {
                        tsleep(&hmp->flusher_signal, 0, "hmrwwa", 0);
                }
                hmp->flusher_signal = 0;
        }
        hmp->flusher_td = NULL;
        wakeup(&hmp->flusher_exiting);
        lwkt_exit();
}
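
/*
 * Release any buffers sitting on the loose list.
 */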
static void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
        hammer_buffer_t buffer;
        hammer_io_t io;

        /*
         * loose ends - buffers without bp's aren't tracked by the kernel
         * and can build up, so clean them out.  This can occur when an
         * I/O completes on a buffer with no references left.
         */
        while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                KKASSERT(io->mod_list == &hmp->lose_list);
                TAILQ_REMOVE(io->mod_list, io, mod_entry);
                io->mod_list = NULL;
                hammer_ref(&io->lock);
                buffer = (void *)io;
                hammer_rel_buffer(buffer, 0);
        }
}
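
/*
 * Flush the current flush group: sync each inode queued to it, finalizing
 * early if the UNDO FIFO threatens to fill up, then finalize the whole
 * group at the end.
 */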
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t rootmap;
        hammer_inode_t ip;
        hammer_off_t start_offset;
        int error;

        root_volume = hammer_get_root_volume(hmp, &error);
        rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
        start_offset = rootmap->next_offset;

        while ((ip = TAILQ_FIRST(&hmp->flush_list)) != NULL) {
                /*
                 * Stop when we hit a different flush group.
                 */
                if (ip->flush_group != hmp->flusher_act)
                        break;

                /*
                 * Remove the inode from the flush list and inherit
                 * its reference, sync, and clean-up.
                 */
                TAILQ_REMOVE(&hmp->flush_list, ip, flush_entry);
                ip->error = hammer_sync_inode(ip);
                hammer_flush_inode_done(ip);

                /*
                 * XXX this breaks atomicity
                 */
                if (hammer_must_finalize_undo(hmp)) {
                        Debugger("Too many undos!!");
                        hammer_flusher_finalize(hmp, root_volume, start_offset);
                        start_offset = rootmap->next_offset;
                }
        }
        hammer_flusher_finalize(hmp, root_volume, start_offset);
        hammer_rel_volume(root_volume, 0);
}
/*
 * If the UNDO area gets over half full we have to flush it.  We can't
 * afford the UNDO area becoming completely full as that would break
 * the crash recovery atomicity.
 */
static int
hammer_must_finalize_undo(hammer_mount_t hmp)
{
        if (hammer_undo_space(hmp) < hammer_undo_max(hmp) / 2) {
                return(1);
        } else {
                return(0);
        }
}
/*
 * To finalize the flush we finish flushing all undo and data buffers
 * still present, then we update the volume header and flush it,
 * then we flush out the meta-data (that can now be undone).
 *
 * Note that as long as the undo fifo's start and end points do not
 * match, we always must at least update the volume header.
 *
 * The sync_lock is used by other threads to issue modifying operations
 * to HAMMER media without crossing a synchronization boundary or messing
 * up the media synchronization operation.  Specifically, the pruning
 * and reblocking ioctls, and allowing the frontend strategy code to
 * allocate media data space.
 */
static void
hammer_flusher_finalize(hammer_mount_t hmp, hammer_volume_t root_volume,
                        hammer_off_t start_offset)
{
        hammer_blockmap_t rootmap;
        hammer_io_t io;

        hammer_lock_ex(&hmp->sync_lock);
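
        /*
         * Flush any undo buffers still pending.
         */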
        while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 1);
        }
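
        /*
         * Flush any data buffers still pending.
         */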
        while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 1);
        }

        /*
         * Wait for I/O to complete.
         */
        while (hmp->io_running_count) {
                kprintf("W[%d]", hmp->io_running_count);
                tsleep(&hmp->io_running_count, 0, "hmrfl1", 0);
        }

        /*
         * Update the volume header.
         */
        rootmap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
        if (rootmap->first_offset != start_offset) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                rootmap->first_offset = start_offset;
                hammer_modify_volume_done(root_volume);
        }
        if (root_volume->ondisk->vol0_next_tid != hmp->next_tid) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                root_volume->ondisk->vol0_next_tid = hmp->next_tid;
                hammer_modify_volume_done(root_volume);
        }

        /*
         * Sync our cached blockmap array with the one in the root
         * volume header.
         */
        if (root_volume->io.modified) {
                bcopy(hmp->blockmap, root_volume->ondisk->vol0_blockmap,
                      sizeof(hmp->blockmap));
                hammer_io_flush(&root_volume->io);
        }

        /*
         * Wait for I/O to complete.
         */
        while (hmp->io_running_count) {
                tsleep(&hmp->io_running_count, 0, "hmrfl2", 0);
        }
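
        /*
         * Flush out the meta-data, which can now be undone if we crash.
         */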
        while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 1);
        }

        hammer_unlock(&hmp->sync_lock);
}