HAMMER 38D/Many: Undo/Synchronization and crash recovery
sys/vfs/hammer/hammer_io.c
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.27 2008/04/26 02:54:00 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to release a passively associated buf which we cannot
 * yet let go, we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 */
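/*
 * The B_LOCKED hand-back path is implemented below: hammer_io_release()
 * flushes data/undo buffers the kernel asked about, and hammer_io_complete()
 * rechecks B_LOCKED once the last reference is gone and disassociates the
 * bp via hammer_io_deallocate().
 */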
#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_mount_t hmp, enum hammer_io_type type)
{
	io->hmp = hmp;
	io->type = type;
}
void
hammer_io_reinit(hammer_io_t io, enum hammer_io_type type)
{
	hammer_mount_t hmp = io->hmp;

	if (io->modified) {
		KKASSERT(io->mod_list != NULL);
		if (io->mod_list == &hmp->volu_list ||
		    io->mod_list == &hmp->meta_list) {
			--hmp->locked_dirty_count;
		}
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
	}
	io->type = type;
	if (io->modified) {
		switch(io->type) {
		case HAMMER_STRUCTURE_VOLUME:
			io->mod_list = &hmp->volu_list;
			++hmp->locked_dirty_count;
			break;
		case HAMMER_STRUCTURE_META_BUFFER:
			io->mod_list = &hmp->meta_list;
			++hmp->locked_dirty_count;
			break;
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			io->mod_list = &hmp->undo_list;
			break;
		case HAMMER_STRUCTURE_DATA_BUFFER:
			io->mod_list = &hmp->data_list;
			break;
		}
		TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
	}
}
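/*
 * Note that only volume and meta-data buffers participate in
 * hmp->locked_dirty_count: those are the types hammer_io_release() refuses
 * to hand back to the kernel while dirty, so the count tracks how much
 * dirty data stays pinned until the backend explicitly flushes it.
 */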
/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  Called with the io structure exclusively locked.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 * HAMMER must own the buffer (released == 0) since we mess around with it.
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou, int elseit)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(iou->io.modified == 0);
	buf_dep_init(bp);
	iou->io.bp = NULL;
	bp->b_flags &= ~B_LOCKED;
	if (elseit) {
		KKASSERT(iou->io.released == 0);
		iou->io.released = 1;
		bqrelse(bp);
	} else {
		KKASSERT(iou->io.released);
	}

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	}
}
/*
 * Wait for any physical IO to complete
 */
static void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		crit_enter();
		tsleep_interlock(io);
		io->waiting = 1;
		for (;;) {
			tsleep(io, 0, "hmrflw", 0);
			if (io->running == 0)
				break;
			tsleep_interlock(io);
			io->waiting = 1;
			if (io->running == 0)
				break;
		}
		crit_exit();
	}
}
/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;
	int error;

	if ((bp = io->bp) == NULL) {
		error = bread(devvp, io->offset, HAMMER_BUFSIZE, &io->bp);
		if (error == 0) {
			bp = io->bp;
			bp->b_ops = &hammer_bioops;
			LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
			BUF_KERNPROC(bp);
		}
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}
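/*
 * Illustrative sketch (hypothetical caller, not taken from this file):
 * the caller holds the io exclusively locked, has filled in io->offset,
 * issues the read and then maps the ondisk image from the buffer data:
 *
 *	buffer->io.offset = ...;			(* device offset *)
 *	error = hammer_io_read(volume->devvp, &buffer->io);
 *	if (error == 0)
 *		buffer->ondisk = (void *)buffer->io.bp->b_data;
 *
 * Names other than io.offset, io.bp and b_data are assumptions here.
 */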
/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, HAMMER_BUFSIZE, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;
		KKASSERT(io->running == 0);
		io->waiting = 0;
		BUF_KERNPROC(bp);
	} else {
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
	}
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}
/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually locked exclusively (but may not be during unmount).
 *
 * This routine is responsible for the disposition of the buffer cache
 * buffer backing the IO.  Only pure-data and undo buffers can be handed
 * back to the kernel.  Volume and meta-data buffers must be retained
 * by HAMMER until explicitly flushed by the backend.
 */
void
hammer_io_release(struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return;

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * Meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (io->flush) {
			hammer_io_flush(io);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io);
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regards to
	 * the buffer).
	 */
	if (io->flush && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
		hammer_io_disassociate((hammer_io_structure_t)io, 1);
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel.
		 * Volume and meta-data IO types must be explicitly flushed
		 * by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 */
		io->released = 1;
		bqrelse(bp);
	}
}
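/*
 * Summary of the disposition above: an explicit flush request writes the
 * buffer and disassociates it once clean; a kernel-requested flush
 * (B_LOCKED) is honored only for data and undo buffers; dirty volume and
 * meta-data buffers are retained for the backend; clean buffers are
 * bqrelse()d but stay passively associated through the bioops dependency.
 */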
/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io)
{
	struct buf *bp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0) {
		io->flush = 0;
		return;
	}

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs == 0);

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	KKASSERT(io->mod_list != NULL);
	if (io->mod_list == &io->hmp->volu_list ||
	    io->mod_list == &io->hmp->meta_list) {
		--io->hmp->locked_dirty_count;
	}
	TAILQ_REMOVE(io->mod_list, io, mod_entry);
	io->mod_list = NULL;
	io->modified = 0;
	io->flush = 0;
	bp = io->bp;

	/*
	 * Acquire ownership (released variable set for clarity)
	 */
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		io->released = 0;
	}

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 */
	io->released = 1;
	io->running = 1;
	++io->hmp->io_running_count;
	bawrite(bp);
}
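/*
 * After bawrite() the kernel owns the bp; completion comes back through
 * hammer_io_complete() (via the bioops dependency), which clears
 * io->running and decrements hmp->io_running_count.
 */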
/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */
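/*
 * Illustrative sketch of the protocol (the modified field name is
 * hypothetical, not taken from this file):
 *
 *	hammer_modify_buffer(trans, buffer, &ondisk->some_field,
 *			     sizeof(ondisk->some_field));
 *	ondisk->some_field = new_value;		(* undo already generated *)
 *	hammer_modify_buffer_done(buffer);
 *
 * hammer_modify_buffer() dirties the io and emits an undo record covering
 * the range before the caller touches the ondisk data.
 */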
/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure data buffers
 * can be written out.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
	struct hammer_mount *hmp = io->hmp;

	/*
	 * Shortcut if nothing to do.
	 */
	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	hammer_lock_ex(&io->lock);
	if (io->modified == 0) {
		KKASSERT(io->mod_list == NULL);
		switch(io->type) {
		case HAMMER_STRUCTURE_VOLUME:
			io->mod_list = &hmp->volu_list;
			++hmp->locked_dirty_count;
			break;
		case HAMMER_STRUCTURE_META_BUFFER:
			io->mod_list = &hmp->meta_list;
			++hmp->locked_dirty_count;
			break;
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			io->mod_list = &hmp->undo_list;
			break;
		case HAMMER_STRUCTURE_DATA_BUFFER:
			io->mod_list = &hmp->data_list;
			break;
		}
		TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
		io->modified = 1;
	}
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
		KKASSERT(io->modified != 0);
	}
	hammer_unlock(&io->lock);
}
static __inline
void
hammer_io_modify_done(hammer_io_t io)
{
	KKASSERT(io->modify_refs > 0);
	--io->modify_refs;
}
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	hammer_io_modify(&volume->io, 1);

	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &volume->io,
			HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			base, len);
	}
}
/*
 * Caller intends to modify a buffer's ondisk structure.  The related
 * cluster must be marked open prior to being able to flush the modified
 * buffer so get that I/O going now.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	hammer_io_modify(&buffer->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &buffer->io,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}
void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}
/*
 * Mark an entity as not being dirty any more -- this usually occurs when
 * the governing a-list has freed the entire entity.
 *
 * XXX
 */
void
hammer_io_clear_modify(struct hammer_io *io)
{
#if 0
	struct buf *bp;

	io->modified = 0;
	XXX mod_list/entry
	if ((bp = io->bp) != NULL) {
		if (io->released) {
			regetblk(bp);
			/* BUF_KERNPROC(io->bp); */
		} else {
			io->released = 1;
		}
		if (io->modified == 0) {
			kprintf("hammer_io_clear_modify: cleared %p\n", io);
			bundirty(bp);
			bqrelse(bp);
		} else {
			bdwrite(bp);
		}
	}
#endif
}
/************************************************************************
 *				HAMMER_BIOOPS				*
 ************************************************************************/

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}
/*
 * Post-IO completion kernel callback
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT(iou->io.released == 1);

	if (iou->io.running) {
		if (--iou->io.hmp->io_running_count == 0)
			wakeup(&iou->io.hmp->io_running_count);
		KKASSERT(iou->io.hmp->io_running_count >= 0);
		iou->io.running = 0;
	}

	/*
	 * If no lock references remain and we can acquire the IO lock and
	 * someone at some point wanted us to flush (B_LOCKED test), then
	 * try to dispose of the IO.
	 */
	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * Someone wanted us to flush, try to clean out the buffer.
	 */
	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
		KKASSERT(iou->io.modified == 0);
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
}
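/*
 * The wakeup on hmp->io_running_count lets other code (the flusher, for
 * example) sleep until every write HAMMER has initiated drains; io->waiting
 * serves the same purpose per-buffer for hammer_io_wait().
 */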
/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (iou->io.lock.refs > 0 || iou->io.modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.
		 */
		bp->b_flags |= B_LOCKED;
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list.
		 */
		hammer_io_disassociate(iou, 0);
		if (iou->io.bp == NULL &&
		    iou->io.type != HAMMER_STRUCTURE_VOLUME) {
			KKASSERT(iou->io.mod_list == NULL);
			iou->io.mod_list = &iou->io.hmp->lose_list;
			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
		}
	}
}
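/*
 * Buffers placed on hmp->lose_list have lost their bp while holding no
 * references; they are queued there so that other HAMMER code can later
 * locate and dispose of the orphaned structures.
 */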
static int
hammer_io_fsync(struct vnode *vp)
{
	return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
	return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}
/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
	return(0);
}
static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

	/*
	 * We can only clear the modified bit if the IO is not currently
	 * undergoing modification.  Otherwise we may miss changes.
	 */
	if (io->modify_refs == 0 && io->modified) {
		KKASSERT(io->mod_list != NULL);
		if (io->mod_list == &io->hmp->volu_list ||
		    io->mod_list == &io->hmp->meta_list) {
			--io->hmp->locked_dirty_count;
		}
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		io->modified = 0;
	}

	/*
	 * The kernel is going to start the IO, set io->running.
	 */
	KKASSERT(io->running == 0);
	io->running = 1;
	++io->hmp->io_running_count;
	return(0);
}
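/*
 * Returning 0 here commits the kernel to the write, so io->running and
 * hmp->io_running_count are bumped now and undone in hammer_io_complete()
 * when the I/O finishes.
 */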
/*
 * Return non-zero if the caller should flush the structure associated
 * with this io sub-structure.
 */
int
hammer_io_checkflush(struct hammer_io *io)
{
	if (io->bp == NULL || (io->bp->b_flags & B_LOCKED)) {
		return(1);
	}
	return(0);
}
/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	return(0);
}
struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};
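/*
 * This ops table is attached to each backing buffer via
 * "bp->b_ops = &hammer_bioops" in hammer_io_read() and hammer_io_new(),
 * which is what routes the kernel's buffer-cache decisions for those
 * buffers back through the callbacks above.
 */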