HAMMER 45/Many: Stabilization pass, undo sequencing.
[dragonfly.git] / sys / vfs / hammer / hammer_io.c
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.31 2008/05/15 03:36:40 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to release a passively associated buf which we
 * cannot yet let go of, we set B_LOCKED in the buffer and then actively
 * release it later when we can.
 */
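
/*
 * Illustrative life cycle of a passively associated buffer, pieced
 * together from the routines below (a sketch, not a normative spec):
 *
 *	hammer_io_read()	bp acquired and owned, io->released = 0
 *	hammer_io_release()	clean bp bqrelse()'d, io->released = 1,
 *				bp stays linked to the io via b_dep
 *	io_deallocate callback	kernel wants the bp back; if refs or the
 *				modified bit prevent it, B_LOCKED is set
 *	hammer_io_complete()	sees B_LOCKED with no refs remaining and
 *				disposes of the buffer
 */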

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);

/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_mount_t hmp, enum hammer_io_type type)
{
	io->hmp = hmp;
	io->type = type;
}

void
hammer_io_reinit(hammer_io_t io, enum hammer_io_type type)
{
	hammer_mount_t hmp = io->hmp;

	if (io->modified) {
		KKASSERT(io->mod_list != NULL);
		if (io->mod_list == &hmp->volu_list ||
		    io->mod_list == &hmp->meta_list) {
			--hmp->locked_dirty_count;
			--hammer_count_dirtybufs;
		}
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
	}
	io->type = type;
	if (io->modified) {
		switch(io->type) {
		case HAMMER_STRUCTURE_VOLUME:
			io->mod_list = &hmp->volu_list;
			++hmp->locked_dirty_count;
			++hammer_count_dirtybufs;
			break;
		case HAMMER_STRUCTURE_META_BUFFER:
			io->mod_list = &hmp->meta_list;
			++hmp->locked_dirty_count;
			++hammer_count_dirtybufs;
			break;
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			io->mod_list = &hmp->undo_list;
			break;
		case HAMMER_STRUCTURE_DATA_BUFFER:
			io->mod_list = &hmp->data_list;
			break;
		}
		TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
	}
}

/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  Called with the io structure exclusively locked.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 * HAMMER must own the buffer (released == 0) since we mess around with it.
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou, int elseit)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(iou->io.modified == 0);
	buf_dep_init(bp);
	iou->io.bp = NULL;
	bp->b_flags &= ~B_LOCKED;
	if (elseit) {
		KKASSERT(iou->io.released == 0);
		iou->io.released = 1;
		bqrelse(bp);
	} else {
		KKASSERT(iou->io.released);
	}

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	}
}

/*
 * Wait for any physical IO to complete
 */
static void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		crit_enter();
		tsleep_interlock(io);
		io->waiting = 1;
		for (;;) {
			tsleep(io, 0, "hmrflw", 0);
			if (io->running == 0)
				break;
			tsleep_interlock(io);
			io->waiting = 1;
			if (io->running == 0)
				break;
		}
		crit_exit();
	}
}
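
/*
 * The loop above follows the usual tsleep_interlock() pattern: the
 * interlock is re-armed and io->running re-tested before every sleep,
 * so a wakeup issued between the test and the tsleep() cannot be lost.
 */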

/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;
	int error;

	if ((bp = io->bp) == NULL) {
		error = bread(devvp, io->offset, HAMMER_BUFSIZE, &io->bp);
		if (error == 0) {
			bp = io->bp;
			bp->b_ops = &hammer_bioops;
			LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
			BUF_KERNPROC(bp);
		}
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}
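
/*
 * Minimal caller sketch (hypothetical; the real callers live in
 * hammer_ondisk.c):
 *
 *	buffer->io.offset = ...;
 *	error = hammer_io_read(volume->devvp, &buffer->io);
 *	if (error == 0)
 *		buffer->ondisk = (void *)buffer->io.bp->b_data;
 */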

/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, HAMMER_BUFSIZE, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;
		KKASSERT(io->running == 0);
		io->waiting = 0;
		BUF_KERNPROC(bp);
	} else {
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
	}
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}
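
/*
 * hammer_io_new() is the appropriate call when the prior disk contents
 * are irrelevant (e.g. when formatting a fresh buffer), since it avoids
 * the device read that hammer_io_read() would issue and hands back
 * zero'd space already marked modified.
 */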

/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually locked exclusively (but may not be during unmount).
 *
 * This routine is responsible for the disposition of the buffer cache
 * buffer backing the IO.  Only pure-data and undo buffers can be handed
 * back to the kernel.  Volume and meta-data buffers must be retained
 * by HAMMER until explicitly flushed by the backend.
 */
void
hammer_io_release(struct hammer_io *io, int flush)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return;

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * Meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (flush) {
			hammer_io_flush(io);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io);
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regard to
	 * the buffer).
	 */
	if (flush && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
		hammer_io_disassociate((hammer_io_structure_t)io, 1);
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel.
		 * Volume and meta-data IO types must be explicitly flushed
		 * by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 */
		io->released = 1;
		bqrelse(bp);
	}
}
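
/*
 * Informal disposition summary for hammer_io_release():
 *
 *	modified, flush requested	hammer_io_flush()
 *	modified, B_LOCKED		flushed only for DATA/UNDO types
 *	clean, flush requested		bp fully disassociated
 *	modified, no flush		DATA/UNDO bdwrite()n, volume and
 *					meta-data buffers retained
 *	clean, no flush			bqrelse()'d, left passively
 *					associated
 */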

/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io)
{
	struct buf *bp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0) {
		return;
	}

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs == 0);

	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set; we actually own the bp rather than our
	 * buffer.
	 */
	bp = io->bp;
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		/* io->released = 0; */
		KKASSERT(io->released);
		KKASSERT(io->bp == bp);
	}
	io->released = 1;

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	KKASSERT(io->mod_list != NULL);
	if (io->mod_list == &io->hmp->volu_list ||
	    io->mod_list == &io->hmp->meta_list) {
		--io->hmp->locked_dirty_count;
		--hammer_count_dirtybufs;
	}
	TAILQ_REMOVE(io->mod_list, io, mod_entry);
	io->mod_list = NULL;
	io->modified = 0;

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 */
	io->running = 1;
	++io->hmp->io_running_count;
	bawrite(bp);
}

/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */
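
/*
 * Canonical usage sketch (illustrative only; the field name is made up):
 *
 *	hammer_modify_buffer(trans, buffer, &ondisk->field,
 *			     sizeof(ondisk->field));
 *	ondisk->field = new_value;		<< covered by the UNDO FIFO
 *	hammer_modify_buffer_done(buffer);
 */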

/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure data buffers
 * can be written out.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
	struct hammer_mount *hmp = io->hmp;

	/*
	 * Shortcut if nothing to do.
	 */
	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	hammer_lock_ex(&io->lock);
	if (io->modified == 0) {
		KKASSERT(io->mod_list == NULL);
		switch(io->type) {
		case HAMMER_STRUCTURE_VOLUME:
			io->mod_list = &hmp->volu_list;
			++hmp->locked_dirty_count;
			++hammer_count_dirtybufs;
			break;
		case HAMMER_STRUCTURE_META_BUFFER:
			io->mod_list = &hmp->meta_list;
			++hmp->locked_dirty_count;
			++hammer_count_dirtybufs;
			break;
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			io->mod_list = &hmp->undo_list;
			break;
		case HAMMER_STRUCTURE_DATA_BUFFER:
			io->mod_list = &hmp->data_list;
			break;
		}
		TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
		io->modified = 1;
	}
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
		KKASSERT(io->modified != 0);
	}
	hammer_unlock(&io->lock);
}

static __inline
void
hammer_io_modify_done(hammer_io_t io)
{
	KKASSERT(io->modify_refs > 0);
	--io->modify_refs;
}

void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	hammer_io_modify(&volume->io, 1);

	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &volume->io,
			HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			base, len);
	}
}

/*
 * Caller intends to modify a buffer's ondisk structure.  The related
 * cluster must be marked open prior to being able to flush the modified
 * buffer so get that I/O going now.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	hammer_io_modify(&buffer->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &buffer->io,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}
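
/*
 * Worked example of the undo offset arithmetic (illustrative numbers):
 * with HAMMER_BUFSIZE 16384, a base pointing 0x180 bytes into the ondisk
 * image gives rel_offset 0x180, passes the BUFMASK assertion, and yields
 * an undo offset of buffer->zone2_offset + 0x180.
 */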

void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}

/*
 * Mark an entity as not being dirty any more -- this usually occurs when
 * the governing a-list has freed the entire entity.
 *
 * XXX
 */
void
hammer_io_clear_modify(struct hammer_io *io)
{
#if 0
	struct buf *bp;

	io->modified = 0;
	XXX mod_list/entry
	if ((bp = io->bp) != NULL) {
		if (io->released) {
			regetblk(bp);
			/* BUF_KERNPROC(io->bp); */
		} else {
			io->released = 1;
		}
		if (io->modified == 0) {
			hkprintf("hammer_io_clear_modify: cleared %p\n", io);
			bundirty(bp);
			bqrelse(bp);
		} else {
			bdwrite(bp);
		}
	}
#endif
}

/************************************************************************
 *				HAMMER_BIOOPS				*
 ************************************************************************
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}

/*
 * Post-IO completion kernel callback
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT(iou->io.released == 1);

	if (iou->io.running) {
		if (--iou->io.hmp->io_running_count == 0)
			wakeup(&iou->io.hmp->io_running_count);
		KKASSERT(iou->io.hmp->io_running_count >= 0);
		iou->io.running = 0;
	}

	/*
	 * If no lock references remain and we can acquire the IO lock and
	 * someone at some point wanted us to flush (B_LOCKED test), then
	 * try to dispose of the IO.
	 */
	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * Someone wanted us to flush, try to clean out the buffer.
	 */
	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
		KKASSERT(iou->io.modified == 0);
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
}

/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (iou->io.lock.refs > 0 || iou->io.modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.
		 */
		bp->b_flags |= B_LOCKED;
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list.
		 */
		hammer_io_disassociate(iou, 0);
		if (iou->io.bp == NULL &&
		    iou->io.type != HAMMER_STRUCTURE_VOLUME) {
			KKASSERT(iou->io.mod_list == NULL);
			iou->io.mod_list = &iou->io.hmp->lose_list;
			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io,
					  mod_entry);
		}
	}
}

static int
hammer_io_fsync(struct vnode *vp)
{
	return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
	return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}

/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
	return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

	/*
	 * This shouldn't happen under normal operation.
	 */
	if (io->type == HAMMER_STRUCTURE_VOLUME ||
	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
		if (!panicstr)
			panic("hammer_io_checkwrite: illegal buffer");
		hkprintf("x");
		bp->b_flags |= B_LOCKED;
		return(1);
	}

	/*
	 * We can only clear the modified bit if the IO is not currently
	 * undergoing modification.  Otherwise we may miss changes.
	 */
	if (io->modify_refs == 0 && io->modified) {
		KKASSERT(io->mod_list != NULL);
		if (io->mod_list == &io->hmp->volu_list ||
		    io->mod_list == &io->hmp->meta_list) {
			--io->hmp->locked_dirty_count;
			--hammer_count_dirtybufs;
		}
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		io->modified = 0;
	}

	/*
	 * The kernel is going to start the IO, set io->running.
	 */
	KKASSERT(io->running == 0);
	io->running = 1;
	++io->hmp->io_running_count;
	return(0);
}

/*
 * Return non-zero if the caller should flush the structure associated
 * with this io sub-structure.
 */
int
hammer_io_checkflush(struct hammer_io *io)
{
	if (io->bp == NULL || (io->bp->b_flags & B_LOCKED)) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	return(0);
}

struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};
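
/*
 * The ops above only take effect for buffers that opt in, which
 * hammer_io_read() and hammer_io_new() do along these lines:
 *
 *	bp->b_ops = &hammer_bioops;
 *	LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
 */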