/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.27 2008/04/26 02:54:00 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to release a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 */
#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>
static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
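
/*
 * Illustrative sketch (compiled out): the typical life cycle of a hammer_io
 * as seen by a caller -- load the backing buffer, bracket the ondisk update
 * with hammer_modify_buffer()/hammer_modify_buffer_done(), and let the
 * flusher (or hammer_io_flush()) push the dirty buffer out.  The helper
 * name and the way (base, len) are obtained here are illustrative
 * assumptions, not part of this file's API.
 */
#if 0
static void
hammer_io_lifecycle_sketch(hammer_transaction_t trans, hammer_buffer_t buffer,
                           struct vnode *devvp, void *base, int len)
{
        int error;

        /* bring in the backing filesystem buffer and hook up hammer_bioops */
        error = hammer_io_read(devvp, &buffer->io);
        if (error)
                return;

        /*
         * Declare the intent to modify [base, base+len) before touching it.
         * This queues the io on the proper mod_list and generates undo.
         */
        hammer_modify_buffer(trans, buffer, base, len);
        /* ... caller updates the ondisk bytes at base here ... */
        hammer_modify_buffer_done(buffer);

        /* normally the backend flusher writes the dirty buffer back */
        hammer_io_flush(&buffer->io);
}
#endif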
/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_mount_t hmp, enum hammer_io_type type)
{
        /* structure is already zero'd, only the identity needs to be set */
        io->hmp = hmp;
        io->type = type;
}

void
hammer_io_reinit(hammer_io_t io, enum hammer_io_type type)
{
        hammer_mount_t hmp = io->hmp;

        if (io->modified) {
                KKASSERT(io->mod_list != NULL);
                if (io->mod_list == &hmp->volu_list ||
                    io->mod_list == &hmp->meta_list) {
                        --hmp->locked_dirty_count;
                }
                TAILQ_REMOVE(io->mod_list, io, mod_entry);
                io->mod_list = NULL;
        }
        io->type = type;
        if (io->modified) {
                switch(io->type) {
                case HAMMER_STRUCTURE_VOLUME:
                        io->mod_list = &hmp->volu_list;
                        ++hmp->locked_dirty_count;
                        break;
                case HAMMER_STRUCTURE_META_BUFFER:
                        io->mod_list = &hmp->meta_list;
                        ++hmp->locked_dirty_count;
                        break;
                case HAMMER_STRUCTURE_UNDO_BUFFER:
                        io->mod_list = &hmp->undo_list;
                        break;
                case HAMMER_STRUCTURE_DATA_BUFFER:
                        io->mod_list = &hmp->data_list;
                        break;
                }
                TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
        }
}
/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  Called with the io structure exclusively locked.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 * HAMMER must own the buffer (released == 0) since we mess around with it.
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou, int elseit)
{
        struct buf *bp = iou->io.bp;

        KKASSERT(iou->io.modified == 0);
        buf_dep_init(bp);
        iou->io.bp = NULL;
        bp->b_flags &= ~B_LOCKED;
        if (elseit) {
                KKASSERT(iou->io.released == 0);
                iou->io.released = 1;
                bqrelse(bp);
        } else {
                KKASSERT(iou->io.released);
        }

        switch(iou->io.type) {
        case HAMMER_STRUCTURE_VOLUME:
                iou->volume.ondisk = NULL;
                break;
        case HAMMER_STRUCTURE_DATA_BUFFER:
        case HAMMER_STRUCTURE_META_BUFFER:
        case HAMMER_STRUCTURE_UNDO_BUFFER:
                iou->buffer.ondisk = NULL;
                break;
        }
}
/*
 * Wait for any physical IO to complete
 */
void
hammer_io_wait(hammer_io_t io)
{
        if (io->running) {
                crit_enter();
                tsleep_interlock(io);
                io->waiting = 1;
                for (;;) {
                        tsleep(io, 0, "hmrflw", 0);
                        if (io->running == 0)
                                break;
                        tsleep_interlock(io);
                        io->waiting = 1;
                        if (io->running == 0)
                                break;
                }
                crit_exit();
        }
}
/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io)
{
        struct buf *bp;
        int error;

        if ((bp = io->bp) == NULL) {
                error = bread(devvp, io->offset, HAMMER_BUFSIZE, &io->bp);
                if (error == 0) {
                        bp = io->bp;
                        bp->b_ops = &hammer_bioops;
                        LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
                }
                KKASSERT(io->modified == 0);
                KKASSERT(io->running == 0);
                KKASSERT(io->waiting == 0);
                io->released = 0;       /* we hold an active lock on bp */
        } else {
                error = 0;
        }
        return(error);
}
/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
        struct buf *bp;

        if ((bp = io->bp) == NULL) {
                io->bp = getblk(devvp, io->offset, HAMMER_BUFSIZE, 0, 0);
                bp = io->bp;
                bp->b_ops = &hammer_bioops;
                LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
                io->released = 0;       /* getblk gives us ownership of bp */
                KKASSERT(io->running == 0);
                io->waiting = 0;
                BUF_KERNPROC(bp);
        } else {
                if (io->released) {
                        regetblk(bp);
                        BUF_KERNPROC(bp);
                        io->released = 0;
                }
        }
        hammer_io_modify(io, 0);
        vfs_bio_clrbuf(bp);
        return(0);
}
/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually locked exclusively (but may not be during unmount).
 *
 * This routine is responsible for the disposition of the buffer cache
 * buffer backing the IO.  Only pure-data and undo buffers can be handed
 * back to the kernel.  Volume and meta-data buffers must be retained
 * by HAMMER until explicitly flushed by the backend.
 */
void
hammer_io_release(struct hammer_io *io)
{
        struct buf *bp;

        if ((bp = io->bp) == NULL)
                return;

        /*
         * Try to flush a dirty IO to disk if asked to by the
         * caller or if the kernel tried to flush the buffer in the past.
         *
         * Kernel-initiated flushes are only allowed for pure-data buffers.
         * meta-data and volume buffers can only be flushed explicitly
         * by HAMMER.
         */
        if (io->modified) {
                if (io->flush) {
                        hammer_io_flush(io);
                } else if (bp->b_flags & B_LOCKED) {
                        switch(io->type) {
                        case HAMMER_STRUCTURE_DATA_BUFFER:
                        case HAMMER_STRUCTURE_UNDO_BUFFER:
                                hammer_io_flush(io);
                                break;
                        default:
                                break;
                        }
                } /* else no explicit request to flush the buffer */
        }

        /*
         * Wait for the IO to complete if asked to.
         */
        if (io->waitdep && io->running) {
                hammer_io_wait(io);
        }

        /*
         * Return control of the buffer to the kernel (with the proviso
         * that our bioops can override kernel decisions with regards to
         * the buffer).
         */
        if (io->flush && io->modified == 0 && io->running == 0) {
                /*
                 * Always disassociate the bp if an explicit flush
                 * was requested and the IO completed with no error
                 * (so unmount can really clean up the structure).
                 */
                if (io->released) {
                        regetblk(bp);
                        BUF_KERNPROC(bp);
                        io->released = 0;
                }
                hammer_io_disassociate((hammer_io_structure_t)io, 1);
        } else if (io->modified) {
                /*
                 * Only certain IO types can be released to the kernel.
                 * volume and meta-data IO types must be explicitly flushed
                 * by HAMMER.
                 */
                switch(io->type) {
                case HAMMER_STRUCTURE_DATA_BUFFER:
                case HAMMER_STRUCTURE_UNDO_BUFFER:
                        if (io->released == 0) {
                                io->released = 1;
                                bdwrite(bp);
                        }
                        break;
                default:
                        break;
                }
        } else if (io->released == 0) {
                /*
                 * Clean buffers can be generally released to the kernel.
                 * We leave the bp passively associated with the HAMMER
                 * structure and use bioops to disconnect it later on
                 * if the kernel wants to discard the buffer.
                 */
                io->released = 1;
                bqrelse(bp);
        }
}
/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io)
{
        struct buf *bp;

        /*
         * Degenerate case - nothing to flush if nothing is dirty.
         */
        if (io->modified == 0) {
                io->flush = 0;
                return;
        }

        KKASSERT(io->bp);
        KKASSERT(io->modify_refs == 0);
        bp = io->bp;

        /*
         * Acquire exclusive access to the bp and then clear the modified
         * state of the buffer prior to issuing I/O to interlock any
         * modifications made while the I/O is in progress.  This shouldn't
         * happen anyway but losing data would be worse.  The modified bit
         * will be rechecked after the IO completes.
         *
         * This is only legal when lock.refs == 1 (otherwise we might clear
         * the modified bit while there are still users of the cluster
         * modifying the data).
         *
         * Do this before potentially blocking so any attempt to modify the
         * ondisk while we are blocked blocks waiting for us.
         */
        KKASSERT(io->mod_list != NULL);
        if (io->mod_list == &io->hmp->volu_list ||
            io->mod_list == &io->hmp->meta_list) {
                --io->hmp->locked_dirty_count;
        }
        TAILQ_REMOVE(io->mod_list, io, mod_entry);
        io->mod_list = NULL;
        io->modified = 0;

        /*
         * Acquire ownership (released variable set for clarity)
         */
        if (io->released) {
                regetblk(bp);
                /* BUF_KERNPROC(io->bp); */
                io->released = 0;
        }

        /*
         * Transfer ownership to the kernel and initiate I/O.
         */
        io->released = 1;
        io->running = 1;
        ++io->hmp->io_running_count;
        bawrite(bp);
}
/************************************************************************
 *                            IO Modifications                          *
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */
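
/*
 * Calling convention sketch (compiled out): hammer_modify_volume() and
 * hammer_modify_buffer() must bracket every ondisk update together with
 * their *_done() counterparts.  The field and value updated below are a
 * made-up example; only the bracketing pattern is meaningful.
 */
#if 0
static void
hammer_modify_usage_sketch(hammer_transaction_t trans, hammer_volume_t volume)
{
        hammer_modify_volume(trans, volume, &volume->ondisk->vol_flags,
                             sizeof(volume->ondisk->vol_flags));
        volume->ondisk->vol_flags |= 1;         /* illustrative update */
        hammer_modify_volume_done(volume);
}
#endif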
/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure data buffers
 * can be written out.
 */
static void
hammer_io_modify(hammer_io_t io, int count)
{
        struct hammer_mount *hmp = io->hmp;

        /*
         * Shortcut if nothing to do.
         */
        KKASSERT(io->lock.refs != 0 && io->bp != NULL);
        io->modify_refs += count;
        if (io->modified && io->released == 0)
                return;

        hammer_lock_ex(&io->lock);
        if (io->modified == 0) {
                KKASSERT(io->mod_list == NULL);
                switch(io->type) {
                case HAMMER_STRUCTURE_VOLUME:
                        io->mod_list = &hmp->volu_list;
                        ++hmp->locked_dirty_count;
                        break;
                case HAMMER_STRUCTURE_META_BUFFER:
                        io->mod_list = &hmp->meta_list;
                        ++hmp->locked_dirty_count;
                        break;
                case HAMMER_STRUCTURE_UNDO_BUFFER:
                        io->mod_list = &hmp->undo_list;
                        break;
                case HAMMER_STRUCTURE_DATA_BUFFER:
                        io->mod_list = &hmp->data_list;
                        break;
                }
                TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
                io->modified = 1;
        }
        if (io->released) {
                regetblk(io->bp);
                BUF_KERNPROC(io->bp);
                io->released = 0;
                KKASSERT(io->modified != 0);
        }
        hammer_unlock(&io->lock);
}
void
hammer_io_modify_done(hammer_io_t io)
{
        KKASSERT(io->modify_refs > 0);
        --io->modify_refs;
}
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
                     void *base, int len)
{
        hammer_io_modify(&volume->io, 1);
        if (len) {
                intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
                KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
                hammer_generate_undo(trans, &volume->io,
                        HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
                        base, len);
        }
}
/*
 * Caller intends to modify a buffer's ondisk structure.  The related
 * cluster must be marked open prior to being able to flush the modified
 * buffer so get that I/O going now.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
                     void *base, int len)
{
        hammer_io_modify(&buffer->io, 1);
        if (len) {
                intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
                KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
                hammer_generate_undo(trans, &buffer->io,
                                     buffer->zone2_offset + rel_offset,
                                     base, len);
        }
}
void
hammer_modify_volume_done(hammer_volume_t volume)
{
        hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
        hammer_io_modify_done(&buffer->io);
}
/*
 * Mark an entity as not being dirty any more -- this usually occurs when
 * the governing a-list has freed the entire entity.
 */
void
hammer_io_clear_modify(struct hammer_io *io)
{
#if 0
        struct buf *bp;

        io->modified = 0;
        if ((bp = io->bp) != NULL) {
                if (io->released) {
                        regetblk(bp);
                        /* BUF_KERNPROC(io->bp); */
                } else {
                        io->released = 1;
                }
                if (io->modified == 0) {
                        kprintf("hammer_io_clear_modify: cleared %p\n", io);
                        bundirty(bp);
                        bqrelse(bp);
                } else {
                        bdwrite(bp);
                }
        }
#endif
}
/************************************************************************
 *                              HAMMER_BIOOPS                           *
 ************************************************************************/

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}
/*
 * Post-IO completion kernel callback
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
        union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

        KKASSERT(iou->io.released == 1);

        if (iou->io.running) {
                if (--iou->io.hmp->io_running_count == 0)
                        wakeup(&iou->io.hmp->io_running_count);
                KKASSERT(iou->io.hmp->io_running_count >= 0);
                iou->io.running = 0;
        }

        /*
         * If no lock references remain and we can acquire the IO lock and
         * someone at some point wanted us to flush (B_LOCKED test), then
         * try to dispose of the IO.
         */
        if (iou->io.waiting) {
                iou->io.waiting = 0;
                wakeup(iou);
        }

        /*
         * Someone wanted us to flush, try to clean out the buffer.
         */
        if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
                KKASSERT(iou->io.modified == 0);
                bp->b_flags &= ~B_LOCKED;
                hammer_io_deallocate(bp);
                /* structure may be dead now */
        }
}
/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 */
static void
hammer_io_deallocate(struct buf *bp)
{
        hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

        KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
        if (iou->io.lock.refs > 0 || iou->io.modified) {
                /*
                 * It is not legal to disassociate a modified buffer.  This
                 * case really shouldn't ever occur.
                 */
                bp->b_flags |= B_LOCKED;
        } else {
                /*
                 * Disassociate the BP.  If the io has no refs left we
                 * have to add it to the loose list.
                 */
                hammer_io_disassociate(iou, 0);
                if (iou->io.bp == NULL &&
                    iou->io.type != HAMMER_STRUCTURE_VOLUME) {
                        KKASSERT(iou->io.mod_list == NULL);
                        iou->io.mod_list = &iou->io.hmp->lose_list;
                        TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
                }
        }
}
static int
hammer_io_fsync(struct vnode *vp)
{
        return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
        return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}
/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * we allow it to proceed.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
        return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
        hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

        /*
         * We can only clear the modified bit if the IO is not currently
         * undergoing modification.  Otherwise we may miss changes.
         */
        if (io->modify_refs == 0 && io->modified) {
                KKASSERT(io->mod_list != NULL);
                if (io->mod_list == &io->hmp->volu_list ||
                    io->mod_list == &io->hmp->meta_list) {
                        --io->hmp->locked_dirty_count;
                }
                TAILQ_REMOVE(io->mod_list, io, mod_entry);
                io->mod_list = NULL;
                io->modified = 0;
        }

        /*
         * The kernel is going to start the IO, set io->running.
         */
        KKASSERT(io->running == 0);
        io->running = 1;
        ++io->hmp->io_running_count;
        return(0);
}
/*
 * Return non-zero if the caller should flush the structure associated
 * with this io sub-structure.
 */
int
hammer_io_checkflush(struct hammer_io *io)
{
        if (io->bp == NULL || (io->bp->b_flags & B_LOCKED)) {
                return(1);
        }
        return(0);
}
/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
        return(0);
}
struct bio_ops hammer_bioops = {
        .io_start       = hammer_io_start,
        .io_complete    = hammer_io_complete,
        .io_deallocate  = hammer_io_deallocate,
        .io_fsync       = hammer_io_fsync,
        .io_sync        = hammer_io_sync,
        .io_movedeps    = hammer_io_movedeps,
        .io_countdeps   = hammer_io_countdeps,
        .io_checkread   = hammer_io_checkread,
        .io_checkwrite  = hammer_io_checkwrite,
};