HAMMER 56C/Many: Performance tuning - MEDIA STRUCTURES CHANGED!
sys/vfs/hammer/hammer_io.c

/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.44 2008/06/20 05:38:26 dillon Exp $
 */

/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to destroy a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);

/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_mount_t hmp, enum hammer_io_type type)
{
	io->hmp = hmp;
	io->type = type;
}
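
/*
 * Usage sketch (added for illustration, not part of the original file):
 * hammer_io_init() expects an already-zero'd structure, so a typical
 * caller zeroes the containing object first.  The allocation shown here
 * is hypothetical; only hammer_io_init() and the type constant come from
 * this file.
 */
#if 0
static hammer_buffer_t
example_alloc_buffer(hammer_mount_t hmp)
{
	hammer_buffer_t buffer;

	buffer = kmalloc(sizeof(*buffer), M_HAMMER, M_WAITOK | M_ZERO);
	/* already-zero'd, as hammer_io_init() requires */
	hammer_io_init(&buffer->io, hmp, HAMMER_STRUCTURE_DATA_BUFFER);
	return(buffer);
}
#endif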

/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 * HAMMER must own the buffer (released == 0) since we mess around with it.
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou, int elseit)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(iou->io.modified == 0);
	KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
	buf_dep_init(bp);
	iou->io.bp = NULL;

	/*
	 * If the buffer was locked someone wanted to get rid of it.
	 */
	if (bp->b_flags & B_LOCKED) {
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
	}

	/*
	 * elseit is 0 when called from the kernel path when the io
	 * might have no references.
	 */
	if (elseit) {
		KKASSERT(iou->io.released == 0);
		iou->io.released = 1;
		if (iou->io.reclaim)
			bp->b_flags |= B_NOCACHE|B_RELBUF;
		bqrelse(bp);
	} else {
		KKASSERT(iou->io.released);
	}
	iou->io.reclaim = 0;

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	}
}

/*
 * Wait for any physical IO to complete
 */
static void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		crit_enter();
		tsleep_interlock(io);
		io->waiting = 1;
		for (;;) {
			tsleep(io, 0, "hmrflw", 0);
			if (io->running == 0)
				break;
			tsleep_interlock(io);
			io->waiting = 1;
			if (io->running == 0)
				break;
		}
		crit_exit();
	}
}
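
/*
 * Added note: the loop above is the classic interlocked-sleep pattern.
 * tsleep_interlock() registers the wait channel before io->running is
 * re-tested, so a wakeup() fired from biodone context between the test
 * and the tsleep() cannot be lost.  A generic sketch of the same shape
 * (hypothetical helper, not in the original):
 */
#if 0
static void
example_wait_for_flag(int *flagp)
{
	crit_enter();
	tsleep_interlock(flagp);		/* arm the channel first */
	while (*flagp) {
		tsleep(flagp, 0, "exwait", 0);	/* sleep atomically */
		tsleep_interlock(flagp);	/* re-arm before re-test */
	}
	crit_exit();
}
#endif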

/*
 * Wait for all hammer_io-initiated write I/O's to complete.  This is not
 * supposed to count direct I/O's but some can leak through (for
 * non-full-sized direct I/Os).
 */
void
hammer_io_wait_all(hammer_mount_t hmp, const char *ident)
{
	crit_enter();
	while (hmp->io_running_count)
		tsleep(&hmp->io_running_count, 0, ident, 0);
	crit_exit();
}

#define HAMMER_MAXRA	4

/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * This routine is mostly used on meta-data and small-data blocks.  Generally
 * speaking HAMMER assumes some locality of reference and will cluster
 * a 64K read.
 *
 * Note that clustering occurs at the device layer, not the logical layer.
 * If the buffers do not apply to the current operation they may apply to
 * some other.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
{
	struct buf *bp;
	int error;

	if ((bp = io->bp) == NULL) {
		++hammer_count_io_running_read;
#if 1
		error = cluster_read(devvp, limit, io->offset, io->bytes,
				     HAMMER_CLUSTER_SIZE,
				     HAMMER_CLUSTER_BUFS, &io->bp);
#else
		error = bread(devvp, io->offset, io->bytes, &io->bp);
#endif
		--hammer_count_io_running_read;
		if (error == 0) {
			bp = io->bp;
			bp->b_ops = &hammer_bioops;
			KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
			LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
			BUF_KERNPROC(bp);
		}
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}
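
/*
 * Caller sketch (hypothetical, added): buffer instantiation holds the io
 * exclusively locked, loads the bp, and then points the ondisk alias at
 * the buffer data; the field names mirror the ones used elsewhere in
 * this file.
 */
#if 0
static int
example_load_buffer(hammer_volume_t volume, hammer_buffer_t buffer,
		    hammer_off_t limit)
{
	int error;

	/* precondition: buffer->io is exclusively locked by us */
	error = hammer_io_read(volume->devvp, &buffer->io, limit);
	if (error == 0)
		buffer->ondisk = (void *)buffer->io.bp->b_data;
	return(error);
}
#endif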

/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;
		KKASSERT(io->running == 0);
		io->waiting = 0;
		BUF_KERNPROC(bp);
	} else {
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
	}
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}

/*
 * Remove potential device level aliases against buffers managed by high level
 * vnodes.
 */
void
hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
{
	hammer_io_structure_t iou;
	hammer_off_t phys_offset;
	struct buf *bp;

	phys_offset = volume->ondisk->vol_buf_beg +
		      (zone2_offset & HAMMER_OFF_SHORT_MASK);
	crit_enter();
	if ((bp = findblk(volume->devvp, phys_offset)) != NULL) {
		bp = getblk(volume->devvp, phys_offset, bp->b_bufsize, 0, 0);
		if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
			hammer_io_clear_modify(&iou->io, 1);
			bundirty(bp);
			iou->io.reclaim = 1;
			hammer_io_deallocate(bp);
		} else {
			KKASSERT((bp->b_flags & B_LOCKED) == 0);
			bundirty(bp);
			bp->b_flags |= B_NOCACHE|B_RELBUF;
			brelse(bp);
		}
	}
	crit_exit();
}

/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually locked exclusively (but may not be during unmount).
 *
 * This routine is responsible for the disposition of the buffer cache
 * buffer backing the IO.  Only pure-data and undo buffers can be handed
 * back to the kernel.  Volume and meta-data buffers must be retained
 * by HAMMER until explicitly flushed by the backend.
 */
void
hammer_io_release(struct hammer_io *io, int flush)
{
	union hammer_io_structure *iou = (void *)io;
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return;

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (flush) {
			hammer_io_flush(io);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io);
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regards to
	 * the buffer).
	 */
	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
		hammer_io_disassociate((hammer_io_structure_t)io, 1);
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel.
		 * volume and meta-data IO types must be explicitly flushed
		 * by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 */
		if (bp->b_flags & B_LOCKED) {
			hammer_io_disassociate(iou, 1);
		} else {
			if (io->reclaim) {
				hammer_io_disassociate(iou, 1);
			} else {
				io->released = 1;
				bqrelse(bp);
			}
		}
	} else {
		/*
		 * A released buffer is passively associated with our
		 * hammer_io structure.  The kernel cannot destroy it
		 * without making a bioops call.  If the kernel (B_LOCKED)
		 * or we (reclaim) requested that the buffer be destroyed
		 * we destroy it, otherwise we do a quick get/release to
		 * reset its position in the kernel's LRU list.
		 *
		 * Leaving the buffer passively associated allows us to
		 * use the kernel's LRU buffer flushing mechanisms rather
		 * than rolling our own.
		 *
		 * XXX there are two ways of doing this.  We can re-acquire
		 * and passively release to reset the LRU, or not.
		 */
		crit_enter();
		if (io->running == 0) {
			regetblk(bp);
			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
				/*regetblk(bp);*/
				io->released = 0;
				hammer_io_disassociate(iou, 1);
			} else {
				bqrelse(bp);
			}
		}
		crit_exit();
	}
}
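
/*
 * Added recap of the disposition logic above, as a sketch of the caller
 * contract (hypothetical wrapper, not in the original):
 */
#if 0
static void
example_last_ref(hammer_buffer_t buffer, int flush)
{
	/*
	 * flush != 0: a dirty io is hammer_io_flush()ed and, once clean
	 *	       and idle, its bp is disassociated entirely.
	 * flush == 0: dirty DATA/UNDO bufs may be handed to the kernel
	 *	       via bdwrite(); dirty VOLUME/META bufs are retained
	 *	       until the flusher writes them explicitly.
	 */
	hammer_io_release(&buffer->io, flush);
}
#endif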

/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io)
{
	struct buf *bp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0) {
		return;
	}

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs <= 0);

	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set, we actually own the bp rather than our
	 * buffer.
	 */
	bp = io->bp;
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		/* io->released = 0; */
		KKASSERT(io->released);
		KKASSERT(io->bp == bp);
	}
	io->released = 1;

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * NOTE: This call also finalizes the buffer's content (inval == 0).
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	hammer_io_clear_modify(io, 0);

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 */
	io->running = 1;
	++io->hmp->io_running_count;
	++hammer_count_io_running_write;
	bawrite(bp);
}
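
/*
 * Flusher-side sketch (added, hypothetical caller): the backend holds the
 * only reference when it flushes, which is the precondition stated above.
 */
#if 0
static void
example_flush_one(hammer_io_t io)
{
	if (io->modified)
		hammer_io_flush(io);	/* clears modified, bawrite()s bp */
	hammer_io_wait(io);		/* optionally wait for biodone */
}
#endif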

/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */

/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure data buffers
 * can be written out.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
	struct hammer_mount *hmp = io->hmp;

	/*
	 * io->modify_refs must be >= 0
	 */
	while (io->modify_refs < 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}

	/*
	 * Shortcut if nothing to do.
	 */
	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	hammer_lock_ex(&io->lock);
	if (io->modified == 0) {
		KKASSERT(io->mod_list == NULL);
		switch(io->type) {
		case HAMMER_STRUCTURE_VOLUME:
			io->mod_list = &hmp->volu_list;
			++hmp->locked_dirty_count;
			++hammer_count_dirtybufs;
			break;
		case HAMMER_STRUCTURE_META_BUFFER:
			io->mod_list = &hmp->meta_list;
			++hmp->locked_dirty_count;
			++hammer_count_dirtybufs;
			break;
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			io->mod_list = &hmp->undo_list;
			break;
		case HAMMER_STRUCTURE_DATA_BUFFER:
			io->mod_list = &hmp->data_list;
			break;
		}
		TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
		io->modified = 1;
	}
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
		KKASSERT(io->modified != 0);
	}
	hammer_unlock(&io->lock);
}

static __inline
void
hammer_io_modify_done(hammer_io_t io)
{
	KKASSERT(io->modify_refs > 0);
	--io->modify_refs;
	if (io->modify_refs == 0 && io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}

void
hammer_io_write_interlock(hammer_io_t io)
{
	while (io->modify_refs != 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}
	io->modify_refs = -1;
}

void
hammer_io_done_interlock(hammer_io_t io)
{
	KKASSERT(io->modify_refs == -1);
	io->modify_refs = 0;
	if (io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}
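
/*
 * Added pairing sketch: the write interlock brackets direct access to a
 * buffer's ondisk image, excluding concurrent hammer_modify_*() brackets
 * (which spin in hammer_io_modify() while modify_refs == -1).
 */
#if 0
static void
example_write_interlocked(hammer_io_t io)
{
	hammer_io_write_interlock(io);	/* drain modifiers, set refs = -1 */
	/* ... safely access the ondisk image here ... */
	hammer_io_done_interlock(io);	/* refs = 0, wake blocked modifiers */
}
#endif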

/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&volume->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &volume->io,
			 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			 base, len);
	}
}

/*
 * Caller intends to modify a buffer's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&buffer->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &buffer->io,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}

void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}
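
/*
 * Added sketch of the modify/undo bracket these helpers complete.  The
 * field being edited is illustrative; the pattern of generating undo
 * before mutating on-disk bytes is the real point.
 */
#if 0
static void
example_bump_count(hammer_transaction_t trans, hammer_buffer_t buffer,
		   hammer_node_ondisk_t ondisk)
{
	hammer_modify_buffer(trans, buffer, &ondisk->count,
			     sizeof(ondisk->count));	/* undo + dirty */
	++ondisk->count;				/* the actual edit */
	hammer_modify_buffer_done(buffer);		/* drops modify_refs */
}
#endif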

/*
 * Mark an entity as not being dirty any more and finalize any
 * delayed adjustments to the buffer.
 *
 * Delayed adjustments are an important performance enhancement, allowing
 * us to avoid recalculating B-Tree node CRCs over and over again when
 * making bulk-modifications to the B-Tree.
 *
 * If inval is non-zero delayed adjustments are ignored.
 */
void
hammer_io_clear_modify(struct hammer_io *io, int inval)
{
	if (io->modified == 0)
		return;

	/*
	 * Take us off the mod-list and clear the modified bit.
	 */
	KKASSERT(io->mod_list != NULL);
	if (io->mod_list == &io->hmp->volu_list ||
	    io->mod_list == &io->hmp->meta_list) {
		--io->hmp->locked_dirty_count;
		--hammer_count_dirtybufs;
	}
	TAILQ_REMOVE(io->mod_list, io, mod_entry);
	io->mod_list = NULL;
	io->modified = 0;

	/*
	 * If this bit is not set there are no delayed adjustments.
	 */
	if (io->gencrc == 0)
		return;
	io->gencrc = 0;

	/*
	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
	 * on the node (& underlying buffer).  Release the node after clearing
	 * the flag.
	 */
	if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
		hammer_buffer_t buffer = (void *)io;
		hammer_node_t node;

restart:
		TAILQ_FOREACH(node, &buffer->clist, entry) {
			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
				continue;
			node->flags &= ~HAMMER_NODE_NEEDSCRC;
			KKASSERT(node->ondisk);
			if (inval == 0)
				node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
			hammer_rel_node(node);
			goto restart;
		}
	}
}
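
/*
 * Added note on the delayed CRC: the crc32() call above covers the node
 * from just past the crc field to the end of the ondisk structure.  The
 * size constant is expected to be defined along these lines (assumption,
 * verify against hammer_disk.h in your tree):
 */
#if 0
#define HAMMER_BTREE_CRCSIZE	\
	(sizeof(struct hammer_node_ondisk) - sizeof(hammer_crc_t))
#endif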

/*
 * Clear the IO's modify list.  Even though the IO is no longer modified
 * it may still be on the lose_list.  This routine is called just before
 * the governing hammer_buffer is destroyed.
 */
void
hammer_io_clear_modlist(struct hammer_io *io)
{
	KKASSERT(io->modified == 0);
	if (io->mod_list) {
		crit_enter();	/* biodone race against list */
		KKASSERT(io->mod_list == &io->hmp->lose_list);
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		crit_exit();
	}
}

/************************************************************************
 *				HAMMER_BIOOPS				*
 ************************************************************************
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}

/*
 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT(iou->io.released == 1);

	/*
	 * Deal with people waiting for I/O to drain
	 */
	if (iou->io.running) {
		--hammer_count_io_running_write;
		if (--iou->io.hmp->io_running_count == 0)
			wakeup(&iou->io.hmp->io_running_count);
		KKASSERT(iou->io.hmp->io_running_count >= 0);
		iou->io.running = 0;
	}

	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * If B_LOCKED is set someone wanted to deallocate the bp at some
	 * point, do it now if refs has become zero.
	 */
	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
		KKASSERT(iou->io.modified == 0);
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
}

/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.  The caller owns the bp.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 *
 * WARNING: This may be called from an interrupt via hammer_io_complete()
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (iou->io.lock.refs > 0 || iou->io.modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.
		 */
		bp->b_flags |= B_LOCKED;
		++hammer_count_io_locked;
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list.
		 */
		hammer_io_disassociate(iou, 0);
		if (iou->io.bp == NULL &&
		    iou->io.type != HAMMER_STRUCTURE_VOLUME) {
			KKASSERT(iou->io.mod_list == NULL);
			crit_enter();	/* biodone race against list */
			iou->io.mod_list = &iou->io.hmp->lose_list;
			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
			crit_exit();
		}
	}
}

static int
hammer_io_fsync(struct vnode *vp)
{
	return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
	return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}

/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
	return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

	/*
	 * This shouldn't happen under normal operation.
	 */
	if (io->type == HAMMER_STRUCTURE_VOLUME ||
	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
		if (!panicstr)
			panic("hammer_io_checkwrite: illegal buffer");
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			++hammer_count_io_locked;
		}
		return(1);
	}

	/*
	 * We can only clear the modified bit if the IO is not currently
	 * undergoing modification.  Otherwise we may miss changes.
	 */
	if (io->modify_refs == 0 && io->modified)
		hammer_io_clear_modify(io, 0);

	/*
	 * The kernel is going to start the IO, set io->running.
	 */
	KKASSERT(io->running == 0);
	io->running = 1;
	++io->hmp->io_running_count;
	++hammer_count_io_running_write;
	return(0);
}

/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	return(0);
}

struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};
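
/*
 * Added recap: these callbacks only fire for bufs that were armed by
 * hammer_io_read()/hammer_io_new(), which wire the table up as shown
 * here (hypothetical helper repeating lines from those functions):
 */
#if 0
static void
example_arm_bioops(hammer_io_t io, struct buf *bp)
{
	bp->b_ops = &hammer_bioops;
	KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
	LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
}
#endif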

/************************************************************************
 *				DIRECT IO OPS				*
 ************************************************************************
 *
 * These functions operate directly on the buffer cache buffer associated
 * with a front-end vnode rather than a back-end device vnode.
 */

/*
 * Read a buffer associated with a front-end vnode directly from the
 * disk media.  The bio may be issued asynchronously.
 *
 * This function can take a zone-2 or zone-X blockmap offset.
 */
int
hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio)
{
	hammer_off_t data_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	struct buf *bp;
	struct bio *nbio;
	int vol_no;
	int error;

	data_offset = bio->bio_offset;

	if ((data_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_RAW_BUFFER) {
		zone2_offset = data_offset;
		error = 0;
	} else {
		KKASSERT(data_offset >= HAMMER_ZONE_BTREE);
		KKASSERT((data_offset & HAMMER_BUFMASK) == 0);
		zone2_offset = hammer_blockmap_lookup(hmp, data_offset, &error);
	}
	if (error == 0) {
		vol_no = HAMMER_VOL_DECODE(zone2_offset);
		volume = hammer_get_volume(hmp, vol_no, &error);
		if (error == 0 && zone2_offset >= volume->maxbuf_off)
			error = EIO;
		if (error == 0) {
			zone2_offset &= HAMMER_OFF_SHORT_MASK;

			/* NOTE: third-level push */
			nbio = push_bio(bio);
			nbio->bio_offset = volume->ondisk->vol_buf_beg +
					   zone2_offset;
			vn_strategy(volume->devvp, nbio);
		}
		hammer_rel_volume(volume, 0);
	}
	if (error) {
		kprintf("hammer_direct_read: failed @ %016llx\n",
			data_offset);
		bp = bio->bio_buf;
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}
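
/*
 * Added sketch isolating the zone-2 to device-offset translation used by
 * both direct I/O paths above and by hammer_io_inval(): a raw-buffer
 * offset encodes a volume number plus an in-volume offset, and the device
 * byte offset is the volume's buffer area base plus the masked short
 * offset.
 */
#if 0
static hammer_off_t
example_zone2_to_phys(hammer_volume_t volume, hammer_off_t zone2_offset)
{
	return(volume->ondisk->vol_buf_beg +
	       (zone2_offset & HAMMER_OFF_SHORT_MASK));
}
#endif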

/*
 * Write a buffer associated with a front-end vnode directly to the
 * disk media.  The bio may be issued asynchronously.
 */
int
hammer_io_direct_write(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf,
		       struct bio *bio)
{
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	hammer_buffer_t buffer;
	struct buf *bp;
	struct bio *nbio;
	char *ptr;
	int vol_no;
	int error;

	buf_offset = leaf->data_offset;

	KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);

	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
	    leaf->data_len >= HAMMER_BUFSIZE) {
		/*
		 * We are using the vnode's bio to write directly to the
		 * media, any hammer_buffer at the same zone-X offset will
		 * now have stale data.
		 */
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
		vol_no = HAMMER_VOL_DECODE(zone2_offset);
		volume = hammer_get_volume(hmp, vol_no, &error);

		if (error == 0 && zone2_offset >= volume->maxbuf_off)
			error = EIO;
		if (error == 0) {
			bp = bio->bio_buf;
			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
			hammer_del_buffers(hmp, buf_offset,
					   zone2_offset, bp->b_bufsize);
			zone2_offset &= HAMMER_OFF_SHORT_MASK;

			nbio = push_bio(bio);
			nbio->bio_offset = volume->ondisk->vol_buf_beg +
					   zone2_offset;
			vn_strategy(volume->devvp, nbio);
		}
		hammer_rel_volume(volume, 0);
	} else {
		/* must fit in a standard HAMMER buffer */
		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) & ~HAMMER_BUFMASK64) == 0);
		buffer = NULL;
		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
		if (error == 0) {
			bp = bio->bio_buf;
			bp->b_flags |= B_AGE;
			hammer_io_modify(&buffer->io, 1);
			bcopy(bp->b_data, ptr, leaf->data_len);
			hammer_io_modify_done(&buffer->io);
			hammer_rel_buffer(buffer, 0);
			bp->b_resid = 0;
			biodone(bio);
		}
	}
	if (error) {
		kprintf("hammer_direct_write: failed @ %016llx\n",
			leaf->data_offset);
		bp = bio->bio_buf;
		bp->b_resid = 0;
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}