/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.55 2008/09/15 17:02:49 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to destroy a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
#if 0
static void hammer_io_direct_read_complete(struct bio *nbio);
#endif
static void hammer_io_direct_write_complete(struct bio *nbio);
static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
static void hammer_io_set_modlist(struct hammer_io *io);
static void hammer_io_flush_mark(hammer_volume_t volume);

/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_volume_t volume, enum hammer_io_type type)
{
	io->volume = volume;
	io->hmp = volume->io.hmp;
	io->type = type;
}
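
/*
 * Illustrative only (hypothetical call site): a buffer's embedded io is
 * typically initialized when the buffer is instantiated, along the lines
 * of
 *
 *	hammer_io_init(&buffer->io, volume, HAMMER_STRUCTURE_DATA_BUFFER);
 *
 * Per the comment above, the type is not fixed for life; a structure that
 * switches roles can be run through this routine again.
 */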

/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  The buffer is unlocked and marked appropriate for reclamation.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 *
 * The caller must own the buffer and the IO must indicate that the
 * structure no longer owns it (io.released != 0).
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(iou->io.released);
	KKASSERT(iou->io.modified == 0);
	KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
	buf_dep_init(bp);
	iou->io.bp = NULL;

	/*
	 * If the buffer was locked someone wanted to get rid of it.
	 */
	if (bp->b_flags & B_LOCKED) {
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
	}
	if (iou->io.reclaim) {
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		iou->io.reclaim = 0;
	}

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	}
}

/*
 * Wait for any physical IO to complete
 *
 * XXX we aren't interlocked against a spinlock or anything so there
 *     is a small window in the interlock / io->running == 0 test.
 */
void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		for (;;) {
			io->waiting = 1;
			tsleep_interlock(io, 0);
			if (io->running == 0)
				break;
			tsleep(io, PINTERLOCKED, "hmrflw", hz);
			if (io->running == 0)
				break;
		}
	}
}
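
/*
 * Note on the loop above: tsleep_interlock() places us on the sleep queue
 * for 'io' before io->running is re-tested, and PINTERLOCKED tells
 * tsleep() the interlock was used, so a wakeup() issued by
 * hammer_io_complete() between the re-test and the sleep is not lost.
 * The hz timeout bounds the residual race window noted in the XXX above.
 */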

/*
 * Wait for all hammer_io-initiated write I/O's to complete.  This is not
 * supposed to count direct I/O's but some can leak through (for
 * non-full-sized direct I/Os).
 */
void
hammer_io_wait_all(hammer_mount_t hmp, const char *ident)
{
	hammer_io_flush_sync(hmp);
	crit_enter();
	while (hmp->io_running_space)
		tsleep(&hmp->io_running_space, 0, ident, 0);
	crit_exit();
}

#define HAMMER_MAXRA	4

/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * This routine is mostly used on meta-data and small-data blocks.  Generally
 * speaking HAMMER assumes some locality of reference and will cluster
 * a 64K read.
 *
 * Note that clustering occurs at the device layer, not the logical layer.
 * If the buffers do not apply to the current operation they may apply to
 * some other.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
{
	struct buf *bp;
	int error;

	if ((bp = io->bp) == NULL) {
		hammer_count_io_running_read += io->bytes;
		if (hammer_cluster_enable) {
			error = cluster_read(devvp, limit,
					     io->offset, io->bytes,
					     HAMMER_CLUSTER_SIZE,
					     HAMMER_CLUSTER_BUFS, &io->bp);
		} else {
			error = bread(devvp, io->offset, io->bytes, &io->bp);
		}
		hammer_stats_disk_read += io->bytes;
		hammer_count_io_running_read -= io->bytes;

		/*
		 * The code generally assumes b_ops/b_dep has been set-up,
		 * even if we error out here.
		 */
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		BUF_KERNPROC(bp);
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}
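
/*
 * Hypothetical caller sketch (not a verbatim call site): the buffer
 * loading code reads through the volume's device vnode, with clustering
 * limited to the volume's buffer area, roughly
 *
 *	error = hammer_io_read(volume->devvp, &buffer->io,
 *			       volume->maxbuf_off);
 */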

/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;
		KKASSERT(io->running == 0);
		io->waiting = 0;
		BUF_KERNPROC(bp);
	} else {
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
	}
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}

/*
 * Remove potential device level aliases against buffers managed by high level
 * vnodes.  Aliases can also be created due to mixed buffer sizes or via
 * direct access to the backing store device.
 *
 * This is nasty because the buffers are also VMIO-backed.  Even if a buffer
 * does not exist its backing VM pages might, and we have to invalidate
 * those as well or a getblk() will reinstate them.
 *
 * Buffer cache buffers associated with hammer_buffers cannot be
 * invalidated.
 */
int
hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
{
	hammer_io_structure_t iou;
	hammer_off_t phys_offset;
	struct buf *bp;
	int error;

	phys_offset = volume->ondisk->vol_buf_beg +
		      (zone2_offset & HAMMER_OFF_SHORT_MASK);
	crit_enter();
	if ((bp = findblk(volume->devvp, phys_offset, FINDBLK_TEST)) != NULL)
		bp = getblk(volume->devvp, phys_offset, bp->b_bufsize, 0, 0);
	else
		bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);
	if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
#if 0
		hammer_ref(&iou->io.lock);
		hammer_io_clear_modify(&iou->io, 1);
		bundirty(bp);
		iou->io.released = 0;
		BUF_KERNPROC(bp);
		iou->io.reclaim = 1;
		iou->io.waitdep = 1;
		KKASSERT(iou->io.lock.refs == 1);
		hammer_rel_buffer(&iou->buffer, 0);
		/*hammer_io_deallocate(bp);*/
#endif
		bqrelse(bp);
		error = EAGAIN;
	} else {
		KKASSERT((bp->b_flags & B_LOCKED) == 0);
		bundirty(bp);
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		brelse(bp);
		error = 0;
	}
	crit_exit();
	return(error);
}

/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually interlocked with io.loading and io.refs must be 1.
 *
 * This routine may return a non-NULL bp to the caller for disposal.
 * Disposal simply means the caller finishes decrementing the ref-count on
 * the IO structure then brelse()'s the bp.  The bp may or may not still be
 * passively associated with the IO.
 *
 * The only requirement here is that modified meta-data and volume-header
 * buffers may NOT be disassociated from the IO structure, and consequently
 * we also leave such buffers actively associated with the IO if they already
 * are (since the kernel can't do anything with them anyway).  Only the
 * flusher is allowed to write such buffers out.  Modified pure-data and
 * undo buffers are returned to the kernel but left passively associated
 * so we can track when the kernel writes the bp out.
 */
struct buf *
hammer_io_release(struct hammer_io *io, int flush)
{
	union hammer_io_structure *iou = (void *)io;
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return(NULL);

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * Meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (flush) {
			hammer_io_flush(io, 0);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
				hammer_io_flush(io, 0);
				break;
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io, hammer_undo_reclaim(io));
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.  This occurs when
	 * the buffer must be disposed of definitively during an umount
	 * or buffer invalidation.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regards to
	 * the buffer).
	 */
	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
		} else {
			io->released = 1;
		}
		hammer_io_disassociate((hammer_io_structure_t)io);
		/* return the bp */
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel if
		 * the buffer has been modified.
		 *
		 * volume and meta-data IO types may only be explicitly
		 * flushed by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
		bp = NULL;	/* bp left associated */
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 *
		 * We can steal the structure's ownership of the bp.
		 */
		io->released = 1;
		if (bp->b_flags & B_LOCKED) {
			hammer_io_disassociate(iou);
			/* return the bp */
		} else {
			if (io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		}
	} else {
		/*
		 * A released buffer is passively associated with our
		 * hammer_io structure.  The kernel cannot destroy it
		 * without making a bioops call.  If the kernel (B_LOCKED)
		 * or we (reclaim) requested that the buffer be destroyed
		 * we destroy it, otherwise we do a quick get/release to
		 * reset its position in the kernel's LRU list.
		 *
		 * Leaving the buffer passively associated allows us to
		 * use the kernel's LRU buffer flushing mechanisms rather
		 * than rolling our own.
		 *
		 * XXX there are two ways of doing this.  We can re-acquire
		 * and passively release to reset the LRU, or not.
		 */
		if (io->running == 0) {
			regetblk(bp);
			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		} else {
			/*
			 * bp is left passively associated but we do not
			 * try to reacquire it.  Interactions with the io
			 * structure will occur on completion of the bp's
			 * I/O.
			 */
			bp = NULL;
		}
	}
	return(bp);
}
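
/*
 * Summary of hammer_io_release() dispositions, restating the branches
 * above:
 *
 *	explicit flush/reclaim, clean+idle	disassociate, return bp
 *	modified DATA/UNDO			bdwrite(), bp stays associated
 *	modified VOLUME/META			left dirty for the flusher
 *	clean, not yet released			released to kernel, usually
 *						still passively associated
 *	already released, idle			re-acquired to reset LRU, or
 *						disassociated on B_LOCKED or
 *						reclaim
 */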

/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io, int reclaim)
{
	struct buf *bp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0) {
		return;
	}

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs <= 0);

	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set, we actually own the bp rather than our
	 * buffer.
	 */
	bp = io->bp;
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		/* io->released = 0; */
		KKASSERT(io->released);
		KKASSERT(io->bp == bp);
	}
	io->released = 1;

	if (reclaim) {
		io->reclaim = 1;
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			++hammer_count_io_locked;
		}
	}

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * NOTE: This call also finalizes the buffer's content (inval == 0).
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	hammer_ref(&io->lock);
	hammer_io_clear_modify(io, 0);
	hammer_unref(&io->lock);

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 */
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
	bawrite(bp);
	hammer_io_flush_mark(io->volume);
}

/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */

/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure data buffers
 * can be written out.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
	/*
	 * io->modify_refs must be >= 0
	 */
	while (io->modify_refs < 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}

	/*
	 * Shortcut if nothing to do.
	 */
	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	hammer_lock_ex(&io->lock);
	if (io->modified == 0) {
		hammer_io_set_modlist(io);
		io->modified = 1;
	}
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
		KKASSERT(io->modified != 0);
	}
	hammer_unlock(&io->lock);
}

static __inline
void
hammer_io_modify_done(hammer_io_t io)
{
	KKASSERT(io->modify_refs > 0);
	--io->modify_refs;
	if (io->modify_refs == 0 && io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}

void
hammer_io_write_interlock(hammer_io_t io)
{
	while (io->modify_refs != 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}
	io->modify_refs = -1;
}

void
hammer_io_done_interlock(hammer_io_t io)
{
	KKASSERT(io->modify_refs == -1);
	io->modify_refs = 0;
	if (io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}
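
/*
 * The modify_refs field used above acts as a tiny reader/writer protocol:
 * each hammer_io_modify() holds a positive count while a modification is
 * in progress, hammer_io_write_interlock() waits for the count to drain
 * and then sets it to -1 to exclude new modifiers, and
 * hammer_io_done_interlock() returns it to 0, waking anyone who set
 * waitmod in the interim.
 */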

/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&volume->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &volume->io,
			HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			base, len);
	}
}

/*
 * Caller intends to modify a buffer's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&buffer->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &buffer->io,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}
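
/*
 * Illustrative usage (hypothetical call site, with 'field' and
 * 'new_value' standing in for real names): every on-disk mutation is
 * bracketed so the UNDO FIFO records the old contents first:
 *
 *	hammer_modify_buffer(trans, buffer, &buffer->ondisk->field,
 *			     sizeof(buffer->ondisk->field));
 *	buffer->ondisk->field = new_value;
 *	hammer_modify_buffer_done(buffer);
 *
 * Passing len == 0 marks the buffer modified without generating undo.
 */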

void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}

/*
 * Mark an entity as not being dirty any more and finalize any
 * delayed adjustments to the buffer.
 *
 * Delayed adjustments are an important performance enhancement, allowing
 * us to avoid recalculating B-Tree node CRCs over and over again when
 * making bulk-modifications to the B-Tree.
 *
 * If inval is non-zero delayed adjustments are ignored.
 *
 * This routine may dereference related btree nodes and cause the
 * buffer to be dereferenced.  The caller must own a reference on io.
 */
void
hammer_io_clear_modify(struct hammer_io *io, int inval)
{
	if (io->modified == 0)
		return;

	/*
	 * Take us off the mod-list and clear the modified bit.
	 */
	KKASSERT(io->mod_list != NULL);
	if (io->mod_list == &io->hmp->volu_list ||
	    io->mod_list == &io->hmp->meta_list) {
		io->hmp->locked_dirty_space -= io->bytes;
		hammer_count_dirtybufspace -= io->bytes;
	}
	TAILQ_REMOVE(io->mod_list, io, mod_entry);
	io->mod_list = NULL;
	io->modified = 0;

	/*
	 * If this bit is not set there are no delayed adjustments.
	 */
	if (io->gencrc == 0)
		return;
	io->gencrc = 0;

	/*
	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
	 * on the node (& underlying buffer).  Release the node after clearing
	 * the flag.
	 */
	if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
		hammer_buffer_t buffer = (void *)io;
		hammer_node_t node;

restart:
		TAILQ_FOREACH(node, &buffer->clist, entry) {
			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
				continue;
			node->flags &= ~HAMMER_NODE_NEEDSCRC;
			KKASSERT(node->ondisk);
			if (inval == 0)
				node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
			hammer_rel_node(node);
			goto restart;
		}
	}
	/* caller must still have ref on io */
	KKASSERT(io->lock.refs > 0);
}

/*
 * Clear the IO's modify list.  Even though the IO is no longer modified
 * it may still be on the lose_list.  This routine is called just before
 * the governing hammer_buffer is destroyed.
 */
void
hammer_io_clear_modlist(struct hammer_io *io)
{
	KKASSERT(io->modified == 0);
	if (io->mod_list) {
		crit_enter();	/* biodone race against list */
		KKASSERT(io->mod_list == &io->hmp->lose_list);
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		crit_exit();
	}
}

static void
hammer_io_set_modlist(struct hammer_io *io)
{
	struct hammer_mount *hmp = io->hmp;

	KKASSERT(io->mod_list == NULL);

	switch(io->type) {
	case HAMMER_STRUCTURE_VOLUME:
		io->mod_list = &hmp->volu_list;
		hmp->locked_dirty_space += io->bytes;
		hammer_count_dirtybufspace += io->bytes;
		break;
	case HAMMER_STRUCTURE_META_BUFFER:
		io->mod_list = &hmp->meta_list;
		hmp->locked_dirty_space += io->bytes;
		hammer_count_dirtybufspace += io->bytes;
		break;
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		io->mod_list = &hmp->undo_list;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
		io->mod_list = &hmp->data_list;
		break;
	}
	TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
}

/************************************************************************
 *				HAMMER_BIOOPS				*
 ************************************************************************
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}

/*
 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT(iou->io.released == 1);

	/*
	 * Deal with people waiting for I/O to drain
	 */
	if (iou->io.running) {
		/*
		 * Deal with critical write errors.  Once a critical error
		 * has been flagged in hmp the UNDO FIFO will not be updated.
		 * That way crash recovery will give us a consistent
		 * filesystem.
		 *
		 * Because of this we can throw away failed UNDO buffers.  If
		 * we throw away META or DATA buffers we risk corrupting
		 * the now read-only version of the filesystem visible to
		 * the user.  Clear B_ERROR so the buffer is not re-dirtied
		 * by the kernel and ref the io so it doesn't get thrown
		 * away.
		 */
		if (bp->b_flags & B_ERROR) {
			hammer_critical_error(iou->io.hmp, NULL, bp->b_error,
					      "while flushing meta-data");
			switch(iou->io.type) {
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				break;
			default:
				if (iou->io.ioerror == 0) {
					iou->io.ioerror = 1;
					if (iou->io.lock.refs == 0)
						++hammer_count_refedbufs;
					hammer_ref(&iou->io.lock);
				}
				break;
			}
			bp->b_flags &= ~B_ERROR;
			bundirty(bp);
#if 0
			hammer_io_set_modlist(&iou->io);
			iou->io.modified = 1;
#endif
		}
		hammer_stats_disk_write += iou->io.bytes;
		hammer_count_io_running_write -= iou->io.bytes;
		iou->io.hmp->io_running_space -= iou->io.bytes;
		if (iou->io.hmp->io_running_space == 0)
			wakeup(&iou->io.hmp->io_running_space);
		KKASSERT(iou->io.hmp->io_running_space >= 0);
		iou->io.running = 0;
	} else {
		hammer_stats_disk_read += iou->io.bytes;
	}

	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * If B_LOCKED is set someone wanted to deallocate the bp at some
	 * point, do it now if refs has become zero.
	 */
	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
		KKASSERT(iou->io.modified == 0);
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
}
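
/*
 * Because hammer_io_complete() can run from interrupt context, the paths
 * above restrict themselves to flag/counter adjustments and wakeup
 * channels.  This is also why list manipulation racing biodone (the
 * lose_list handling below and in hammer_io_clear_modlist()) is
 * bracketed by crit_enter()/crit_exit().
 */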

/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.  The caller owns the bp.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 *
 * WARNING: This may be called from an interrupt via hammer_io_complete()
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (iou->io.lock.refs > 0 || iou->io.modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.
		 */
		bp->b_flags |= B_LOCKED;
		++hammer_count_io_locked;
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list.
		 */
		hammer_io_disassociate(iou);
		if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
			KKASSERT(iou->io.bp == NULL);
			KKASSERT(iou->io.mod_list == NULL);
			crit_enter();	/* biodone race against list */
			iou->io.mod_list = &iou->io.hmp->lose_list;
			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
			crit_exit();
		}
	}
}

static int
hammer_io_fsync(struct vnode *vp)
{
	return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
	return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}

/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
	return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

	/*
	 * This shouldn't happen under normal operation.
	 */
	if (io->type == HAMMER_STRUCTURE_VOLUME ||
	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
		if (!panicstr)
			panic("hammer_io_checkwrite: illegal buffer");
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			++hammer_count_io_locked;
		}
		return(1);
	}

	/*
	 * We can only clear the modified bit if the IO is not currently
	 * undergoing modification.  Otherwise we may miss changes.
	 *
	 * Only data and undo buffers can reach here.  These buffers do
	 * not have terminal crc functions but we temporarily reference
	 * the IO anyway, just in case.
	 */
	if (io->modify_refs == 0 && io->modified) {
		hammer_ref(&io->lock);
		hammer_io_clear_modify(io, 0);
		hammer_unref(&io->lock);
	} else if (io->modified) {
		KKASSERT(io->type == HAMMER_STRUCTURE_DATA_BUFFER);
	}

	/*
	 * The kernel is going to start the IO, set io->running.
	 */
	KKASSERT(io->running == 0);
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
	return(0);
}
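
/*
 * Note that hammer_io_checkwrite() returning 0 guarantees the kernel
 * will initiate the write (see the comment above), which is why it is
 * safe to set io->running and account the in-flight bytes here rather
 * than waiting for a later callback.
 */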

/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	return(0);
}

struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};
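
/*
 * The kernel finds these callbacks via bp->b_ops, which hammer_io_read()
 * and hammer_io_new() point at hammer_bioops when a buffer is first
 * associated.  The b_dep list head doubles as the back-pointer from the
 * bp to its owning hammer_io (see the LIST_FIRST(&bp->b_dep) casts
 * throughout this file).
 */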

/************************************************************************
 *				DIRECT IO OPS				*
 ************************************************************************
 *
 * These functions operate directly on the buffer cache buffer associated
 * with a front-end vnode rather than a back-end device vnode.
 */

/*
 * Read a buffer associated with a front-end vnode directly from the
 * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
 * we validate the CRC.
 *
 * We must check for the presence of a HAMMER buffer to handle the case
 * where the reblocker has rewritten the data (which it does via the HAMMER
 * buffer system, not via the high-level vnode buffer cache), but not yet
 * committed the buffer to the media.
 */
int
hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
		      hammer_btree_leaf_elm_t leaf)
{
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	struct buf *bp;
	struct bio *nbio;
	int vol_no;
	int error;

	buf_offset = bio->bio_offset;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	/*
	 * The buffer cache may have an aliased buffer (the reblocker can
	 * write them).  If it does we have to sync any dirty data before
	 * we can build our direct-read.  This is a non-critical code path.
	 */
	bp = bio->bio_buf;
	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);

	/*
	 * Resolve to a zone-2 offset.  The conversion just requires
	 * munging the top 4 bits but we want to abstract it anyway
	 * so the blockmap code can verify the zone assignment.
	 */
	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
	if (error)
		goto done;
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);

	/*
	 * Resolve volume and raw-offset for 3rd level bio.  The
	 * offset will be specific to the volume.
	 */
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	if (error == 0 && zone2_offset >= volume->maxbuf_off)
		error = EIO;

	if (error == 0) {
		/*
		 * 3rd level bio
		 */
		nbio = push_bio(bio);
		nbio->bio_offset = volume->ondisk->vol_buf_beg +
				   (zone2_offset & HAMMER_OFF_SHORT_MASK);
#if 0
		/*
		 * XXX disabled - our CRC check doesn't work if the OS
		 * does bogus_page replacement on the direct-read.
		 */
		if (leaf && hammer_verify_data) {
			nbio->bio_done = hammer_io_direct_read_complete;
			nbio->bio_caller_info1.uvalue32 = leaf->data_crc;
		}
#endif
		hammer_stats_disk_read += bp->b_bufsize;
		vn_strategy(volume->devvp, nbio);
	}
	hammer_rel_volume(volume, 0);
done:
	if (error) {
		kprintf("hammer_direct_read: failed @ %016llx\n",
			(long long)zone2_offset);
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}

#if 0
/*
 * On completion of the BIO this callback must check the data CRC
 * and chain to the previous bio.
 */
static
void
hammer_io_direct_read_complete(struct bio *nbio)
{
	struct bio *obio;
	struct buf *bp;
	u_int32_t rec_crc = nbio->bio_caller_info1.uvalue32;

	bp = nbio->bio_buf;
	if (crc32(bp->b_data, bp->b_bufsize) != rec_crc) {
		kprintf("HAMMER: data_crc error @%016llx/%d\n",
			nbio->bio_offset, bp->b_bufsize);
		if (hammer_debug_debug)
			Debugger("");
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
	}
	obio = pop_bio(nbio);
	biodone(obio);
}
#endif

/*
 * Write a buffer associated with a front-end vnode directly to the
 * disk media.  The bio may be issued asynchronously.
 *
 * The BIO is associated with the specified record and RECF_DIRECT_IO
 * is set.  The record is added to its object.
 */
int
hammer_io_direct_write(hammer_mount_t hmp, hammer_record_t record,
		       struct bio *bio)
{
	hammer_btree_leaf_elm_t leaf = &record->leaf;
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	hammer_buffer_t buffer;
	struct buf *bp;
	struct bio *nbio;
	char *ptr;
	int vol_no;
	int error;

	buf_offset = leaf->data_offset;

	KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);

	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
	    leaf->data_len >= HAMMER_BUFSIZE) {
		/*
		 * We are using the vnode's bio to write directly to the
		 * media, any hammer_buffer at the same zone-X offset will
		 * now have stale data.
		 */
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
		vol_no = HAMMER_VOL_DECODE(zone2_offset);
		volume = hammer_get_volume(hmp, vol_no, &error);

		if (error == 0 && zone2_offset >= volume->maxbuf_off)
			error = EIO;
		if (error == 0) {
			bp = bio->bio_buf;
			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
			/*
			hammer_del_buffers(hmp, buf_offset,
					   zone2_offset, bp->b_bufsize);
			*/

			/*
			 * Second level bio - cached zone2 offset.
			 *
			 * (We can put our bio_done function in either the
			 *  2nd or 3rd level).
			 */
			nbio = push_bio(bio);
			nbio->bio_offset = zone2_offset;
			nbio->bio_done = hammer_io_direct_write_complete;
			nbio->bio_caller_info1.ptr = record;
			record->zone2_offset = zone2_offset;
			record->flags |= HAMMER_RECF_DIRECT_IO |
					 HAMMER_RECF_DIRECT_INVAL;

			/*
			 * Third level bio - raw offset specific to the
			 * correct volume.
			 */
			zone2_offset &= HAMMER_OFF_SHORT_MASK;
			nbio = push_bio(nbio);
			nbio->bio_offset = volume->ondisk->vol_buf_beg +
					   zone2_offset;
			hammer_stats_disk_write += bp->b_bufsize;
			vn_strategy(volume->devvp, nbio);
			hammer_io_flush_mark(volume);
		}
		hammer_rel_volume(volume, 0);
	} else {
		/*
		 * Must fit in a standard HAMMER buffer.  In this case all
		 * consumers use the HAMMER buffer system and RECF_DIRECT_IO
		 * does not need to be set-up.
		 */
		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) & ~HAMMER_BUFMASK64) == 0);
		buffer = NULL;
		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
		if (error == 0) {
			bp = bio->bio_buf;
			bp->b_flags |= B_AGE;
			hammer_io_modify(&buffer->io, 1);
			bcopy(bp->b_data, ptr, leaf->data_len);
			hammer_io_modify_done(&buffer->io);
			hammer_rel_buffer(buffer, 0);
			bp->b_resid = 0;
			biodone(bio);
		}
	}
	if (error == 0) {
		/*
		 * The record is all set up now, add it.  Potential conflicts
		 * have already been dealt with.
		 */
		error = hammer_mem_add(record);
		KKASSERT(error == 0);
	} else {
		/*
		 * Major suckage occurred.  Also note:  The record was never
		 * added to the tree so we do not have to worry about the
		 * backend.
		 */
		kprintf("hammer_direct_write: failed @ %016llx\n",
			(long long)leaf->data_offset);
		bp = bio->bio_buf;
		bp->b_resid = 0;
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
		record->flags |= HAMMER_RECF_DELETED_FE;
		hammer_rel_mem_record(record);
	}
	return(error);
}
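
/*
 * The bio translation stack used above, from top to bottom:
 *
 *	level 1: frontend vnode bio, zone-X (large-data) offset
 *	level 2: cached zone-2 (raw buffer) offset
 *	level 3: raw byte offset relative to the specific volume
 *
 * push_bio() is called once per translation, and per the comment above
 * the completion callback can be hung off either pushed level (the 2nd
 * level is used here).
 */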

/*
 * On completion of the BIO this callback must disconnect
 * it from the hammer_record and chain to the previous bio.
 *
 * An I/O error forces the mount to read-only.  Data buffers
 * are not B_LOCKED like meta-data buffers are, so we have to
 * throw the buffer away to prevent the kernel from retrying.
 */
static
void
hammer_io_direct_write_complete(struct bio *nbio)
{
	struct bio *obio;
	struct buf *bp;
	hammer_record_t record = nbio->bio_caller_info1.ptr;

	bp = nbio->bio_buf;
	obio = pop_bio(nbio);
	if (bp->b_flags & B_ERROR) {
		hammer_critical_error(record->ip->hmp, record->ip,
				      bp->b_error,
				      "while writing bulk data");
		bp->b_flags |= B_INVAL;
	}
	biodone(obio);

	KKASSERT(record != NULL);
	KKASSERT(record->flags & HAMMER_RECF_DIRECT_IO);
	if (record->flags & HAMMER_RECF_DIRECT_WAIT) {
		record->flags &= ~(HAMMER_RECF_DIRECT_IO |
				   HAMMER_RECF_DIRECT_WAIT);
		/* record can disappear once DIRECT_IO flag is cleared */
		wakeup(&record->flags);
	} else {
		record->flags &= ~HAMMER_RECF_DIRECT_IO;
		/* record can disappear once DIRECT_IO flag is cleared */
	}
}

/*
 * This is called before a record is either committed to the B-Tree
 * or destroyed, to resolve any associated direct-IO.
 *
 * (1) We must wait for any direct-IO related to the record to complete.
 *
 * (2) We must remove any buffer cache aliases for data accessed via
 *     leaf->data_offset or zone2_offset so non-direct-IO consumers
 *     (the mirroring and reblocking code) do not see stale data.
 */
void
hammer_io_direct_wait(hammer_record_t record)
{
	/*
	 * Wait for I/O to complete
	 */
	if (record->flags & HAMMER_RECF_DIRECT_IO) {
		crit_enter();
		while (record->flags & HAMMER_RECF_DIRECT_IO) {
			record->flags |= HAMMER_RECF_DIRECT_WAIT;
			tsleep(&record->flags, 0, "hmdiow", 0);
		}
		crit_exit();
	}

	/*
	 * Invalidate any related buffer cache aliases associated with the
	 * backing device.  This is needed because the buffer cache buffer
	 * for file data is associated with the file vnode, not the backing
	 * device vnode.
	 *
	 * XXX I do not think this case can occur any more now that
	 * reservations ensure that all such buffers are removed before
	 * an area can be reused.
	 */
	if (record->flags & HAMMER_RECF_DIRECT_INVAL) {
		KKASSERT(record->leaf.data_offset);
		hammer_del_buffers(record->ip->hmp, record->leaf.data_offset,
				   record->zone2_offset, record->leaf.data_len,
				   1);
		record->flags &= ~HAMMER_RECF_DIRECT_INVAL;
	}
}

/*
 * This is called to remove the second-level cached zone-2 offset from
 * frontend buffer cache buffers, now stale due to a data relocation.
 * These offsets are generated by cluster_read() via VOP_BMAP, or directly
 * by hammer_vop_strategy_read().
 *
 * This is rather nasty because here we have something like the reblocker
 * scanning the raw B-Tree with no held references on anything, really,
 * other than a shared lock on the B-Tree node, and we have to access the
 * frontend's buffer cache to check for and clean out the association.
 * Specifically, if the reblocker is moving data on the disk, these cached
 * offsets will become invalid.
 *
 * Only data record types associated with the large-data zone are subject
 * to direct-io and need to be checked.
 */
void
hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
{
	struct hammer_inode_info iinfo;
	int zone;

	if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
		return;
	zone = HAMMER_ZONE_DECODE(leaf->data_offset);
	if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
		return;
	iinfo.obj_id = leaf->base.obj_id;
	iinfo.obj_asof = 0;	/* unused */
	iinfo.obj_localization = leaf->base.localization &
				 HAMMER_LOCALIZE_PSEUDOFS_MASK;
	iinfo.u.leaf = leaf;
	hammer_scan_inode_snapshots(hmp, &iinfo,
				    hammer_io_direct_uncache_callback,
				    leaf);
}

static int
hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t iinfo = data;
	hammer_off_t data_offset;
	hammer_off_t file_offset;
	struct vnode *vp;
	struct buf *bp;
	int blksize;

	if (ip->vp == NULL)
		return(0);
	data_offset = iinfo->u.leaf->data_offset;
	file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
	blksize = iinfo->u.leaf->data_len;
	KKASSERT((blksize & HAMMER_BUFMASK) == 0);

	hammer_ref(&ip->lock);
	if (hammer_get_vnode(ip, &vp) == 0) {
		if ((bp = findblk(ip->vp, file_offset, FINDBLK_TEST)) != NULL &&
		    bp->b_bio2.bio_offset != NOOFFSET) {
			bp = getblk(ip->vp, file_offset, blksize, 0, 0);
			bp->b_bio2.bio_offset = NOOFFSET;
			brelse(bp);
		}
		vput(vp);
	}
	hammer_rel_inode(ip, 0);
	return(0);
}

/*
 * This function is called when writes may have occurred on the volume,
 * indicating that the device may be holding cached writes.
 */
static void
hammer_io_flush_mark(hammer_volume_t volume)
{
	volume->vol_flags |= HAMMER_VOLF_NEEDFLUSH;
}
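
/*
 * A note on the flush fan-out below: hammer_io_flush_sync() issues one
 * BUF_CMD_FLUSH pbuf per flagged volume without waiting, chaining the
 * pbufs through bio_caller_info1.cluster_head, and only then biowait()s
 * each one, so the media flushes proceed in parallel across volumes.
 */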

/*
 * This function ensures that the device has flushed any cached writes out.
 */
void
hammer_io_flush_sync(hammer_mount_t hmp)
{
	hammer_volume_t volume;
	struct buf *bp_base = NULL;
	struct buf *bp;

	RB_FOREACH(volume, hammer_vol_rb_tree, &hmp->rb_vols_root) {
		if (volume->vol_flags & HAMMER_VOLF_NEEDFLUSH) {
			volume->vol_flags &= ~HAMMER_VOLF_NEEDFLUSH;
			bp = getpbuf(NULL);
			bp->b_bio1.bio_offset = 0;
			bp->b_bufsize = 0;
			bp->b_bcount = 0;
			bp->b_cmd = BUF_CMD_FLUSH;
			bp->b_bio1.bio_caller_info1.cluster_head = bp_base;
			bp->b_bio1.bio_done = biodone_sync;
			bp->b_bio1.bio_flags |= BIO_SYNC;
			bp_base = bp;
			vn_strategy(volume->devvp, &bp->b_bio1);
		}
	}
	while ((bp = bp_base) != NULL) {
		bp_base = bp->b_bio1.bio_caller_info1.cluster_head;
		biowait(&bp->b_bio1, "hmrFLS");
		relpbuf(bp, NULL);
	}
}