/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.55 2008/09/15 17:02:49 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to destroy a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 */
#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>
static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
#if 0
static void hammer_io_direct_read_complete(struct bio *nbio);
#endif
static void hammer_io_direct_write_complete(struct bio *nbio);
static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
static void hammer_io_set_modlist(struct hammer_io *io);
static void hammer_io_flush_mark(hammer_volume_t volume);
/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_volume_t volume, enum hammer_io_type type)
{
        io->volume = volume;
        io->hmp = volume->io.hmp;
        io->type = type;
}
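
/*
 * Illustrative sketch (not part of the original file): hammer_io is
 * embedded in the volume/buffer structures, so (re)initialization is
 * just a matter of stamping the backpointers and type, e.g. for a
 * freshly zero'd data buffer:
 */
#if 0
        hammer_io_init(&buffer->io, volume, HAMMER_STRUCTURE_DATA_BUFFER);
#endif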
/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  The buffer is unlocked and marked appropriate for reclamation.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 *
 * The caller must own the buffer and the IO must indicate that the
 * structure no longer owns it (io.released != 0).
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou)
{
        struct buf *bp = iou->io.bp;

        KKASSERT(iou->io.released);
        KKASSERT(iou->io.modified == 0);
        KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
        buf_dep_init(bp);
        iou->io.bp = NULL;

        /*
         * If the buffer was locked someone wanted to get rid of it.
         */
        if (bp->b_flags & B_LOCKED) {
                --hammer_count_io_locked;
                bp->b_flags &= ~B_LOCKED;
        }
        if (iou->io.reclaim) {
                bp->b_flags |= B_NOCACHE|B_RELBUF;
                iou->io.reclaim = 0;
        }

        switch(iou->io.type) {
        case HAMMER_STRUCTURE_VOLUME:
                iou->volume.ondisk = NULL;
                break;
        case HAMMER_STRUCTURE_DATA_BUFFER:
        case HAMMER_STRUCTURE_META_BUFFER:
        case HAMMER_STRUCTURE_UNDO_BUFFER:
                iou->buffer.ondisk = NULL;
                break;
        case HAMMER_STRUCTURE_DUMMY:
                panic("hammer_io_disassociate: bad io type");
                break;
        }
}
/*
 * Wait for any physical IO to complete
 *
 * XXX we aren't interlocked against a spinlock or anything so there
 *     is a small window in the interlock / io->running == 0 test.
 */
void
hammer_io_wait(hammer_io_t io)
{
        if (io->running) {
                for (;;) {
                        io->waiting = 1;
                        tsleep_interlock(io, 0);
                        if (io->running == 0)
                                break;
                        tsleep(io, PINTERLOCKED, "hmrflw", hz);
                        if (io->running == 0)
                                break;
                }
        }
}
/*
 * Wait for all currently queued HAMMER-initiated I/Os to complete.
 *
 * This is not supposed to count direct I/O's but some can leak
 * through (for non-full-sized direct I/Os).
 */
void
hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush)
{
        struct hammer_io iodummy;
        hammer_io_t io;

        /*
         * Degenerate case, no I/O is running
         */
        crit_enter();
        if (TAILQ_EMPTY(&hmp->iorun_list)) {
                crit_exit();
                if (doflush)
                        hammer_io_flush_sync(hmp);
                return;
        }
        bzero(&iodummy, sizeof(iodummy));
        iodummy.type = HAMMER_STRUCTURE_DUMMY;

        /*
         * Add placemarker and then wait until it becomes the head of
         * the list.
         */
        TAILQ_INSERT_TAIL(&hmp->iorun_list, &iodummy, iorun_entry);
        while (TAILQ_FIRST(&hmp->iorun_list) != &iodummy) {
                tsleep(&iodummy, 0, ident, 0);
        }

        /*
         * Chain in case several placemarkers are present.
         */
        TAILQ_REMOVE(&hmp->iorun_list, &iodummy, iorun_entry);
        io = TAILQ_FIRST(&hmp->iorun_list);
        if (io && io->type == HAMMER_STRUCTURE_DUMMY)
                wakeup(io);
        crit_exit();

        if (doflush)
                hammer_io_flush_sync(hmp);
}
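
/*
 * Illustrative sketch (not part of the original file): the placemarker
 * scheme above gives each waiter FIFO drain semantics without having to
 * take references on the live hammer_io structures.  A typical caller
 * simply does:
 */
#if 0
        hammer_io_wait_all(hmp, "hmrslp", 0);   /* drain queued HAMMER I/O */
#endif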
/*
 * Clear a flagged error condition on an I/O buffer.  The caller must hold
 * its own ref on the buffer.
 */
void
hammer_io_clear_error(struct hammer_io *io)
{
        if (io->ioerror) {
                io->ioerror = 0;
                hammer_rel(&io->lock);
                KKASSERT(hammer_isactive(&io->lock));
        }
}
/*
 * This is an advisory function only which tells the buffer cache
 * the bp is not a meta-data buffer, even though it is backed by
 * a block device.
 *
 * This is used by HAMMER's reblocking code to avoid trying to
 * swapcache the filesystem's data when it is read or written
 * by the reblocking code.
 */
void
hammer_io_notmeta(hammer_buffer_t buffer)
{
        buffer->io.bp->b_flags |= B_NOTMETA;
}

#define HAMMER_MAXRA    4
/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * This routine is mostly used on meta-data and small-data blocks.  Generally
 * speaking HAMMER assumes some locality of reference and will cluster
 * a 64K read.
 *
 * Note that clustering occurs at the device layer, not the logical layer.
 * If the buffers do not apply to the current operation they may apply to
 * some other.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
{
        struct buf *bp;
        int error;

        if ((bp = io->bp) == NULL) {
                hammer_count_io_running_read += io->bytes;
                if (hammer_cluster_enable) {
                        error = cluster_read(devvp, limit,
                                             io->offset, io->bytes,
                                             HAMMER_CLUSTER_SIZE,
                                             HAMMER_CLUSTER_BUFS, &io->bp);
                } else {
                        error = bread(devvp, io->offset, io->bytes, &io->bp);
                }
                hammer_stats_disk_read += io->bytes;
                hammer_count_io_running_read -= io->bytes;

                /*
                 * The code generally assumes b_ops/b_dep has been set-up,
                 * even if we error out here.
                 */
                bp = io->bp;
                bp->b_ops = &hammer_bioops;
                KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
                LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
                BUF_KERNPROC(bp);
                KKASSERT(io->modified == 0);
                KKASSERT(io->running == 0);
                KKASSERT(io->waiting == 0);
                io->released = 0;       /* we hold an active lock on bp */
        } else {
                error = 0;
        }
        return(error);
}
/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
        struct buf *bp;

        if ((bp = io->bp) == NULL) {
                io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
                bp = io->bp;
                bp->b_ops = &hammer_bioops;
                KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
                LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
                io->released = 0;
                KKASSERT(io->running == 0);
                io->waiting = 0;
                BUF_KERNPROC(bp);
        } else {
                if (io->released) {
                        regetblk(bp);
                        BUF_KERNPROC(bp);
                        io->released = 0;
                }
        }
        hammer_io_modify(io, 0);
        vfs_bio_clrbuf(bp);
        return(0);
}
/*
 * Advance the activity count on the underlying buffer because
 * HAMMER does not getblk/brelse on every access.
 */
void
hammer_io_advance(struct hammer_io *io)
{
        if (io->bp)
                buf_act_advance(io->bp);
}
/*
 * Remove potential device level aliases against buffers managed by high level
 * vnodes.  Aliases can also be created due to mixed buffer sizes or via
 * direct access to the backing store device.
 *
 * This is nasty because the buffers are also VMIO-backed.  Even if a buffer
 * does not exist its backing VM pages might, and we have to invalidate
 * those as well or a getblk() will reinstate them.
 *
 * Buffer cache buffers associated with hammer_buffers cannot be
 * invalidated.
 */
int
hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
{
        hammer_io_structure_t iou;
        hammer_off_t phys_offset;
        struct buf *bp;
        int error;

        phys_offset = volume->ondisk->vol_buf_beg +
                      (zone2_offset & HAMMER_OFF_SHORT_MASK);
        crit_enter();
        if ((bp = findblk(volume->devvp, phys_offset, FINDBLK_TEST)) != NULL)
                bp = getblk(volume->devvp, phys_offset, bp->b_bufsize, 0, 0);
        else
                bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);
        if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
#if 0
                hammer_ref(&iou->io.lock);
                hammer_io_clear_modify(&iou->io, 1);
                bundirty(bp);
                iou->io.released = 0;
                BUF_KERNPROC(bp);
                iou->io.reclaim = 1;
                iou->io.waitdep = 1;
                KKASSERT(hammer_isactive(&iou->io.lock) == 1);
                hammer_rel_buffer(&iou->buffer, 0);
                /*hammer_io_deallocate(bp);*/
#endif
                bqrelse(bp);
                error = EAGAIN;
        } else {
                KKASSERT((bp->b_flags & B_LOCKED) == 0);
                bundirty(bp);
                bp->b_flags |= B_NOCACHE|B_RELBUF;
                brelse(bp);
                error = 0;
        }
        crit_exit();
        return(error);
}
/*
 * This routine is called on the last reference to a hammer structure.
 * The io must be interlocked with a refcount of zero.  The hammer structure
 * will remain interlocked on return.
 *
 * This routine may return a non-NULL bp to the caller for disposal.
 * The caller typically brelse()'s the bp.
 *
 * The bp may or may not still be passively associated with the IO.  It
 * will remain passively associated if it is unreleasable (e.g. a modified
 * meta-data buffer).
 *
 * The only requirement here is that modified meta-data and volume-header
 * buffers may NOT be disassociated from the IO structure, and consequently
 * we also leave such buffers actively associated with the IO if they already
 * are (since the kernel can't do anything with them anyway).  Only the
 * flusher is allowed to write such buffers out.  Modified pure-data and
 * undo buffers are returned to the kernel but left passively associated
 * so we can track when the kernel writes the bp out.
 */
struct buf *
hammer_io_release(struct hammer_io *io, int flush)
{
        union hammer_io_structure *iou = (void *)io;
        struct buf *bp;

        if ((bp = io->bp) == NULL)
                return(NULL);

        /*
         * Try to flush a dirty IO to disk if asked to by the
         * caller or if the kernel tried to flush the buffer in the past.
         *
         * Kernel-initiated flushes are only allowed for pure-data buffers.
         * meta-data and volume buffers can only be flushed explicitly
         * by HAMMER.
         */
        if (io->modified) {
                if (flush) {
                        hammer_io_flush(io, 0);
                } else if (bp->b_flags & B_LOCKED) {
                        switch(io->type) {
                        case HAMMER_STRUCTURE_DATA_BUFFER:
                                hammer_io_flush(io, 0);
                                break;
                        case HAMMER_STRUCTURE_UNDO_BUFFER:
                                hammer_io_flush(io, hammer_undo_reclaim(io));
                                break;
                        default:
                                break;
                        }
                } /* else no explicit request to flush the buffer */
        }

        /*
         * Wait for the IO to complete if asked to.  This occurs when
         * the buffer must be disposed of definitively during an umount
         * or buffer invalidation.
         */
        if (io->waitdep && io->running) {
                hammer_io_wait(io);
        }

        /*
         * Return control of the buffer to the kernel (with the proviso
         * that our bioops can override kernel decisions with regards to
         * the buffer).
         */
        if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
                /*
                 * Always disassociate the bp if an explicit flush
                 * was requested and the IO completed with no error
                 * (so unmount can really clean up the structure).
                 */
                if (io->released) {
                        regetblk(bp);
                        BUF_KERNPROC(bp);
                } else {
                        io->released = 1;
                }
                hammer_io_disassociate((hammer_io_structure_t)io);
                /* return the bp */
        } else if (io->modified) {
                /*
                 * Only certain IO types can be released to the kernel if
                 * the buffer has been modified.
                 *
                 * volume and meta-data IO types may only be explicitly
                 * flushed by HAMMER.
                 */
                switch(io->type) {
                case HAMMER_STRUCTURE_DATA_BUFFER:
                case HAMMER_STRUCTURE_UNDO_BUFFER:
                        if (io->released == 0) {
                                io->released = 1;
                                bdwrite(bp);
                        }
                        break;
                default:
                        break;
                }
                bp = NULL;      /* bp left associated */
        } else if (io->released == 0) {
                /*
                 * Clean buffers can be generally released to the kernel.
                 * We leave the bp passively associated with the HAMMER
                 * structure and use bioops to disconnect it later on
                 * if the kernel wants to discard the buffer.
                 *
                 * We can steal the structure's ownership of the bp.
                 */
                io->released = 1;
                if (bp->b_flags & B_LOCKED) {
                        hammer_io_disassociate(iou);
                        /* return the bp */
                } else {
                        if (io->reclaim) {
                                hammer_io_disassociate(iou);
                                /* return the bp */
                        } else {
                                /* return the bp (bp passively associated) */
                        }
                }
        } else {
                /*
                 * A released buffer is passively associated with our
                 * hammer_io structure.  The kernel cannot destroy it
                 * without making a bioops call.  If the kernel (B_LOCKED)
                 * or we (reclaim) requested that the buffer be destroyed
                 * we destroy it, otherwise we do a quick get/release to
                 * reset its position in the kernel's LRU list.
                 *
                 * Leaving the buffer passively associated allows us to
                 * use the kernel's LRU buffer flushing mechanisms rather
                 * than rolling our own.
                 *
                 * XXX there are two ways of doing this.  We can re-acquire
                 * and passively release to reset the LRU, or not.
                 */
                if (io->running == 0) {
                        regetblk(bp);
                        if ((bp->b_flags & B_LOCKED) || io->reclaim) {
                                hammer_io_disassociate(iou);
                                /* return the bp */
                        } else {
                                /* return the bp (bp passively associated) */
                        }
                } else {
                        /*
                         * bp is left passively associated but we do not
                         * try to reacquire it.  Interactions with the io
                         * structure will occur on completion of the bp's
                         * I/O.
                         */
                        bp = NULL;
                }
        }
        return(bp);
}
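
/*
 * Illustrative sketch (not part of the original file): the caller-side
 * contract for hammer_io_release().  Any bp handed back is the caller's
 * to dispose of, typically via brelse():
 */
#if 0
        bp = hammer_io_release(&buffer->io, flush);
        if (bp)
                brelse(bp);
#endif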
/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io, int reclaim)
{
        struct buf *bp;

        /*
         * Degenerate case - nothing to flush if nothing is dirty.
         */
        if (io->modified == 0) {
                return;
        }

        KKASSERT(io->bp);
        KKASSERT(io->modify_refs <= 0);

        /*
         * Acquire ownership of the bp, particularly before we clear our
         * modified flag.
         *
         * We are going to bawrite() this bp.  Don't leave a window where
         * io->released is set, we actually own the bp rather than our
         * buffer.
         */
        bp = io->bp;
        if (io->released) {
                regetblk(bp);
                /* BUF_KERNPROC(io->bp); */
                /* io->released = 0; */
                KKASSERT(io->released);
                KKASSERT(io->bp == bp);
        }
        io->released = 1;

        if (reclaim) {
                io->reclaim = 1;
                if ((bp->b_flags & B_LOCKED) == 0) {
                        bp->b_flags |= B_LOCKED;
                        ++hammer_count_io_locked;
                }
        }

        /*
         * Acquire exclusive access to the bp and then clear the modified
         * state of the buffer prior to issuing I/O to interlock any
         * modifications made while the I/O is in progress.  This shouldn't
         * happen anyway but losing data would be worse.  The modified bit
         * will be rechecked after the IO completes.
         *
         * NOTE: This call also finalizes the buffer's content (inval == 0).
         *
         * This is only legal when lock.refs == 1 (otherwise we might clear
         * the modified bit while there are still users of the cluster
         * modifying the data).
         *
         * Do this before potentially blocking so any attempt to modify the
         * ondisk while we are blocked blocks waiting for us.
         */
        hammer_ref(&io->lock);
        hammer_io_clear_modify(io, 0);
        hammer_rel(&io->lock);

        if (hammer_debug_io & 0x0002)
                kprintf("hammer io_write %016jx\n", bp->b_bio1.bio_offset);

        /*
         * Transfer ownership to the kernel and initiate I/O.
         */
        io->running = 1;
        io->hmp->io_running_space += io->bytes;
        TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry);
        hammer_count_io_running_write += io->bytes;
        bawrite(bp);
        hammer_io_flush_mark(io->volume);
}
/************************************************************************
 *                              BUFFER DIRTYING                         *
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */
/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure data buffers
 * can be written out.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
        /*
         * io->modify_refs must be >= 0
         */
        while (io->modify_refs < 0) {
                io->waitmod = 1;
                tsleep(io, 0, "hmrmod", 0);
        }

        /*
         * Shortcut if nothing to do.
         */
        KKASSERT(hammer_isactive(&io->lock) && io->bp != NULL);
        io->modify_refs += count;
        if (io->modified && io->released == 0)
                return;

        hammer_lock_ex(&io->lock);
        if (io->modified == 0) {
                hammer_io_set_modlist(io);
                io->modified = 1;
        }
        if (io->released) {
                regetblk(io->bp);
                BUF_KERNPROC(io->bp);
                io->released = 0;
                KKASSERT(io->modified != 0);
        }
        hammer_unlock(&io->lock);
}
static __inline
void
hammer_io_modify_done(hammer_io_t io)
{
        KKASSERT(io->modify_refs > 0);
        --io->modify_refs;
        if (io->modify_refs == 0 && io->waitmod) {
                io->waitmod = 0;
                wakeup(io);
        }
}

void
hammer_io_write_interlock(hammer_io_t io)
{
        while (io->modify_refs != 0) {
                io->waitmod = 1;
                tsleep(io, 0, "hmrmod", 0);
        }
        io->modify_refs = -1;
}

void
hammer_io_done_interlock(hammer_io_t io)
{
        KKASSERT(io->modify_refs == -1);
        io->modify_refs = 0;
        if (io->waitmod) {
                io->waitmod = 0;
                wakeup(io);
        }
}
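
/*
 * Illustrative sketch (not part of the original file): how the
 * modify_refs interlock above is intended to pair up.  Writers hold
 * modify_refs at -1 for the duration of a write-out, which blocks
 * hammer_io_modify() callers; modifiers bracket their changes with
 * hammer_io_modify() / hammer_io_modify_done().  The function below
 * is hypothetical.
 */
#if 0
static void
example_write_out(hammer_io_t io)
{
        hammer_io_write_interlock(io);  /* modify_refs: 0 -> -1 */
        /* ... issue the write while modifications are locked out ... */
        hammer_io_done_interlock(io);   /* modify_refs: -1 -> 0, wakeup */
}
#endif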
/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
                     void *base, int len)
{
        KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

        hammer_io_modify(&volume->io, 1);
        if (len) {
                intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
                KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
                hammer_generate_undo(trans,
                         HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
                         base, len);
        }
}
/*
 * Caller intends to modify a buffer's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
                     void *base, int len)
{
        KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

        hammer_io_modify(&buffer->io, 1);
        if (len) {
                intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
                KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
                hammer_generate_undo(trans,
                                     buffer->zone2_offset + rel_offset,
                                     base, len);
        }
}

void
hammer_modify_volume_done(hammer_volume_t volume)
{
        hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
        hammer_io_modify_done(&buffer->io);
}
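
/*
 * Illustrative sketch (not part of the original file): the canonical
 * modify bracket used by callers of the routines above.  UNDO for the
 * modified range is generated up front, then the ondisk data may be
 * mutated, then the bracket is closed.  example_modify() and its field
 * argument are hypothetical.
 */
#if 0
static void
example_modify(hammer_transaction_t trans, hammer_buffer_t buffer,
               u_int32_t *field)        /* points into buffer->ondisk */
{
        hammer_modify_buffer(trans, buffer, field, sizeof(*field));
        *field = 0;             /* covered by the UNDO generated above */
        hammer_modify_buffer_done(buffer);
}
#endif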
/*
 * Mark an entity as not being dirty any more and finalize any
 * delayed adjustments to the buffer.
 *
 * Delayed adjustments are an important performance enhancement, allowing
 * us to avoid recalculating B-Tree node CRCs over and over again when
 * making bulk-modifications to the B-Tree.
 *
 * If inval is non-zero delayed adjustments are ignored.
 *
 * This routine may dereference related btree nodes and cause the
 * buffer to be dereferenced.  The caller must own a reference on io.
 */
void
hammer_io_clear_modify(struct hammer_io *io, int inval)
{
        if (io->modified == 0)
                return;

        /*
         * Take us off the mod-list and clear the modified bit.
         */
        KKASSERT(io->mod_list != NULL);
        if (io->mod_list == &io->hmp->volu_list ||
            io->mod_list == &io->hmp->meta_list) {
                io->hmp->locked_dirty_space -= io->bytes;
                hammer_count_dirtybufspace -= io->bytes;
        }
        TAILQ_REMOVE(io->mod_list, io, mod_entry);
        io->mod_list = NULL;
        io->modified = 0;

        /*
         * If this bit is not set there are no delayed adjustments.
         */
        if (io->gencrc == 0)
                return;
        io->gencrc = 0;

        /*
         * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
         * on the node (& underlying buffer).  Release the node after clearing
         * the flag.
         */
        if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
                hammer_buffer_t buffer = (void *)io;
                hammer_node_t node;

restart:
                TAILQ_FOREACH(node, &buffer->clist, entry) {
                        if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
                                continue;
                        node->flags &= ~HAMMER_NODE_NEEDSCRC;
                        KKASSERT(node->ondisk);
                        if (inval == 0) {
                                node->ondisk->crc = crc32(&node->ondisk->crc + 1,
                                                          HAMMER_BTREE_CRCSIZE);
                        }
                        hammer_rel_node(node);
                        goto restart;
                }
        }
        /* caller must still have ref on io */
        KKASSERT(hammer_isactive(&io->lock));
}
/*
 * Clear the IO's modify list.  Even though the IO is no longer modified
 * it may still be on the lose_list.  This routine is called just before
 * the governing hammer_buffer is destroyed.
 */
void
hammer_io_clear_modlist(struct hammer_io *io)
{
        KKASSERT(io->modified == 0);
        if (io->mod_list) {
                crit_enter();   /* biodone race against list */
                KKASSERT(io->mod_list == &io->hmp->lose_list);
                TAILQ_REMOVE(io->mod_list, io, mod_entry);
                io->mod_list = NULL;
                crit_exit();
        }
}
static void
hammer_io_set_modlist(struct hammer_io *io)
{
        struct hammer_mount *hmp = io->hmp;

        KKASSERT(io->mod_list == NULL);

        switch(io->type) {
        case HAMMER_STRUCTURE_VOLUME:
                io->mod_list = &hmp->volu_list;
                hmp->locked_dirty_space += io->bytes;
                hammer_count_dirtybufspace += io->bytes;
                break;
        case HAMMER_STRUCTURE_META_BUFFER:
                io->mod_list = &hmp->meta_list;
                hmp->locked_dirty_space += io->bytes;
                hammer_count_dirtybufspace += io->bytes;
                break;
        case HAMMER_STRUCTURE_UNDO_BUFFER:
                io->mod_list = &hmp->undo_list;
                break;
        case HAMMER_STRUCTURE_DATA_BUFFER:
                io->mod_list = &hmp->data_list;
                break;
        case HAMMER_STRUCTURE_DUMMY:
                panic("hammer_io_set_modlist: bad io type");
                break;
        }
        TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
}
/************************************************************************
 *                              HAMMER_BIOOPS                           *
 ************************************************************************
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}
/*
 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
        union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);
        struct hammer_io *ionext;

        KKASSERT(iou->io.released == 1);

        /*
         * Deal with people waiting for I/O to drain
         */
        if (iou->io.running) {
                /*
                 * Deal with critical write errors.  Once a critical error
                 * has been flagged in hmp the UNDO FIFO will not be updated.
                 * That way crash recovery will give us a consistent
                 * filesystem.
                 *
                 * Because of this we can throw away failed UNDO buffers.  If
                 * we throw away META or DATA buffers we risk corrupting
                 * the now read-only version of the filesystem visible to
                 * the user.  Clear B_ERROR so the buffer is not re-dirtied
                 * by the kernel and ref the io so it doesn't get thrown
                 * away.
                 */
                if (bp->b_flags & B_ERROR) {
                        hammer_critical_error(iou->io.hmp, NULL, bp->b_error,
                                              "while flushing meta-data");
                        switch(iou->io.type) {
                        case HAMMER_STRUCTURE_UNDO_BUFFER:
                                break;
                        default:
                                if (iou->io.ioerror == 0) {
                                        iou->io.ioerror = 1;
                                        hammer_ref(&iou->io.lock);
                                }
                                break;
                        }
                        bp->b_flags &= ~B_ERROR;
                        bundirty(bp);
#if 0
                        hammer_io_set_modlist(&iou->io);
                        iou->io.modified = 1;
#endif
                }
                hammer_stats_disk_write += iou->io.bytes;
                hammer_count_io_running_write -= iou->io.bytes;
                iou->io.hmp->io_running_space -= iou->io.bytes;
                KKASSERT(iou->io.hmp->io_running_space >= 0);
                iou->io.running = 0;

                /*
                 * Remove from iorun list and wakeup any multi-io waiter(s).
                 */
                if (TAILQ_FIRST(&iou->io.hmp->iorun_list) == &iou->io) {
                        ionext = TAILQ_NEXT(&iou->io, iorun_entry);
                        if (ionext && ionext->type == HAMMER_STRUCTURE_DUMMY)
                                wakeup(ionext);
                }
                TAILQ_REMOVE(&iou->io.hmp->iorun_list, &iou->io, iorun_entry);
        } else {
                hammer_stats_disk_read += iou->io.bytes;
        }

        if (iou->io.waiting) {
                iou->io.waiting = 0;
                wakeup(iou);
        }

        /*
         * If B_LOCKED is set someone wanted to deallocate the bp at some
         * point, try to do it now.  The operation will fail if there are
         * refs or if hammer_io_deallocate() is unable to gain the
         * interlock.
         */
        if (bp->b_flags & B_LOCKED) {
                --hammer_count_io_locked;
                bp->b_flags &= ~B_LOCKED;
                hammer_io_deallocate(bp);
                /* structure may be dead now */
        }
}
/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.  The caller owns the bp.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 *
 * WARNING: This may be called from an interrupt via hammer_io_complete()
 */
static void
hammer_io_deallocate(struct buf *bp)
{
        hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

        KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
        if (hammer_try_interlock_norefs(&iou->io.lock) == 0) {
                /*
                 * We cannot safely disassociate a bp from a referenced
                 * or interlocked HAMMER structure.
                 */
                bp->b_flags |= B_LOCKED;
                ++hammer_count_io_locked;
        } else if (iou->io.modified) {
                /*
                 * It is not legal to disassociate a modified buffer.  This
                 * case really shouldn't ever occur.
                 */
                bp->b_flags |= B_LOCKED;
                ++hammer_count_io_locked;
                hammer_put_interlock(&iou->io.lock, 0);
        } else {
                /*
                 * Disassociate the BP.  If the io has no refs left we
                 * have to add it to the loose list.
                 */
                hammer_io_disassociate(iou);
                if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
                        KKASSERT(iou->io.bp == NULL);
                        KKASSERT(iou->io.mod_list == NULL);
                        crit_enter();   /* biodone race against list */
                        iou->io.mod_list = &iou->io.hmp->lose_list;
                        TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
                        crit_exit();
                }
                hammer_put_interlock(&iou->io.lock, 1);
        }
}
static int
hammer_io_fsync(struct vnode *vp)
{
        return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
        return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}
/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
        return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
        hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

        /*
         * This shouldn't happen under normal operation.
         */
        if (io->type == HAMMER_STRUCTURE_VOLUME ||
            io->type == HAMMER_STRUCTURE_META_BUFFER) {
                if (!panicstr)
                        panic("hammer_io_checkwrite: illegal buffer");
                if ((bp->b_flags & B_LOCKED) == 0) {
                        bp->b_flags |= B_LOCKED;
                        ++hammer_count_io_locked;
                }
                return(1);
        }

        /*
         * We can only clear the modified bit if the IO is not currently
         * undergoing modification.  Otherwise we may miss changes.
         *
         * Only data and undo buffers can reach here.  These buffers do
         * not have terminal crc functions but we temporarily reference
         * the IO anyway, just in case.
         */
        if (io->modify_refs == 0 && io->modified) {
                hammer_ref(&io->lock);
                hammer_io_clear_modify(io, 0);
                hammer_rel(&io->lock);
        } else if (io->modified) {
                KKASSERT(io->type == HAMMER_STRUCTURE_DATA_BUFFER);
        }

        /*
         * The kernel is going to start the IO, set io->running.
         */
        KKASSERT(io->running == 0);
        io->running = 1;
        io->hmp->io_running_space += io->bytes;
        TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry);
        hammer_count_io_running_write += io->bytes;
        return(0);
}
/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
        return(0);
}

struct bio_ops hammer_bioops = {
        .io_start       = hammer_io_start,
        .io_complete    = hammer_io_complete,
        .io_deallocate  = hammer_io_deallocate,
        .io_fsync       = hammer_io_fsync,
        .io_sync        = hammer_io_sync,
        .io_movedeps    = hammer_io_movedeps,
        .io_countdeps   = hammer_io_countdeps,
        .io_checkread   = hammer_io_checkread,
        .io_checkwrite  = hammer_io_checkwrite,
};
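
/*
 * Illustrative note (not part of the original file): the bioops table
 * above only takes effect for buffers that have been wired to it, which
 * this file does when it first acquires a buffer (see hammer_io_read()
 * and hammer_io_new()).  The wiring pattern is:
 */
#if 0
        bp->b_ops = &hammer_bioops;
        KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
        LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
#endif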
/************************************************************************
 *                              DIRECT IO OPS                           *
 ************************************************************************
 *
 * These functions operate directly on the buffer cache buffer associated
 * with a front-end vnode rather than a back-end device vnode.
 */
/*
 * Read a buffer associated with a front-end vnode directly from the
 * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
 * we validate the CRC.
 *
 * We must check for the presence of a HAMMER buffer to handle the case
 * where the reblocker has rewritten the data (which it does via the HAMMER
 * buffer system, not via the high-level vnode buffer cache), but not yet
 * committed the buffer to the media.
 */
int
hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
                      hammer_btree_leaf_elm_t leaf)
{
        hammer_off_t buf_offset;
        hammer_off_t zone2_offset;
        hammer_volume_t volume;
        struct buf *bp;
        struct bio *nbio;
        int vol_no;
        int error;

        buf_offset = bio->bio_offset;
        KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_LARGE_DATA);

        /*
         * The buffer cache may have an aliased buffer (the reblocker can
         * write them).  If it does we have to sync any dirty data before
         * we can build our direct-read.  This is a non-critical code path.
         */
        bp = bio->bio_buf;
        hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);

        /*
         * Resolve to a zone-2 offset.  The conversion just requires
         * munging the top 4 bits but we want to abstract it anyway
         * so the blockmap code can verify the zone assignment.
         */
        zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
        if (error)
                goto done;
        KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_RAW_BUFFER);

        /*
         * Resolve volume and raw-offset for 3rd level bio.  The
         * offset will be specific to the volume.
         */
        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, &error);
        if (error == 0 && zone2_offset >= volume->maxbuf_off)
                error = EIO;

        if (error == 0) {
                /*
                 * 3rd level bio
                 */
                nbio = push_bio(bio);
                nbio->bio_offset = volume->ondisk->vol_buf_beg +
                                   (zone2_offset & HAMMER_OFF_SHORT_MASK);
#if 0
                /*
                 * XXX disabled - our CRC check doesn't work if the OS
                 * does bogus_page replacement on the direct-read.
                 */
                if (leaf && hammer_verify_data) {
                        nbio->bio_done = hammer_io_direct_read_complete;
                        nbio->bio_caller_info1.uvalue32 = leaf->data_crc;
                }
#endif
                hammer_stats_disk_read += bp->b_bufsize;
                vn_strategy(volume->devvp, nbio);
        }
        hammer_rel_volume(volume, 0);
done:
        if (error) {
                kprintf("hammer_direct_read: failed @ %016llx\n",
                        (long long)zone2_offset);
                bp->b_error = error;
                bp->b_flags |= B_ERROR;
                biodone(bio);
        }
        return(error);
}
#if 0
/*
 * On completion of the BIO this callback must check the data CRC
 * and chain to the previous bio.
 */
static
void
hammer_io_direct_read_complete(struct bio *nbio)
{
        struct bio *obio;
        struct buf *bp;
        u_int32_t rec_crc = nbio->bio_caller_info1.uvalue32;

        bp = nbio->bio_buf;
        if (crc32(bp->b_data, bp->b_bufsize) != rec_crc) {
                kprintf("HAMMER: data_crc error @%016llx/%d\n",
                        nbio->bio_offset, bp->b_bufsize);
                if (hammer_debug_critical)
                        Debugger("data_crc on read");
                bp->b_flags |= B_ERROR;
                bp->b_error = EIO;
        }
        obio = pop_bio(nbio);
        biodone(obio);
}
#endif
/*
 * Write a buffer associated with a front-end vnode directly to the
 * disk media.  The bio may be issued asynchronously.
 *
 * The BIO is associated with the specified record and RECF_DIRECT_IO
 * is set.  The record is added to its object.
 */
int
hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
                       hammer_record_t record)
{
        hammer_btree_leaf_elm_t leaf = &record->leaf;
        hammer_off_t buf_offset;
        hammer_off_t zone2_offset;
        hammer_volume_t volume;
        hammer_buffer_t buffer;
        struct buf *bp;
        struct bio *nbio;
        char *ptr;
        int vol_no;
        int error;

        buf_offset = leaf->data_offset;

        KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
        KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);

        /*
         * Issue or execute the I/O.  The new memory record must replace
         * the old one before the I/O completes, otherwise a reacquisition
         * of the buffer will load the old media data instead of the new.
         */
        if ((buf_offset & HAMMER_BUFMASK) == 0 &&
            leaf->data_len >= HAMMER_BUFSIZE) {
                /*
                 * We are using the vnode's bio to write directly to the
                 * media, any hammer_buffer at the same zone-X offset will
                 * now have stale data.
                 */
                zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
                vol_no = HAMMER_VOL_DECODE(zone2_offset);
                volume = hammer_get_volume(hmp, vol_no, &error);

                if (error == 0 && zone2_offset >= volume->maxbuf_off)
                        error = EIO;
                if (error == 0) {
                        bp = bio->bio_buf;
                        KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
                        /*
                        hammer_del_buffers(hmp, buf_offset,
                                           zone2_offset, bp->b_bufsize);
                        */

                        /*
                         * Second level bio - cached zone2 offset.
                         *
                         * (We can put our bio_done function in either the
                         *  2nd or 3rd level).
                         */
                        nbio = push_bio(bio);
                        nbio->bio_offset = zone2_offset;
                        nbio->bio_done = hammer_io_direct_write_complete;
                        nbio->bio_caller_info1.ptr = record;
                        record->zone2_offset = zone2_offset;
                        record->flags |= HAMMER_RECF_DIRECT_IO |
                                         HAMMER_RECF_DIRECT_INVAL;

                        /*
                         * Third level bio - raw offset specific to the
                         * correct volume.
                         */
                        zone2_offset &= HAMMER_OFF_SHORT_MASK;
                        nbio = push_bio(nbio);
                        nbio->bio_offset = volume->ondisk->vol_buf_beg +
                                           zone2_offset;
                        hammer_stats_disk_write += bp->b_bufsize;
                        hammer_ip_replace_bulk(hmp, record);
                        vn_strategy(volume->devvp, nbio);
                        hammer_io_flush_mark(volume);
                }
                hammer_rel_volume(volume, 0);
        } else {
                /*
                 * Must fit in a standard HAMMER buffer.  In this case all
                 * consumers use the HAMMER buffer system and RECF_DIRECT_IO
                 * does not need to be set-up.
                 */
                KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) &
                          ~HAMMER_BUFMASK64) == 0);
                buffer = NULL;
                ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
                if (error == 0) {
                        bp = bio->bio_buf;
                        bp->b_flags |= B_AGE;
                        hammer_io_modify(&buffer->io, 1);
                        bcopy(bp->b_data, ptr, leaf->data_len);
                        hammer_io_modify_done(&buffer->io);
                        hammer_rel_buffer(buffer, 0);
                        bp->b_resid = 0;
                        hammer_ip_replace_bulk(hmp, record);
                        biodone(bio);
                }
        }
        if (error) {
                /*
                 * Major suckage occurred.  Also note:  The record was
                 * never added to the tree so we do not have to worry
                 * about the backend.
                 */
                kprintf("hammer_direct_write: failed @ %016llx\n",
                        (long long)leaf->data_offset);
                bp = bio->bio_buf;
                bp->b_resid = 0;
                bp->b_error = EIO;
                bp->b_flags |= B_ERROR;
                biodone(bio);
                record->flags |= HAMMER_RECF_DELETED_FE;
                hammer_rel_mem_record(record);
        }
        return(error);
}
/*
 * On completion of the BIO this callback must disconnect
 * it from the hammer_record and chain to the previous bio.
 *
 * An I/O error forces the mount to read-only.  Data buffers
 * are not B_LOCKED like meta-data buffers are, so we have to
 * throw the buffer away to prevent the kernel from retrying.
 */
static
void
hammer_io_direct_write_complete(struct bio *nbio)
{
        struct bio *obio;
        struct buf *bp;
        hammer_record_t record = nbio->bio_caller_info1.ptr;

        bp = nbio->bio_buf;
        obio = pop_bio(nbio);
        if (bp->b_flags & B_ERROR) {
                hammer_critical_error(record->ip->hmp, record->ip,
                                      bp->b_error,
                                      "while writing bulk data");
                bp->b_flags |= B_INVAL;
        }
        biodone(obio);

        KKASSERT(record != NULL);
        KKASSERT(record->flags & HAMMER_RECF_DIRECT_IO);
        if (record->flags & HAMMER_RECF_DIRECT_WAIT) {
                record->flags &= ~(HAMMER_RECF_DIRECT_IO |
                                   HAMMER_RECF_DIRECT_WAIT);
                /* record can disappear once DIRECT_IO flag is cleared */
                wakeup(&record->flags);
        } else {
                record->flags &= ~HAMMER_RECF_DIRECT_IO;
                /* record can disappear once DIRECT_IO flag is cleared */
        }
}
/*
 * This is called before a record is either committed to the B-Tree
 * or destroyed, to resolve any associated direct-IO.
 *
 * (1) We must wait for any direct-IO related to the record to complete.
 *
 * (2) We must remove any buffer cache aliases for data accessed via
 *     leaf->data_offset or zone2_offset so non-direct-IO consumers
 *     (the mirroring and reblocking code) do not see stale data.
 */
void
hammer_io_direct_wait(hammer_record_t record)
{
        /*
         * Wait for I/O to complete
         */
        if (record->flags & HAMMER_RECF_DIRECT_IO) {
                crit_enter();
                while (record->flags & HAMMER_RECF_DIRECT_IO) {
                        record->flags |= HAMMER_RECF_DIRECT_WAIT;
                        tsleep(&record->flags, 0, "hmdiow", 0);
                }
                crit_exit();
        }

        /*
         * Invalidate any related buffer cache aliases associated with the
         * backing device.  This is needed because the buffer cache buffer
         * for file data is associated with the file vnode, not the backing
         * device vnode.
         *
         * XXX I do not think this case can occur any more now that
         * reservations ensure that all such buffers are removed before
         * an area can be reused.
         */
        if (record->flags & HAMMER_RECF_DIRECT_INVAL) {
                KKASSERT(record->leaf.data_offset);
                hammer_del_buffers(record->ip->hmp, record->leaf.data_offset,
                                   record->zone2_offset, record->leaf.data_len,
                                   1);
                record->flags &= ~HAMMER_RECF_DIRECT_INVAL;
        }
}
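
/*
 * Illustrative sketch (not part of the original file): the ordering
 * contract hammer_io_direct_wait() enforces for the backend.  The
 * function below is hypothetical.
 */
#if 0
static void
example_commit_record(hammer_record_t record)
{
        /*
         * Resolve outstanding direct-IO and stale buffer aliases first;
         * only then is it safe to commit the record to the B-Tree.
         */
        hammer_io_direct_wait(record);
        /* ... B-Tree insertion of record->leaf goes here ... */
}
#endif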
/*
 * This is called to remove the second-level cached zone-2 offset from
 * frontend buffer cache buffers, now stale due to a data relocation.
 * These offsets are generated by cluster_read() via VOP_BMAP, or directly
 * by hammer_vop_strategy_read().
 *
 * This is rather nasty because here we have something like the reblocker
 * scanning the raw B-Tree with no held references on anything, really,
 * other than a shared lock on the B-Tree node, and we have to access the
 * frontend's buffer cache to check for and clean out the association.
 * Specifically, if the reblocker is moving data on the disk, these cached
 * offsets will become invalid.
 *
 * Only data record types associated with the large-data zone are subject
 * to direct-io and need to be checked.
 */
void
hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
{
        struct hammer_inode_info iinfo;
        int zone;

        if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
                return;
        zone = HAMMER_ZONE_DECODE(leaf->data_offset);
        if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
                return;
        iinfo.obj_id = leaf->base.obj_id;
        iinfo.obj_asof = 0;     /* unused */
        iinfo.obj_localization = leaf->base.localization &
                                 HAMMER_LOCALIZE_PSEUDOFS_MASK;
        iinfo.u.leaf = leaf;
        hammer_scan_inode_snapshots(hmp, &iinfo,
                                    hammer_io_direct_uncache_callback,
                                    leaf);
}
static int
hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
{
        hammer_inode_info_t iinfo = data;
        hammer_off_t data_offset;
        hammer_off_t file_offset;
        struct vnode *vp;
        struct buf *bp;
        int blksize;

        if (ip->vp == NULL)
                return(0);
        data_offset = iinfo->u.leaf->data_offset;
        file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
        blksize = iinfo->u.leaf->data_len;
        KKASSERT((blksize & HAMMER_BUFMASK) == 0);

        hammer_ref(&ip->lock);
        if (hammer_get_vnode(ip, &vp) == 0) {
                if ((bp = findblk(ip->vp, file_offset, FINDBLK_TEST)) != NULL &&
                    bp->b_bio2.bio_offset != NOOFFSET) {
                        bp = getblk(ip->vp, file_offset, blksize, 0, 0);
                        bp->b_bio2.bio_offset = NOOFFSET;
                        brelse(bp);
                }
                vput(vp);
        }
        hammer_rel_inode(ip, 0);
        return(0);
}
/*
 * This function is called when writes may have occurred on the volume,
 * indicating that the device may be holding cached writes.
 */
static void
hammer_io_flush_mark(hammer_volume_t volume)
{
        volume->vol_flags |= HAMMER_VOLF_NEEDFLUSH;
}
/*
 * This function ensures that the device has flushed any cached writes out.
 */
void
hammer_io_flush_sync(hammer_mount_t hmp)
{
        hammer_volume_t volume;
        struct buf *bp_base = NULL;
        struct buf *bp;

        RB_FOREACH(volume, hammer_vol_rb_tree, &hmp->rb_vols_root) {
                if (volume->vol_flags & HAMMER_VOLF_NEEDFLUSH) {
                        volume->vol_flags &= ~HAMMER_VOLF_NEEDFLUSH;
                        bp = getpbuf(NULL);
                        bp->b_bio1.bio_offset = 0;
                        bp->b_bufsize = 0;
                        bp->b_bcount = 0;
                        bp->b_cmd = BUF_CMD_FLUSH;
                        bp->b_bio1.bio_caller_info1.cluster_head = bp_base;
                        bp->b_bio1.bio_done = biodone_sync;
                        bp->b_bio1.bio_flags |= BIO_SYNC;
                        bp_base = bp;
                        vn_strategy(volume->devvp, &bp->b_bio1);
                }
        }
        while ((bp = bp_base) != NULL) {
                bp_base = bp->b_bio1.bio_caller_info1.cluster_head;
                biowait(&bp->b_bio1, "hmrFLS");
                relpbuf(bp, NULL);
        }
}
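
/*
 * Illustrative note (not part of the original file): how the
 * flush_mark / flush_sync pair is meant to be used.  Code that queues
 * device writes calls hammer_io_flush_mark() on the volume (as
 * hammer_io_flush() and hammer_io_direct_write() do above); a later
 * synchronization point calls hammer_io_flush_sync() once to push the
 * write cache of every marked volume.
 */
#if 0
        /* ... queue writes; hammer_io_flush() marks each volume ... */
        hammer_io_flush_sync(hmp);      /* BUF_CMD_FLUSH each marked volume */
#endif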