/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.55 2008/09/15 17:02:49 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to destroy a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 */
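
/*
 * Informal overview of the lifecycle implemented below (the individual
 * routines are authoritative):
 *
 *	hammer_io_read()/hammer_io_new()   acquire the bp and wire it to
 *					   the io via b_ops/b_dep
 *	hammer_modify_*()/..._done()	   dirty the buffer and generate
 *					   UNDO where required
 *	hammer_io_flush() / bioops	   write-back by HAMMER's flusher,
 *					   or by the kernel for DATA/UNDO
 *	hammer_io_release()		   passive release or full
 *					   disassociation from the bp
 */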
#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
#if 0
static void hammer_io_direct_read_complete(struct bio *nbio);
#endif
static void hammer_io_direct_write_complete(struct bio *nbio);
static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
static void hammer_io_set_modlist(struct hammer_io *io);
static void hammer_io_flush_mark(hammer_volume_t volume);
static void hammer_io_flush_sync_done(struct bio *bio);
/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_volume_t volume, enum hammer_io_type type)
{
	io->volume = volume;
	io->hmp = volume->io.hmp;
	io->type = type;
}
/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  The buffer is unlocked and marked appropriate for reclamation.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 *
 * The caller must own the buffer and the IO must indicate that the
 * structure no longer owns it (io.released != 0).
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(iou->io.released);
	KKASSERT(iou->io.modified == 0);
	KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
	buf_dep_init(bp);
	iou->io.bp = NULL;

	/*
	 * If the buffer was locked someone wanted to get rid of it.
	 */
	if (bp->b_flags & B_LOCKED) {
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
	}
	if (iou->io.reclaim) {
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		iou->io.reclaim = 0;
	}

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	}
}
/*
 * Wait for any physical IO to complete
 */
void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		crit_enter();
		tsleep_interlock(io);
		io->waiting = 1;
		for (;;) {
			tsleep(io, 0, "hmrflw", 0);
			if (io->running == 0)
				break;
			tsleep_interlock(io);
			io->waiting = 1;
			if (io->running == 0)
				break;
		}
		crit_exit();
	}
}
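
/*
 * Note: the interlocked loop above pairs with hammer_io_complete(),
 * which clears io->running and then wakeup()s the io when io->waiting
 * is set, so a completion cannot be lost between the running test and
 * the tsleep().
 */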
/*
 * Wait for all hammer_io-initiated write I/O's to complete.  This is not
 * supposed to count direct I/O's but some can leak through (for
 * non-full-sized direct I/Os).
 */
void
hammer_io_wait_all(hammer_mount_t hmp, const char *ident)
{
	hammer_io_flush_sync(hmp);
	crit_enter();
	while (hmp->io_running_space)
		tsleep(&hmp->io_running_space, 0, ident, 0);
	crit_exit();
}
#define HAMMER_MAXRA	4
/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * This routine is mostly used on meta-data and small-data blocks.  Generally
 * speaking HAMMER assumes some locality of reference and will cluster
 * a 64K read.
 *
 * Note that clustering occurs at the device layer, not the logical layer.
 * If the buffers do not apply to the current operation they may apply to
 * some other.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
{
	struct buf *bp;
	int error;

	if ((bp = io->bp) == NULL) {
		hammer_count_io_running_read += io->bytes;
		if (hammer_cluster_enable) {
			error = cluster_read(devvp, limit,
					     io->offset, io->bytes,
					     HAMMER_CLUSTER_SIZE,
					     HAMMER_CLUSTER_BUFS, &io->bp);
		} else {
			error = bread(devvp, io->offset, io->bytes, &io->bp);
		}
		hammer_stats_disk_read += io->bytes;
		hammer_count_io_running_read -= io->bytes;

		/*
		 * The code generally assumes b_ops/b_dep has been set up,
		 * even if we error out here.
		 */
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		BUF_KERNPROC(bp);
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}
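
/*
 * Illustrative sketch only (not part of this file): a typical consumer,
 * e.g. the buffer load path in hammer_ondisk.c, calls hammer_io_read()
 * with the io exclusively locked and then maps the ondisk data.  The
 * function name below is hypothetical.
 */
#if 0
static int
example_load_buffer(hammer_volume_t volume, hammer_buffer_t buffer)
{
	int error;

	error = hammer_io_read(volume->devvp, &buffer->io,
			       volume->maxbuf_off);
	if (error == 0)
		buffer->ondisk = (void *)buffer->io.bp->b_data;
	return(error);
}
#endif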
/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;
		KKASSERT(io->running == 0);
		io->waiting = 0;
		BUF_KERNPROC(bp);
	} else {
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
	}
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}
/*
 * Remove potential device level aliases against buffers managed by high level
 * vnodes.  Aliases can also be created due to mixed buffer sizes or via
 * direct access to the backing store device.
 *
 * This is nasty because the buffers are also VMIO-backed.  Even if a buffer
 * does not exist its backing VM pages might, and we have to invalidate
 * those as well or a getblk() will reinstate them.
 *
 * Buffer cache buffers associated with hammer_buffers cannot be
 * invalidated.
 */
int
hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
{
	hammer_io_structure_t iou;
	hammer_off_t phys_offset;
	struct buf *bp;
	int error;

	phys_offset = volume->ondisk->vol_buf_beg +
		      (zone2_offset & HAMMER_OFF_SHORT_MASK);
	crit_enter();
	if ((bp = findblk(volume->devvp, phys_offset)) != NULL)
		bp = getblk(volume->devvp, phys_offset, bp->b_bufsize, 0, 0);
	else
		bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);
	if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
#if 0
		hammer_ref(&iou->io.lock);
		hammer_io_clear_modify(&iou->io, 1);
		bundirty(bp);
		iou->io.released = 0;
		BUF_KERNPROC(bp);
		iou->io.reclaim = 1;
		iou->io.waitdep = 1;
		KKASSERT(iou->io.lock.refs == 1);
		hammer_rel_buffer(&iou->buffer, 0);
		/*hammer_io_deallocate(bp);*/
#endif
		bqrelse(bp);
		error = EAGAIN;
	} else {
		KKASSERT((bp->b_flags & B_LOCKED) == 0);
		bundirty(bp);
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		brelse(bp);
		error = 0;
	}
	crit_exit();
	return(error);
}
/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually interlocked with io.loading and io.refs must be 1.
 *
 * This routine may return a non-NULL bp to the caller for disposal.
 * Disposal simply means the caller finishes decrementing the ref-count on
 * the IO structure then brelse()'s the bp.  The bp may or may not still be
 * passively associated with the IO.
 *
 * The only requirement here is that modified meta-data and volume-header
 * buffers may NOT be disassociated from the IO structure, and consequently
 * we also leave such buffers actively associated with the IO if they already
 * are (since the kernel can't do anything with them anyway).  Only the
 * flusher is allowed to write such buffers out.  Modified pure-data and
 * undo buffers are returned to the kernel but left passively associated
 * so we can track when the kernel writes the bp out.
 */
struct buf *
hammer_io_release(struct hammer_io *io, int flush)
{
	union hammer_io_structure *iou = (void *)io;
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return(NULL);

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (flush) {
			hammer_io_flush(io);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io);
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.  This occurs when
	 * the buffer must be disposed of definitively during an umount
	 * or buffer invalidation.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regards to
	 * the buffer).
	 */
	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
		} else {
			io->released = 1;
		}
		hammer_io_disassociate((hammer_io_structure_t)io);
		/* return the bp */
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel if
		 * the buffer has been modified.
		 *
		 * volume and meta-data IO types may only be explicitly
		 * flushed by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
		bp = NULL;	/* bp left associated */
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 *
		 * We can steal the structure's ownership of the bp.
		 */
		io->released = 1;
		if (bp->b_flags & B_LOCKED) {
			hammer_io_disassociate(iou);
			/* return the bp */
		} else {
			if (io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		}
	} else {
		/*
		 * A released buffer is passively associated with our
		 * hammer_io structure.  The kernel cannot destroy it
		 * without making a bioops call.  If the kernel (B_LOCKED)
		 * or we (reclaim) requested that the buffer be destroyed
		 * we destroy it, otherwise we do a quick get/release to
		 * reset its position in the kernel's LRU list.
		 *
		 * Leaving the buffer passively associated allows us to
		 * use the kernel's LRU buffer flushing mechanisms rather
		 * than rolling our own.
		 *
		 * XXX there are two ways of doing this.  We can re-acquire
		 * and passively release to reset the LRU, or not.
		 */
		if (io->running == 0) {
			regetblk(bp);
			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		} else {
			/*
			 * bp is left passively associated but we do not
			 * try to reacquire it.  Interactions with the io
			 * structure will occur on completion of the bp's
			 * I/O.
			 */
			bp = NULL;
		}
	}
	return(bp);
}
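
/*
 * Informal disposition summary for hammer_io_release() above (the code
 * is authoritative):
 *
 *	flush/reclaim + clean + idle	disassociate, return the bp
 *	modified DATA/UNDO		bdwrite(), left passively
 *					associated, return NULL
 *	modified VOLUME/META		left dirty for the flusher,
 *					return NULL
 *	clean, not yet released		passive release; disassociate and
 *					return the bp on B_LOCKED/reclaim
 *	already released + idle		LRU refresh, or disassociate and
 *					return the bp on B_LOCKED/reclaim
 *	already released + running	leave alone, return NULL
 */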
/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io)
{
	struct buf *bp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0) {
		return;
	}

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs <= 0);

	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set, we actually own the bp rather than our
	 * buffer.
	 */
	bp = io->bp;
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		/* io->released = 0; */
		KKASSERT(io->released);
		KKASSERT(io->bp == bp);
	}
	io->released = 1;

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * NOTE: This call also finalizes the buffer's content (inval == 0).
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	hammer_ref(&io->lock);
	hammer_io_clear_modify(io, 0);
	hammer_unref(&io->lock);

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 */
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
	bawrite(bp);
	hammer_io_flush_mark(io->volume);
}
/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */
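
/*
 * Illustrative sketch only: the canonical dirtying sequence.  The
 * function and its arguments below are hypothetical; real callers live
 * in hammer_btree.c and friends.  The bracket generates an UNDO record
 * covering the bytes about to change before the edit is made.
 */
#if 0
static void
example_modify_elm(hammer_transaction_t trans, hammer_buffer_t buffer,
		   hammer_btree_elm_t elm)
{
	hammer_modify_buffer(trans, buffer, elm, sizeof(*elm));
	elm->base.key = 0;		/* the actual on-disk edit */
	hammer_modify_buffer_done(buffer);
}
#endif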
/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure data buffers
 * can be written out.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
	/*
	 * io->modify_refs must be >= 0
	 */
	while (io->modify_refs < 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}

	/*
	 * Shortcut if nothing to do.
	 */
	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	hammer_lock_ex(&io->lock);
	if (io->modified == 0) {
		hammer_io_set_modlist(io);
		io->modified = 1;
	}
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
		KKASSERT(io->modified != 0);
	}
	hammer_unlock(&io->lock);
}
static __inline
void
hammer_io_modify_done(hammer_io_t io)
{
	KKASSERT(io->modify_refs > 0);
	--io->modify_refs;
	if (io->modify_refs == 0 && io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}
void
hammer_io_write_interlock(hammer_io_t io)
{
	while (io->modify_refs != 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}
	io->modify_refs = -1;
}
void
hammer_io_done_interlock(hammer_io_t io)
{
	KKASSERT(io->modify_refs == -1);
	io->modify_refs = 0;
	if (io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}
/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&volume->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &volume->io,
			HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			base, len);
	}
}
/*
 * Caller intends to modify a buffer's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&buffer->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &buffer->io,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}
void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}
/*
 * Mark an entity as not being dirty any more and finalize any
 * delayed adjustments to the buffer.
 *
 * Delayed adjustments are an important performance enhancement, allowing
 * us to avoid recalculating B-Tree node CRCs over and over again when
 * making bulk-modifications to the B-Tree.
 *
 * If inval is non-zero delayed adjustments are ignored.
 *
 * This routine may dereference related btree nodes and cause the
 * buffer to be dereferenced.  The caller must own a reference on io.
 */
void
hammer_io_clear_modify(struct hammer_io *io, int inval)
{
	if (io->modified == 0)
		return;

	/*
	 * Take us off the mod-list and clear the modified bit.
	 */
	KKASSERT(io->mod_list != NULL);
	if (io->mod_list == &io->hmp->volu_list ||
	    io->mod_list == &io->hmp->meta_list) {
		io->hmp->locked_dirty_space -= io->bytes;
		hammer_count_dirtybufspace -= io->bytes;
	}
	TAILQ_REMOVE(io->mod_list, io, mod_entry);
	io->mod_list = NULL;
	io->modified = 0;

	/*
	 * If this bit is not set there are no delayed adjustments.
	 */
	if (io->gencrc == 0)
		return;
	io->gencrc = 0;

	/*
	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
	 * on the node (& underlying buffer).  Release the node after clearing
	 * the flag.
	 */
	if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
		hammer_buffer_t buffer = (void *)io;
		hammer_node_t node;

restart:
		TAILQ_FOREACH(node, &buffer->clist, entry) {
			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
				continue;
			node->flags &= ~HAMMER_NODE_NEEDSCRC;
			KKASSERT(node->ondisk);
			if (inval == 0)
				node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
			hammer_rel_node(node);
			goto restart;
		}
	}
	/* caller must still have ref on io */
	KKASSERT(io->lock.refs > 0);
}
/*
 * Clear the IO's modify list.  Even though the IO is no longer modified
 * it may still be on the lose_list.  This routine is called just before
 * the governing hammer_buffer is destroyed.
 */
void
hammer_io_clear_modlist(struct hammer_io *io)
{
	KKASSERT(io->modified == 0);
	if (io->mod_list) {
		crit_enter();	/* biodone race against list */
		KKASSERT(io->mod_list == &io->hmp->lose_list);
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		crit_exit();
	}
}
static void
hammer_io_set_modlist(struct hammer_io *io)
{
	struct hammer_mount *hmp = io->hmp;

	KKASSERT(io->mod_list == NULL);

	switch(io->type) {
	case HAMMER_STRUCTURE_VOLUME:
		io->mod_list = &hmp->volu_list;
		hmp->locked_dirty_space += io->bytes;
		hammer_count_dirtybufspace += io->bytes;
		break;
	case HAMMER_STRUCTURE_META_BUFFER:
		io->mod_list = &hmp->meta_list;
		hmp->locked_dirty_space += io->bytes;
		hammer_count_dirtybufspace += io->bytes;
		break;
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		io->mod_list = &hmp->undo_list;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
		io->mod_list = &hmp->data_list;
		break;
	}
	TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
}
/************************************************************************
 *				HAMMER_BIOOPS				*
 ************************************************************************
 *
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}
/*
 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT(iou->io.released == 1);

	/*
	 * Deal with people waiting for I/O to drain
	 */
	if (iou->io.running) {
		/*
		 * Deal with critical write errors.  Once a critical error
		 * has been flagged in hmp the UNDO FIFO will not be updated.
		 * That way crash recovery will give us a consistent
		 * filesystem.
		 *
		 * Because of this we can throw away failed UNDO buffers.  If
		 * we throw away META or DATA buffers we risk corrupting
		 * the now read-only version of the filesystem visible to
		 * the user.  Clear B_ERROR so the buffer is not re-dirtied
		 * by the kernel and ref the io so it doesn't get thrown
		 * away.
		 */
		if (bp->b_flags & B_ERROR) {
			hammer_critical_error(iou->io.hmp, NULL, bp->b_error,
					      "while flushing meta-data");
			switch(iou->io.type) {
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				break;
			default:
				if (iou->io.ioerror == 0) {
					iou->io.ioerror = 1;
					if (iou->io.lock.refs == 0)
						++hammer_count_refedbufs;
					hammer_ref(&iou->io.lock);
				}
				break;
			}
			bp->b_flags &= ~B_ERROR;
			bundirty(bp);
#if 0
			hammer_io_set_modlist(&iou->io);
			iou->io.modified = 1;
#endif
		}
		hammer_stats_disk_write += iou->io.bytes;
		hammer_count_io_running_write -= iou->io.bytes;
		iou->io.hmp->io_running_space -= iou->io.bytes;
		if (iou->io.hmp->io_running_space == 0)
			wakeup(&iou->io.hmp->io_running_space);
		KKASSERT(iou->io.hmp->io_running_space >= 0);
		iou->io.running = 0;
	} else {
		hammer_stats_disk_read += iou->io.bytes;
	}

	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * If B_LOCKED is set someone wanted to deallocate the bp at some
	 * point, do it now if refs has become zero.
	 */
	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
		KKASSERT(iou->io.modified == 0);
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
}
/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.  The caller owns the bp.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 *
 * WARNING: This may be called from an interrupt via hammer_io_complete()
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (iou->io.lock.refs > 0 || iou->io.modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.
		 */
		bp->b_flags |= B_LOCKED;
		++hammer_count_io_locked;
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list.
		 */
		hammer_io_disassociate(iou);
		if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
			KKASSERT(iou->io.bp == NULL);
			KKASSERT(iou->io.mod_list == NULL);
			crit_enter();	/* biodone race against list */
			iou->io.mod_list = &iou->io.hmp->lose_list;
			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
			crit_exit();
		}
	}
}
static int
hammer_io_fsync(struct vnode *vp)
{
	return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
	return(0);
}
static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}
/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
	return(0);
}
static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

	/*
	 * This shouldn't happen under normal operation.
	 */
	if (io->type == HAMMER_STRUCTURE_VOLUME ||
	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
		if (!panicstr)
			panic("hammer_io_checkwrite: illegal buffer");
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			++hammer_count_io_locked;
		}
		return(1);
	}

	/*
	 * We can only clear the modified bit if the IO is not currently
	 * undergoing modification.  Otherwise we may miss changes.
	 *
	 * Only data and undo buffers can reach here.  These buffers do
	 * not have terminal crc functions but we temporarily reference
	 * the IO anyway, just in case.
	 */
	if (io->modify_refs == 0 && io->modified) {
		hammer_ref(&io->lock);
		hammer_io_clear_modify(io, 0);
		hammer_unref(&io->lock);
	} else if (io->modified) {
		KKASSERT(io->type == HAMMER_STRUCTURE_DATA_BUFFER);
	}

	/*
	 * The kernel is going to start the IO, set io->running.
	 */
	KKASSERT(io->running == 0);
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
	return(0);
}
/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	return(0);
}
struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};
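
/*
 * Buffers are wired to these bioops when first acquired: hammer_io_read()
 * and hammer_io_new() set bp->b_ops = &hammer_bioops and hook the io's
 * worklist onto bp->b_dep, after which the kernel invokes the callbacks
 * above on I/O initiation/completion and on buffer reclamation.
 */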
/************************************************************************
 *				DIRECT IO OPS				*
 ************************************************************************
 *
 * These functions operate directly on the buffer cache buffer associated
 * with a front-end vnode rather than a back-end device vnode.
 */
/*
 * Read a buffer associated with a front-end vnode directly from the
 * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
 * we validate the CRC.
 *
 * We must check for the presence of a HAMMER buffer to handle the case
 * where the reblocker has rewritten the data (which it does via the HAMMER
 * buffer system, not via the high-level vnode buffer cache), but not yet
 * committed the buffer to the media.
 */
int
hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
		      hammer_btree_leaf_elm_t leaf)
{
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	struct buf *bp;
	struct bio *nbio;
	int vol_no;
	int error;

	buf_offset = bio->bio_offset;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	/*
	 * The buffer cache may have an aliased buffer (the reblocker can
	 * write them).  If it does we have to sync any dirty data before
	 * we can build our direct-read.  This is a non-critical code path.
	 */
	bp = bio->bio_buf;
	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);

	/*
	 * Resolve to a zone-2 offset.  The conversion just requires
	 * munging the top 4 bits but we want to abstract it anyway
	 * so the blockmap code can verify the zone assignment.
	 */
	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
	if (error)
		goto done;
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);

	/*
	 * Resolve volume and raw-offset for 3rd level bio.  The
	 * offset will be specific to the volume.
	 */
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	if (error == 0 && zone2_offset >= volume->maxbuf_off)
		error = EIO;

	if (error == 0) {
		/*
		 * 3rd level bio
		 */
		nbio = push_bio(bio);
		nbio->bio_offset = volume->ondisk->vol_buf_beg +
				   (zone2_offset & HAMMER_OFF_SHORT_MASK);
#if 0
		/*
		 * XXX disabled - our CRC check doesn't work if the OS
		 * does bogus_page replacement on the direct-read.
		 */
		if (leaf && hammer_verify_data) {
			nbio->bio_done = hammer_io_direct_read_complete;
			nbio->bio_caller_info1.uvalue32 = leaf->data_crc;
		}
#endif
		hammer_stats_disk_read += bp->b_bufsize;
		vn_strategy(volume->devvp, nbio);
	}
	hammer_rel_volume(volume, 0);
done:
	if (error) {
		kprintf("hammer_direct_read: failed @ %016llx\n",
			(long long)zone2_offset);
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}
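
/*
 * Informal picture of the bio stack built above: the caller's bio holds
 * the zone-X (large-data) logical offset, and push_bio() adds the 3rd
 * level bio holding the raw volume-relative offset, which is what is
 * dispatched to the device via vn_strategy().
 */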
#if 0
/*
 * On completion of the BIO this callback must check the data CRC
 * and chain to the previous bio.
 */
static
void
hammer_io_direct_read_complete(struct bio *nbio)
{
	struct bio *obio;
	struct buf *bp;
	u_int32_t rec_crc = nbio->bio_caller_info1.uvalue32;

	bp = nbio->bio_buf;
	if (crc32(bp->b_data, bp->b_bufsize) != rec_crc) {
		kprintf("HAMMER: data_crc error @%016llx/%d\n",
			nbio->bio_offset, bp->b_bufsize);
		if (hammer_debug_debug)
			Debugger("");
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
	}
	obio = pop_bio(nbio);
	biodone(obio);
}
#endif
/*
 * Write a buffer associated with a front-end vnode directly to the
 * disk media.  The bio may be issued asynchronously.
 *
 * The BIO is associated with the specified record and RECF_DIRECT_IO
 * is set.  The record is added to its object.
 */
int
hammer_io_direct_write(hammer_mount_t hmp, hammer_record_t record,
		       struct bio *bio)
{
	hammer_btree_leaf_elm_t leaf = &record->leaf;
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	hammer_buffer_t buffer;
	struct buf *bp;
	struct bio *nbio;
	char *ptr;
	int vol_no;
	int error;

	buf_offset = leaf->data_offset;

	KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);

	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
	    leaf->data_len >= HAMMER_BUFSIZE) {
		/*
		 * We are using the vnode's bio to write directly to the
		 * media, any hammer_buffer at the same zone-X offset will
		 * now have stale data.
		 */
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
		vol_no = HAMMER_VOL_DECODE(zone2_offset);
		volume = hammer_get_volume(hmp, vol_no, &error);

		if (error == 0 && zone2_offset >= volume->maxbuf_off)
			error = EIO;
		if (error == 0) {
			bp = bio->bio_buf;
			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
			/*
			hammer_del_buffers(hmp, buf_offset,
					   zone2_offset, bp->b_bufsize);
			*/

			/*
			 * Second level bio - cached zone2 offset.
			 *
			 * (We can put our bio_done function in either the
			 *  2nd or 3rd level).
			 */
			nbio = push_bio(bio);
			nbio->bio_offset = zone2_offset;
			nbio->bio_done = hammer_io_direct_write_complete;
			nbio->bio_caller_info1.ptr = record;
			record->zone2_offset = zone2_offset;
			record->flags |= HAMMER_RECF_DIRECT_IO |
					 HAMMER_RECF_DIRECT_INVAL;

			/*
			 * Third level bio - raw offset specific to the
			 * correct volume.
			 */
			zone2_offset &= HAMMER_OFF_SHORT_MASK;
			nbio = push_bio(nbio);
			nbio->bio_offset = volume->ondisk->vol_buf_beg +
					   zone2_offset;
			hammer_stats_disk_write += bp->b_bufsize;
			vn_strategy(volume->devvp, nbio);
			hammer_io_flush_mark(volume);
		}
		hammer_rel_volume(volume, 0);
	} else {
		/*
		 * Must fit in a standard HAMMER buffer.  In this case all
		 * consumers use the HAMMER buffer system and RECF_DIRECT_IO
		 * does not need to be set-up.
		 */
		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) & ~HAMMER_BUFMASK64) == 0);
		buffer = NULL;
		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
		if (error == 0) {
			bp = bio->bio_buf;
			bp->b_flags |= B_AGE;
			hammer_io_modify(&buffer->io, 1);
			bcopy(bp->b_data, ptr, leaf->data_len);
			hammer_io_modify_done(&buffer->io);
			hammer_rel_buffer(buffer, 0);
			bp->b_resid = 0;
			biodone(bio);
		}
	}
	if (error == 0) {
		/*
		 * The record is all set up now, add it.  Potential conflicts
		 * have already been dealt with.
		 */
		error = hammer_mem_add(record);
		KKASSERT(error == 0);
	} else {
		/*
		 * Major suckage occurred.  Also note:  The record was never
		 * added to the tree so we do not have to worry about the
		 * backend.
		 */
		kprintf("hammer_direct_write: failed @ %016llx\n",
			(long long)leaf->data_offset);
		bp = bio->bio_buf;
		bp->b_resid = 0;
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
		record->flags |= HAMMER_RECF_DELETED_FE;
		hammer_rel_mem_record(record);
	}
	return(error);
}
/*
 * On completion of the BIO this callback must disconnect
 * it from the hammer_record and chain to the previous bio.
 *
 * An I/O error forces the mount to read-only.  Data buffers
 * are not B_LOCKED like meta-data buffers are, so we have to
 * throw the buffer away to prevent the kernel from retrying.
 */
static
void
hammer_io_direct_write_complete(struct bio *nbio)
{
	struct bio *obio;
	struct buf *bp;
	hammer_record_t record = nbio->bio_caller_info1.ptr;

	bp = nbio->bio_buf;
	obio = pop_bio(nbio);
	if (bp->b_flags & B_ERROR) {
		hammer_critical_error(record->ip->hmp, record->ip,
				      bp->b_error,
				      "while writing bulk data");
		bp->b_flags |= B_INVAL;
	}
	biodone(obio);

	KKASSERT(record != NULL);
	KKASSERT(record->flags & HAMMER_RECF_DIRECT_IO);
	record->flags &= ~HAMMER_RECF_DIRECT_IO;
	if (record->flags & HAMMER_RECF_DIRECT_WAIT) {
		record->flags &= ~HAMMER_RECF_DIRECT_WAIT;
		wakeup(&record->flags);
	}
}
/*
 * This is called before a record is either committed to the B-Tree
 * or destroyed, to resolve any associated direct-IO.
 *
 * (1) We must wait for any direct-IO related to the record to complete.
 *
 * (2) We must remove any buffer cache aliases for data accessed via
 *     leaf->data_offset or zone2_offset so non-direct-IO consumers
 *     (the mirroring and reblocking code) do not see stale data.
 */
void
hammer_io_direct_wait(hammer_record_t record)
{
	/*
	 * Wait for I/O to complete
	 */
	if (record->flags & HAMMER_RECF_DIRECT_IO) {
		crit_enter();
		while (record->flags & HAMMER_RECF_DIRECT_IO) {
			record->flags |= HAMMER_RECF_DIRECT_WAIT;
			tsleep(&record->flags, 0, "hmdiow", 0);
		}
		crit_exit();
	}

	/*
	 * Invalidate any related buffer cache aliases associated with the
	 * backing device.  This is needed because the buffer cache buffer
	 * for file data is associated with the file vnode, not the backing
	 * device vnode.
	 *
	 * XXX I do not think this case can occur any more now that
	 * reservations ensure that all such buffers are removed before
	 * an area can be reused.
	 */
	if (record->flags & HAMMER_RECF_DIRECT_INVAL) {
		KKASSERT(record->leaf.data_offset);
		hammer_del_buffers(record->ip->hmp, record->leaf.data_offset,
				   record->zone2_offset, record->leaf.data_len,
				   1);
		record->flags &= ~HAMMER_RECF_DIRECT_INVAL;
	}
}
/*
 * This is called to remove the second-level cached zone-2 offset from
 * frontend buffer cache buffers, now stale due to a data relocation.
 * These offsets are generated by cluster_read() via VOP_BMAP, or directly
 * by hammer_vop_strategy_read().
 *
 * This is rather nasty because here we have something like the reblocker
 * scanning the raw B-Tree with no held references on anything, really,
 * other than a shared lock on the B-Tree node, and we have to access the
 * frontend's buffer cache to check for and clean out the association.
 * Specifically, if the reblocker is moving data on the disk, these cached
 * offsets will become invalid.
 *
 * Only data record types associated with the large-data zone are subject
 * to direct-io and need to be checked.
 */
void
hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
{
	struct hammer_inode_info iinfo;
	int zone;

	if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
		return;
	zone = HAMMER_ZONE_DECODE(leaf->data_offset);
	if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
		return;
	iinfo.obj_id = leaf->base.obj_id;
	iinfo.obj_asof = 0;	/* unused */
	iinfo.obj_localization = leaf->base.localization &
				 HAMMER_LOCALIZE_PSEUDOFS_MASK;
	iinfo.u.leaf = leaf;
	hammer_scan_inode_snapshots(hmp, &iinfo,
				    hammer_io_direct_uncache_callback,
				    leaf);
}

static int
hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t iinfo = data;
	hammer_off_t data_offset;
	hammer_off_t file_offset;
	struct vnode *vp;
	struct buf *bp;
	int blksize;

	if (ip->vp == NULL)
		return(0);
	data_offset = iinfo->u.leaf->data_offset;
	file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
	blksize = iinfo->u.leaf->data_len;
	KKASSERT((blksize & HAMMER_BUFMASK) == 0);

	hammer_ref(&ip->lock);
	if (hammer_get_vnode(ip, &vp) == 0) {
		if ((bp = findblk(ip->vp, file_offset)) != NULL &&
		    bp->b_bio2.bio_offset != NOOFFSET) {
			bp = getblk(ip->vp, file_offset, blksize, 0, 0);
			bp->b_bio2.bio_offset = NOOFFSET;
			brelse(bp);
		}
		vput(vp);
	}
	hammer_rel_inode(ip, 0);
	return(0);
}
/*
 * This function is called when writes may have occurred on the volume,
 * indicating that the device may be holding cached writes.
 */
static void
hammer_io_flush_mark(hammer_volume_t volume)
{
	volume->vol_flags |= HAMMER_VOLF_NEEDFLUSH;
}
/*
 * This function ensures that the device has flushed any cached writes out.
 */
void
hammer_io_flush_sync(hammer_mount_t hmp)
{
	hammer_volume_t volume;
	struct buf *bp_base = NULL;
	struct buf *bp;

	RB_FOREACH(volume, hammer_vol_rb_tree, &hmp->rb_vols_root) {
		if (volume->vol_flags & HAMMER_VOLF_NEEDFLUSH) {
			volume->vol_flags &= ~HAMMER_VOLF_NEEDFLUSH;
			bp = getpbuf(NULL);
			bp->b_bio1.bio_offset = 0;
			bp->b_bufsize = 0;
			bp->b_bcount = 0;
			bp->b_cmd = BUF_CMD_FLUSH;
			bp->b_bio1.bio_caller_info1.cluster_head = bp_base;
			bp->b_bio1.bio_done = hammer_io_flush_sync_done;
			bp->b_flags |= B_ASYNC;
			bp_base = bp;
			vn_strategy(volume->devvp, &bp->b_bio1);
		}
	}
	while ((bp = bp_base) != NULL) {
		bp_base = bp->b_bio1.bio_caller_info1.cluster_head;
		while (bp->b_cmd != BUF_CMD_DONE) {
			crit_enter();
			tsleep_interlock(&bp->b_cmd);
			if (bp->b_cmd != BUF_CMD_DONE)
				tsleep(&bp->b_cmd, 0, "hmrFLS", 0);
			crit_exit();
		}
		bp->b_flags &= ~B_ASYNC;
		relpbuf(bp, NULL);
	}
}
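
/*
 * Note on the loops above: one asynchronous BUF_CMD_FLUSH pbuf is issued
 * per volume needing a flush, chained through bio_caller_info1.cluster_head,
 * and each is then reaped once hammer_io_flush_sync_done() marks it
 * BUF_CMD_DONE.
 */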
/*
 * Callback to deal with completed flush commands to the device.
 */
static void
hammer_io_flush_sync_done(struct bio *bio)
{
	struct buf *bp;

	bp = bio->bio_buf;
	bp->b_cmd = BUF_CMD_DONE;
	wakeup(&bp->b_cmd);
}