/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.49.2.2 2008/07/18 00:21:09 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to destroy a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 */
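
/*
 * Illustrative lifecycle sketch (hypothetical caller, names assumed; a
 * sketch for orientation only).  A structure's backing bp typically moves
 * through read -> modify -> flush -> release:
 *
 *	error = hammer_io_read(volume->devvp, &buffer->io, limit);
 *	hammer_modify_buffer(trans, buffer, base, len);
 *	... change buffer->ondisk ...
 *	hammer_modify_buffer_done(buffer);
 *	hammer_io_flush(&buffer->io);		(flusher only)
 *	bp = hammer_io_release(&buffer->io, 0);
 *	if (bp)
 *		brelse(bp);
 */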
#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>
static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
#if 0
static void hammer_io_direct_read_complete(struct bio *nbio);
#endif
static void hammer_io_direct_write_complete(struct bio *nbio);
static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
static void hammer_io_set_modlist(struct hammer_io *io);
/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_mount_t hmp, enum hammer_io_type type)
{
	io->hmp = hmp;
	io->type = type;
}
/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  The buffer is unlocked and marked appropriate for reclamation.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 *
 * The caller must own the buffer and the IO must indicate that the
 * structure no longer owns it (io.released != 0).
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(iou->io.released);
	KKASSERT(iou->io.modified == 0);
	KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
	buf_dep_init(bp);
	iou->io.bp = NULL;

	/*
	 * If the buffer was locked someone wanted to get rid of it.
	 */
	if (bp->b_flags & B_LOCKED) {
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
	}
	if (iou->io.reclaim) {
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		iou->io.reclaim = 0;
	}

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	}
}
/*
 * Wait for any physical IO to complete
 */
void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		crit_enter();
		tsleep_interlock(io);
		io->waiting = 1;
		for (;;) {
			tsleep(io, 0, "hmrflw", 0);
			if (io->running == 0)
				break;
			tsleep_interlock(io);
			io->waiting = 1;
			if (io->running == 0)
				break;
		}
		crit_exit();
	}
}
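
/*
 * Note on the loop above: tsleep_interlock() is re-armed before every
 * re-test of io->running so a biodone()-driven wakeup cannot slip in
 * between the test and the tsleep().  The crit_enter()/crit_exit()
 * bracket closes the same race against interrupt-time completion.
 */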
/*
 * Wait for all hammer_io-initiated write I/O's to complete.  This is not
 * supposed to count direct I/O's but some can leak through (for
 * non-full-sized direct I/Os).
 */
void
hammer_io_wait_all(hammer_mount_t hmp, const char *ident)
{
	crit_enter();
	while (hmp->io_running_space)
		tsleep(&hmp->io_running_space, 0, ident, 0);
	crit_exit();
}
#define HAMMER_MAXRA	4
/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * This routine is mostly used on meta-data and small-data blocks.  Generally
 * speaking HAMMER assumes some locality of reference and will cluster
 * a 64K read.
 *
 * Note that clustering occurs at the device layer, not the logical layer.
 * If the buffers do not apply to the current operation they may apply to
 * some other.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
{
	struct buf *bp;
	int error;

	if ((bp = io->bp) == NULL) {
		hammer_count_io_running_read += io->bytes;
		if (hammer_cluster_enable) {
			error = cluster_read(devvp, limit,
					     io->offset, io->bytes,
					     HAMMER_CLUSTER_SIZE,
					     HAMMER_CLUSTER_BUFS, &io->bp);
		} else {
			error = bread(devvp, io->offset, io->bytes, &io->bp);
		}
		hammer_stats_disk_read += io->bytes;
		hammer_count_io_running_read -= io->bytes;

		/*
		 * The code generally assumes b_ops/b_dep has been set-up,
		 * even if we error out here.
		 */
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		BUF_KERNPROC(bp);
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}
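
/*
 * Illustrative call (hypothetical locals, sketch only): the buffer code
 * resolves a zone-2 offset to a volume-relative device offset and then
 * loads the bp through this routine:
 *
 *	buffer->io.offset = volume->ondisk->vol_buf_beg +
 *			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
 *	error = hammer_io_read(volume->devvp, &buffer->io,
 *			       volume->maxbuf_off);
 */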
/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;
		KKASSERT(io->running == 0);
		io->waiting = 0;
		BUF_KERNPROC(bp);
	} else {
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
	}
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}
/*
 * Remove potential device level aliases against buffers managed by high level
 * vnodes.
 */
void
hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
{
	hammer_io_structure_t iou;
	hammer_off_t phys_offset;
	struct buf *bp;

	phys_offset = volume->ondisk->vol_buf_beg +
		      (zone2_offset & HAMMER_OFF_SHORT_MASK);
	crit_enter();
	if ((bp = findblk(volume->devvp, phys_offset)) != NULL) {
		bp = getblk(volume->devvp, phys_offset, bp->b_bufsize, 0, 0);
		if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
			hammer_io_clear_modify(&iou->io, 1);
			bundirty(bp);
			iou->io.reclaim = 1;
			hammer_io_deallocate(bp);
		} else {
			KKASSERT((bp->b_flags & B_LOCKED) == 0);
			bundirty(bp);
			bp->b_flags |= B_NOCACHE|B_RELBUF;
		}
		brelse(bp);
	}
	crit_exit();
}
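
/*
 * Note: findblk() above only locates the buffer, it does not lock it.
 * The follow-up getblk() at the buffer's current size is what acquires
 * the buffer so it can safely be undirtied and invalidated.
 */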
/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually interlocked with io.loading and io.refs must be 1.
 *
 * This routine may return a non-NULL bp to the caller for disposal.
 * Disposal simply means the caller finishes decrementing the ref-count on
 * the IO structure then brelse()'s the bp.  The bp may or may not still be
 * passively associated with the IO.
 *
 * The only requirement here is that modified meta-data and volume-header
 * buffers may NOT be disassociated from the IO structure, and consequently
 * we also leave such buffers actively associated with the IO if they already
 * are (since the kernel can't do anything with them anyway).  Only the
 * flusher is allowed to write such buffers out.  Modified pure-data and
 * undo buffers are returned to the kernel but left passively associated
 * so we can track when the kernel writes the bp out.
 */
struct buf *
hammer_io_release(struct hammer_io *io, int flush)
{
	union hammer_io_structure *iou = (void *)io;
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return(NULL);

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * Meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (flush) {
			hammer_io_flush(io);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io);
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regards to
	 * the buffer).
	 */
	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
		} else {
			io->released = 1;
		}
		hammer_io_disassociate((hammer_io_structure_t)io);
		/* return the bp */
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel if
		 * the buffer has been modified.
		 *
		 * Volume and meta-data IO types may only be explicitly
		 * flushed by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
		bp = NULL;	/* bp left associated */
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 *
		 * We can steal the structure's ownership of the bp.
		 */
		io->released = 1;
		if (bp->b_flags & B_LOCKED) {
			hammer_io_disassociate(iou);
			/* return the bp */
		} else {
			if (io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		}
	} else {
		/*
		 * A released buffer is passively associated with our
		 * hammer_io structure.  The kernel cannot destroy it
		 * without making a bioops call.  If the kernel (B_LOCKED)
		 * or we (reclaim) requested that the buffer be destroyed
		 * we destroy it, otherwise we do a quick get/release to
		 * reset its position in the kernel's LRU list.
		 *
		 * Leaving the buffer passively associated allows us to
		 * use the kernel's LRU buffer flushing mechanisms rather
		 * than rolling our own.
		 *
		 * XXX there are two ways of doing this.  We can re-acquire
		 * and passively release to reset the LRU, or not.
		 */
		if (io->running == 0) {
			regetblk(bp);
			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		} else {
			/*
			 * bp is left passively associated but we do not
			 * try to reacquire it.  Interactions with the io
			 * structure will occur on completion of the bp's
			 * I/O.
			 */
			bp = NULL;
		}
	}
	return(bp);
}
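
/*
 * Summary of hammer_io_release() outcomes: a non-NULL return hands the
 * bp back to the caller for brelse(); depending on the path taken the
 * bp may have been disassociated or may still be passively associated
 * (see the inline comments above).  A NULL return means the bp stays
 * attached to the IO, either because it is dirty and the kernel may not
 * write it, or because its I/O is still in flight.
 */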
/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io)
{
	struct buf *bp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0) {
		return;
	}

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs <= 0);

	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set but we actually own the bp rather than our
	 * buffer.
	 */
	bp = io->bp;
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		/* io->released = 0; */
		KKASSERT(io->released);
		KKASSERT(io->bp == bp);
	}
	io->released = 1;

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * NOTE: This call also finalizes the buffer's content (inval == 0).
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	hammer_io_clear_modify(io, 0);

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 */
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
	bawrite(bp);
}
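
/*
 * After the bawrite() above the kernel owns the bp.  Completion arrives
 * via the hammer_io_complete() bioop, which unwinds the io_running_space
 * accounting and clears io->running, waking anyone blocked in
 * hammer_io_wait() or hammer_io_wait_all().
 */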
/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */
/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure data buffers
 * can be written out.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
	/*
	 * io->modify_refs must be >= 0
	 */
	while (io->modify_refs < 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}

	/*
	 * Shortcut if nothing to do.
	 */
	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	hammer_lock_ex(&io->lock);
	if (io->modified == 0) {
		hammer_io_set_modlist(io);
		io->modified = 1;
	}
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
		KKASSERT(io->modified != 0);
	}
	hammer_unlock(&io->lock);
}
static __inline
void
hammer_io_modify_done(hammer_io_t io)
{
	KKASSERT(io->modify_refs > 0);
	--io->modify_refs;
	if (io->modify_refs == 0 && io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}
void
hammer_io_write_interlock(hammer_io_t io)
{
	while (io->modify_refs != 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}
	io->modify_refs = -1;
}
void
hammer_io_done_interlock(hammer_io_t io)
{
	KKASSERT(io->modify_refs == -1);
	io->modify_refs = 0;
	if (io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}
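
/*
 * Illustrative pairing (hypothetical caller, sketch only): the write
 * interlock sets modify_refs to -1, excluding hammer_io_modify() callers
 * for the duration of a copy-out of the ondisk data:
 *
 *	hammer_io_write_interlock(io);
 *	... read or copy out io's ondisk data ...
 *	hammer_io_done_interlock(io);
 */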
/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&volume->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &volume->io,
			 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			 base, len);
	}
}
/*
 * Caller intends to modify a buffer's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&buffer->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &buffer->io,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}
void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}
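
/*
 * Illustrative use (hypothetical names, sketch only): a modification
 * brackets the on-disk change so the undo record is generated before
 * the data changes and the modify_refs accounting stays balanced:
 *
 *	hammer_modify_buffer(trans, buffer, &ondisk->field,
 *			     sizeof(ondisk->field));
 *	ondisk->field = new_value;
 *	hammer_modify_buffer_done(buffer);
 */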
/*
 * Mark an entity as not being dirty any more and finalize any
 * delayed adjustments to the buffer.
 *
 * Delayed adjustments are an important performance enhancement, allowing
 * us to avoid recalculating B-Tree node CRCs over and over again when
 * making bulk-modifications to the B-Tree.
 *
 * If inval is non-zero delayed adjustments are ignored.
 */
void
hammer_io_clear_modify(struct hammer_io *io, int inval)
{
	if (io->modified == 0)
		return;

	/*
	 * Take us off the mod-list and clear the modified bit.
	 */
	KKASSERT(io->mod_list != NULL);
	if (io->mod_list == &io->hmp->volu_list ||
	    io->mod_list == &io->hmp->meta_list) {
		io->hmp->locked_dirty_space -= io->bytes;
		hammer_count_dirtybufspace -= io->bytes;
	}
	TAILQ_REMOVE(io->mod_list, io, mod_entry);
	io->mod_list = NULL;
	io->modified = 0;

	/*
	 * If this bit is not set there are no delayed adjustments.
	 */
	if (io->gencrc == 0)
		return;
	io->gencrc = 0;

	/*
	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
	 * on the node (& underlying buffer).  Release the node after clearing
	 * the flag.
	 */
	if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
		hammer_buffer_t buffer = (void *)io;
		hammer_node_t node;

restart:
		TAILQ_FOREACH(node, &buffer->clist, entry) {
			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
				continue;
			node->flags &= ~HAMMER_NODE_NEEDSCRC;
			KKASSERT(node->ondisk);
			if (inval == 0)
				node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
			hammer_rel_node(node);
			goto restart;
		}
	}
}
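
/*
 * The goto-restart scan above is defensive: hammer_rel_node() can block
 * and entries can be pulled off buffer->clist underneath the iteration,
 * so the list is rescanned from the head after every node released.
 */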
/*
 * Clear the IO's modify list.  Even though the IO is no longer modified
 * it may still be on the lose_list.  This routine is called just before
 * the governing hammer_buffer is destroyed.
 */
void
hammer_io_clear_modlist(struct hammer_io *io)
{
	KKASSERT(io->modified == 0);
	if (io->mod_list) {
		crit_enter();	/* biodone race against list */
		KKASSERT(io->mod_list == &io->hmp->lose_list);
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		crit_exit();
	}
}
static void
hammer_io_set_modlist(struct hammer_io *io)
{
	struct hammer_mount *hmp = io->hmp;

	KKASSERT(io->mod_list == NULL);

	switch(io->type) {
	case HAMMER_STRUCTURE_VOLUME:
		io->mod_list = &hmp->volu_list;
		hmp->locked_dirty_space += io->bytes;
		hammer_count_dirtybufspace += io->bytes;
		break;
	case HAMMER_STRUCTURE_META_BUFFER:
		io->mod_list = &hmp->meta_list;
		hmp->locked_dirty_space += io->bytes;
		hammer_count_dirtybufspace += io->bytes;
		break;
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		io->mod_list = &hmp->undo_list;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
		io->mod_list = &hmp->data_list;
		break;
	}
	TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
}
/************************************************************************
 *				HAMMER_BIOOPS				*
 ************************************************************************
 *
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}
/*
 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT(iou->io.released == 1);

	/*
	 * Deal with people waiting for I/O to drain
	 */
	if (iou->io.running) {
		/*
		 * Deal with critical write errors.  Once a critical error
		 * has been flagged in hmp the UNDO FIFO will not be updated.
		 * That way crash recovery will give us a consistent
		 * filesystem.
		 *
		 * Because of this we can throw away failed UNDO buffers.  If
		 * we throw away META or DATA buffers we risk corrupting
		 * the now read-only version of the filesystem visible to
		 * the user.  Clear B_ERROR so the buffer is not re-dirtied
		 * by the kernel and ref the io so it doesn't get thrown
		 * away.
		 */
		if (bp->b_flags & B_ERROR) {
			hammer_critical_error(iou->io.hmp, NULL, bp->b_error,
					      "while flushing meta-data");
			switch(iou->io.type) {
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				break;
			default:
				if (iou->io.ioerror == 0) {
					iou->io.ioerror = 1;
					if (iou->io.lock.refs == 0)
						++hammer_count_refedbufs;
					hammer_ref(&iou->io.lock);
				}
				break;
			}
			bp->b_flags &= ~B_ERROR;
			bundirty(bp);
#if 0
			hammer_io_set_modlist(&iou->io);
			iou->io.modified = 1;
#endif
		}
		hammer_stats_disk_write += iou->io.bytes;
		hammer_count_io_running_write -= iou->io.bytes;
		iou->io.hmp->io_running_space -= iou->io.bytes;
		if (iou->io.hmp->io_running_space == 0)
			wakeup(&iou->io.hmp->io_running_space);
		KKASSERT(iou->io.hmp->io_running_space >= 0);
		iou->io.running = 0;
	} else {
		hammer_stats_disk_read += iou->io.bytes;
	}

	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * If B_LOCKED is set someone wanted to deallocate the bp at some
	 * point, do it now if refs has become zero.
	 */
	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
		KKASSERT(iou->io.modified == 0);
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
}
/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.  The caller owns the bp.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 *
 * WARNING: This may be called from an interrupt via hammer_io_complete()
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (iou->io.lock.refs > 0 || iou->io.modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.
		 */
		bp->b_flags |= B_LOCKED;
		++hammer_count_io_locked;
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list.
		 */
		hammer_io_disassociate(iou);
		if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
			KKASSERT(iou->io.bp == NULL);
			KKASSERT(iou->io.mod_list == NULL);
			crit_enter();	/* biodone race against list */
			iou->io.mod_list = &iou->io.hmp->lose_list;
			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
			crit_exit();
		}
	}
}
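
/*
 * IOs parked on lose_list above have no governing structure left
 * holding them; the list is swept elsewhere in HAMMER (from the
 * flusher path) to finally dispose of them once it is safe to do so.
 */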
static int
hammer_io_fsync(struct vnode *vp)
{
	return(0);
}
/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
	return(0);
}
static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}
/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
	return(0);
}
static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

	/*
	 * This shouldn't happen under normal operation.
	 */
	if (io->type == HAMMER_STRUCTURE_VOLUME ||
	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
		if (!panicstr)
			panic("hammer_io_checkwrite: illegal buffer");
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			++hammer_count_io_locked;
		}
		return(1);
	}

	/*
	 * We can only clear the modified bit if the IO is not currently
	 * undergoing modification.  Otherwise we may miss changes.
	 */
	if (io->modify_refs == 0 && io->modified)
		hammer_io_clear_modify(io, 0);

	/*
	 * The kernel is going to start the IO, set io->running.
	 */
	KKASSERT(io->running == 0);
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
	return(0);
}
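
/*
 * Note: returning 0 from this bioop commits the kernel to initiating
 * the write, so the io->running and io_running_space accounting set up
 * above is always unwound later by hammer_io_complete().
 */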
/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	return(0);
}
struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};
/************************************************************************
 *				DIRECT IO OPS				*
 ************************************************************************
 *
 * These functions operate directly on the buffer cache buffer associated
 * with a front-end vnode rather than a back-end device vnode.
 */
/*
 * Read a buffer associated with a front-end vnode directly from the
 * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
 * we validate the CRC.
 *
 * A second-level bio, already resolved to a zone-2 offset (typically by
 * the BMAP code or by a previous hammer_io_direct_write()), is passed in.
 *
 * We must check for the presence of a HAMMER buffer to handle the case
 * where the reblocker has rewritten the data (which it does via the HAMMER
 * buffer system, not via the high-level vnode buffer cache), but not yet
 * committed the buffer to the media.
 */
int
hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
		      hammer_btree_leaf_elm_t leaf)
{
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	struct buf *bp;
	struct bio *nbio;
	int vol_no;
	int error;

	buf_offset = bio->bio_offset;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	/*
	 * The buffer cache may have an aliased buffer (the reblocker can
	 * write them).  If it does we have to sync any dirty data before
	 * we can build our direct-read.  This is a non-critical code path.
	 */
	bp = bio->bio_buf;
	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);

	/*
	 * Resolve to a zone-2 offset.  The conversion just requires
	 * munging the top 4 bits but we want to abstract it anyway
	 * so the blockmap code can verify the zone assignment.
	 */
	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
	if (error)
		goto done;
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);

	/*
	 * Resolve volume and raw-offset for 3rd level bio.  The
	 * offset will be specific to the volume.
	 */
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	if (error == 0 && zone2_offset >= volume->maxbuf_off)
		error = EIO;

	if (error == 0) {
		zone2_offset &= HAMMER_OFF_SHORT_MASK;

		nbio = push_bio(bio);
		nbio->bio_offset = volume->ondisk->vol_buf_beg +
				   zone2_offset;
#if 0
		/*
		 * XXX disabled - our CRC check doesn't work if the OS
		 * does bogus_page replacement on the direct-read.
		 */
		if (leaf && hammer_verify_data) {
			nbio->bio_done = hammer_io_direct_read_complete;
			nbio->bio_caller_info1.uvalue32 = leaf->data_crc;
		}
#endif
		hammer_stats_disk_read += bp->b_bufsize;
		vn_strategy(volume->devvp, nbio);
	}
	hammer_rel_volume(volume, 0);
done:
	if (error) {
		kprintf("hammer_direct_read: failed @ %016llx\n",
			zone2_offset);
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}
#if 0
/*
 * On completion of the BIO this callback must check the data CRC
 * and chain to the previous bio.
 */
static
void
hammer_io_direct_read_complete(struct bio *nbio)
{
	struct bio *obio;
	struct buf *bp;
	u_int32_t rec_crc = nbio->bio_caller_info1.uvalue32;

	bp = nbio->bio_buf;
	if (crc32(bp->b_data, bp->b_bufsize) != rec_crc) {
		kprintf("HAMMER: data_crc error @%016llx/%d\n",
			nbio->bio_offset, bp->b_bufsize);
		if (hammer_debug_debug)
			Debugger("");
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
	}
	obio = pop_bio(nbio);
	biodone(obio);
}
#endif
/*
 * Write a buffer associated with a front-end vnode directly to the
 * disk media.  The bio may be issued asynchronously.
 *
 * The BIO is associated with the specified record and RECF_DIRECT_IO
 * is set.
 */
int
hammer_io_direct_write(hammer_mount_t hmp, hammer_record_t record,
		       struct bio *bio)
{
	hammer_btree_leaf_elm_t leaf = &record->leaf;
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	hammer_buffer_t buffer;
	struct buf *bp;
	struct bio *nbio;
	char *ptr;
	int vol_no;
	int error;

	buf_offset = leaf->data_offset;

	KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);

	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
	    leaf->data_len >= HAMMER_BUFSIZE) {
		/*
		 * We are using the vnode's bio to write directly to the
		 * media, any hammer_buffer at the same zone-X offset will
		 * now have stale data.
		 */
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
		vol_no = HAMMER_VOL_DECODE(zone2_offset);
		volume = hammer_get_volume(hmp, vol_no, &error);

		if (error == 0 && zone2_offset >= volume->maxbuf_off)
			error = EIO;
		if (error == 0) {
			bp = bio->bio_buf;
			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
			hammer_del_buffers(hmp, buf_offset,
					   zone2_offset, bp->b_bufsize);

			/*
			 * Second level bio - cached zone2 offset.
			 *
			 * (We can put our bio_done function in either the
			 *  2nd or 3rd level).
			 */
			nbio = push_bio(bio);
			nbio->bio_offset = zone2_offset;
			nbio->bio_done = hammer_io_direct_write_complete;
			nbio->bio_caller_info1.ptr = record;
			record->flags |= HAMMER_RECF_DIRECT_IO;

			/*
			 * Third level bio - raw offset specific to the
			 * correct volume.
			 */
			zone2_offset &= HAMMER_OFF_SHORT_MASK;
			nbio = push_bio(nbio);
			nbio->bio_offset = volume->ondisk->vol_buf_beg +
					   zone2_offset;
			hammer_stats_disk_write += bp->b_bufsize;
			vn_strategy(volume->devvp, nbio);
		}
		hammer_rel_volume(volume, 0);
	} else {
		/*
		 * Must fit in a standard HAMMER buffer.  In this case all
		 * consumers use the HAMMER buffer system and RECF_DIRECT_IO
		 * does not need to be set-up.
		 */
		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) & ~HAMMER_BUFMASK64) == 0);
		buffer = NULL;
		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
		if (error == 0) {
			bp = bio->bio_buf;
			bp->b_flags |= B_AGE;
			hammer_io_modify(&buffer->io, 1);
			bcopy(bp->b_data, ptr, leaf->data_len);
			hammer_io_modify_done(&buffer->io);
			hammer_rel_buffer(buffer, 0);
			bp->b_resid = 0;
			biodone(bio);
		}
	}
	if (error) {
		kprintf("hammer_direct_write: failed @ %016llx\n",
			leaf->data_offset);
		bp = bio->bio_buf;
		bp->b_resid = 0;
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}
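
/*
 * Note the asymmetry above: only the full-block path sets
 * HAMMER_RECF_DIRECT_IO and therefore requires the record to be
 * resolved via hammer_io_direct_wait() before B-Tree commit; the
 * small-block path completes synchronously through the HAMMER
 * buffer system.
 */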
/*
 * On completion of the BIO this callback must disconnect
 * it from the hammer_record and chain to the previous bio.
 *
 * An I/O error forces the mount to read-only.  Data buffers
 * are not B_LOCKED like meta-data buffers are, so we have to
 * throw the buffer away to prevent the kernel from retrying.
 */
static
void
hammer_io_direct_write_complete(struct bio *nbio)
{
	struct bio *obio;
	hammer_record_t record = nbio->bio_caller_info1.ptr;

	obio = pop_bio(nbio);
	if (obio->bio_buf->b_flags & B_ERROR) {
		hammer_critical_error(record->ip->hmp, record->ip,
				      obio->bio_buf->b_error,
				      "while writing bulk data");
		obio->bio_buf->b_flags |= B_INVAL;
	}
	biodone(obio);
	KKASSERT(record != NULL && (record->flags & HAMMER_RECF_DIRECT_IO));
	record->flags &= ~HAMMER_RECF_DIRECT_IO;
	if (record->flags & HAMMER_RECF_DIRECT_WAIT) {
		record->flags &= ~HAMMER_RECF_DIRECT_WAIT;
		wakeup(&record->flags);
	}
}
/*
 * This is called before a record is either committed to the B-Tree
 * or destroyed, to resolve any associated direct-IO.  We must
 * ensure that the data is available on-media to other consumers
 * such as the reblocker or mirroring code.
 *
 * Note that other consumers might access the data via the block
 * device's buffer cache and not the high level vnode's buffer cache.
 */
void
hammer_io_direct_wait(hammer_record_t record)
{
	crit_enter();
	while (record->flags & HAMMER_RECF_DIRECT_IO) {
		record->flags |= HAMMER_RECF_DIRECT_WAIT;
		tsleep(&record->flags, 0, "hmdiow", 0);
	}
	crit_exit();
}
/*
 * This is called to remove the second-level cached zone-2 offset from
 * frontend buffer cache buffers, now stale due to a data relocation.
 * These offsets are generated by cluster_read() via VOP_BMAP, or directly
 * by hammer_vop_strategy_read().
 *
 * This is rather nasty because here we have something like the reblocker
 * scanning the raw B-Tree with no held references on anything, really,
 * other than a shared lock on the B-Tree node, and we have to access the
 * frontend's buffer cache to check for and clean out the association.
 * Specifically, if the reblocker is moving data on the disk, these cached
 * offsets will become invalid.
 *
 * Only data record types associated with the large-data zone are subject
 * to direct-io and need to be checked.
 */
void
hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
{
	struct hammer_inode_info iinfo;
	int zone;

	if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
		return;
	zone = HAMMER_ZONE_DECODE(leaf->data_offset);
	if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
		return;
	iinfo.obj_id = leaf->base.obj_id;
	iinfo.obj_asof = 0;	/* unused */
	iinfo.obj_localization = leaf->base.localization &
				 HAMMER_LOCALIZE_PSEUDOFS_MASK;
	iinfo.u.leaf = leaf;
	hammer_scan_inode_snapshots(hmp, &iinfo,
				    hammer_io_direct_uncache_callback,
				    leaf);
}
static int
hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t iinfo = data;
	hammer_off_t data_offset;
	hammer_off_t file_offset;
	struct vnode *vp;
	struct buf *bp;
	int blksize;

	if (ip->vp == NULL)
		return(0);
	data_offset = iinfo->u.leaf->data_offset;
	file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
	blksize = iinfo->u.leaf->data_len;
	KKASSERT((blksize & HAMMER_BUFMASK) == 0);

	hammer_ref(&ip->lock);
	if (hammer_get_vnode(ip, &vp) == 0) {
		if ((bp = findblk(ip->vp, file_offset)) != NULL &&
		    bp->b_bio2.bio_offset != NOOFFSET) {
			bp = getblk(ip->vp, file_offset, blksize, 0, 0);
			bp->b_bio2.bio_offset = NOOFFSET;
			brelse(bp);
		}
		vput(vp);
	}
	hammer_rel_inode(ip, 0);
	return(0);
}