/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to destroy a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 *
 * The io_token is required for anything which might race bioops and bio_done
 * callbacks, with one exception: A successful hammer_try_interlock_norefs().
 * The fs_token will be held in all other cases.
 */

#include "hammer.h"
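/*
 * Illustrative sketch only (not part of the driver): the locking rule
 * above in code form.  The function itself is hypothetical; the token
 * calls are the ones used throughout this file.
 */
#if 0
static void
example_bio_done_race_safe(hammer_io_t io)
{
	hammer_mount_t hmp = io->hmp;

	lwkt_gettoken(&hmp->io_token);	/* interlocks bioops/bio_done */
	/* ... touch fields shared with I/O completion ... */
	lwkt_reltoken(&hmp->io_token);
}
#endif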
static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
static void hammer_indirect_callback(struct bio *bio);
static void hammer_io_direct_write_complete(struct bio *nbio);
static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
static void hammer_io_set_modlist(hammer_io_t io);
static __inline void hammer_io_flush_mark(hammer_volume_t volume);

static struct bio_ops hammer_bioops;
static int
hammer_mod_rb_compare(hammer_io_t io1, hammer_io_t io2)
{
	hammer_off_t io1_offset;
	hammer_off_t io2_offset;

	/*
	 * Encoded offsets are neither valid block device offsets
	 * nor valid zone-X offsets.
	 */
	io1_offset = HAMMER_ENCODE(0, io1->volume->vol_no, io1->offset);
	io2_offset = HAMMER_ENCODE(0, io2->volume->vol_no, io2->offset);

	if (io1_offset < io2_offset)
		return(-1);
	if (io1_offset > io2_offset)
		return(1);
	return(0);
}

RB_GENERATE(hammer_mod_rb_tree, hammer_io, rb_node, hammer_mod_rb_compare);
/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_volume_t volume, hammer_io_type_t type)
{
	io->volume = volume;
	io->hmp = volume->io.hmp;
	io->type = type;
}
static hammer_io_type_t
hammer_zone_to_iotype(int zone)
{
	hammer_io_type_t iotype;

	switch(zone) {
	case HAMMER_ZONE_RAW_VOLUME_INDEX:
		iotype = HAMMER_IOTYPE_VOLUME;
		break;
	case HAMMER_ZONE_RAW_BUFFER_INDEX:
	case HAMMER_ZONE_FREEMAP_INDEX:
	case HAMMER_ZONE_BTREE_INDEX:
	case HAMMER_ZONE_META_INDEX:
		iotype = HAMMER_IOTYPE_META_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_IOTYPE_UNDO_BUFFER;
		break;
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_IOTYPE_DATA_BUFFER;
		break;
	default:
		iotype = HAMMER_IOTYPE_DUMMY;
		break;
	}
	return(iotype);
}
static const char *
hammer_io_to_iostring(hammer_io_t io)
{
	const char *iostr = NULL;

	switch(io->type) {
	case HAMMER_IOTYPE_VOLUME:
		iostr = "volume";
		break;
	case HAMMER_IOTYPE_META_BUFFER:
		switch(HAMMER_ZONE(HAMMER_ITOB(io)->zoneX_offset)) {
		case HAMMER_ZONE_RAW_BUFFER:
			iostr = "meta/raw_buffer";
			break;
		case HAMMER_ZONE_FREEMAP:
			iostr = "meta/freemap";
			break;
		case HAMMER_ZONE_BTREE:
			iostr = "meta/btree";
			break;
		case HAMMER_ZONE_META:
			iostr = "meta/meta";
			break;
		}
		break;
	case HAMMER_IOTYPE_UNDO_BUFFER:
		iostr = "undo";
		break;
	case HAMMER_IOTYPE_DATA_BUFFER:
		switch(HAMMER_ZONE(HAMMER_ITOB(io)->zoneX_offset)) {
		case HAMMER_ZONE_LARGE_DATA:
			iostr = "data/large_data";
			break;
		case HAMMER_ZONE_SMALL_DATA:
			iostr = "data/small_data";
			break;
		}
		break;
	case HAMMER_IOTYPE_DUMMY:
		iostr = "dummy";
		break;
	default:
		hpanic("bad io type");
		break;
	}
	return(iostr);
}
/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  The io must be interlocked and marked appropriately for
 * reclamation.
 *
 * The io must be in a released state with the io->bp owned and
 * locked by the caller of this function.  When not called from an
 * io_deallocate() this cannot race an io_deallocate() since the
 * kernel would be unable to get the buffer lock in that case.
 * (The released state in this case means we own the bp, not the
 * hammer_io structure).
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 *
 * This function is guaranteed not to race against anything because we
 * own both the io lock and the bp lock and are interlocked with no
 * references.
 */
static void
hammer_io_disassociate(hammer_io_t io)
{
	struct buf *bp = io->bp;

	KKASSERT(io->released);
	KKASSERT(io->modified == 0);
	KKASSERT(hammer_buf_peek_io(bp) == io);
	io->bp = NULL;

	/*
	 * If the buffer was locked someone wanted to get rid of it.
	 */
	if (bp->b_flags & B_LOCKED) {
		atomic_add_int(&hammer_count_io_locked, -1);
		bp->b_flags &= ~B_LOCKED;
	}
	if (io->reclaim) {
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		io->reclaim = 0;
	}

	switch(io->type) {
	case HAMMER_IOTYPE_VOLUME:
		HAMMER_ITOV(io)->ondisk = NULL;
		break;
	case HAMMER_IOTYPE_DATA_BUFFER:
	case HAMMER_IOTYPE_META_BUFFER:
	case HAMMER_IOTYPE_UNDO_BUFFER:
		HAMMER_ITOB(io)->ondisk = NULL;
		break;
	case HAMMER_IOTYPE_DUMMY:
		hpanic("bad io type");
		break;
	}
}
/*
 * Wait for any physical IO to complete
 *
 * XXX we aren't interlocked against a spinlock or anything so there
 *     is a small window in the interlock / io->running == 0 test.
 */
void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		hammer_mount_t hmp = io->hmp;

		lwkt_gettoken(&hmp->io_token);
		while (io->running) {
			io->waiting = 1;
			tsleep_interlock(io, 0);
			if (io->running)
				tsleep(io, PINTERLOCKED, "hmrflw", hz);
		}
		lwkt_reltoken(&hmp->io_token);
	}
}
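/*
 * The loop above uses the standard DragonFly lost-wakeup-safe sleep
 * pattern.  A minimal sketch, illustrative only ('chan' and 'cond' are
 * hypothetical placeholders):
 */
#if 0
	while (cond) {
		tsleep_interlock(chan, 0);	/* register for wakeup first */
		if (cond)			/* then re-test the condition */
			tsleep(chan, PINTERLOCKED, "wmsg", hz);
	}
#endif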
/*
 * Wait for all currently queued HAMMER-initiated I/Os to complete.
 *
 * This is not supposed to count direct I/O's but some can leak
 * through (for non-full-sized direct I/Os).
 */
void
hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush)
{
	struct hammer_io iodummy;
	hammer_io_t io;

	/*
	 * Degenerate case, no I/O is running
	 */
	lwkt_gettoken(&hmp->io_token);
	if (TAILQ_EMPTY(&hmp->iorun_list)) {
		lwkt_reltoken(&hmp->io_token);
		if (doflush)
			hammer_io_flush_sync(hmp);
		return;
	}
	bzero(&iodummy, sizeof(iodummy));
	iodummy.type = HAMMER_IOTYPE_DUMMY;

	/*
	 * Add placemarker and then wait until it becomes the head of
	 * the list.
	 */
	TAILQ_INSERT_TAIL(&hmp->iorun_list, &iodummy, iorun_entry);
	while (TAILQ_FIRST(&hmp->iorun_list) != &iodummy) {
		tsleep(&iodummy, 0, ident, 0);
	}

	/*
	 * Chain in case several placemarkers are present.
	 */
	TAILQ_REMOVE(&hmp->iorun_list, &iodummy, iorun_entry);
	io = TAILQ_FIRST(&hmp->iorun_list);
	if (io && io->type == HAMMER_IOTYPE_DUMMY)
		wakeup(io);
	lwkt_reltoken(&hmp->io_token);

	if (doflush)
		hammer_io_flush_sync(hmp);
}
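/*
 * Hypothetical caller sketch: quiesce all queued HAMMER I/O and force
 * the device write caches out before a critical transition.  The real
 * callers pass their own wait identifier.
 */
#if 0
	hammer_io_wait_all(hmp, "hmrfls", 1);	/* doflush != 0 */
#endif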
/*
 * Clear a flagged error condition on an I/O buffer.  The caller must hold
 * its own ref on the buffer.
 */
void
hammer_io_clear_error(hammer_io_t io)
{
	hammer_mount_t hmp = io->hmp;

	lwkt_gettoken(&hmp->io_token);
	if (io->ioerror) {
		io->ioerror = 0;
		hammer_rel(&io->lock);
		KKASSERT(hammer_isactive(&io->lock));
	}
	lwkt_reltoken(&hmp->io_token);
}

void
hammer_io_clear_error_noassert(hammer_io_t io)
{
	hammer_mount_t hmp = io->hmp;

	lwkt_gettoken(&hmp->io_token);
	if (io->ioerror) {
		io->ioerror = 0;
		hammer_rel(&io->lock);
	}
	lwkt_reltoken(&hmp->io_token);
}
/*
 * This is an advisory function only which tells the buffer cache
 * the bp is not a meta-data buffer, even though it is backed by
 * a meta-data buffer.
 *
 * This is used by HAMMER's reblocking code to avoid trying to
 * swapcache the filesystem's data when it is read or written
 * by the reblocking code.
 *
 * The caller has a ref on the buffer preventing the bp from
 * being disassociated from it.
 */
void
hammer_io_notmeta(hammer_buffer_t buffer)
{
	if ((buffer->io.bp->b_flags & B_NOTMETA) == 0) {
		hammer_mount_t hmp = buffer->io.hmp;

		lwkt_gettoken(&hmp->io_token);
		buffer->io.bp->b_flags |= B_NOTMETA;
		lwkt_reltoken(&hmp->io_token);
	}
}
/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * This routine is mostly used on meta-data and small-data blocks.  Generally
 * speaking HAMMER assumes some locality of reference and will cluster.
 *
 * Note that the caller (hammer_ondisk.c) may place further restrictions
 * on clusterability via the limit (in bytes).  Typically large-data
 * zones cannot be clustered due to their mixed buffer sizes.  This is
 * not an issue since such clustering occurs in hammer_vnops at the
 * regular file layer, whereas this is the buffered block device layer.
 *
 * No I/O callbacks can occur while we hold the buffer locked.
 */
int
hammer_io_read(struct vnode *devvp, hammer_io_t io, int limit)
{
	struct buf *bp;
	int error;

	if ((bp = io->bp) == NULL) {
		int hce = hammer_cluster_enable;

		atomic_add_long(&hammer_count_io_running_read, io->bytes);
		if (hce && limit > io->bytes) {
			error = cluster_read(devvp, io->offset + limit,
					     io->offset, io->bytes,
					     HAMMER_CLUSTER_SIZE,
					     HAMMER_CLUSTER_SIZE * hce,
					     &io->bp);
		} else {
			error = bread(devvp, io->offset, io->bytes, &io->bp);
		}
		hammer_stats_disk_read += io->bytes;
		atomic_add_long(&hammer_count_io_running_read, -io->bytes);

		/*
		 * The code generally assumes b_ops/b_dep has been set-up,
		 * even if we error out here.
		 */
		bp = io->bp;
		if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IOISSUED)) {
			hdkprintf("zone2_offset %016jx %s\n",
				  (intmax_t)bp->b_bio2.bio_offset,
				  hammer_io_to_iostring(io));
		}
		bp->b_flags &= ~B_IOISSUED;
		bp->b_ops = &hammer_bioops;
		hammer_buf_attach_io(bp, io);	/* locked by the io lock */
		BUF_KERNPROC(bp);
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}
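/*
 * Hypothetical caller sketch: the io must be exclusively locked before
 * the read is issued.  A limit <= io->bytes falls back to a plain
 * bread() with no clustering.
 */
#if 0
	error = hammer_io_read(volume->devvp, &buffer->io, limit);
#endif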
/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 *
 * No I/O callbacks can occur while we hold the buffer locked.
 */
int
hammer_io_new(struct vnode *devvp, hammer_io_t io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		hammer_buf_attach_io(bp, io);	/* locked by the io lock */
		io->released = 0;
		KKASSERT(io->running == 0);
		BUF_KERNPROC(bp);
	} else if (io->released) {
		regetblk(bp);
		BUF_KERNPROC(bp);
		io->released = 0;
	}
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}
/*
 * Advance the activity count on the underlying buffer because
 * HAMMER does not getblk/brelse on every access.
 *
 * The io->bp cannot go away while the buffer is referenced.
 */
void
hammer_io_advance(hammer_io_t io)
{
	if (io->bp)
		buf_act_advance(io->bp);
}
/*
 * Remove potential device level aliases against buffers managed by high level
 * vnodes.  Aliases can also be created due to mixed buffer sizes or via
 * direct access to the backing store device.
 *
 * This is nasty because the buffers are also VMIO-backed.  Even if a buffer
 * does not exist its backing VM pages might, and we have to invalidate
 * those as well or a getblk() will reinstate them.
 *
 * Buffer cache buffers associated with hammer_buffers cannot be
 * invalidated.
 */
int
hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
{
	hammer_io_t io;
	hammer_mount_t hmp;
	hammer_off_t phys_offset;
	struct buf *bp;
	int error;

	hmp = volume->io.hmp;
	lwkt_gettoken(&hmp->io_token);

	/*
	 * If a device buffer already exists for the specified physical
	 * offset use that, otherwise instantiate a buffer to cover any
	 * related VM pages, set BNOCACHE, and brelse().
	 */
	phys_offset = hammer_xlate_to_phys(volume->ondisk, zone2_offset);
	if ((bp = findblk(volume->devvp, phys_offset, 0)) != NULL)
		bremfree(bp);
	else
		bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);

	if ((io = hammer_buf_peek_io(bp)) != NULL) {
#if 0
		hammer_ref(&io->lock);
		hammer_io_clear_modify(io, 1);
		bundirty(bp);
		io->released = 0;
		BUF_KERNPROC(bp);
		io->reclaim = 1;
		io->waitdep = 1;	/* XXX this is a fs_token field */
		KKASSERT(hammer_isactive(&io->lock) == 1);
		hammer_rel_buffer(HAMMER_ITOB(io), 0);
		/*hammer_io_deallocate(bp);*/
#endif
		bqrelse(bp);
		error = EAGAIN;
	} else {
		KKASSERT((bp->b_flags & B_LOCKED) == 0);
		bundirty(bp);
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		brelse(bp);
		error = 0;
	}
	lwkt_reltoken(&hmp->io_token);
	return(error);
}
/*
 * This routine is called on the last reference to a hammer structure.
 * The io must be interlocked with a refcount of zero.  The hammer structure
 * will remain interlocked on return.
 *
 * This routine may return a non-NULL bp to the caller for disposal.
 * The caller typically brelse()'s the bp.
 *
 * The bp may or may not still be passively associated with the IO.  It
 * will remain passively associated if it is unreleasable (e.g. a modified
 * meta-data buffer).
 *
 * The only requirement here is that modified meta-data and volume-header
 * buffers may NOT be disassociated from the IO structure, and consequently
 * we also leave such buffers actively associated with the IO if they already
 * are (since the kernel can't do anything with them anyway).  Only the
 * flusher is allowed to write such buffers out.  Modified pure-data and
 * undo buffers are returned to the kernel but left passively associated
 * so we can track when the kernel writes the bp out.
 */
struct buf *
hammer_io_release(hammer_io_t io, int flush)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return(NULL);

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (flush) {
			hammer_io_flush(io, 0);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_IOTYPE_DATA_BUFFER:
				hammer_io_flush(io, 0);
				break;
			case HAMMER_IOTYPE_UNDO_BUFFER:
				hammer_io_flush(io, hammer_undo_reclaim(io));
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.  This occurs when
	 * the buffer must be disposed of definitively during an umount
	 * or buffer invalidation.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regards to
	 * the buffer).
	 */
	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
		hammer_io_disassociate(io);
		/* return the bp */
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel if
		 * the buffer has been modified.
		 *
		 * volume and meta-data IO types may only be explicitly
		 * flushed by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_IOTYPE_DATA_BUFFER:
		case HAMMER_IOTYPE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bp->b_flags |= B_CLUSTEROK;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
		bp = NULL;	/* bp left associated */
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 *
		 * We can steal the structure's ownership of the bp.
		 */
		io->released = 1;
		if (bp->b_flags & B_LOCKED) {
			hammer_io_disassociate(io);
			/* return the bp */
		} else if (io->reclaim) {
			hammer_io_disassociate(io);
			/* return the bp */
		} else {
			/* return the bp (bp passively associated) */
		}
	} else {
		/*
		 * A released buffer is passively associated with our
		 * hammer_io structure.  The kernel cannot destroy it
		 * without making a bioops call.  If the kernel (B_LOCKED)
		 * or we (reclaim) requested that the buffer be destroyed
		 * we destroy it, otherwise we do a quick get/release to
		 * reset its position in the kernel's LRU list.
		 *
		 * Leaving the buffer passively associated allows us to
		 * use the kernel's LRU buffer flushing mechanisms rather
		 * than rolling our own.
		 *
		 * XXX there are two ways of doing this.  We can re-acquire
		 * and passively release to reset the LRU, or not.
		 */
		if (io->running == 0) {
			regetblk(bp);
			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
				hammer_io_disassociate(io);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		} else {
			bp = NULL;
			/*
			 * bp is left passively associated but we do not
			 * try to reacquire it.  Interactions with the io
			 * structure will occur on completion of the bp's
			 * I/O.
			 */
		}
	}
	return(bp);
}
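/*
 * Summary of the dispositions above, descriptive only:
 *
 *	flush/reclaim, clean and idle	-> disassociate, return bp
 *	modified data/undo, unreleased	-> bdwrite(), bp stays associated
 *	modified meta/volume		-> left for the flusher, bp associated
 *	clean, unreleased		-> ownership stolen; disassociate only
 *					   on B_LOCKED or reclaim
 *	clean, released, idle		-> regetblk() to refresh the LRU, or
 *					   disassociate on B_LOCKED/reclaim
 */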
/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exists other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 *
 * The locked io or io reference prevents a flush from being initiated
 * by the kernel.
 */
void
hammer_io_flush(hammer_io_t io, int reclaim)
{
	struct buf *bp;
	hammer_mount_t hmp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0)
		return;

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs <= 0);

	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set, we actually own the bp rather than our
	 * structure.
	 *
	 * The io_token should not be required here as only
	 */
	hmp = io->hmp;
	bp = io->bp;
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		/* io->released = 0; */
		KKASSERT(io->released);
		KKASSERT(io->bp == bp);
	} else {
		io->released = 1;
	}

	if (reclaim) {
		io->reclaim = 1;
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			atomic_add_int(&hammer_count_io_locked, 1);
		}
	}

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * NOTE: This call also finalizes the buffer's content (inval == 0).
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	hammer_ref(&io->lock);
	hammer_io_clear_modify(io, 0);
	hammer_rel(&io->lock);

	if (hammer_debug_io & 0x0002)
		hdkprintf("%016jx\n", bp->b_bio1.bio_offset);

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 *
	 * NOTE: We do not hold io_token so an atomic op is required to
	 *	 update io_running_space.
	 */
	io->running = 1;
	atomic_add_long(&hmp->io_running_space, io->bytes);
	atomic_add_long(&hammer_count_io_running_write, io->bytes);
	lwkt_gettoken(&hmp->io_token);
	TAILQ_INSERT_TAIL(&hmp->iorun_list, io, iorun_entry);
	lwkt_reltoken(&hmp->io_token);
	bawrite(bp);
	hammer_io_flush_mark(io->volume);
}
/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */

/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure data buffers
 * can be written out.
 *
 * The referenced io prevents races.
 */
static void
hammer_io_modify(hammer_io_t io, int count)
{
	/*
	 * io->modify_refs must be >= 0
	 */
	while (io->modify_refs < 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}

	/*
	 * Shortcut if nothing to do.
	 */
	KKASSERT(hammer_isactive(&io->lock) && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	/*
	 * NOTE: It is important not to set the modified bit
	 *	 until after we have acquired the bp or we risk
	 *	 racing against checkwrite.
	 */
	hammer_lock_ex(&io->lock);
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
	}
	if (io->modified == 0) {
		hammer_io_set_modlist(io);
		io->modified = 1;
	}
	hammer_unlock(&io->lock);
}
void
hammer_io_modify_done(hammer_io_t io)
{
	KKASSERT(io->modify_refs > 0);
	--io->modify_refs;
	if (io->modify_refs == 0 && io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}
/*
 * The write interlock blocks other threads trying to modify a buffer
 * (they block in hammer_io_modify()) after us, or blocks us while other
 * threads are in the middle of modifying a buffer.
 *
 * The caller also has a ref on the io, however if we are not careful
 * we will race bioops callbacks (checkwrite).  To deal with this
 * we must at least acquire and release the io_token, and it is probably
 * better to hold it through the setting of modify_refs.
 */
void
hammer_io_write_interlock(hammer_io_t io)
{
	hammer_mount_t hmp = io->hmp;

	lwkt_gettoken(&hmp->io_token);
	while (io->modify_refs != 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}
	io->modify_refs = -1;
	lwkt_reltoken(&hmp->io_token);
}

void
hammer_io_done_interlock(hammer_io_t io)
{
	KKASSERT(io->modify_refs == -1);
	io->modify_refs = 0;
	if (io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}
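/*
 * Illustrative pairing (hypothetical caller): the interlock brackets a
 * passive modification so a bioops checkwrite cannot initiate the write
 * mid-update.
 */
#if 0
	hammer_io_write_interlock(&buffer->io);
	/* ... modify buffer->ondisk contents in place ... */
	hammer_io_done_interlock(&buffer->io);
#endif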
/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&volume->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans,
			HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			base, len);
	}
}
/*
 * Caller intends to modify a buffer's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&buffer->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}
void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}
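/*
 * Typical usage sketch (the modified field is illustrative): every
 * ondisk modification is bracketed so the undo record is generated
 * before the bytes change.
 */
#if 0
	hammer_modify_volume(trans, volume, &ondisk->vol0_next_tid,
			     sizeof(ondisk->vol0_next_tid));
	ondisk->vol0_next_tid = next_tid;
	hammer_modify_volume_done(volume);
#endif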
/*
 * Mark an entity as not being dirty any more and finalize any
 * delayed adjustments to the buffer.
 *
 * Delayed adjustments are an important performance enhancement, allowing
 * us to avoid recalculating B-Tree node CRCs over and over again when
 * making bulk-modifications to the B-Tree.
 *
 * If inval is non-zero delayed adjustments are ignored.
 *
 * This routine may dereference related btree nodes and cause the
 * buffer to be dereferenced.  The caller must own a reference on io.
 */
void
hammer_io_clear_modify(hammer_io_t io, int inval)
{
	hammer_mount_t hmp;

	/*
	 * io_token is needed to avoid races on mod_root
	 */
	if (io->modified == 0)
		return;
	hmp = io->hmp;
	lwkt_gettoken(&hmp->io_token);
	if (io->modified == 0) {
		lwkt_reltoken(&hmp->io_token);
		return;
	}

	/*
	 * Take us off the mod-list and clear the modified bit.
	 */
	KKASSERT(io->mod_root != NULL);
	if (io->mod_root == &io->hmp->volu_root ||
	    io->mod_root == &io->hmp->meta_root) {
		io->hmp->locked_dirty_space -= io->bytes;
		atomic_add_long(&hammer_count_dirtybufspace, -io->bytes);
	}
	RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
	io->mod_root = NULL;
	io->modified = 0;

	lwkt_reltoken(&hmp->io_token);

	/*
	 * If this bit is not set there are no delayed adjustments.
	 */
	if (io->gencrc == 0)
		return;
	io->gencrc = 0;

	/*
	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
	 * on the node (& underlying buffer).  Release the node after clearing
	 * the flag.
	 */
	if (io->type == HAMMER_IOTYPE_META_BUFFER) {
		hammer_buffer_t buffer = HAMMER_ITOB(io);
		hammer_node_t node;

restart:
		TAILQ_FOREACH(node, &buffer->node_list, entry) {
			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
				continue;
			node->flags &= ~HAMMER_NODE_NEEDSCRC;
			KKASSERT(node->ondisk);
			if (inval == 0)
				hammer_crc_set_btree(hmp->version, node->ondisk);
			hammer_rel_node(node);
			goto restart;
		}
	}
	/* caller must still have ref on io */
	KKASSERT(hammer_isactive(&io->lock));
}
/*
 * Clear the IO's modify list.  Even though the IO is no longer modified
 * it may still be on the lose_root.  This routine is called just before
 * the governing hammer_buffer is destroyed.
 *
 * mod_root requires io_token protection.
 */
void
hammer_io_clear_modlist(hammer_io_t io)
{
	hammer_mount_t hmp = io->hmp;

	KKASSERT(io->modified == 0);
	if (io->mod_root) {
		lwkt_gettoken(&hmp->io_token);
		if (io->mod_root) {
			KKASSERT(io->mod_root == &io->hmp->lose_root);
			RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
			io->mod_root = NULL;
		}
		lwkt_reltoken(&hmp->io_token);
	}
}
static void
hammer_io_set_modlist(hammer_io_t io)
{
	hammer_mount_t hmp = io->hmp;

	lwkt_gettoken(&hmp->io_token);
	KKASSERT(io->mod_root == NULL);

	switch(io->type) {
	case HAMMER_IOTYPE_VOLUME:
		io->mod_root = &hmp->volu_root;
		hmp->locked_dirty_space += io->bytes;
		atomic_add_long(&hammer_count_dirtybufspace, io->bytes);
		break;
	case HAMMER_IOTYPE_META_BUFFER:
		io->mod_root = &hmp->meta_root;
		hmp->locked_dirty_space += io->bytes;
		atomic_add_long(&hammer_count_dirtybufspace, io->bytes);
		break;
	case HAMMER_IOTYPE_UNDO_BUFFER:
		io->mod_root = &hmp->undo_root;
		break;
	case HAMMER_IOTYPE_DATA_BUFFER:
		io->mod_root = &hmp->data_root;
		break;
	case HAMMER_IOTYPE_DUMMY:
		hpanic("bad io type");
		break;	/* NOT REACHED */
	}
	if (RB_INSERT(hammer_mod_rb_tree, io->mod_root, io)) {
		hpanic("duplicate entry @ %d:%015jx",
			io->volume->vol_no, io->offset);
	}
	lwkt_reltoken(&hmp->io_token);
}
/************************************************************************
 *				HAMMER_BIOOPS				*
 ************************************************************************
 *
 * Pre-IO initiation kernel callback - cluster build only
 *
 * bioops callback - hold io_token
 */
static void
hammer_io_start(struct buf *bp)
{
	/* nothing to do, so io_token not needed */
}
/*
 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
 *
 * NOTE: HAMMER may modify a data buffer after we have initiated write
 *	 I/O.
 *
 * NOTE: MPSAFE callback
 *
 * bioops callback - hold io_token
 */
static void
hammer_io_complete(struct buf *bp)
{
	hammer_io_t io = hammer_buf_peek_io(bp);
	hammer_mount_t hmp = io->hmp;
	hammer_io_t ionext;

	lwkt_gettoken(&hmp->io_token);

	KKASSERT(io->released == 1);

	/*
	 * Deal with people waiting for I/O to drain
	 */
	if (io->running) {
		/*
		 * Deal with critical write errors.  Once a critical error
		 * has been flagged in hmp the UNDO FIFO will not be updated.
		 * That way crash recovery will give us a consistent
		 * filesystem.
		 *
		 * Because of this we can throw away failed UNDO buffers.  If
		 * we throw away META or DATA buffers we risk corrupting
		 * the now read-only version of the filesystem visible to
		 * the user.  Clear B_ERROR so the buffer is not re-dirtied
		 * by the kernel and ref the io so it doesn't get thrown
		 * away.
		 */
		if (bp->b_flags & B_ERROR) {
			lwkt_gettoken(&hmp->fs_token);
			hammer_critical_error(hmp, NULL, bp->b_error,
					      "while flushing meta-data");
			lwkt_reltoken(&hmp->fs_token);

			switch(io->type) {
			case HAMMER_IOTYPE_UNDO_BUFFER:
				break;
			default:
				if (io->ioerror == 0) {
					io->ioerror = 1;
					hammer_ref(&io->lock);
				}
				break;
			}
			bp->b_flags &= ~B_ERROR;
			bundirty(bp);
#if 0
			hammer_io_set_modlist(io);
			io->modified = 1;
#endif
		}
		hammer_stats_disk_write += io->bytes;
		atomic_add_long(&hammer_count_io_running_write, -io->bytes);
		atomic_add_long(&hmp->io_running_space, -io->bytes);
		KKASSERT(hmp->io_running_space >= 0);
		io->running = 0;

		/*
		 * Remove from iorun list and wakeup any multi-io waiter(s).
		 */
		if (TAILQ_FIRST(&hmp->iorun_list) == io) {
			ionext = TAILQ_NEXT(io, iorun_entry);
			if (ionext && ionext->type == HAMMER_IOTYPE_DUMMY)
				wakeup(ionext);
		}
		TAILQ_REMOVE(&hmp->iorun_list, io, iorun_entry);
	} else {
		hammer_stats_disk_read += io->bytes;
	}

	if (io->waiting) {
		io->waiting = 0;
		wakeup(io);
	}

	/*
	 * If B_LOCKED is set someone wanted to deallocate the bp at some
	 * point, try to do it now.  The operation will fail if there are
	 * refs or if hammer_io_deallocate() is unable to gain the
	 * interlock.
	 */
	if (bp->b_flags & B_LOCKED) {
		atomic_add_int(&hammer_count_io_locked, -1);
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
	lwkt_reltoken(&hmp->io_token);
}
/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.  The caller owns the bp.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 *	    recurse into the tree.  If a bp cannot be immediately
 *	    disassociated our only recourse is to set B_LOCKED.
 *
 * WARNING: This may be called from an interrupt via hammer_io_complete()
 *
 * bioops callback - hold io_token
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_t io = hammer_buf_peek_io(bp);
	hammer_mount_t hmp;

	hmp = io->hmp;

	lwkt_gettoken(&hmp->io_token);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && io->running == 0);
	if (hammer_try_interlock_norefs(&io->lock) == 0) {
		/*
		 * We cannot safely disassociate a bp from a referenced
		 * or interlocked HAMMER structure.
		 */
		bp->b_flags |= B_LOCKED;
		atomic_add_int(&hammer_count_io_locked, 1);
	} else if (io->modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.
		 */
		bp->b_flags |= B_LOCKED;
		atomic_add_int(&hammer_count_io_locked, 1);
		hammer_put_interlock(&io->lock, 0);
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list.  The kernel has
		 * locked the buffer and therefore our io must be
		 * in a released state.
		 */
		hammer_io_disassociate(io);
		if (io->type != HAMMER_IOTYPE_VOLUME) {
			KKASSERT(io->bp == NULL);
			KKASSERT(io->mod_root == NULL);
			io->mod_root = &hmp->lose_root;
			if (RB_INSERT(hammer_mod_rb_tree, io->mod_root, io)) {
				hpanic("duplicate entry @ %d:%015jx",
					io->volume->vol_no, io->offset);
			}
		}
		hammer_put_interlock(&io->lock, 1);
	}
	lwkt_reltoken(&hmp->io_token);
}
/*
 * bioops callback - hold io_token
 */
static int
hammer_io_fsync(struct vnode *vp)
{
	/* nothing to do, so io_token not needed */
	return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 *
 * bioops callback - hold io_token
 */
static int
hammer_io_sync(struct mount *mp)
{
	/* nothing to do, so io_token not needed */
	return(0);
}

/*
 * bioops callback - hold io_token
 */
static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
	/* nothing to do, so io_token not needed */
}
/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 *
 * bioops callback - hold io_token
 */
static int
hammer_io_checkread(struct buf *bp)
{
	/* nothing to do, so io_token not needed */
	return(0);
}
/*
 * The kernel is asking us whether it can write out a dirty buffer or not.
 *
 * bioops callback - hold io_token
 */
static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = hammer_buf_peek_io(bp);
	hammer_mount_t hmp = io->hmp;

	/*
	 * This shouldn't happen under normal operation.
	 */
	lwkt_gettoken(&hmp->io_token);
	if (io->type == HAMMER_IOTYPE_VOLUME ||
	    io->type == HAMMER_IOTYPE_META_BUFFER) {
		if (!panicstr)
			hpanic("illegal buffer");
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			atomic_add_int(&hammer_count_io_locked, 1);
		}
		lwkt_reltoken(&hmp->io_token);
		return(1);
	}

	/*
	 * We have to be able to interlock the IO to safely modify any
	 * of its fields without holding the fs_token.  If we can't lock
	 * it then we are racing someone.
	 *
	 * Our ownership of the bp lock prevents the io from being ripped
	 * out from under us.
	 */
	if (hammer_try_interlock_norefs(&io->lock) == 0) {
		bp->b_flags |= B_LOCKED;
		atomic_add_int(&hammer_count_io_locked, 1);
		lwkt_reltoken(&hmp->io_token);
		return(1);
	}

	/*
	 * The modified bit must be cleared prior to the initiation of
	 * any IO (returning 0 initiates the IO).  Because this is a
	 * normal data buffer hammer_io_clear_modify() runs through a
	 * simple degenerate case.
	 *
	 * Return 0 will cause the kernel to initiate the IO, and we
	 * must normally clear the modified bit before we begin.  If
	 * the io has modify_refs we do not clear the modified bit,
	 * otherwise we may miss changes.
	 *
	 * Only data and undo buffers can reach here.  These buffers do
	 * not have terminal crc functions but we temporarily reference
	 * the IO anyway, just in case.
	 */
	if (io->modify_refs == 0 && io->modified) {
		hammer_ref(&io->lock);
		hammer_io_clear_modify(io, 0);
		hammer_rel(&io->lock);
	} else if (io->modified) {
		KKASSERT(io->type == HAMMER_IOTYPE_DATA_BUFFER);
	}

	/*
	 * The kernel is going to start the IO, set io->running.
	 */
	KKASSERT(io->running == 0);
	io->running = 1;
	atomic_add_long(&io->hmp->io_running_space, io->bytes);
	atomic_add_long(&hammer_count_io_running_write, io->bytes);
	TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry);

	hammer_put_interlock(&io->lock, 1);
	lwkt_reltoken(&hmp->io_token);

	return(0);
}
/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 *
 * bioops callback - hold io_token
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	/* nothing to do, so io_token not needed */
	return(0);
}
static struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};
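/*
 * Descriptive note: this table is wired to each device bp via
 * bp->b_ops in hammer_io_read() and hammer_io_new() above; the kernel
 * then invokes the callbacks on buffer state transitions (completion,
 * deallocation, checkread/checkwrite, etc.).
 */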
/************************************************************************
 *				DIRECT IO OPS				*
 ************************************************************************
 *
 * These functions operate directly on the buffer cache buffer associated
 * with a front-end vnode rather than a back-end device vnode.
 */
/*
 * Read a buffer associated with a front-end vnode directly from the
 * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
 * we validate the CRC.
 *
 * We must check for the presence of a HAMMER buffer to handle the case
 * where the reblocker has rewritten the data (which it does via the HAMMER
 * buffer system, not via the high-level vnode buffer cache), but not yet
 * committed the buffer to the media.
 */
int
hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
		      hammer_btree_leaf_elm_t leaf)
{
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	struct buf *bp;
	struct bio *nbio;
	int vol_no;
	int error;

	buf_offset = bio->bio_offset;
	KKASSERT(hammer_is_zone_large_data(buf_offset));

	/*
	 * The buffer cache may have an aliased buffer (the reblocker can
	 * write them).  If it does we have to sync any dirty data before
	 * we can build our direct-read.  This is a non-critical code path.
	 */
	bp = bio->bio_buf;
	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);

	/*
	 * Resolve to a zone-2 offset.  The conversion just requires
	 * munging the top 4 bits but we want to abstract it anyway
	 * so the blockmap code can verify the zone assignment.
	 */
	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
	if (error)
		goto done;
	KKASSERT(hammer_is_zone_raw_buffer(zone2_offset));

	/*
	 * Resolve volume and raw-offset for 3rd level bio.  The
	 * offset will be specific to the volume.
	 */
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	if (error == 0 && zone2_offset >= volume->maxbuf_off)
		error = EIO;

	if (error == 0) {
		/*
		 * 3rd level bio (the caller has already pushed once)
		 */
		nbio = push_bio(bio);
		nbio->bio_offset = hammer_xlate_to_phys(volume->ondisk,
							zone2_offset);
		hammer_stats_disk_read += bp->b_bufsize;
		vn_strategy(volume->devvp, nbio);
	}
	hammer_rel_volume(volume, 0);
done:
	if (error) {
		hdkprintf("failed @ %016jx\n", (intmax_t)zone2_offset);
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}
/*
 * This works similarly to hammer_io_direct_read() except instead of
 * directly reading from the device into the bio we instead indirectly
 * read through the device's buffer cache and then copy the data into
 * the bio.
 *
 * If leaf is non-NULL and validation is enabled, the CRC will be checked.
 *
 * This routine also executes asynchronously.  It allows hammer strategy
 * calls to operate asynchronously when in double_buffer mode (in addition
 * to operating asynchronously when in normal mode).
 */
int
hammer_io_indirect_read(hammer_mount_t hmp, struct bio *bio,
			hammer_btree_leaf_elm_t leaf)
{
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	struct buf *bp;
	int vol_no;
	int error;

	buf_offset = bio->bio_offset;
	KKASSERT(hammer_is_zone_large_data(buf_offset));

	/*
	 * The buffer cache may have an aliased buffer (the reblocker can
	 * write them).  If it does we have to sync any dirty data before
	 * we can build our direct-read.  This is a non-critical code path.
	 */
	bp = bio->bio_buf;
	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);

	/*
	 * Resolve to a zone-2 offset.  The conversion just requires
	 * munging the top 4 bits but we want to abstract it anyway
	 * so the blockmap code can verify the zone assignment.
	 */
	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
	if (error)
		goto done;
	KKASSERT(hammer_is_zone_raw_buffer(zone2_offset));

	/*
	 * Resolve volume and raw-offset for 3rd level bio.  The
	 * offset will be specific to the volume.
	 */
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	if (error == 0 && zone2_offset >= volume->maxbuf_off)
		error = EIO;

	if (error == 0) {
		int hce;
		hammer_off_t limit;

		/*
		 * Convert to the raw volume->devvp offset and acquire
		 * the buf, issuing async I/O if necessary.
		 */
		buf_offset = hammer_xlate_to_phys(volume->ondisk, zone2_offset);

		if (leaf && hammer_verify_data) {
			bio->bio_caller_info1.uvalue32 = leaf->data_crc;
			bio->bio_caller_info2.index = 1;
		} else {
			bio->bio_caller_info2.index = 0;
		}
		bio->bio_caller_info3.ptr = hmp;

		hce = hammer_cluster_enable;
		if (hce > 0) {
			limit = HAMMER_BIGBLOCK_DOALIGN(zone2_offset);
			limit -= zone2_offset;
			cluster_readcb(volume->devvp, limit, buf_offset,
				       bp->b_bufsize,
				       HAMMER_CLUSTER_SIZE,
				       HAMMER_CLUSTER_SIZE * hce,
				       hammer_indirect_callback,
				       bio);
		} else {
			breadcb(volume->devvp, buf_offset, bp->b_bufsize,
				hammer_indirect_callback, bio);
		}
	}
	hammer_rel_volume(volume, 0);
done:
	if (error) {
		hdkprintf("failed @ %016jx\n", (intmax_t)zone2_offset);
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}
/*
 * Indirect callback on completion.  bio/bp specify the device-backed
 * buffer.  bio->bio_caller_info1.ptr holds obio.
 *
 * obio/obp is the original regular file buffer.  obio->bio_caller_info*
 * contains the crc specification.
 *
 * We are responsible for calling bpdone() and bqrelse() on bio/bp, and
 * for calling biodone() on obio.
 */
static void
hammer_indirect_callback(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct buf *obp;
	struct bio *obio;
	hammer_mount_t hmp;

	/*
	 * If BIO_DONE is already set the device buffer was already
	 * fully valid (B_CACHE).  If it is not set then I/O was issued
	 * and we have to run I/O completion as the last bio.
	 *
	 * Nobody is waiting for our device I/O to complete, we are
	 * responsible for bqrelse()ing it which means we also have to do
	 * the equivalent of biowait() and clear BIO_DONE (which breadcb()
	 * may have set).
	 *
	 * Any preexisting device buffer should match the requested size,
	 * but due to big-block recycling and other factors there is some
	 * fragility there, so we assert that the device buffer covers
	 * the request.
	 */
	if ((bio->bio_flags & BIO_DONE) == 0)
		bpdone(bp, 0);
	bio->bio_flags &= ~(BIO_DONE | BIO_SYNC);

	obio = bio->bio_caller_info1.ptr;
	obp = obio->bio_buf;
	hmp = obio->bio_caller_info3.ptr;

	if (bp->b_flags & B_ERROR) {
		/*
		 * Error from block device
		 */
		obp->b_flags |= B_ERROR;
		obp->b_error = bp->b_error;
	} else if (obio->bio_caller_info2.index &&
		   obio->bio_caller_info1.uvalue32 !=
		    hammer_datacrc(hmp->version,
				   bp->b_data, obp->b_bufsize) &&
		   obio->bio_caller_info1.uvalue32 !=
		    hammer_datacrc(HAMMER_VOL_VERSION_SIX,
				   bp->b_data, obp->b_bufsize)) {
		/*
		 * CRC error.  First check against current hammer version,
		 * then back-off and check against version 6 (the original
		 * crc algorithm).
		 */
		obp->b_flags |= B_ERROR;
		obp->b_error = EIO;
	} else {
		KKASSERT(bp->b_bufsize >= obp->b_bufsize);
		bcopy(bp->b_data, obp->b_data, obp->b_bufsize);
		obp->b_resid = 0;
		obp->b_flags |= B_AGE;
	}
	biodone(obio);
	bqrelse(bp);
}
/*
 * Write a buffer associated with a front-end vnode directly to the
 * disk media.  The bio may be issued asynchronously.
 *
 * The BIO is associated with the specified record and RECG_DIRECT_IO
 * is set.  The record is added to its object.
 */
int
hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
		       hammer_record_t record)
{
	hammer_btree_leaf_elm_t leaf = &record->leaf;
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	hammer_buffer_t buffer;
	struct buf *bp;
	struct bio *nbio;
	char *ptr;
	int vol_no;
	int error;

	buf_offset = leaf->data_offset;

	KKASSERT(hammer_is_zone_record(buf_offset));
	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);

	/*
	 * Issue or execute the I/O.  The new memory record must replace
	 * the old one before the I/O completes, otherwise a reacquisition
	 * of the buffer will load the old media data instead of the new.
	 */
	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
	    leaf->data_len >= HAMMER_BUFSIZE) {
		/*
		 * We are using the vnode's bio to write directly to the
		 * media, any hammer_buffer at the same zone-X offset will
		 * now have stale data.
		 */
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
		vol_no = HAMMER_VOL_DECODE(zone2_offset);
		volume = hammer_get_volume(hmp, vol_no, &error);

		if (error == 0 && zone2_offset >= volume->maxbuf_off)
			error = EIO;
		if (error == 0) {
			bp = bio->bio_buf;
			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);

			/*
			 * Second level bio - cached zone2 offset.
			 *
			 * (We can put our bio_done function in either the
			 *  2nd or 3rd level).
			 */
			nbio = push_bio(bio);
			nbio->bio_offset = zone2_offset;
			nbio->bio_done = hammer_io_direct_write_complete;
			nbio->bio_caller_info1.ptr = record;
			record->zone2_offset = zone2_offset;
			record->gflags |= HAMMER_RECG_DIRECT_IO |
					  HAMMER_RECG_DIRECT_INVAL;

			/*
			 * Third level bio - raw offset specific to the
			 * correct volume.
			 */
			nbio = push_bio(nbio);
			nbio->bio_offset =
				hammer_xlate_to_phys(volume->ondisk,
						     zone2_offset);
			hammer_stats_disk_write += bp->b_bufsize;
			hammer_ip_replace_bulk(hmp, record);
			vn_strategy(volume->devvp, nbio);
			hammer_io_flush_mark(volume);
		}
		hammer_rel_volume(volume, 0);
	} else {
		/*
		 * Must fit in a standard HAMMER buffer.  In this case all
		 * consumers use the HAMMER buffer system and RECG_DIRECT_IO
		 * does not need to be set-up.
		 */
		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) &
			  ~HAMMER_BUFMASK64) == 0);
		buffer = NULL;
		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
		if (ptr) {
			bp = bio->bio_buf;
			bp->b_flags |= B_AGE;
			hammer_io_modify(&buffer->io, 1);
			bcopy(bp->b_data, ptr, leaf->data_len);
			hammer_io_modify_done(&buffer->io);
			hammer_rel_buffer(buffer, 0);
			bp->b_resid = 0;
			hammer_ip_replace_bulk(hmp, record);
			biodone(bio);
		}
	}
	if (error) {
		/*
		 * Major suckage occurred.  Also note:  The record was
		 * never added to the tree so we do not have to worry
		 * about the backend.
		 */
		hdkprintf("failed @ %016jx\n", (intmax_t)leaf->data_offset);
		bp = bio->bio_buf;
		bp->b_resid = 0;
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
		record->flags |= HAMMER_RECF_DELETED_FE;
		hammer_rel_mem_record(record);
	}
	return(error);
}
/*
 * On completion of the BIO this callback must disconnect
 * it from the hammer_record and chain to the previous bio.
 *
 * An I/O error forces the mount to read-only.  Data buffers
 * are not B_LOCKED like meta-data buffers are, so we have to
 * throw the buffer away to prevent the kernel from retrying.
 *
 * NOTE: MPSAFE callback, only modify fields we have explicit
 *	 access to (the bp and the record->gflags).
 */
static void
hammer_io_direct_write_complete(struct bio *nbio)
{
	struct bio *obio;
	struct buf *bp;
	hammer_record_t record;
	hammer_mount_t hmp;

	record = nbio->bio_caller_info1.ptr;
	KKASSERT(record != NULL);
	hmp = record->ip->hmp;

	lwkt_gettoken(&hmp->io_token);

	bp = nbio->bio_buf;
	obio = pop_bio(nbio);
	if (bp->b_flags & B_ERROR) {
		lwkt_gettoken(&hmp->fs_token);
		hammer_critical_error(hmp, record->ip, bp->b_error,
				      "while writing bulk data");
		lwkt_reltoken(&hmp->fs_token);
		bp->b_flags |= B_INVAL;
	}
	biodone(obio);

	KKASSERT(record->gflags & HAMMER_RECG_DIRECT_IO);
	if (record->gflags & HAMMER_RECG_DIRECT_WAIT) {
		record->gflags &= ~(HAMMER_RECG_DIRECT_IO |
				    HAMMER_RECG_DIRECT_WAIT);
		/* record can disappear once DIRECT_IO flag is cleared */
		wakeup(&record->flags);
	} else {
		record->gflags &= ~HAMMER_RECG_DIRECT_IO;
		/* record can disappear once DIRECT_IO flag is cleared */
	}
	lwkt_reltoken(&hmp->io_token);
}
/*
 * This is called before a record is either committed to the B-Tree
 * or destroyed, to resolve any associated direct-IO.
 *
 * (1) We must wait for any direct-IO related to the record to complete.
 *
 * (2) We must remove any buffer cache aliases for data accessed via
 *     leaf->data_offset or zone2_offset so non-direct-IO consumers
 *     (the mirroring and reblocking code) do not see stale data.
 */
void
hammer_io_direct_wait(hammer_record_t record)
{
	hammer_mount_t hmp = record->ip->hmp;

	/*
	 * Wait for I/O to complete
	 */
	if (record->gflags & HAMMER_RECG_DIRECT_IO) {
		lwkt_gettoken(&hmp->io_token);
		while (record->gflags & HAMMER_RECG_DIRECT_IO) {
			record->gflags |= HAMMER_RECG_DIRECT_WAIT;
			tsleep(&record->flags, 0, "hmdiow", 0);
		}
		lwkt_reltoken(&hmp->io_token);
	}

	/*
	 * Invalidate any related buffer cache aliases associated with the
	 * backing device.  This is needed because the buffer cache buffer
	 * for file data is associated with the file vnode, not the backing
	 * device vnode.
	 *
	 * XXX I do not think this case can occur any more now that
	 * reservations ensure that all such buffers are removed before
	 * an area can be reused.
	 */
	if (record->gflags & HAMMER_RECG_DIRECT_INVAL) {
		KKASSERT(record->leaf.data_offset);
		hammer_del_buffers(hmp, record->leaf.data_offset,
				   record->zone2_offset, record->leaf.data_len,
				   1);
		record->gflags &= ~HAMMER_RECG_DIRECT_INVAL;
	}
}
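/*
 * Descriptive sequence note: per the comment above, the backend calls
 * hammer_io_direct_wait(record) before committing the record's B-Tree
 * element, guaranteeing the bulk write has reached the media and that
 * any stale device-level buffer aliases have been deleted.
 */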
/*
 * This is called to remove the second-level cached zone-2 offset from
 * frontend buffer cache buffers, now stale due to a data relocation.
 * These offsets are generated by cluster_read() via VOP_BMAP, or directly
 * by hammer_vop_strategy_read().
 *
 * This is rather nasty because here we have something like the reblocker
 * scanning the raw B-Tree with no held references on anything, really,
 * other than a shared lock on the B-Tree node, and we have to access the
 * frontend's buffer cache to check for and clean out the association.
 * Specifically, if the reblocker is moving data on the disk, these cached
 * offsets will become invalid.
 *
 * Only data record types associated with the large-data zone are subject
 * to direct-io and need to be checked.
 */
void
hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
{
	struct hammer_inode_info iinfo;
	int zone;

	if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
		return;
	zone = HAMMER_ZONE_DECODE(leaf->data_offset);
	if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
		return;
	iinfo.obj_id = leaf->base.obj_id;
	iinfo.obj_asof = 0;	/* unused */
	iinfo.obj_localization = leaf->base.localization &
				 HAMMER_LOCALIZE_PSEUDOFS_MASK;
	iinfo.u.leaf = leaf;
	hammer_scan_inode_snapshots(hmp, &iinfo,
				    hammer_io_direct_uncache_callback,
				    &iinfo);
}
static int
hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t iinfo = data;
	hammer_off_t file_offset;
	struct vnode *vp;
	struct buf *bp;
	int blksize;

	if (ip->vp == NULL)
		return(0);
	file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
	blksize = iinfo->u.leaf->data_len;
	KKASSERT((blksize & HAMMER_BUFMASK) == 0);

	/*
	 * Warning: FINDBLK_TEST returns stable storage but not stable
	 * contents.  It happens to be ok in this case.
	 */
	hammer_ref(&ip->lock);
	if (hammer_get_vnode(ip, &vp) == 0) {
		if ((bp = findblk(ip->vp, file_offset, FINDBLK_TEST)) != NULL &&
		    bp->b_bio2.bio_offset != NOOFFSET) {
			bp = getblk(ip->vp, file_offset, blksize, 0, 0);
			bp->b_bio2.bio_offset = NOOFFSET;
			brelse(bp);
		}
		vput(vp);
	}
	hammer_rel_inode(ip, 0);
	return(0);
}
/*
 * This function is called when writes may have occurred on the volume,
 * indicating that the device may be holding cached writes.
 */
static __inline void
hammer_io_flush_mark(hammer_volume_t volume)
{
	atomic_set_int(&volume->vol_flags, HAMMER_VOLF_NEEDFLUSH);
}
/*
 * This function ensures that the device has flushed any cached writes out.
 */
void
hammer_io_flush_sync(hammer_mount_t hmp)
{
	hammer_volume_t volume;
	struct buf *bp_base = NULL;
	struct buf *bp;

	RB_FOREACH(volume, hammer_vol_rb_tree, &hmp->rb_vols_root) {
		if (volume->vol_flags & HAMMER_VOLF_NEEDFLUSH) {
			atomic_clear_int(&volume->vol_flags,
					 HAMMER_VOLF_NEEDFLUSH);
			bp = getpbuf(NULL);
			bp->b_bio1.bio_offset = 0;
			bp->b_bufsize = 0;
			bp->b_bcount = 0;
			bp->b_cmd = BUF_CMD_FLUSH;
			bp->b_bio1.bio_caller_info1.cluster_head = bp_base;
			bp->b_bio1.bio_done = biodone_sync;
			bp->b_bio1.bio_flags |= BIO_SYNC;
			bp_base = bp;
			vn_strategy(volume->devvp, &bp->b_bio1);
		}
	}
	while ((bp = bp_base) != NULL) {
		bp_base = bp->b_bio1.bio_caller_info1.cluster_head;
		biowait(&bp->b_bio1, "hmrFLS");
		relpbuf(bp, NULL);
	}
}
/*
 * Limit the amount of backlog which we allow to build up.
 */
void
hammer_io_limit_backlog(hammer_mount_t hmp)
{
	waitrunningbufspace();
}