/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */
#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>
static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_transaction_t trans,
			hammer_node_t node, int isnew);
static void _hammer_rel_node(hammer_node_t node, int locked);
static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}
/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zoneX_offset < buf2->zoneX_offset)
		return(-1);
	if (buf1->zoneX_offset > buf2->zoneX_offset)
		return(1);
	return(0);
}
static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);
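
/*
 * Editorial sketch (not part of the original source): RB_GENERATE2()
 * emits a type-safe RB_LOOKUP() keyed directly on the field named in
 * its final two arguments, so a cached structure can be found by key
 * without constructing a dummy search node, e.g.:
 *
 *	hammer_volume_t vol;
 *
 *	vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
 *	if (vol == NULL)
 *		...		(volume was never installed)
 *
 * hammer_buffer lookups key on zoneX_offset and hammer_node lookups
 * key on node_offset in exactly the same way.
 */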
/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
		      struct vnode *devvp)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, hmp->m_misc);
	volume->io.hmp = hmp;	/* bootstrap */
	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;

	/*
	 * Get the device vnode
	 */
	if (devvp == NULL) {
		error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
		nlookup_done(&nd);
	} else {
		error = 0;
		volume->devvp = devvp;
	}

	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 && vcount(volume->devvp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
	volume->maxraw_off = ondisk->vol_buf_end;

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	/*
	 * Set the root volume.  HAMMER special-cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->nvolumes = ondisk->vol_count;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		hammer_free_volume(volume);
	}
	return(error);
}
/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
	if (volume->devvp) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		if (volume->io.hmp->ronly) {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		} else {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD);
		}
		vn_unlock(volume->devvp);
	}
	return(0);
}
/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan, so -1 is returned on failure (which aborts the scan).
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	hammer_mount_t hmp = volume->io.hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 */
	hammer_io_clear_modify(&volume->io, 1);
	volume->io.waitdep = 1;

	/*
	 * Clean up the persistent ref ioerror might have on the volume.
	 */
	if (volume->io.ioerror) {
		volume->io.ioerror = 0;
		hammer_rel(&volume->io.lock);
	}

	/*
	 * This should release the bp.  Releasing the volume with flush set
	 * implies the interlock is set.
	 */
	hammer_ref_interlock_true(&volume->io.lock);
	hammer_rel_volume(volume, 1);
	KKASSERT(volume->io.bp == NULL);

	/*
	 * There should be no references on the volume, no clusters, and
	 * no super-clusters.
	 */
	KKASSERT(hammer_norefs(&volume->io.lock));

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly) {
			/*
			 * Make sure we don't sync anything to disk if we
			 * are in read-only mode (1) or critically-errored
			 * (2).  Note that there may be dirty buffers in
			 * normal read-only mode from crash recovery.
			 */
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD);
		} else {
			/*
			 * Normal termination, save any dirty buffers
			 * (XXX there really shouldn't be any).
			 */
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}
static
void
hammer_free_volume(hammer_volume_t volume)
{
	hammer_mount_t hmp = volume->io.hmp;

	if (volume->vol_name) {
		kfree(volume->vol_name, hmp->m_misc);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, hmp->m_misc);
}
/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}

	/*
	 * Reference the volume, load/check the data on the 0->1 transition.
	 * hammer_load_volume() will dispose of the interlock on return,
	 * and also clean up the ref count on error.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		*errorp = hammer_load_volume(volume);
		if (*errorp)
			volume = NULL;
	} else {
		KKASSERT(volume->ondisk);
		*errorp = 0;
	}
	return(volume);
}
int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	/*
	 * Reference the volume and deal with the check condition used to
	 * load its ondisk info.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		error = hammer_load_volume(volume);
	} else {
		KKASSERT(volume->ondisk);
		error = 0;
	}
	return(error);
}
hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);

	/*
	 * Reference the volume and deal with the check condition used to
	 * load its ondisk info.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		*errorp = hammer_load_volume(volume);
		if (*errorp)
			volume = NULL;
	} else {
		KKASSERT(volume->ondisk);
		*errorp = 0;
	}
	return(volume);
}
/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * the interlock is held on call.  The interlock will be released on return.
 * The reference will also be released on return if an error occurs.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io,
				       volume->maxraw_off);
		if (error == 0) {
			volume->ondisk = (void *)volume->io.bp->b_data;
			hammer_ref_interlock_done(&volume->io.lock);
		} else {
			hammer_rel_volume(volume, 1);
		}
	} else {
		error = 0;
	}
	return(error);
}
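
/*
 * A minimal sketch of the ref/interlock idiom used throughout this
 * section (an editorial restatement of the comments above, not new
 * API): hammer_ref_interlock() returns non-zero only on a 0->1 or
 * deferred CHECK transition, in which case the caller owns the
 * interlock and must populate the ondisk data:
 *
 *	if (hammer_ref_interlock(&volume->io.lock)) {
 *		error = hammer_load_volume(volume);
 *		(load_volume disposes of the interlock and, on error,
 *		of the ref as well)
 *	} else {
 *		KKASSERT(volume->ondisk);	(already loaded)
 *		error = 0;
 *	}
 */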
/*
 * Release a previously acquired reference on the volume.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int locked)
{
	struct buf *bp;

	if (hammer_rel_interlock(&volume->io.lock, locked)) {
		volume->ondisk = NULL;
		bp = hammer_io_release(&volume->io, locked);
		hammer_rel_interlock_done(&volume->io.lock, locked);
		if (bp)
			brelse(bp);
	}
}
int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
	hammer_volume_t vol;
	int i;

	for (i = 0; i < hmp->nvolumes; ++i) {
		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
		if (vol == NULL)
			return(EINVAL);
	}
	return(0);
}
/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently most blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int bytes, int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t zone2_offset;
	hammer_io_type_t iotype;
	int vol_no;
	int zone;

	buf_offset &= ~HAMMER_BUFMASK64;
again:
	/*
	 * Shortcut if the buffer is already cached
	 */
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
	if (buffer) {
		/*
		 * Once refed the ondisk field will not be cleared by
		 * any other action.  Shortcut the operation if the
		 * ondisk structure is valid.
		 */
		if (hammer_ref_interlock(&buffer->io.lock) == 0) {
			hammer_io_advance(&buffer->io);
			KKASSERT(buffer->ondisk);
			*errorp = 0;
			return(buffer);
		}

		/*
		 * 0->1 transition or deferred 0->1 transition (CHECK),
		 * interlock now held.  Shortcut if ondisk is already
		 * assigned.
		 */
		++hammer_count_refedbufs;
		if (buffer->ondisk) {
			hammer_io_advance(&buffer->io);
			hammer_ref_interlock_done(&buffer->io.lock);
			*errorp = 0;
			return(buffer);
		}

		/*
		 * The buffer is no longer loose if it has a ref, and
		 * cannot become loose once it gains a ref.  Loose
		 * buffers will never be in a modified state.  This should
		 * only occur on the 0->1 transition of refs.
		 *
		 * lose_list can be modified via a biodone() interrupt.
		 */
		if (buffer->io.mod_list == &hmp->lose_list) {
			crit_enter();	/* biodone race against list */
			TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
				     mod_entry);
			crit_exit();
			buffer->io.mod_list = NULL;
			KKASSERT(buffer->io.modified == 0);
		}
		goto found;
	}

	/*
	 * What is the buffer class?
	 */
	zone = HAMMER_ZONE_DECODE(buf_offset);

	switch(zone) {
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
		break;
	case HAMMER_ZONE_META_INDEX:
	default:
		/*
		 * NOTE: inode data and directory entries are placed in this
		 * zone.  inode atime/mtime is updated in-place and thus
		 * buffers containing inodes must be synchronized as
		 * meta-buffers, same as buffers containing B-Tree info.
		 */
		iotype = HAMMER_STRUCTURE_META_BUFFER;
		break;
	}

	/*
	 * Handle blockmap offset translations
	 */
	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
	} else {
		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
		zone2_offset = buf_offset;
		*errorp = 0;
	}
	if (*errorp)
		return(NULL);

	/*
	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
	 * specifications.
	 */
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	KKASSERT(zone2_offset < volume->maxbuf_off);

	/*
	 * Allocate a new buffer structure.  We will check for races later.
	 */
	++hammer_count_buffers;
	buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
			 M_WAITOK|M_ZERO|M_USE_RESERVE);
	buffer->zone2_offset = zone2_offset;
	buffer->zoneX_offset = buf_offset;

	hammer_io_init(&buffer->io, volume, iotype);
	buffer->io.offset = volume->ondisk->vol_buf_beg +
			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
	buffer->io.bytes = bytes;
	TAILQ_INIT(&buffer->clist);
	hammer_ref_interlock_true(&buffer->io.lock);

	/*
	 * Insert the buffer into the RB tree and handle late collisions.
	 */
	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
		hammer_rel_volume(volume, 0);
		buffer->io.volume = NULL;			/* safety */
		if (hammer_rel_interlock(&buffer->io.lock, 1))	/* safety */
			hammer_rel_interlock_done(&buffer->io.lock, 1);
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
		goto again;
	}
	++hammer_count_refedbufs;
found:

	/*
	 * The buffer is referenced and interlocked.  Load the buffer
	 * if necessary.  hammer_load_buffer() deals with the interlock
	 * and, if an error is returned, also deals with the ref.
	 */
	if (buffer->ondisk == NULL) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp)
			buffer = NULL;
	} else {
		hammer_io_advance(&buffer->io);
		hammer_ref_interlock_done(&buffer->io.lock);
		*errorp = 0;
	}
	return(buffer);
}
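
/*
 * Worked example (editorial): a hammer_off_t packs a 4-bit zone, an
 * 8-bit volume number and a 52-bit offset (see hammer_disk.h), so the
 * translation above conceptually performs:
 *
 *	zone   = HAMMER_ZONE_DECODE(buf_offset);	(top 4 bits)
 *	vol_no = HAMMER_VOL_DECODE(zone2_offset);	(next 8 bits)
 *	off    = zone2_offset & HAMMER_OFF_SHORT_MASK;	(media offset)
 *
 * Only zone-2 (raw buffer) offsets map directly to the media; zone-3
 * (undo) and zone-8-and-above (blockmap-backed) offsets must first
 * pass through hammer_undo_lookup() or hammer_blockmap_lookup()
 * respectively, as done above.
 */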
/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or
 * write-running hammer buffers must be fully synced to disk before we
 * can issue the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
	hammer_buffer_t buffer;
	int error;

	KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer && (buffer->io.modified || buffer->io.running)) {
			error = hammer_ref_buffer(buffer);
			if (error == 0) {
				hammer_io_wait(&buffer->io);
				if (buffer->io.modified) {
					hammer_io_write_interlock(&buffer->io);
					hammer_io_flush(&buffer->io, 0);
					hammer_io_done_interlock(&buffer->io);
					hammer_io_wait(&buffer->io);
				}
				hammer_rel_buffer(buffer, 0);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
}
/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
		   hammer_off_t zone2_offset, int bytes,
		   int report_conflicts)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	int vol_no;
	int error;
	int ret_error;

	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &ret_error);
	KKASSERT(ret_error == 0);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && !hammer_oneref(&buffer->io.lock)) {
				error = EAGAIN;
				hammer_rel_buffer(buffer, 0);
			}
			if (error == 0) {
				KKASSERT(buffer->zone2_offset == zone2_offset);
				hammer_io_clear_modify(&buffer->io, 1);
				buffer->io.reclaim = 1;
				buffer->io.waitdep = 1;
				KKASSERT(buffer->io.volume == volume);
				hammer_rel_buffer(buffer, 0);
			}
		} else {
			error = hammer_io_inval(volume, zone2_offset);
		}
		if (error) {
			ret_error = error;
			if (report_conflicts ||
			    (hammer_debug_general & 0x8000)) {
				kprintf("hammer_del_buffers: unable to "
					"invalidate %016llx buffer=%p rep=%d\n",
					(long long)base_offset,
					buffer, report_conflicts);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		zone2_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
	hammer_rel_volume(volume, 0);
	return(ret_error);
}
/*
 * Given a referenced and interlocked buffer load/validate the data.
 *
 * The buffer interlock will be released on return.  If an error is
 * returned the buffer reference will also be released (and the buffer
 * pointer will thus be stale).
 */
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->io.volume;

	if (hammer_debug_io & 0x0001) {
		kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
			(long long)buffer->zoneX_offset,
			(long long)buffer->zone2_offset,
			isnew, buffer->ondisk);
	}

	if (buffer->ondisk == NULL) {
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else {
			error = hammer_io_read(volume->devvp, &buffer->io,
					       volume->maxraw_off);
		}
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	if (error == 0) {
		hammer_io_advance(&buffer->io);
		hammer_ref_interlock_done(&buffer->io.lock);
	} else {
		hammer_rel_buffer(buffer, 1);
	}
	return(error);
}
/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount or when a volume is
 * removed.
 *
 * If data != NULL, it specifies a volume whose buffers should
 * be unloaded.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data)
{
	struct hammer_volume *volume = (struct hammer_volume *) data;

	/*
	 * If volume != NULL we are only interested in unloading buffers
	 * associated with a particular volume.
	 */
	if (volume != NULL && volume != buffer->io.volume)
		return 0;

	/*
	 * Clean up the persistent ref ioerror might have on the buffer
	 * and acquire a ref.  Expect a 0->1 transition.
	 */
	if (buffer->io.ioerror) {
		buffer->io.ioerror = 0;
		hammer_rel(&buffer->io.lock);
		--hammer_count_refedbufs;
	}
	hammer_ref_interlock_true(&buffer->io.lock);
	++hammer_count_refedbufs;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 *
	 * We must set waitdep to ensure that a running buffer is waited
	 * on and released prior to us trying to unload the volume.
	 */
	hammer_io_clear_modify(&buffer->io, 1);
	hammer_flush_buffer_nodes(buffer);
	buffer->io.waitdep = 1;
	hammer_rel_buffer(buffer, 1);
	return(0);
}
/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	int error;
	int locked;

	/*
	 * Acquire a ref, plus the buffer will be interlocked on the
	 * 0->1 transition.
	 */
	locked = hammer_ref_interlock(&buffer->io.lock);

	/*
	 * At this point a biodone() will not touch the buffer other than
	 * incidental bits.  However, lose_list can be modified via
	 * a biodone() interrupt.
	 *
	 * No longer loose.
	 */
	if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
		crit_enter();
		if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
			TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
				     mod_entry);
			buffer->io.mod_list = NULL;
		}
		crit_exit();
	}

	if (locked) {
		++hammer_count_refedbufs;
		error = hammer_load_buffer(buffer, 0);
		/* NOTE: on error the buffer pointer is stale */
	} else {
		error = 0;
	}
	return(error);
}
/*
 * Release a reference on the buffer.  On the 1->0 transition the
 * underlying IO will be released but the data reference is left
 * cached.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int locked)
{
	hammer_volume_t volume;
	hammer_mount_t hmp;
	struct buf *bp = NULL;
	int freeme = 0;

	hmp = buffer->io.hmp;

	if (hammer_rel_interlock(&buffer->io.lock, locked) == 0)
		return;

	/*
	 * hammer_count_refedbufs accounting.  Decrement if we are in
	 * the error path or if CHECK is clear.
	 *
	 * If we are not in the error path and CHECK is set the caller
	 * probably just did a hammer_ref() and didn't account for it,
	 * so we don't account for the loss here.
	 */
	if (locked || (buffer->io.lock.refs & HAMMER_REFS_CHECK) == 0)
		--hammer_count_refedbufs;

	/*
	 * If the caller locked us or the normal release transitions
	 * from 1->0 (and acquired the lock) attempt to release the
	 * io.  If the caller locked us we tell hammer_io_release()
	 * to flush (which would be the unload or failure path).
	 */
	bp = hammer_io_release(&buffer->io, locked);

	/*
	 * If the buffer has no bp association and no refs we can destroy
	 * it.
	 *
	 * NOTE: It is impossible for any associated B-Tree nodes to have
	 * refs if the buffer has no additional refs.
	 */
	if (buffer->io.bp == NULL && hammer_norefs(&buffer->io.lock)) {
		RB_REMOVE(hammer_buf_rb_tree,
			  &buffer->io.hmp->rb_bufs_root,
			  buffer);
		volume = buffer->io.volume;
		buffer->io.volume = NULL;	/* sanity */
		hammer_rel_volume(volume, 0);
		hammer_io_clear_modlist(&buffer->io);
		hammer_flush_buffer_nodes(buffer);
		KKASSERT(TAILQ_EMPTY(&buffer->clist));
		freeme = 1;
	}

	/*
	 * Cleanup
	 */
	hammer_rel_interlock_done(&buffer->io.lock, locked);
	if (bp)
		brelse(bp);
	if (freeme) {
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
	}
}
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 * passed cached *bufferp to match against either zoneX or zone2.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	      int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}
void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
	     int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		 int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}
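
/*
 * Usage sketch (editorial): callers typically keep a cursor-style
 * cached buffer pointer which _hammer_bread() recycles across calls:
 *
 *	hammer_buffer_t buffer = NULL;
 *	int error;
 *	void *data;
 *
 *	data = hammer_bread(hmp, buf_offset, &error, &buffer);
 *	...
 *	data = hammer_bread(hmp, buf_offset2, &error, &buffer);
 *	...
 *	if (buffer)
 *		hammer_rel_buffer(buffer, 0);
 *
 * Consecutive reads that land in the same buffer skip the release and
 * reacquire cycle entirely, which is why *bufferp is matched against
 * both the zoneX and zone2 offsets above.
 */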
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	     int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
	    int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}
/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
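
/*
 * Editorial sketch of the passive association described above.  The
 * shape below is inferred from the cache_list/TAILQ usage in this
 * file; the authoritative definition lives in hammer.h:
 *
 *	struct hammer_node_cache {
 *		TAILQ_ENTRY(hammer_node_cache) entry;
 *		hammer_node_t	node;
 *		...
 *	};
 *
 * Holding such a cache structure costs no node refs; the node walks
 * its cache_list and clears every back-pointer when it is destroyed,
 * and hammer_ref_node_safe() revalidates the pointer before use.
 */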
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_node_t node;
	int doload;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), hmp->m_misc, M_WAITOK|M_ZERO|M_USE_RESERVE);
		node->node_offset = node_offset;
		node->hmp = hmp;
		TAILQ_INIT(&node->cursor_list);
		TAILQ_INIT(&node->cache_list);
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, hmp->m_misc);
			goto again;
		}
		doload = hammer_ref_interlock_true(&node->lock);
	} else {
		doload = hammer_ref_interlock(&node->lock);
	}
	if (doload) {
		*errorp = hammer_load_node(trans, node, isnew);
		trans->flags |= HAMMER_TRANSF_DIDIO;
		if (*errorp)
			node = NULL;
	} else {
		KKASSERT(node->ondisk);
		*errorp = 0;
		hammer_io_advance(&node->buffer->io);
	}
	return(node);
}
/*
 * Reference an already-referenced node.  0->1 transitions should assert
 * so we do not have to deal with hammer_ref() setting CHECK.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(hammer_isactive(&node->lock) && node->ondisk != NULL);
	hammer_ref(&node->lock);
}
/*
 * Load a node's on-disk data reference.  Called with the node referenced
 * and interlocked.
 *
 * On return the node interlock will be unlocked.  If a non-zero error code
 * is returned the node will also be dereferenced (and the caller's pointer
 * will be stale).
 */
static int
hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	hammer_off_t buf_offset;
	int error;

	error = 0;
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		} else {
			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
			buffer = hammer_get_buffer(node->hmp, buf_offset,
						   HAMMER_BUFSIZE, 0, &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error)
			goto failed;
		node->ondisk = (void *)((char *)buffer->ondisk +
					(node->node_offset & HAMMER_BUFMASK));

		/*
		 * Check CRC.  NOTE: Neither flag is set and the CRC is not
		 * generated on new B-Tree nodes.
		 */
		if (isnew == 0 &&
		    (node->flags & HAMMER_NODE_CRCANY) == 0) {
			if (hammer_crc_test_btree(node->ondisk) == 0) {
				if (hammer_debug_critical)
					Debugger("CRC FAILED: B-TREE NODE");
				node->flags |= HAMMER_NODE_CRCBAD;
			} else {
				node->flags |= HAMMER_NODE_CRCGOOD;
			}
		}
	}
	if (node->flags & HAMMER_NODE_CRCBAD) {
		if (trans->flags & HAMMER_TRANSF_CRCDOM)
			error = EDOM;
		else
			error = EIO;
	}
failed:
	if (error) {
		_hammer_rel_node(node, 1);
	} else {
		hammer_ref_interlock_done(&node->lock);
	}
	return(error);
}
/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
		     int *errorp)
{
	hammer_node_t node;
	int doload;

	node = cache->node;
	if (node != NULL) {
		doload = hammer_ref_interlock(&node->lock);
		if (doload) {
			*errorp = hammer_load_node(trans, node, 0);
			if (*errorp)
				node = NULL;
		} else {
			KKASSERT(node->ondisk);
			if (node->flags & HAMMER_NODE_CRCBAD) {
				if (trans->flags & HAMMER_TRANSF_CRCDOM)
					*errorp = EDOM;
				else
					*errorp = EIO;
				_hammer_rel_node(node, 0);
				node = NULL;
			} else {
				*errorp = 0;
			}
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}
/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 *
 * If locked is non-zero the passed node has been interlocked by the
 * caller and we are in the failure/unload path, otherwise it has not and
 * we are doing a normal release.
 *
 * This function will dispose of the interlock and the reference.
 * On return the node pointer is stale.
 */
void
_hammer_rel_node(hammer_node_t node, int locked)
{
	hammer_buffer_t buffer;

	/*
	 * Deref the node.  If this isn't the 1->0 transition we're basically
	 * done.  If locked is non-zero this function will just deref the
	 * locked node and return TRUE, otherwise it will deref the locked
	 * node and either lock and return TRUE on the 1->0 transition or
	 * not lock and return FALSE.
	 */
	if (hammer_rel_interlock(&node->lock, locked) == 0)
		return;

	/*
	 * Either locked was non-zero and we are interlocked, or the
	 * hammer_rel_interlock() call returned non-zero and we are
	 * interlocked.
	 *
	 * The ref-count must still be decremented if locked != 0 so
	 * the cleanup required still varies a bit.
	 *
	 * hammer_flush_node() when called with 1 or 2 will dispose of
	 * the lock and possible ref-count.
	 */
	if (node->ondisk == NULL) {
		hammer_flush_node(node, locked + 1);
		/* node is stale now */
		return;
	}

	/*
	 * Do not disassociate the node from the buffer if it represents
	 * a modified B-Tree node that still needs its crc to be generated.
	 */
	if (node->flags & HAMMER_NODE_NEEDSCRC) {
		hammer_rel_interlock_done(&node->lock, locked);
		return;
	}

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		/*
		 * Normal release.
		 */
		hammer_rel_interlock_done(&node->lock, locked);
	} else {
		/*
		 * Destroy the node.
		 */
		hammer_flush_node(node, locked + 1);
		/* node is stale */
	}
	hammer_rel_buffer(buffer, 0);
}

void
hammer_rel_node(hammer_node_t node)
{
	_hammer_rel_node(node, 0);
}
/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}
/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
	/*
	 * If the node doesn't exist, or is being deleted, don't cache it!
	 *
	 * The node can only ever be NULL in the I/O failure path.
	 */
	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
		return;
	if (cache->node == node)
		return;
	while (cache->node)
		hammer_uncache_node(cache);
	if (node->flags & HAMMER_NODE_DELETED)
		return;
	cache->node = node;
	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}
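
/*
 * Usage sketch (editorial, field names illustrative): an inode or
 * cursor typically caches its B-Tree position after releasing the
 * node and revalidates it later:
 *
 *	hammer_cache_node(&ip->cache[0], node);
 *	hammer_rel_node(node);
 *	...
 *	node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
 *
 * If the node was flushed or deleted in the interim the cache entry
 * has already been cleared, and hammer_ref_node_safe() fails with
 * ENOENT instead of dereferencing a stale pointer.
 */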
void
hammer_uncache_node(hammer_node_cache_t cache)
{
	hammer_node_t node;

	if ((node = cache->node) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
		if (TAILQ_EMPTY(&node->cache_list))
			hammer_flush_node(node, 0);
	}
}
/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 *
 * locked == 0	Normal unlocked operation
 * locked == 1	Call hammer_rel_interlock_done(..., 0);
 * locked == 2	Call hammer_rel_interlock_done(..., 1);
 *
 * XXX for now this isn't even close to being MPSAFE so the refs check
 *     is sufficient.
 */
void
hammer_flush_node(hammer_node_t node, int locked)
{
	hammer_node_cache_t cache;
	hammer_buffer_t buffer;
	hammer_mount_t hmp = node->hmp;
	int dofree;

	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
	}

	/*
	 * NOTE: refs is predisposed if another thread is blocking and
	 *	 will be larger than 0 in that case.  We aren't MPSAFE
	 *	 here.
	 */
	if (node->ondisk == NULL && hammer_norefs(&node->lock)) {
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		dofree = 1;
	} else {
		dofree = 0;
	}

	/*
	 * Deal with the interlock if locked == 1 or locked == 2.
	 */
	if (locked)
		hammer_rel_interlock_done(&node->lock, locked - 1);

	/*
	 * Destroy if requested
	 */
	if (dofree) {
		--hammer_count_nodes;
		kfree(node, hmp->m_misc);
	}
}
/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
		KKASSERT(node->ondisk == NULL);
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

		if (hammer_try_interlock_norefs(&node->lock)) {
			hammer_ref(&node->lock);
			node->flags |= HAMMER_NODE_FLUSH;
			_hammer_rel_node(node, 1);
		} else {
			KKASSERT(node->buffer != NULL);
			buffer = node->buffer;
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
	}
}
/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/
/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    hint, errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}
/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  u_int16_t rec_type, hammer_off_t *data_offsetp,
		  struct hammer_buffer **data_bufferp,
		  hammer_off_t hint, int *errorp)
{
	void *data;
	int zone;

	/*
	 * Allocate data
	 */
	if (data_len) {
		switch(rec_type) {
		case HAMMER_RECTYPE_INODE:
		case HAMMER_RECTYPE_DIRENTRY:
		case HAMMER_RECTYPE_EXT:
		case HAMMER_RECTYPE_FIX:
		case HAMMER_RECTYPE_PFS:
		case HAMMER_RECTYPE_SNAPSHOT:
		case HAMMER_RECTYPE_CONFIG:
			zone = HAMMER_ZONE_META_INDEX;
			break;
		case HAMMER_RECTYPE_DATA:
		case HAMMER_RECTYPE_DB:
			if (data_len <= HAMMER_BUFSIZE / 2) {
				zone = HAMMER_ZONE_SMALL_DATA_INDEX;
			} else {
				data_len = (data_len + HAMMER_BUFMASK) &
					   ~HAMMER_BUFMASK;
				zone = HAMMER_ZONE_LARGE_DATA_INDEX;
			}
			break;
		default:
			panic("hammer_alloc_data: rec_type %04x unknown",
			      rec_type);
			zone = 0;	/* NOT REACHED */
			break;
		}
		*data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
						      hint, errorp);
	} else {
		*data_offsetp = 0;
	}
	if (*errorp == 0 && data_bufferp) {
		if (data_len) {
			data = hammer_bread_ext(trans->hmp, *data_offsetp,
						data_len, errorp, data_bufferp);
		} else {
			data = NULL;
		}
	} else {
		data = NULL;
	}
	return(data);
}
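
/*
 * Worked example (editorial): for HAMMER_RECTYPE_DATA a 1000-byte
 * record is at or below the HAMMER_BUFSIZE/2 (8KB) threshold and is
 * allocated from the small-data zone without rounding, while a
 * 20000-byte record is rounded up to 32768 bytes (two 16KB buffers)
 * and allocated from the large-data zone.  The meta-data record types
 * always go to the meta zone so their buffers are synchronized as
 * meta-buffers (see the iotype selection in hammer_get_buffer()).
 */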
/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);
int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;
	if (waitfor == MNT_WAIT) {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	} else {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	return(info.error);
}
/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
		      hammer_sync_scan1, hammer_sync_scan2, &info);
	if (info.error == 0 && waitfor == MNT_WAIT) {
		info.waitfor = waitfor;
		vmntvnodescan(hmp->mp, VMSC_GETVP,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	if (waitfor == MNT_WAIT) {
		hammer_flusher_sync(hmp);
		hammer_flusher_sync(hmp);
	} else {
		hammer_flusher_async(hmp, NULL);
		hammer_flusher_async(hmp, NULL);
	}
	return(info.error);
}
static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_inode *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}
static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
	if (error)
		info->error = error;
	return(0);
}