/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>
static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_transaction_t trans,
                        hammer_node_t node, int isnew);
static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
        if (vol1->vol_no < vol2->vol_no)
                return(-1);
        if (vol1->vol_no > vol2->vol_no)
                return(1);
        return(0);
}
/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
        if (buf1->zoneX_offset < buf2->zoneX_offset)
                return(-1);
        if (buf1->zoneX_offset > buf2->zoneX_offset)
                return(1);
        return(0);
}
static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
        if (node1->node_offset < node2->node_offset)
                return(-1);
        if (node1->node_offset > node2->node_offset)
                return(1);
        return(0);
}
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
             hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
             hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
             hammer_nod_rb_compare, hammer_off_t, node_offset);
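
/*
 * Summary note (annotation, not upstream code): the RB_GENERATE2() forms
 * above also emit keyed-lookup variants of the generated functions, which
 * is what allows the RB_LOOKUP() calls later in this file to take a bare
 * vol_no, zoneX_offset or node_offset as the search key rather than a
 * dummy structure.
 */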
/************************************************************************
 *                              VOLUMES                                 *
 ************************************************************************/

/*
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded during mount.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
                      struct vnode *devvp)
{
        struct mount *mp;
        hammer_volume_t volume;
        struct hammer_volume_ondisk *ondisk;
        struct nlookupdata nd;
        struct buf *bp = NULL;
        int error;
        int ronly;
        int setmp = 0;

        mp = hmp->mp;
        ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Allocate a volume structure
         */
        ++hammer_count_volumes;
        volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
        volume->vol_name = kstrdup(volname, hmp->m_misc);
        volume->io.hmp = hmp;   /* bootstrap */
        hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
        volume->io.offset = 0LL;
        volume->io.bytes = HAMMER_BUFSIZE;

        /*
         * Get the device vnode
         */
        if (devvp == NULL) {
                error = nlookup_init(&nd, volume->vol_name,
                                     UIO_SYSSPACE, NLC_FOLLOW);
                if (error == 0)
                        error = nlookup(&nd);
                if (error == 0)
                        error = cache_vref(&nd.nl_nch, nd.nl_cred,
                                           &volume->devvp);
                nlookup_done(&nd);
        } else {
                error = 0;
                volume->devvp = devvp;
        }

        if (error == 0) {
                if (vn_isdisk(volume->devvp, &error)) {
                        error = vfs_mountedon(volume->devvp);
                }
        }
        if (error == 0 && vcount(volume->devvp) > 0)
                error = EBUSY;
        if (error == 0) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                if (error == 0) {
                        error = VOP_OPEN(volume->devvp,
                                         (ronly ? FREAD : FREAD|FWRITE),
                                         FSCRED, NULL);
                }
                vn_unlock(volume->devvp);
        }
        if (error) {
                hammer_free_volume(volume);
                return(error);
        }
        volume->devvp->v_rdev->si_mountpoint = mp;
        setmp = 1;

        /*
         * Extract the volume number from the volume header and do various
         * sanity checks.
         */
        error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
        if (error)
                goto late_failure;
        ondisk = (void *)bp->b_data;
        if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
                kprintf("hammer_mount: volume %s has an invalid header\n",
                        volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }
        volume->vol_no = ondisk->vol_no;
        volume->buffer_base = ondisk->vol_buf_beg;
        volume->vol_flags = ondisk->vol_flags;
        volume->nblocks = ondisk->vol_nblocks;
        volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
                                ondisk->vol_buf_end - ondisk->vol_buf_beg);
        volume->maxraw_off = ondisk->vol_buf_end;

        if (RB_EMPTY(&hmp->rb_vols_root)) {
                hmp->fsid = ondisk->vol_fsid;
        } else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
                kprintf("hammer_mount: volume %s's fsid does not match "
                        "other volumes\n", volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }

        /*
         * Insert the volume structure into the red-black tree.
         */
        if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
                kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
                        volume->vol_name, volume->vol_no);
                error = EEXIST;
        }

        /*
         * Set the root volume.  HAMMER special cases the rootvol structure.
         * We do not hold a ref because this would prevent related I/O
         * from being flushed.
         */
        if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
                hmp->rootvol = volume;
                hmp->nvolumes = ondisk->vol_count;
                if (bp) {
                        brelse(bp);
                        bp = NULL;
                }
                hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
                hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
        }
late_failure:
        if (bp)
                brelse(bp);
        if (error) {
                /*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
                if (setmp)
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
                hammer_free_volume(volume);
        }
        return (error);
}

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
        if (volume->devvp) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                if (volume->io.hmp->ronly) {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                } else {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD);
                }
                vn_unlock(volume->devvp);
        }
        return(0);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue scan
 * so returns -1 on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
        hammer_mount_t hmp = volume->io.hmp;
        int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Clean up the root volume pointer, which is held unlocked in hmp.
         */
        if (hmp->rootvol == volume)
                hmp->rootvol = NULL;

        /*
         * We must not flush a dirty buffer to disk on umount.  It should
         * have already been dealt with by the flusher, or we may be in
         * catastrophic failure.
         */
        hammer_io_clear_modify(&volume->io, 1);
        volume->io.waitdep = 1;

        /*
         * Clean up the persistent ref ioerror might have on the volume.
         */
        if (volume->io.ioerror) {
                volume->io.ioerror = 0;
                hammer_unref(&volume->io.lock);
        }

        /*
         * This should release the bp.
         */
        KKASSERT(volume->io.lock.refs == 0);
        hammer_ref(&volume->io.lock);
        hammer_rel_volume(volume, 1);
        KKASSERT(volume->io.bp == NULL);

        /*
         * There should be no references on the volume, no clusters, and
         * no super-clusters.
         */
        KKASSERT(volume->io.lock.refs == 0);

        volume->ondisk = NULL;
        if (volume->devvp) {
                if (volume->devvp->v_rdev &&
                    volume->devvp->v_rdev->si_mountpoint == hmp->mp) {
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                }
                if (ronly) {
                        /*
                         * Make sure we don't sync anything to disk if we
                         * are in read-only mode (1) or critically-errored
                         * (2).  Note that there may be dirty buffers in
                         * normal read-only mode from crash recovery.
                         */
                        vinvalbuf(volume->devvp, 0, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD);
                } else {
                        /*
                         * Normal termination, save any dirty buffers
                         * (XXX there really shouldn't be any).
                         */
                        vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                }
        }

        /*
         * Destroy the structure.
         */
        RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
        hammer_free_volume(volume);
        return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
        hammer_mount_t hmp = volume->io.hmp;

        if (volume->vol_name) {
                kfree(volume->vol_name, hmp->m_misc);
                volume->vol_name = NULL;
        }
        if (volume->devvp) {
                vrele(volume->devvp);
                volume->devvp = NULL;
        }
        --hammer_count_volumes;
        kfree(volume, hmp->m_misc);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
        struct hammer_volume *volume;

        /*
         * Locate the volume structure
         */
        volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
        if (volume == NULL) {
                *errorp = ENOENT;
                return(NULL);
        }
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(volume);
}
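
/*
 * Illustrative usage (a sketch, not part of the original file): callers
 * pair hammer_get_volume() with hammer_rel_volume(), as hammer_del_buffers()
 * does later in this file:
 *
 *      volume = hammer_get_volume(hmp, vol_no, &error);
 *      if (volume) {
 *              ...access volume->ondisk...
 *              hammer_rel_volume(volume, 0);
 *      }
 */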

int
hammer_ref_volume(hammer_volume_t volume)
{
        int error;

        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                error = hammer_load_volume(volume);
                if (error)
                        hammer_rel_volume(volume, 1);
        } else {
                error = 0;
        }
        return (error);
}

hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
        hammer_volume_t volume;

        volume = hmp->rootvol;
        KKASSERT(volume != NULL);
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple get's.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
        int error;

        ++volume->io.loading;
        hammer_lock_ex(&volume->io.lock);

        if (volume->ondisk == NULL) {
                error = hammer_io_read(volume->devvp, &volume->io,
                                       volume->maxraw_off);
                if (error == 0)
                        volume->ondisk = (void *)volume->io.bp->b_data;
        } else {
                error = 0;
        }
        --volume->io.loading;
        hammer_unlock(&volume->io.lock);
        return(error);
}

/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
        struct buf *bp = NULL;

        if (volume->io.lock.refs == 1) {
                ++volume->io.loading;   /* force interlock check */
                hammer_lock_ex(&volume->io.lock);
                if (volume->io.lock.refs == 1) {
                        volume->ondisk = NULL;
                        bp = hammer_io_release(&volume->io, flush);
                }
                --volume->io.loading;
                hammer_unlock(&volume->io.lock);
        }
        hammer_unref(&volume->io.lock);
        if (bp)
                brelse(bp);
}
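
/*
 * Note on the pattern above (annotation, not upstream code): the refs == 1
 * test is redone after acquiring the exclusive lock because another thread
 * may gain a ref between the unlocked test and the lock acquisition; the
 * loading counter forces hammer_load_volume() to interlock as well.  The
 * same idiom reappears in hammer_rel_buffer() below.
 */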

int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
        hammer_volume_t vol;
        int i;

        for (i = 0; i < hmp->nvolumes; ++i) {
                vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
                if (vol == NULL)
                        return(EINVAL);
        }
        return(0);
}

/************************************************************************
 *                              BUFFERS                                 *
 ************************************************************************/

/*
 * Manage buffers.  Currently most blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */
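
/*
 * Illustrative sketch (annotation, not upstream code): a zoneX buffer
 * offset decomposes via the helpers used below, e.g.
 *
 *      zone   = HAMMER_ZONE_DECODE(buf_offset);        zone index
 *      vol_no = HAMMER_VOL_DECODE(zone2_offset);       volume number
 *      xoff   = (int32_t)buf_offset & HAMMER_BUFMASK;  intra-buffer offset
 *
 * After blockmap translation the zone-2 offset must land in
 * HAMMER_ZONE_RAW_BUFFER, which hammer_get_buffer() asserts.
 */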
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
                  int bytes, int isnew, int *errorp)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        hammer_off_t zone2_offset;
        hammer_io_type_t iotype;
        int vol_no;
        int zone;

        buf_offset &= ~HAMMER_BUFMASK64;
again:
        /*
         * Shortcut if the buffer is already cached
         */
        buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
        if (buffer) {
                if (buffer->io.lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&buffer->io.lock);

                /*
                 * Once refed the ondisk field will not be cleared by
                 * a disassociation.
                 */
                if (buffer->ondisk && buffer->io.loading == 0) {
                        *errorp = 0;
                        hammer_io_advance(&buffer->io);
                        return(buffer);
                }

                /*
                 * The buffer is no longer loose if it has a ref, and
                 * cannot become loose once it gains a ref.  Loose
                 * buffers will never be in a modified state.  This should
                 * only occur on the 0->1 transition of refs.
                 *
                 * lose_list can be modified via a biodone() interrupt.
                 */
                if (buffer->io.mod_list == &hmp->lose_list) {
                        crit_enter();   /* biodone race against list */
                        TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
                                     mod_entry);
                        crit_exit();
                        buffer->io.mod_list = NULL;
                        KKASSERT(buffer->io.modified == 0);
                }
                goto found;
        }

        /*
         * What is the buffer class?
         */
        zone = HAMMER_ZONE_DECODE(buf_offset);

        switch(zone) {
        case HAMMER_ZONE_LARGE_DATA_INDEX:
        case HAMMER_ZONE_SMALL_DATA_INDEX:
                iotype = HAMMER_STRUCTURE_DATA_BUFFER;
                break;
        case HAMMER_ZONE_UNDO_INDEX:
                iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
                break;
        case HAMMER_ZONE_META_INDEX:
        default:
                /*
                 * NOTE: inode data and directory entries are placed in this
                 * zone.  inode atime/mtime is updated in-place and thus
                 * buffers containing inodes must be synchronized as
                 * meta-buffers, same as buffers containing B-Tree info.
                 */
                iotype = HAMMER_STRUCTURE_META_BUFFER;
                break;
        }

        /*
         * Handle blockmap offset translations
         */
        if (zone >= HAMMER_ZONE_BTREE_INDEX) {
                zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
        } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
                zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
        } else {
                KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
                zone2_offset = buf_offset;
                *errorp = 0;
        }
        if (*errorp)
                return(NULL);

        /*
         * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
         * specifications.
         */
        KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_RAW_BUFFER);
        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, errorp);
        if (volume == NULL)
                return(NULL);

        KKASSERT(zone2_offset < volume->maxbuf_off);

        /*
         * Allocate a new buffer structure.  We will check for races later.
         */
        ++hammer_count_buffers;
        buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
                         M_WAITOK|M_ZERO|M_USE_RESERVE);
        buffer->zone2_offset = zone2_offset;
        buffer->zoneX_offset = buf_offset;

        hammer_io_init(&buffer->io, volume, iotype);
        buffer->io.offset = volume->ondisk->vol_buf_beg +
                            (zone2_offset & HAMMER_OFF_SHORT_MASK);
        buffer->io.bytes = bytes;
        TAILQ_INIT(&buffer->clist);
        hammer_ref(&buffer->io.lock);

        /*
         * Insert the buffer into the RB tree and handle late collisions.
         */
        if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
                hammer_rel_volume(volume, 0);
                buffer->io.volume = NULL;       /* safety */
                hammer_unref(&buffer->io.lock); /* safety */
                --hammer_count_buffers;
                kfree(buffer, hmp->m_misc);
                goto again;
        }
        ++hammer_count_refedbufs;
found:

        /*
         * Deal with on-disk info and loading races.
         */
        if (buffer->ondisk == NULL || buffer->io.loading) {
                *errorp = hammer_load_buffer(buffer, isnew);
                if (*errorp) {
                        hammer_rel_buffer(buffer, 1);
                        buffer = NULL;
                } else {
                        hammer_io_advance(&buffer->io);
                }
        } else {
                *errorp = 0;
                hammer_io_advance(&buffer->io);
        }
        return(buffer);
}
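
/*
 * Note on hammer_get_buffer() above (annotation, not upstream code): the
 * allocate-then-RB_INSERT sequence tolerates races.  If another thread
 * inserted the same zoneX_offset while we were allocating, the insert
 * fails, the speculative structure is torn down and freed, and the lookup
 * is retried from the top via the again: label.
 */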

/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
 * running hammer buffers must be fully synced to disk before we can issue
 * the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
        hammer_buffer_t buffer;
        int error;

        KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_LARGE_DATA);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer && (buffer->io.modified || buffer->io.running)) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0) {
                                hammer_io_wait(&buffer->io);
                                if (buffer->io.modified) {
                                        hammer_io_write_interlock(&buffer->io);
                                        hammer_io_flush(&buffer->io, 0);
                                        hammer_io_done_interlock(&buffer->io);
                                        hammer_io_wait(&buffer->io);
                                }
                                hammer_rel_buffer(buffer, 0);
                        }
                }
                base_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
}

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
                   hammer_off_t zone2_offset, int bytes,
                   int report_conflicts)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        int vol_no;
        int error;
        int ret_error;

        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, &ret_error);
        KKASSERT(ret_error == 0);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && buffer->io.lock.refs != 1) {
                                error = EAGAIN;
                                hammer_rel_buffer(buffer, 0);
                        }
                        if (error == 0) {
                                KKASSERT(buffer->zone2_offset == zone2_offset);
                                hammer_io_clear_modify(&buffer->io, 1);
                                buffer->io.reclaim = 1;
                                buffer->io.waitdep = 1;
                                KKASSERT(buffer->io.volume == volume);
                                hammer_rel_buffer(buffer, 0);
                        }
                } else {
                        error = hammer_io_inval(volume, zone2_offset);
                }
                if (error) {
                        ret_error = error;
                        if (report_conflicts ||
                            (hammer_debug_general & 0x8000)) {
                                kprintf("hammer_del_buffers: unable to "
                                        "invalidate %016llx buffer=%p rep=%d\n",
                                        (long long)base_offset,
                                        buffer, report_conflicts);
                        }
                }
                base_offset += HAMMER_BUFSIZE;
                zone2_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
        hammer_rel_volume(volume, 0);
        return (ret_error);
}

static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
        hammer_volume_t volume;
        int error;

        /*
         * Load the buffer's on-disk info
         */
        volume = buffer->io.volume;
        ++buffer->io.loading;
        hammer_lock_ex(&buffer->io.lock);

        if (hammer_debug_io & 0x0001) {
                kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
                        (long long)buffer->zoneX_offset,
                        (long long)buffer->zone2_offset,
                        isnew, buffer->ondisk);
        }

        if (buffer->ondisk == NULL) {
                if (isnew) {
                        error = hammer_io_new(volume->devvp, &buffer->io);
                } else {
                        error = hammer_io_read(volume->devvp, &buffer->io,
                                               volume->maxraw_off);
                }
                if (error == 0)
                        buffer->ondisk = (void *)buffer->io.bp->b_data;
        } else if (isnew) {
                error = hammer_io_new(volume->devvp, &buffer->io);
        } else {
                error = 0;
        }
        --buffer->io.loading;
        hammer_unlock(&buffer->io.lock);
        return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount or when a volume is
 * removed.
 *
 * If data != NULL, it specifies a volume whose buffers should
 * be unloaded.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data)
{
        struct hammer_volume *volume = (struct hammer_volume *) data;

        if (volume != NULL && volume != buffer->io.volume) {
                /*
                 * We are only interested in unloading buffers of volume,
                 * so skip it.
                 */
                return 0;
        }

        /*
         * Clean up the persistent ref ioerror might have on the buffer
         * and acquire a ref (steal ioerror's if we can).
         */
        if (buffer->io.ioerror) {
                buffer->io.ioerror = 0;
        } else {
                if (buffer->io.lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&buffer->io.lock);
        }

        /*
         * We must not flush a dirty buffer to disk on umount.  It should
         * have already been dealt with by the flusher, or we may be in
         * catastrophic failure.
         *
         * We must set waitdep to ensure that a running buffer is waited
         * on and released prior to us trying to unload the volume.
         */
        hammer_io_clear_modify(&buffer->io, 1);
        hammer_flush_buffer_nodes(buffer);
        KKASSERT(buffer->io.lock.refs == 1);
        buffer->io.waitdep = 1;
        hammer_rel_buffer(buffer, 2);
        return(0);
}

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
        int error;

        if (buffer->io.lock.refs == 0)
                ++hammer_count_refedbufs;
        hammer_ref(&buffer->io.lock);

        /*
         * At this point a biodone() will not touch the buffer other than
         * incidental bits.  However, lose_list can be modified via
         * a biodone() interrupt.
         *
         * No longer loose.
         */
        if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
                crit_enter();
                TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
                buffer->io.mod_list = NULL;
                crit_exit();
        }

        if (buffer->ondisk == NULL || buffer->io.loading) {
                error = hammer_load_buffer(buffer, 0);
                if (error) {
                        hammer_rel_buffer(buffer, 1);
                        /*
                         * NOTE: buffer pointer can become stale after
                         * the above release.
                         */
                }
        } else {
                error = 0;
        }
        return(error);
}

/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
        hammer_volume_t volume;
        hammer_mount_t hmp;
        struct buf *bp = NULL;
        int freeme = 0;

        hmp = buffer->io.hmp;

        if (buffer->io.lock.refs == 1) {
                ++buffer->io.loading;   /* force interlock check */
                hammer_lock_ex(&buffer->io.lock);
                if (buffer->io.lock.refs == 1) {
                        bp = hammer_io_release(&buffer->io, flush);

                        if (buffer->io.lock.refs == 1)
                                --hammer_count_refedbufs;

                        if (buffer->io.bp == NULL &&
                            buffer->io.lock.refs == 1) {
                                /*
                                 * Final cleanup
                                 *
                                 * NOTE: It is impossible for any associated
                                 * B-Tree nodes to have refs if the buffer
                                 * has no additional refs.
                                 */
                                RB_REMOVE(hammer_buf_rb_tree,
                                          &buffer->io.hmp->rb_bufs_root,
                                          buffer);
                                volume = buffer->io.volume;
                                buffer->io.volume = NULL; /* sanity */
                                hammer_rel_volume(volume, 0);
                                hammer_io_clear_modlist(&buffer->io);
                                hammer_flush_buffer_nodes(buffer);
                                KKASSERT(TAILQ_EMPTY(&buffer->clist));
                                freeme = 1;
                        }
                }
                --buffer->io.loading;
                hammer_unlock(&buffer->io.lock);
        }
        hammer_unref(&buffer->io.lock);
        if (bp)
                brelse(bp);
        if (freeme) {
                --hammer_count_buffers;
                kfree(buffer, hmp->m_misc);
        }
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 * passed cached *bufferp to match against either zoneX or zone2.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
              int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;
        KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
             int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                 int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}
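
/*
 * Illustrative usage (a sketch, not part of the original file; rec_offset
 * is a placeholder for any valid zoneX offset): the cached *bufferp is
 * reused across hammer_bread() calls and released once at the end:
 *
 *      struct hammer_buffer *buffer = NULL;
 *      int error;
 *      void *data;
 *
 *      data = hammer_bread(hmp, rec_offset, &error, &buffer);
 *      if (data) {
 *              ...inspect data...
 *      }
 *      if (buffer)
 *              hammer_rel_buffer(buffer, 0);
 */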

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
             int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
            int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}

/************************************************************************
 *                              NODES                                   *
 ************************************************************************/

/*
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
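
/*
 * Illustrative usage (a sketch, not part of the original file): passive
 * caching pairs hammer_cache_node() with hammer_uncache_node(), defined
 * later in this file:
 *
 *      hammer_cache_node(&ip->cache[0], node);   remember node in an inode
 *      ...
 *      hammer_uncache_node(&ip->cache[0]);       drop the association
 *
 * (ip->cache[0] stands in for any hammer_node_cache embedded in another
 * structure; the actual field names live in hammer.h.)
 */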
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
                int isnew, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_node_t node;

        KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

        /*
         * Locate the structure, allocating one if necessary.
         */
again:
        node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
        if (node == NULL) {
                ++hammer_count_nodes;
                node = kmalloc(sizeof(*node), hmp->m_misc,
                               M_WAITOK|M_ZERO|M_USE_RESERVE);
                node->node_offset = node_offset;
                node->hmp = hmp;
                TAILQ_INIT(&node->cursor_list);
                TAILQ_INIT(&node->cache_list);
                if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
                        --hammer_count_nodes;
                        kfree(node, hmp->m_misc);
                        goto again;
                }
        }
        hammer_ref(&node->lock);
        if (node->ondisk) {
                *errorp = 0;
                hammer_io_advance(&node->buffer->io);
        } else {
                *errorp = hammer_load_node(trans, node, isnew);
                trans->flags |= HAMMER_TRANSF_DIDIO;
        }
        if (*errorp) {
                hammer_rel_node(node);
                node = NULL;
        }
        return(node);
}

/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
        KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
        hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
{
        hammer_buffer_t buffer;
        hammer_off_t buf_offset;
        int error;

        error = 0;
        ++node->loading;
        hammer_lock_ex(&node->lock);
        if (node->ondisk == NULL) {
                /*
                 * This is a little confusing but the gist is that
                 * node->buffer determines whether the node is on
                 * the buffer's clist and node->ondisk determines
                 * whether the buffer is referenced.
                 *
                 * We could be racing a buffer release, in which case
                 * node->buffer may become NULL while we are blocked
                 * referencing the buffer.
                 */
                if ((buffer = node->buffer) != NULL) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && node->buffer == NULL) {
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                } else {
                        buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
                        buffer = hammer_get_buffer(node->hmp, buf_offset,
                                                   HAMMER_BUFSIZE, 0, &error);
                        if (buffer) {
                                KKASSERT(error == 0);
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                }
                if (error)
                        goto failed;
                node->ondisk = (void *)((char *)buffer->ondisk +
                                        (node->node_offset & HAMMER_BUFMASK));

                /*
                 * Check CRC.  NOTE: Neither flag is set and the CRC is not
                 * generated on new B-Tree nodes.
                 */
                if (isnew == 0 &&
                    (node->flags & HAMMER_NODE_CRCANY) == 0) {
                        if (hammer_crc_test_btree(node->ondisk) == 0) {
                                if (hammer_debug_critical)
                                        Debugger("CRC FAILED: B-TREE NODE");
                                node->flags |= HAMMER_NODE_CRCBAD;
                        } else {
                                node->flags |= HAMMER_NODE_CRCGOOD;
                        }
                }
        }
        if (node->flags & HAMMER_NODE_CRCBAD) {
                if (trans->flags & HAMMER_TRANSF_CRCDOM)
                        error = EDOM;
                else
                        error = EIO;
        }
failed:
        --node->loading;
        hammer_unlock(&node->lock);
        return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
                     int *errorp)
{
        hammer_node_t node;

        node = cache->node;
        if (node != NULL) {
                hammer_ref(&node->lock);
                if (node->ondisk) {
                        if (node->flags & HAMMER_NODE_CRCBAD) {
                                if (trans->flags & HAMMER_TRANSF_CRCDOM)
                                        *errorp = EDOM;
                                else
                                        *errorp = EIO;
                        } else {
                                *errorp = 0;
                        }
                } else {
                        *errorp = hammer_load_node(trans, node, 0);
                }
                if (*errorp) {
                        hammer_rel_node(node);
                        node = NULL;
                }
        } else {
                *errorp = ENOENT;
        }
        return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
        hammer_buffer_t buffer;

        /*
         * If this isn't the last ref just decrement the ref count and
         * return.
         */
        if (node->lock.refs > 1) {
                hammer_unref(&node->lock);
                return;
        }

        /*
         * If there is no ondisk info or no buffer the node failed to load,
         * remove the last reference and destroy the node.
         */
        if (node->ondisk == NULL) {
                hammer_unref(&node->lock);
                hammer_flush_node(node);
                /* node is stale now */
                return;
        }

        /*
         * Do not disassociate the node from the buffer if it represents
         * a modified B-Tree node that still needs its crc to be generated.
         */
        if (node->flags & HAMMER_NODE_NEEDSCRC)
                return;

        /*
         * Do final cleanups and then either destroy the node or leave it
         * passively cached.  The buffer reference is removed regardless.
         */
        buffer = node->buffer;
        node->ondisk = NULL;

        if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
                hammer_unref(&node->lock);
                hammer_rel_buffer(buffer, 0);
                return;
        }

        /*
         * Destroy the node.
         */
        hammer_unref(&node->lock);
        hammer_flush_node(node);
        /* node is stale */
        hammer_rel_buffer(buffer, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
        KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
        node->flags |= HAMMER_NODE_DELETED;
        hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
        /*
         * If the node doesn't exist, or is being deleted, don't cache it!
         *
         * The node can only ever be NULL in the I/O failure path.
         */
        if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
                return;
        if (cache->node == node)
                return;
        while (cache->node)
                hammer_uncache_node(cache);
        if (node->flags & HAMMER_NODE_DELETED)
                return;
        cache->node = node;
        TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}

void
hammer_uncache_node(hammer_node_cache_t cache)
{
        hammer_node_t node;

        if ((node = cache->node) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
                if (TAILQ_EMPTY(&node->cache_list))
                        hammer_flush_node(node);
        }
}

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
        hammer_node_cache_t cache;
        hammer_buffer_t buffer;
        hammer_mount_t hmp = node->hmp;

        while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
        }
        if (node->lock.refs == 0 && node->ondisk == NULL) {
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
                RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
                if ((buffer = node->buffer) != NULL) {
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
                --hammer_count_nodes;
                kfree(node, hmp->m_misc);
        }
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
        hammer_node_t node;

        while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
                KKASSERT(node->ondisk == NULL);
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

                if (node->lock.refs == 0) {
                        hammer_ref(&node->lock);
                        node->flags |= HAMMER_NODE_FLUSH;
                        hammer_rel_node(node);
                } else {
                        KKASSERT(node->loading != 0);
                        KKASSERT(node->buffer != NULL);
                        buffer = node->buffer;
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
        }
}

/************************************************************************
 *                              ALLOCATORS                              *
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
{
        hammer_buffer_t buffer = NULL;
        hammer_node_t node = NULL;
        hammer_off_t node_offset;

        node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
                                            sizeof(struct hammer_node_ondisk),
                                            hint, errorp);
        if (*errorp == 0) {
                node = hammer_get_node(trans, node_offset, 1, errorp);
                hammer_modify_node_noundo(trans, node);
                bzero(node->ondisk, sizeof(*node->ondisk));
                hammer_modify_node_done(node);
        }
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return(node);
}

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * returned data.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
                  u_int16_t rec_type, hammer_off_t *data_offsetp,
                  struct hammer_buffer **data_bufferp,
                  hammer_off_t hint, int *errorp)
{
        void *data;
        int zone;

        /*
         * Allocate data
         */
        if (data_len) {
                switch(rec_type) {
                case HAMMER_RECTYPE_INODE:
                case HAMMER_RECTYPE_DIRENTRY:
                case HAMMER_RECTYPE_EXT:
                case HAMMER_RECTYPE_FIX:
                case HAMMER_RECTYPE_PFS:
                case HAMMER_RECTYPE_SNAPSHOT:
                case HAMMER_RECTYPE_CONFIG:
                        zone = HAMMER_ZONE_META_INDEX;
                        break;
                case HAMMER_RECTYPE_DATA:
                case HAMMER_RECTYPE_DB:
                        if (data_len <= HAMMER_BUFSIZE / 2) {
                                zone = HAMMER_ZONE_SMALL_DATA_INDEX;
                        } else {
                                data_len = (data_len + HAMMER_BUFMASK) &
                                           ~HAMMER_BUFMASK;
                                zone = HAMMER_ZONE_LARGE_DATA_INDEX;
                        }
                        break;
                default:
                        panic("hammer_alloc_data: rec_type %04x unknown",
                              rec_type);
                        zone = 0;       /* NOT REACHED */
                        break;
                }
                *data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
                                                      hint, errorp);
        } else {
                *data_offsetp = 0;
        }
        if (*errorp == 0 && data_bufferp) {
                if (data_len) {
                        data = hammer_bread_ext(trans->hmp, *data_offsetp,
                                                data_len, errorp, data_bufferp);
                } else {
                        data = NULL;
                }
        } else {
                data = NULL;
        }
        return(data);
}
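
/*
 * Illustrative usage (a sketch, not part of the original file): per the
 * comment above, *data_bufferp starts NULL and the caller releases it:
 *
 *      struct hammer_buffer *data_buffer = NULL;
 *      hammer_off_t data_offset;
 *      void *data;
 *
 *      data = hammer_alloc_data(trans, data_len, rec_type, &data_offset,
 *                               &data_buffer, hint, &error);
 *      ...fill in data under hammer_modify_*()...
 *      if (data_buffer)
 *              hammer_rel_buffer(data_buffer, 0);
 */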

/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = waitfor;
        if (waitfor == MNT_WAIT) {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        } else {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        }
        return(info.error);
}

/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = MNT_NOWAIT;
        vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
                      hammer_sync_scan1, hammer_sync_scan2, &info);
        if (info.error == 0 && waitfor == MNT_WAIT) {
                info.waitfor = waitfor;
                vmntvnodescan(hmp->mp, VMSC_GETVP,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        }
        if (waitfor == MNT_WAIT) {
                hammer_flusher_sync(hmp);
                hammer_flusher_sync(hmp);
        } else {
                hammer_flusher_async(hmp, NULL);
                hammer_flusher_async(hmp, NULL);
        }
        return(info.error);
}

static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_inode *ip;

        ip = VTOI(vp);
        if (vp->v_type == VNON || ip == NULL ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(-1);
        }
        return(0);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_sync_info *info = data;
        struct hammer_inode *ip;
        int error;

        ip = VTOI(vp);
        if (vp->v_type == VNON || vp->v_type == VBAD ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(0);
        }
        error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
        if (error)
                info->error = error;
        return(0);
}