/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include <sys/nlookup.h>

#include "hammer.h"
static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_transaction_t trans,
			hammer_node_t node, int isnew);
static void _hammer_rel_node(hammer_node_t node, int locked);
static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}
/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zoneX_offset < buf2->zoneX_offset)
		return(-1);
	if (buf1->zoneX_offset > buf2->zoneX_offset)
		return(1);
	return(0);
}
static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);
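/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): RB_GENERATE2 emits keyed lookup functions, so the trees
 * above can be searched directly by the embedded key field (vol_no,
 * zoneX_offset, or node_offset).  The helper below only shows the
 * calling pattern; example_find_volume is a hypothetical name, not
 * a real HAMMER symbol.
 */
#if 0
static hammer_volume_t
example_find_volume(hammer_mount_t hmp, int32_t vol_no)
{
	/* keyed lookup generated by RB_GENERATE2; vol_no is the key */
	return (RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no));
}
#endif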
/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time or via the
 * hammer volume-add command; hammer_get_volume() will not load a new
 * volume.
 *
 * The passed devvp is vref()'d but not locked.  This function consumes
 * the ref (typically by associating it with the volume structure).
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(hammer_mount_t hmp, const char *volname,
		      struct vnode *devvp, void *data)
{
	struct mount *mp;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	hammer_volume_ondisk_t img;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int i;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, hmp->m_misc);
	volume->io.hmp = hmp;	/* bootstrap */
	hammer_io_init(&volume->io, volume, HAMMER_IOTYPE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;
	/*
	 * Get the device vnode
	 */
	if (devvp == NULL) {
		error = nlookup_init(&nd, volume->vol_name,
				     UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred,
					   &volume->devvp);
		nlookup_done(&nd);
	} else {
		error = 0;
		volume->devvp = devvp;
	}
	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 && vcount(volume->devvp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	/*
	 * Initialize the volume header with data if the data is specified.
	 */
	if (ronly == 0 && data) {
		img = (hammer_volume_ondisk_t)data;
		if (ondisk->vol_signature == HAMMER_FSBUF_VOLUME) {
			hkprintf("Formatting of valid HAMMER volume "
				"%s denied. Erase with dd!\n", volname);
			error = EFTYPE;
			goto late_failure;
		}
		bcopy(img, ondisk, sizeof(*img));
	}
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		hkprintf("volume %s has an invalid header\n", volume->vol_name);
		for (i = 0; i < (int)sizeof(ondisk->vol_signature); i++) {
			kprintf("%02x", ((char*)&ondisk->vol_signature)[i] & 0xFF);
			if (i != (int)sizeof(ondisk->vol_signature) - 1)
				kprintf("-");
		}
		kprintf("\n");
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->vol_flags = ondisk->vol_flags;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
					HAMMER_VOL_BUF_SIZE(ondisk));

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		hkprintf("volume %s's fsid does not match other volumes\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		hkprintf("volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}
	hammer_volume_number_add(hmp, volume);
	/*
	 * Set the root volume.  HAMMER special-cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->nvolumes = ondisk->vol_count;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			HAMMER_BUFFERS_PER_BIGBLOCK;
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			HAMMER_BUFFERS_PER_BIGBLOCK;
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		volume->devvp->v_rdev->si_mountpoint = NULL;
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE, NULL);
		vn_unlock(volume->devvp);
		hammer_free_volume(volume);
	}
	return (error);
}
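/*
 * Illustrative sketch (editor's addition): how mount-time code might
 * install each volume of a multi-volume filesystem.  The names
 * example_mount_volumes, volnames, and nvolumes are hypothetical; the
 * real callers are the HAMMER mount and volume-add paths.
 */
#if 0
static int
example_mount_volumes(hammer_mount_t hmp, char **volnames, int nvolumes)
{
	int error = 0;
	int i;

	for (i = 0; i < nvolumes && error == 0; ++i) {
		/* devvp == NULL: look the device up by name */
		error = hammer_install_volume(hmp, volnames[i], NULL, NULL);
	}
	return (error);
}
#endif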
/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
	if (volume->devvp) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		if (volume->io.hmp->ronly) {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE, NULL);
		} else {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD, NULL);
		}
		vn_unlock(volume->devvp);
	}
	return(0);
}
/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue scan
 * so returns -1 on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data)
{
	hammer_mount_t hmp = volume->io.hmp;
	struct buf *bp = NULL;
	hammer_volume_ondisk_t img;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
	int error;
	/*
	 * Clear the volume header with data if the data is specified.
	 */
	if (ronly == 0 && data && volume->devvp) {
		img = (hammer_volume_ondisk_t)data;
		error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
		if (error || bp->b_bcount < sizeof(*img)) {
			hmkprintf(hmp, "Failed to read volume header: %d\n",
				error);
			brelse(bp);
		} else {
			bcopy(img, bp->b_data, sizeof(*img));
			error = bwrite(bp);
			if (error)
				hmkprintf(hmp, "Failed to clear volume header: %d\n",
					error);
		}
	}
	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 */
	hammer_io_clear_modify(&volume->io, 1);
	volume->io.waitdep = 1;
	/*
	 * Clean up the persistent ref ioerror might have on the volume
	 */
	if (volume->io.ioerror)
		hammer_io_clear_error_noassert(&volume->io);

	/*
	 * This should release the bp.  Releasing the volume with flush set
	 * implies the interlock is set.
	 */
	hammer_ref_interlock_true(&volume->io.lock);
	hammer_rel_volume(volume, 1);
	KKASSERT(volume->io.bp == NULL);

	/*
	 * There should be no references on the volume.
	 */
	KKASSERT(hammer_norefs(&volume->io.lock));
	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly || volume->io.ioerror) {
			/*
			 * Make sure we don't sync anything to disk if we
			 * are in read-only mode (1) or critically-errored
			 * (2).  Note that there may be dirty buffers in
			 * normal read-only mode from crash recovery.
			 */
			vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD, NULL);
			vn_unlock(volume->devvp);
		} else {
			/*
			 * Normal termination, save any dirty buffers
			 * (XXX there really shouldn't be any).
			 */
			vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE, NULL);
			vn_unlock(volume->devvp);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_volume_number_del(hmp, volume);
	hammer_free_volume(volume);
	return(0);
}
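/*
 * Illustrative sketch (editor's addition): hammer_unload_volume() is
 * written as an RB_SCAN callback, which is why it returns >= 0 to
 * continue the scan (-1 would abort it).  A hypothetical unmount path
 * could drive it like this; example_unload_all_volumes is not a real
 * HAMMER symbol.
 */
#if 0
static void
example_unload_all_volumes(hammer_mount_t hmp)
{
	/* NULL data: unload every volume, not just one */
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);
}
#endif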
static void
hammer_free_volume(hammer_volume_t volume)
{
	hammer_mount_t hmp = volume->io.hmp;

	if (volume->vol_name) {
		kfree(volume->vol_name, hmp->m_misc);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, hmp->m_misc);
}
/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(hammer_mount_t hmp, int32_t vol_no, int *errorp)
{
	hammer_volume_t volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}

	/*
	 * Reference the volume, load/check the data on the 0->1 transition.
	 * hammer_load_volume() will dispose of the interlock on return,
	 * and also clean up the ref count on error.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		*errorp = hammer_load_volume(volume);
		if (*errorp)
			volume = NULL;
	} else {
		KKASSERT(volume->ondisk);
		*errorp = 0;
	}
	return(volume);
}
int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	/*
	 * Reference the volume and deal with the check condition used to
	 * load its ondisk info.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		error = hammer_load_volume(volume);
	} else {
		KKASSERT(volume->ondisk);
		error = 0;
	}
	return (error);
}
/*
 * May be called without fs_token
 */
hammer_volume_t
hammer_get_root_volume(hammer_mount_t hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);

	/*
	 * Reference the volume and deal with the check condition used to
	 * load its ondisk info.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		lwkt_gettoken(&volume->io.hmp->fs_token);
		*errorp = hammer_load_volume(volume);
		lwkt_reltoken(&volume->io.hmp->fs_token);
		if (*errorp)
			volume = NULL;
	} else {
		KKASSERT(volume->ondisk);
		*errorp = 0;
	}
	return(volume);
}
/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * the interlock is held on call.  The interlock will be released on return.
 * The reference will also be released on return if an error occurs.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io,
				       volume->maxbuf_off);
		if (error == 0) {
			volume->ondisk = (void *)volume->io.bp->b_data;
			hammer_ref_interlock_done(&volume->io.lock);
		} else {
			hammer_rel_volume(volume, 1);
		}
	} else {
		error = 0;
	}
	return(error);
}
/*
 * Release a previously acquired reference on the volume.
 *
 * Volumes are not unloaded from memory during normal operation.
 *
 * May be called without fs_token
 */
void
hammer_rel_volume(hammer_volume_t volume, int locked)
{
	struct buf *bp;

	if (hammer_rel_interlock(&volume->io.lock, locked)) {
		lwkt_gettoken(&volume->io.hmp->fs_token);
		volume->ondisk = NULL;
		bp = hammer_io_release(&volume->io, locked);
		lwkt_reltoken(&volume->io.hmp->fs_token);
		hammer_rel_interlock_done(&volume->io.lock, locked);
		if (bp)
			brelse(bp);
	}
}
int
hammer_mountcheck_volumes(hammer_mount_t hmp)
{
	hammer_volume_t vol;
	int i;

	HAMMER_VOLUME_NUMBER_FOREACH(hmp, i) {
		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
		if (vol == NULL)
			return(EINVAL);
	}
	return(0);
}

int
hammer_get_installed_volumes(hammer_mount_t hmp)
{
	int i;
	int ret = 0;

	HAMMER_VOLUME_NUMBER_FOREACH(hmp, i)
		++ret;
	return(ret);
}
/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently most blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */

/*
 * Helper function returns whether a zone offset can be directly translated
 * to a raw buffer index or not.  Really only the volume and undo zones
 * can't be directly translated.  Volumes are special-cased and undo zones
 * shouldn't be accessed via an alias in read-only mode.
 *
 * This function is ONLY used to detect aliased zones during a read-only
 * mount.
 */
static int
hammer_direct_zone(hammer_off_t buf_offset)
{
	int zone = HAMMER_ZONE_DECODE(buf_offset);

	return(hammer_is_direct_mapped_index(zone));
}
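/*
 * Illustrative sketch (editor's addition): what "direct mapped" means
 * for the zone encoding.  For a direct-mapped zone the zone-X and zone-2
 * offsets differ only in the zone field, so translation is a simple
 * re-encode; example_alias_of is a hypothetical name.
 */
#if 0
static hammer_off_t
example_alias_of(hammer_off_t buf_offset)
{
	if (hammer_direct_zone(buf_offset)) {
		/* rewrite the zone field to the raw-buffer (zone-2) zone */
		return (hammer_xlate_to_zone2(buf_offset));
	}
	/* volume/undo zones need a real lookup instead */
	return (0);
}
#endif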
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int bytes, int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t zone2_offset;
	int vol_no;
	int zone;

	buf_offset &= ~HAMMER_BUFMASK64;
again:
	/*
	 * Shortcut if the buffer is already cached
	 */
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
	if (buffer) {
		/*
		 * Once refed the ondisk field will not be cleared by
		 * any other action.  Shortcut the operation if the
		 * ondisk structure is valid.
		 */
found_aliased:
		if (hammer_ref_interlock(&buffer->io.lock) == 0) {
			hammer_io_advance(&buffer->io);
			KKASSERT(buffer->ondisk);
			*errorp = 0;
			return(buffer);
		}

		/*
		 * 0->1 transition or deferred 0->1 transition (CHECK),
		 * interlock now held.  Shortcut if ondisk is already
		 * assigned.
		 */
		atomic_add_int(&hammer_count_refedbufs, 1);
		if (buffer->ondisk) {
			hammer_io_advance(&buffer->io);
			hammer_ref_interlock_done(&buffer->io.lock);
			*errorp = 0;
			return(buffer);
		}

		/*
		 * The buffer is no longer loose if it has a ref, and
		 * cannot become loose once it gains a ref.  Loose
		 * buffers will never be in a modified state.  This should
		 * only occur on the 0->1 transition of refs.
		 *
		 * lose_root can be modified via a biodone() interrupt
		 * so the io_token must be held.
		 */
		if (buffer->io.mod_root == &hmp->lose_root) {
			lwkt_gettoken(&hmp->io_token);
			if (buffer->io.mod_root == &hmp->lose_root) {
				RB_REMOVE(hammer_mod_rb_tree,
					  buffer->io.mod_root, &buffer->io);
				buffer->io.mod_root = NULL;
				KKASSERT(buffer->io.modified == 0);
			}
			lwkt_reltoken(&hmp->io_token);
		}
		goto found;
	} else if (hmp->ronly && hammer_direct_zone(buf_offset)) {
		/*
		 * If this is a read-only mount there could be an alias
		 * in the raw-zone.  If there is we use that buffer instead.
		 *
		 * rw mounts will not have aliases.  Also note when going
		 * from ro -> rw the recovered raw buffers are flushed and
		 * reclaimed, so again there will not be any aliases once
		 * the mount is rw.
		 */
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   hammer_xlate_to_zone2(buf_offset));
		if (buffer) {
			if (hammer_debug_general & 0x0001) {
				hkrateprintf(&hmp->kdiag,
					"recovered aliased %016jx\n",
					(intmax_t)buf_offset);
			}
			goto found_aliased;
		}
	}
	/*
	 * Handle blockmap offset translations
	 */
	zone = HAMMER_ZONE_DECODE(buf_offset);
	if (hammer_is_zone2_mapped_index(zone)) {
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
	} else {
		/* Must be zone-2 (not 1 or 4 or 15) */
		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
		zone2_offset = buf_offset;
		*errorp = 0;
	}
	if (*errorp)
		return(NULL);

	/*
	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
	 * specifications.
	 */
	KKASSERT(hammer_is_zone_raw_buffer(zone2_offset));
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	KKASSERT(zone2_offset < volume->maxbuf_off);
	/*
	 * Allocate a new buffer structure.  We will check for races later.
	 */
	++hammer_count_buffers;
	buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
			 M_WAITOK|M_ZERO|M_USE_RESERVE);
	buffer->zone2_offset = zone2_offset;
	buffer->zoneX_offset = buf_offset;

	hammer_io_init(&buffer->io, volume, hammer_zone_to_iotype(zone));
	buffer->io.offset = hammer_xlate_to_phys(volume->ondisk, zone2_offset);
	buffer->io.bytes = bytes;
	TAILQ_INIT(&buffer->node_list);
	hammer_ref_interlock_true(&buffer->io.lock);
	/*
	 * Insert the buffer into the RB tree and handle late collisions.
	 */
	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
		hammer_rel_volume(volume, 0);
		buffer->io.volume = NULL;			/* safety */
		if (hammer_rel_interlock(&buffer->io.lock, 1))	/* safety */
			hammer_rel_interlock_done(&buffer->io.lock, 1);
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
		goto again;
	}
	atomic_add_int(&hammer_count_refedbufs, 1);
found:

	/*
	 * The buffer is referenced and interlocked.  Load the buffer
	 * if necessary.  hammer_load_buffer() deals with the interlock
	 * and, if an error is returned, also deals with the ref.
	 */
	if (buffer->ondisk == NULL) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp)
			buffer = NULL;
	} else {
		hammer_io_advance(&buffer->io);
		hammer_ref_interlock_done(&buffer->io.lock);
		*errorp = 0;
	}
	return(buffer);
}
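/*
 * Illustrative sketch (editor's addition): the reference discipline for
 * hammer_get_buffer().  Every successful get must be paired with a
 * hammer_rel_buffer(); example_peek_buffer is a hypothetical name.
 */
#if 0
static void
example_peek_buffer(hammer_mount_t hmp, hammer_off_t buf_offset)
{
	hammer_buffer_t buffer;
	int error;

	buffer = hammer_get_buffer(hmp, buf_offset, HAMMER_BUFSIZE,
				   0, &error);
	if (buffer) {
		/* buffer->ondisk is valid while the ref is held */
		hammer_rel_buffer(buffer, 0);
	}
}
#endif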
/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
 * running hammer buffers must be fully synced to disk before we can issue
 * the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
	hammer_buffer_t buffer;
	int error;

	KKASSERT(hammer_is_zone_large_data(base_offset));

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer && (buffer->io.modified || buffer->io.running)) {
			error = hammer_ref_buffer(buffer);
			if (error == 0) {
				hammer_io_wait(&buffer->io);
				if (buffer->io.modified) {
					hammer_io_write_interlock(&buffer->io);
					hammer_io_flush(&buffer->io, 0);
					hammer_io_done_interlock(&buffer->io);
					hammer_io_wait(&buffer->io);
				}
				hammer_rel_buffer(buffer, 0);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
}
/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
		   hammer_off_t zone2_offset, int bytes,
		   int report_conflicts)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	int vol_no;
	int error;
	int ret_error;

	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &ret_error);
	KKASSERT(ret_error == 0);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer) {
			error = hammer_ref_buffer(buffer);
			if (hammer_debug_general & 0x20000) {
				hkprintf("delbufr %016jx rerr=%d 1ref=%d\n",
					(intmax_t)buffer->zoneX_offset,
					error,
					hammer_oneref(&buffer->io.lock));
			}
			if (error == 0 && !hammer_oneref(&buffer->io.lock)) {
				error = EAGAIN;
				hammer_rel_buffer(buffer, 0);
			}
			if (error == 0) {
				KKASSERT(buffer->zone2_offset == zone2_offset);
				hammer_io_clear_modify(&buffer->io, 1);
				buffer->io.reclaim = 1;
				buffer->io.waitdep = 1;
				KKASSERT(buffer->io.volume == volume);
				hammer_rel_buffer(buffer, 0);
			}
		} else {
			error = hammer_io_inval(volume, zone2_offset);
		}
		if (error) {
			ret_error = error;
			if (report_conflicts ||
			    (hammer_debug_general & 0x8000)) {
				krateprintf(&hmp->kdiag,
					"hammer_del_buffers: unable to "
					"invalidate %016jx buffer=%p "
					"rep=%d lkrefs=%08x\n",
					(intmax_t)base_offset,
					buffer, report_conflicts,
					(buffer ? buffer->io.lock.refs : -1));
			}
		}
		base_offset += HAMMER_BUFSIZE;
		zone2_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
	hammer_rel_volume(volume, 0);
	return (ret_error);
}
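/*
 * Illustrative sketch (editor's addition): callers walk both the zone-X
 * and zone-2 views of the range in lock-step, HAMMER_BUFSIZE at a time,
 * which is exactly the loop above.  A hypothetical invalidation of one
 * big-block worth of buffers might look like this;
 * example_invalidate_bigblock is not a real HAMMER symbol.
 */
#if 0
static int
example_invalidate_bigblock(hammer_mount_t hmp, hammer_off_t base_offset,
			    hammer_off_t zone2_offset)
{
	/* report_conflicts != 0: log any buffers we could not destroy */
	return (hammer_del_buffers(hmp, base_offset, zone2_offset,
				   HAMMER_BIGBLOCK_SIZE, 1));
}
#endif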
/*
 * Given a referenced and interlocked buffer load/validate the data.
 *
 * The buffer interlock will be released on return.  If an error is
 * returned the buffer reference will also be released (and the buffer
 * pointer will thus be stale).
 */
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	hammer_off_t limit;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->io.volume;

	if (hammer_debug_io & 0x0004) {
		hdkprintf("load_buffer %016jx %016jx isnew=%d od=%p\n",
			(intmax_t)buffer->zoneX_offset,
			(intmax_t)buffer->zone2_offset,
			isnew, buffer->ondisk);
	}

	if (buffer->ondisk == NULL) {
		/*
		 * Issue the read or generate a new buffer.  When reading
		 * the limit argument controls any read-ahead clustering
		 * hammer_io_read() is allowed to do.
		 *
		 * We cannot read-ahead in the large-data zone and we cannot
		 * cross a big-block boundary as the next big-block might
		 * use a different buffer size.
		 */
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else if (hammer_is_zone_large_data(buffer->zoneX_offset)) {
			error = hammer_io_read(volume->devvp, &buffer->io,
					       buffer->io.bytes);
		} else {
			limit = HAMMER_BIGBLOCK_DOALIGN(buffer->zone2_offset);
			limit -= buffer->zone2_offset;
			error = hammer_io_read(volume->devvp, &buffer->io,
					       limit);
		}
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	if (error == 0) {
		hammer_io_advance(&buffer->io);
		hammer_ref_interlock_done(&buffer->io.lock);
	} else {
		hammer_rel_buffer(buffer, 1);
	}
	return (error);
}
/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount or when a volume is
 * removed.
 *
 * If data != NULL, it specifies a volume whose buffers should
 * be unloaded.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data)
{
	hammer_volume_t volume = (hammer_volume_t)data;

	/*
	 * If volume != NULL we are only interested in unloading buffers
	 * associated with a particular volume.
	 */
	if (volume != NULL && volume != buffer->io.volume)
		return(0);

	/*
	 * Clean up the persistent ref ioerror might have on the buffer
	 * and acquire a ref.  Expect a 0->1 transition.
	 */
	if (buffer->io.ioerror) {
		hammer_io_clear_error_noassert(&buffer->io);
		atomic_add_int(&hammer_count_refedbufs, -1);
	}
	hammer_ref_interlock_true(&buffer->io.lock);
	atomic_add_int(&hammer_count_refedbufs, 1);

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 *
	 * We must set waitdep to ensure that a running buffer is waited
	 * on and released prior to us trying to unload the volume.
	 */
	hammer_io_clear_modify(&buffer->io, 1);
	hammer_flush_buffer_nodes(buffer);
	buffer->io.waitdep = 1;
	hammer_rel_buffer(buffer, 1);
	return(0);
}
/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	hammer_mount_t hmp;
	int error;
	int locked;

	/*
	 * Acquire a ref, plus the buffer will be interlocked on the
	 * 0->1 transition.
	 */
	locked = hammer_ref_interlock(&buffer->io.lock);
	hmp = buffer->io.hmp;

	/*
	 * At this point a biodone() will not touch the buffer other than
	 * incidental bits.  However, lose_root can be modified via
	 * a biodone() interrupt.
	 *
	 * No longer loose.  lose_root requires the io_token.
	 */
	if (buffer->io.mod_root == &hmp->lose_root) {
		lwkt_gettoken(&hmp->io_token);
		if (buffer->io.mod_root == &hmp->lose_root) {
			RB_REMOVE(hammer_mod_rb_tree,
				  buffer->io.mod_root, &buffer->io);
			buffer->io.mod_root = NULL;
		}
		lwkt_reltoken(&hmp->io_token);
	}

	if (locked) {
		atomic_add_int(&hammer_count_refedbufs, 1);
		error = hammer_load_buffer(buffer, 0);
		/* NOTE: on error the buffer pointer is stale */
	} else {
		error = 0;
	}
	return(error);
}
/*
 * Release a reference on the buffer.  On the 1->0 transition the
 * underlying IO will be released but the data reference is left
 * cached.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int locked)
{
	hammer_volume_t volume;
	hammer_mount_t hmp;
	struct buf *bp = NULL;
	int freeme = 0;

	hmp = buffer->io.hmp;

	if (hammer_rel_interlock(&buffer->io.lock, locked) == 0)
		return;

	/*
	 * hammer_count_refedbufs accounting.  Decrement if we are in
	 * the error path or if CHECK is clear.
	 *
	 * If we are not in the error path and CHECK is set the caller
	 * probably just did a hammer_ref() and didn't account for it,
	 * so we don't account for the loss here.
	 */
	if (locked || (buffer->io.lock.refs & HAMMER_REFS_CHECK) == 0)
		atomic_add_int(&hammer_count_refedbufs, -1);

	/*
	 * If the caller locked us or the normal release transitions
	 * from 1->0 (and acquired the lock) attempt to release the
	 * io.  If the caller locked us we tell hammer_io_release()
	 * to flush (which would be the unload or failure path).
	 */
	bp = hammer_io_release(&buffer->io, locked);

	/*
	 * If the buffer has no bp association and no refs we can destroy
	 * it.
	 *
	 * NOTE: It is impossible for any associated B-Tree nodes to have
	 * refs if the buffer has no additional refs.
	 */
	if (buffer->io.bp == NULL && hammer_norefs(&buffer->io.lock)) {
		RB_REMOVE(hammer_buf_rb_tree,
			  &buffer->io.hmp->rb_bufs_root,
			  buffer);
		volume = buffer->io.volume;
		buffer->io.volume = NULL;	/* sanity */
		hammer_rel_volume(volume, 0);
		hammer_io_clear_modlist(&buffer->io);
		hammer_flush_buffer_nodes(buffer);
		KKASSERT(TAILQ_EMPTY(&buffer->node_list));
		freeme = 1;
	}

	/*
	 * Cleanup
	 */
	hammer_rel_interlock_done(&buffer->io.lock, locked);
	if (bp)
		brelse(bp);
	if (freeme) {
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
	}
}
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 * passed cached *bufferp to match against either zoneX or zone2.
 */
static void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	      int isnew, int *errorp, hammer_buffer_t *bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT(HAMMER_ZONE(buf_offset) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, isnew, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}
void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
	     int *errorp, hammer_buffer_t *bufferp)
{
	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, 0, errorp, bufferp));
}
void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		 int *errorp, hammer_buffer_t *bufferp)
{
	bytes = HAMMER_BUFSIZE_DOALIGN(bytes);
	return(_hammer_bread(hmp, buf_offset, bytes, 0, errorp, bufferp));
}
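/*
 * Illustrative sketch (editor's addition): the *bufferp caching contract
 * shared by hammer_bread()/hammer_bnew().  A cursor-style caller keeps
 * one buffer cached across calls and releases it once when done;
 * example_read_two is a hypothetical name.
 */
#if 0
static void
example_read_two(hammer_mount_t hmp, hammer_off_t off1, hammer_off_t off2)
{
	hammer_buffer_t buffer = NULL;	/* initial *bufferp must be NULL */
	void *data;
	int error;

	data = hammer_bread(hmp, off1, &error, &buffer);
	/* second call releases the prior buffer if off2 misses it */
	data = hammer_bread(hmp, off2, &error, &buffer);
	if (buffer)
		hammer_rel_buffer(buffer, 0);
}
#endif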
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
	    int *errorp, hammer_buffer_t *bufferp)
{
	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, 1, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		int *errorp, hammer_buffer_t *bufferp)
{
	bytes = HAMMER_BUFSIZE_DOALIGN(bytes);
	return(_hammer_bread(hmp, buf_offset, bytes, 1, errorp, bufferp));
}
/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * the buffer management code.
 */
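/*
 * Illustrative sketch (editor's addition): the passive-association
 * pattern described above.  A structure embeds a hammer_node_cache,
 * remembers a node with hammer_cache_node(), and later reacquires it
 * with hammer_ref_node_safe() without a B-Tree descent;
 * example_remember/example_reacquire are hypothetical names.
 */
#if 0
static void
example_remember(hammer_node_cache_t cache, hammer_node_t node)
{
	/* node is referenced by the caller; the cache ref is passive */
	hammer_cache_node(cache, node);
	hammer_rel_node(node);
}

static hammer_node_t
example_reacquire(hammer_transaction_t trans, hammer_node_cache_t cache)
{
	int error;

	/* returns NULL and sets error if the cached node went away */
	return (hammer_ref_node_safe(trans, cache, &error));
}
#endif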
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_node_t node;
	int doload;

	KKASSERT(hammer_is_zone_btree(node_offset));

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), hmp->m_misc, M_WAITOK|M_ZERO|M_USE_RESERVE);
		node->node_offset = node_offset;
		node->hmp = hmp;
		TAILQ_INIT(&node->cursor_list);
		TAILQ_INIT(&node->cache_list);
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, hmp->m_misc);
			goto again;
		}
		doload = hammer_ref_interlock_true(&node->lock);
	} else {
		doload = hammer_ref_interlock(&node->lock);
	}
	if (doload) {
		*errorp = hammer_load_node(trans, node, isnew);
		if (*errorp)
			node = NULL;
	} else {
		KKASSERT(node->ondisk);
		*errorp = 0;
		hammer_io_advance(&node->buffer->io);
	}
	return(node);
}
/*
 * Reference an already-referenced node.  0->1 transitions should assert
 * so we do not have to deal with hammer_ref() setting CHECK.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(hammer_isactive(&node->lock) && node->ondisk != NULL);
	hammer_ref(&node->lock);
}
/*
 * Load a node's on-disk data reference.  Called with the node referenced
 * and interlocked.
 *
 * On return the node interlock will be unlocked.  If a non-zero error code
 * is returned the node will also be dereferenced (and the caller's pointer
 * will be stale).
 */
static int
hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	hammer_off_t buf_offset;
	int error;

	error = 0;
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's node_list and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->node_list, node, entry);
				node->buffer = buffer;
			}
		} else {
			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
			buffer = hammer_get_buffer(node->hmp, buf_offset,
						   HAMMER_BUFSIZE, 0, &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->node_list, node, entry);
				node->buffer = buffer;
			}
		}
		if (error)
			goto failed;
		node->ondisk = (void *)((char *)buffer->ondisk +
					(node->node_offset & HAMMER_BUFMASK));

		/*
		 * Check CRC.  NOTE: Neither flag is set and the CRC is not
		 * generated on new B-Tree nodes.
		 */
		if (isnew == 0 &&
		    (node->flags & HAMMER_NODE_CRCANY) == 0) {
			if (hammer_crc_test_btree(node->ondisk) == 0) {
				hdkprintf("CRC B-TREE NODE @ %016jx/%lu FAILED\n",
					(intmax_t)node->node_offset,
					sizeof(*node->ondisk));
				if (hammer_debug_critical)
					Debugger("CRC FAILED: B-TREE NODE");
				node->flags |= HAMMER_NODE_CRCBAD;
			} else {
				node->flags |= HAMMER_NODE_CRCGOOD;
			}
		}
	}
	if (node->flags & HAMMER_NODE_CRCBAD) {
		if (trans->flags & HAMMER_TRANSF_CRCDOM)
			error = EDOM;
		else
			error = EIO;
	}
failed:
	if (error) {
		_hammer_rel_node(node, 1);
	} else {
		hammer_ref_interlock_done(&node->lock);
	}
	return (error);
}
/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
		     int *errorp)
{
	hammer_node_t node;
	int doload;

	node = cache->node;
	if (node != NULL) {
		doload = hammer_ref_interlock(&node->lock);
		if (doload) {
			*errorp = hammer_load_node(trans, node, 0);
			if (*errorp)
				node = NULL;
		} else {
			KKASSERT(node->ondisk);
			if (node->flags & HAMMER_NODE_CRCBAD) {
				if (trans->flags & HAMMER_TRANSF_CRCDOM)
					*errorp = EDOM;
				else
					*errorp = EIO;
				_hammer_rel_node(node, 0);
				node = NULL;
			} else {
				*errorp = 0;
			}
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}
/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 *
 * If locked is non-zero the passed node has been interlocked by the
 * caller and we are in the failure/unload path, otherwise it has not and
 * we are doing a normal release.
 *
 * This function will dispose of the interlock and the reference.
 * On return the node pointer is stale.
 */
static void
_hammer_rel_node(hammer_node_t node, int locked)
{
	hammer_buffer_t buffer;

	/*
	 * Deref the node.  If this isn't the 1->0 transition we're basically
	 * done.  If locked is non-zero this function will just deref the
	 * locked node and return 1, otherwise it will deref the locked
	 * node and either lock and return 1 on the 1->0 transition or
	 * not lock and return 0.
	 */
	if (hammer_rel_interlock(&node->lock, locked) == 0)
		return;

	/*
	 * Either locked was non-zero and we are interlocked, or the
	 * hammer_rel_interlock() call returned non-zero and we are
	 * interlocked.
	 *
	 * The ref-count must still be decremented if locked != 0 so
	 * the cleanup required still varies a bit.
	 *
	 * hammer_flush_node() when called with 1 or 2 will dispose of
	 * the lock and possible ref-count.
	 */
	if (node->ondisk == NULL) {
		hammer_flush_node(node, locked + 1);
		/* node is stale now */
		return;
	}

	/*
	 * Do not disassociate the node from the buffer if it represents
	 * a modified B-Tree node that still needs its crc to be generated.
	 */
	if (node->flags & HAMMER_NODE_NEEDSCRC) {
		hammer_rel_interlock_done(&node->lock, locked);
		return;
	}

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		/*
		 * Normal release.
		 */
		hammer_rel_interlock_done(&node->lock, locked);
	} else {
		/*
		 * Destroy the node.
		 */
		hammer_flush_node(node, locked + 1);
		/* node is stale */
	}
	hammer_rel_buffer(buffer, 0);
}

void
hammer_rel_node(hammer_node_t node)
{
	_hammer_rel_node(node, 0);
}
/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}
/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
	/*
	 * If the node doesn't exist, or is being deleted, don't cache it!
	 *
	 * The node can only ever be NULL in the I/O failure path.
	 */
	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
		return;
	if (cache->node == node)
		return;
	while (cache->node)
		hammer_uncache_node(cache);
	if (node->flags & HAMMER_NODE_DELETED)
		return;
	cache->node = node;
	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}

void
hammer_uncache_node(hammer_node_cache_t cache)
{
	hammer_node_t node;

	if ((node = cache->node) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
		if (TAILQ_EMPTY(&node->cache_list))
			hammer_flush_node(node, 0);
	}
}
/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 *
 * locked == 0	Normal unlocked operation
 * locked == 1	Call hammer_rel_interlock_done(..., 0);
 * locked == 2	Call hammer_rel_interlock_done(..., 1);
 *
 * XXX for now this isn't even close to being MPSAFE so the refs check
 *     is sufficient.
 */
void
hammer_flush_node(hammer_node_t node, int locked)
{
	hammer_node_cache_t cache;
	hammer_buffer_t buffer;
	hammer_mount_t hmp = node->hmp;
	int dofree;

	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
	}

	/*
	 * NOTE: refs is predisposed if another thread is blocking and
	 *	 will be larger than 0 in that case.  We aren't MPSAFE
	 *	 here.
	 */
	if (node->ondisk == NULL && hammer_norefs(&node->lock)) {
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->node_list, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		dofree = 1;
	} else {
		dofree = 0;
	}

	/*
	 * Deal with the interlock if locked == 1 or locked == 2.
	 */
	if (locked)
		hammer_rel_interlock_done(&node->lock, locked - 1);

	/*
	 * Destroy if requested
	 */
	if (dofree) {
		--hammer_count_nodes;
		kfree(node, hmp->m_misc);
	}
}
/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->node_list)) != NULL) {
		KKASSERT(node->ondisk == NULL);
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

		if (hammer_try_interlock_norefs(&node->lock)) {
			hammer_ref(&node->lock);
			node->flags |= HAMMER_NODE_FLUSH;
			_hammer_rel_node(node, 1);
		} else {
			KKASSERT(node->buffer != NULL);
			buffer = node->buffer;
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->node_list, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
	}
}
/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    hint, errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}
/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * returned data.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  uint16_t rec_type, hammer_off_t *data_offsetp,
		  hammer_buffer_t *data_bufferp,
		  hammer_off_t hint, int *errorp)
{
	void *data;
	int zone;

	/*
	 * Allocate data directly from blockmap.
	 */
	if (data_len) {
		switch(rec_type) {
		case HAMMER_RECTYPE_INODE:
		case HAMMER_RECTYPE_DIRENTRY:
		case HAMMER_RECTYPE_EXT:
		case HAMMER_RECTYPE_FIX:
		case HAMMER_RECTYPE_PFS:
		case HAMMER_RECTYPE_SNAPSHOT:
		case HAMMER_RECTYPE_CONFIG:
			zone = HAMMER_ZONE_META_INDEX;
			break;
		case HAMMER_RECTYPE_DATA:
		case HAMMER_RECTYPE_DB:
			/*
			 * Only mirror-write comes here.
			 * Regular allocation path uses blockmap reservation.
			 */
			zone = hammer_data_zone_index(data_len);
			if (zone == HAMMER_ZONE_LARGE_DATA_INDEX) {
				/* round up */
				data_len = HAMMER_BUFSIZE_DOALIGN(data_len);
			}
			break;
		default:
			hpanic("rec_type %04x unknown", rec_type);
			zone = HAMMER_ZONE_UNAVAIL_INDEX;	/* NOT REACHED */
			break;
		}
		*data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
						      hint, errorp);
	} else {
		*data_offsetp = 0;
	}

	data = NULL;
	if (*errorp == 0 && data_bufferp && data_len)
		data = hammer_bread_ext(trans->hmp, *data_offsetp, data_len,
					errorp, data_bufferp);
	return(data);
}
/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

struct hammer_sync_info {
	int error;
	int waitfor;
};

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;
	if (waitfor == MNT_WAIT) {
		vsyncscan(hmp->mp, VMSC_GETVP | VMSC_ONEPASS,
			  hammer_sync_scan2, &info);
	} else {
		vsyncscan(hmp->mp, VMSC_GETVP | VMSC_ONEPASS | VMSC_NOWAIT,
			  hammer_sync_scan2, &info);
	}
	return(info.error);
}
/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 *
 * If doing a lazy sync make just one pass on the vnode list, ignoring
 * any new vnodes added to the list while the sync is in progress.
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;
	int flags;

	flags = VMSC_GETVP;
	if (waitfor & MNT_LAZY)
		flags |= VMSC_ONEPASS;

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vsyncscan(hmp->mp, flags | VMSC_NOWAIT, hammer_sync_scan2, &info);

	if (info.error == 0 && (waitfor & MNT_WAIT)) {
		info.waitfor = waitfor;
		vsyncscan(hmp->mp, flags, hammer_sync_scan2, &info);
	}
	if (waitfor == MNT_WAIT) {
		hammer_flusher_sync(hmp);
		hammer_flusher_sync(hmp);
	} else {
		hammer_flusher_async(hmp, NULL);
		hammer_flusher_async(hmp, NULL);
	}
	return(info.error);
}
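/*
 * Illustrative sketch (editor's addition): how a VFS sync entry point
 * might select between the two waitfor modes described above;
 * example_vfs_sync is a hypothetical name.
 */
#if 0
static int
example_vfs_sync(hammer_mount_t hmp, int waitfor)
{
	/* MNT_WAIT: two vnode passes plus two synchronous flusher runs */
	/* MNT_LAZY: a single non-blocking pass, flusher kicked async   */
	return (hammer_sync_hmp(hmp, waitfor));
}
#endif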
static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	hammer_inode_t ip;
	int error;

	ip = VTOI(vp);
	if (ip == NULL)
		return(0);
	if (vp->v_type == VNON || vp->v_type == VBAD) {
		return(0);
	}
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		return(0);
	}
	error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
	if (error)
		info->error = error;
	return(0);
}