/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>

#include "hammer.h"
static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_transaction_t trans,
			hammer_node_t node, int isnew);
static void _hammer_rel_node(hammer_node_t node, int locked);
static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}
/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zoneX_offset < buf2->zoneX_offset)
		return(-1);
	if (buf1->zoneX_offset > buf2->zoneX_offset)
		return(1);
	return(0);
}
static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);
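
/*
 * Editorial note: RB_GENERATE2() emits the red-black tree operations used
 * throughout this file, including the keyed RB_LOOKUP() variant.  A minimal
 * sketch of the resulting access pattern, using the standard DragonFly
 * RB-tree macros:
 *
 *	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
 *	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, zoneX_off);
 *
 * The final key argument is compared against the structure field named in
 * the macro invocation (vol_no, zoneX_offset, node_offset respectively).
 */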
/************************************************************************
 *				VOLUMES					*
 ************************************************************************/

/*
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time; get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
		      struct vnode *devvp)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, hmp->m_misc);
	volume->io.hmp = hmp;	/* bootstrap */
	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;

	/*
	 * Get the device vnode
	 */
	if (devvp == NULL) {
		error = nlookup_init(&nd, volume->vol_name,
				     UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred,
					   &volume->devvp);
		nlookup_done(&nd);
	} else {
		error = 0;
		volume->devvp = devvp;
	}

	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 && vcount(volume->devvp) > 0)
		error = EBUSY;
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
	volume->maxraw_off = ondisk->vol_buf_end;

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	/*
	 * Set the root volume.  HAMMER special cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->nvolumes = ondisk->vol_count;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
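	/*
	 * Editorial note: f_blocks is reported in HAMMER_BUFSIZE units.
	 * With the usual constants (HAMMER_LARGEBLOCK_SIZE = 8MB and
	 * HAMMER_BUFSIZE = 16KB) each big-block contributes
	 * 8MB / 16KB = 512 filesystem blocks, so e.g. 1000 big-blocks
	 * appear as 512000 blocks in statfs output.
	 */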
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		hammer_free_volume(volume);
	}
	return (error);
}
/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
	if (volume->devvp) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		if (volume->io.hmp->ronly) {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		} else {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD);
		}
		vn_unlock(volume->devvp);
	}
	return(0);
}
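
/*
 * Editorial note: the paired VOP_OPEN()/VOP_CLOSE() calls above swap the
 * device's open mode without ever letting the open count drop to zero;
 * the new mode is opened first, then the old one is closed.  This is a
 * reading of the intent at this call site, not a general requirement of
 * the VOP interface.
 */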
/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan, so -1 is returned on failure rather than a positive error code.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	hammer_mount_t hmp = volume->io.hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 */
	hammer_io_clear_modify(&volume->io, 1);
	volume->io.waitdep = 1;

	/*
	 * Clean up the persistent ref ioerror might have on the volume.
	 */
	if (volume->io.ioerror)
		hammer_io_clear_error_noassert(&volume->io);

	/*
	 * This should release the bp.  Releasing the volume with flush set
	 * implies the interlock is set.
	 */
	hammer_ref_interlock_true(&volume->io.lock);
	hammer_rel_volume(volume, 1);
	KKASSERT(volume->io.bp == NULL);

	/*
	 * There should be no references on the volume, no clusters, and
	 * no super-clusters.
	 */
	KKASSERT(hammer_norefs(&volume->io.lock));

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly) {
			/*
			 * Make sure we don't sync anything to disk if we
			 * are in read-only mode (1) or critically-errored
			 * (2).  Note that there may be dirty buffers in
			 * normal read-only mode from crash recovery.
			 */
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD);
		} else {
			/*
			 * Normal termination, save any dirty buffers
			 * (XXX there really shouldn't be any).
			 */
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}
static
void
hammer_free_volume(hammer_volume_t volume)
{
	hammer_mount_t hmp = volume->io.hmp;

	if (volume->vol_name) {
		kfree(volume->vol_name, hmp->m_misc);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, hmp->m_misc);
}
/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}

	/*
	 * Reference the volume, load/check the data on the 0->1 transition.
	 * hammer_load_volume() will dispose of the interlock on return,
	 * and also clean up the ref count on error.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		*errorp = hammer_load_volume(volume);
		if (*errorp)
			volume = NULL;
	} else {
		KKASSERT(volume->ondisk);
		*errorp = 0;
	}
	return(volume);
}
int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	/*
	 * Reference the volume and deal with the check condition used to
	 * load its ondisk info.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		error = hammer_load_volume(volume);
	} else {
		KKASSERT(volume->ondisk);
		error = 0;
	}
	return (error);
}
hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);

	/*
	 * Reference the volume and deal with the check condition used to
	 * load its ondisk info.
	 */
	if (hammer_ref_interlock(&volume->io.lock)) {
		*errorp = hammer_load_volume(volume);
		if (*errorp)
			volume = NULL;
	} else {
		KKASSERT(volume->ondisk);
		*errorp = 0;
	}
	return(volume);
}
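
/*
 * Editorial note on the pattern above: hammer_ref_interlock() returns
 * non-zero on a 0->1 reference transition (or a deferred CHECK state),
 * leaving the interlock held; the caller must then finish initialization
 * and dispose of the interlock.  A zero return means the structure was
 * already live and its ondisk data is guaranteed valid.  This summary is
 * inferred from the call sites in this file.
 */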
/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * the interlock is held on call.  The interlock will be released on return.
 * The reference will also be released on return if an error occurs.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io,
				       volume->maxraw_off);
		if (error == 0) {
			volume->ondisk = (void *)volume->io.bp->b_data;
			hammer_ref_interlock_done(&volume->io.lock);
		} else {
			hammer_rel_volume(volume, 1);
		}
	} else {
		error = 0;
	}
	return(error);
}
/*
 * Release a previously acquired reference on the volume.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int locked)
{
	struct buf *bp;

	if (hammer_rel_interlock(&volume->io.lock, locked)) {
		volume->ondisk = NULL;
		bp = hammer_io_release(&volume->io, locked);
		hammer_rel_interlock_done(&volume->io.lock, locked);
		if (bp)
			brelse(bp);
	}
}
int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
	hammer_volume_t vol;
	int i;

	for (i = 0; i < hmp->nvolumes; ++i) {
		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
		if (vol == NULL)
			return(EINVAL);
	}
	return(0);
}
/************************************************************************
 *				BUFFERS					*
 ************************************************************************/

/*
 * Manage buffers.  Currently most blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int bytes, int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t	zone2_offset;
	hammer_io_type_t iotype;
	int vol_no;
	int zone;

	buf_offset &= ~HAMMER_BUFMASK64;
again:
	/*
	 * Shortcut if the buffer is already cached
	 */
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
	if (buffer) {
		/*
		 * Once refed the ondisk field will not be cleared by
		 * any other action.  Shortcut the operation if the
		 * ondisk structure is valid.
		 */
found_aliased:
		if (hammer_ref_interlock(&buffer->io.lock) == 0) {
			hammer_io_advance(&buffer->io);
			KKASSERT(buffer->ondisk);
			*errorp = 0;
			return(buffer);
		}

		/*
		 * 0->1 transition or deferred 0->1 transition (CHECK),
		 * interlock now held.  Shortcut if ondisk is already
		 * assigned.
		 */
		++hammer_count_refedbufs;
		if (buffer->ondisk) {
			hammer_io_advance(&buffer->io);
			hammer_ref_interlock_done(&buffer->io.lock);
			*errorp = 0;
			return(buffer);
		}

		/*
		 * The buffer is no longer loose if it has a ref, and
		 * cannot become loose once it gains a ref.  Loose
		 * buffers will never be in a modified state.  This should
		 * only occur on the 0->1 transition of refs.
		 *
		 * lose_list can be modified via a biodone() interrupt
		 * so the io_token must be held.
		 */
		if (buffer->io.mod_list == &hmp->lose_list) {
			lwkt_gettoken(&hmp->io_token);
			if (buffer->io.mod_list == &hmp->lose_list) {
				TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
					     mod_entry);
				buffer->io.mod_list = NULL;
				KKASSERT(buffer->io.modified == 0);
			}
			lwkt_reltoken(&hmp->io_token);
		}
		goto found;
	} else if (hmp->ronly) {
		/*
		 * If this is a read-only mount there could be an alias
		 * in the raw-zone.  If there is we use that buffer instead.
		 *
		 * rw mounts will not have aliases.  Also note when going
		 * from ro -> rw the recovered raw buffers are flushed and
		 * reclaimed, so again there will not be any aliases once
		 * the mount is rw.
		 */
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   (buf_offset & ~HAMMER_OFF_ZONE_MASK) |
				   HAMMER_ZONE_RAW_BUFFER);
		if (buffer) {
			kprintf("HAMMER: recovered aliased %016jx\n",
				(intmax_t)buf_offset);
			goto found_aliased;
		}
	}

	/*
	 * What is the buffer class?
	 */
	zone = HAMMER_ZONE_DECODE(buf_offset);

	switch(zone) {
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
		break;
	case HAMMER_ZONE_META_INDEX:
	default:
		/*
		 * NOTE: inode data and directory entries are placed in this
		 *	 zone.  inode atime/mtime is updated in-place and thus
		 *	 buffers containing inodes must be synchronized as
		 *	 meta-buffers, same as buffers containing B-Tree info.
		 */
		iotype = HAMMER_STRUCTURE_META_BUFFER;
		break;
	}

	/*
	 * Handle blockmap offset translations
	 */
	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
	} else {
		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
		zone2_offset = buf_offset;
		*errorp = 0;
	}
	if (*errorp)
		return(NULL);

	/*
	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
	 *	 specifications.
	 */
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	KKASSERT(zone2_offset < volume->maxbuf_off);

	/*
	 * Allocate a new buffer structure.  We will check for races later.
	 */
	++hammer_count_buffers;
	buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
			 M_WAITOK|M_ZERO|M_USE_RESERVE);
	buffer->zone2_offset = zone2_offset;
	buffer->zoneX_offset = buf_offset;

	hammer_io_init(&buffer->io, volume, iotype);
	buffer->io.offset = volume->ondisk->vol_buf_beg +
			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
	buffer->io.bytes = bytes;
	TAILQ_INIT(&buffer->clist);
	hammer_ref_interlock_true(&buffer->io.lock);

	/*
	 * Insert the buffer into the RB tree and handle late collisions.
	 */
	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
		hammer_rel_volume(volume, 0);
		buffer->io.volume = NULL;			/* safety */
		if (hammer_rel_interlock(&buffer->io.lock, 1))	/* safety */
			hammer_rel_interlock_done(&buffer->io.lock, 1);
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
		goto again;
	}
	++hammer_count_refedbufs;
found:

	/*
	 * The buffer is referenced and interlocked.  Load the buffer
	 * if necessary.  hammer_load_buffer() deals with the interlock
	 * and, if an error is returned, also deals with the ref.
	 */
	if (buffer->ondisk == NULL) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp)
			buffer = NULL;
	} else {
		hammer_io_advance(&buffer->io);
		hammer_ref_interlock_done(&buffer->io.lock);
		*errorp = 0;
	}
	return(buffer);
}
/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or
 * write-running hammer buffers must be fully synced to disk before we
 * can issue the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
	hammer_buffer_t buffer;
	int error;

	KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer && (buffer->io.modified || buffer->io.running)) {
			error = hammer_ref_buffer(buffer);
			if (error == 0) {
				hammer_io_wait(&buffer->io);
				if (buffer->io.modified) {
					hammer_io_write_interlock(&buffer->io);
					hammer_io_flush(&buffer->io, 0);
					hammer_io_done_interlock(&buffer->io);
					hammer_io_wait(&buffer->io);
				}
				hammer_rel_buffer(buffer, 0);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
}
/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
		   hammer_off_t zone2_offset, int bytes,
		   int report_conflicts)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	int vol_no;
	int error;
	int ret_error;

	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &ret_error);
	KKASSERT(ret_error == 0);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer) {
			error = hammer_ref_buffer(buffer);
			if (hammer_debug_general & 0x20000) {
				kprintf("hammer: delbufr %016jx "
					"rerr=%d 1ref=%d\n",
					(intmax_t)buffer->zoneX_offset,
					error,
					hammer_oneref(&buffer->io.lock));
			}
			if (error == 0 && !hammer_oneref(&buffer->io.lock)) {
				error = EAGAIN;
				hammer_rel_buffer(buffer, 0);
			}
			if (error == 0) {
				KKASSERT(buffer->zone2_offset == zone2_offset);
				hammer_io_clear_modify(&buffer->io, 1);
				buffer->io.reclaim = 1;
				buffer->io.waitdep = 1;
				KKASSERT(buffer->io.volume == volume);
				hammer_rel_buffer(buffer, 0);
			}
		} else {
			error = hammer_io_inval(volume, zone2_offset);
		}
		if (error) {
			ret_error = error;
			if (report_conflicts ||
			    (hammer_debug_general & 0x8000)) {
				kprintf("hammer_del_buffers: unable to "
					"invalidate %016llx buffer=%p rep=%d\n",
					(long long)base_offset,
					buffer, report_conflicts);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		zone2_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
	hammer_rel_volume(volume, 0);
	return (ret_error);
}
/*
 * Given a referenced and interlocked buffer load/validate the data.
 *
 * The buffer interlock will be released on return.  If an error is
 * returned the buffer reference will also be released (and the buffer
 * pointer will thus be stale).
 */
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	hammer_off_t limit;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->io.volume;

	if (hammer_debug_io & 0x0004) {
		kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
			(long long)buffer->zoneX_offset,
			(long long)buffer->zone2_offset,
			isnew, buffer->ondisk);
	}

	if (buffer->ondisk == NULL) {
		/*
		 * Issue the read or generate a new buffer.  When reading
		 * the limit argument controls any read-ahead clustering
		 * hammer_io_read() is allowed to do.
		 *
		 * We cannot read-ahead in the large-data zone and we cannot
		 * cross a largeblock boundary as the next largeblock might
		 * use a different buffer size.
		 */
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else if ((buffer->zoneX_offset & HAMMER_OFF_ZONE_MASK) ==
			   HAMMER_ZONE_LARGE_DATA) {
			error = hammer_io_read(volume->devvp, &buffer->io,
					       buffer->io.bytes);
		} else {
			limit = (buffer->zone2_offset +
				 HAMMER_LARGEBLOCK_MASK64) &
				~HAMMER_LARGEBLOCK_MASK64;
			limit -= buffer->zone2_offset;
			error = hammer_io_read(volume->devvp, &buffer->io,
					       limit);
		}
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	if (error == 0) {
		hammer_io_advance(&buffer->io);
		hammer_ref_interlock_done(&buffer->io.lock);
	} else {
		hammer_rel_buffer(buffer, 1);
	}
	return (error);
}
/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 *	 This routine is only called during unmount or when a volume is
 *	 removed.
 *
 * If data != NULL, it specifies a volume whose buffers should
 * be unloaded.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data)
{
	struct hammer_volume *volume = (struct hammer_volume *) data;

	/*
	 * If volume != NULL we are only interested in unloading buffers
	 * associated with a particular volume.
	 */
	if (volume != NULL && volume != buffer->io.volume)
		return(0);

	/*
	 * Clean up the persistent ref ioerror might have on the buffer
	 * and acquire a ref.  Expect a 0->1 transition.
	 */
	if (buffer->io.ioerror) {
		hammer_io_clear_error_noassert(&buffer->io);
		--hammer_count_refedbufs;
	}
	hammer_ref_interlock_true(&buffer->io.lock);
	++hammer_count_refedbufs;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 *
	 * We must set waitdep to ensure that a running buffer is waited
	 * on and released prior to us trying to unload the volume.
	 */
	hammer_io_clear_modify(&buffer->io, 1);
	hammer_flush_buffer_nodes(buffer);
	buffer->io.waitdep = 1;
	hammer_rel_buffer(buffer, 1);
	return(0);
}
/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	hammer_mount_t hmp;
	int error;
	int locked;

	/*
	 * Acquire a ref, plus the buffer will be interlocked on the
	 * 0->1 transition.
	 */
	locked = hammer_ref_interlock(&buffer->io.lock);
	hmp = buffer->io.hmp;

	/*
	 * At this point a biodone() will not touch the buffer other then
	 * incidental bits.  However, lose_list can be modified via
	 * a biodone() interrupt.
	 *
	 * No longer loose.  lose_list requires the io_token.
	 */
	if (buffer->io.mod_list == &hmp->lose_list) {
		lwkt_gettoken(&hmp->io_token);
		if (buffer->io.mod_list == &hmp->lose_list) {
			TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
				     mod_entry);
			buffer->io.mod_list = NULL;
		}
		lwkt_reltoken(&hmp->io_token);
	}

	if (locked) {
		++hammer_count_refedbufs;
		error = hammer_load_buffer(buffer, 0);
		/* NOTE: on error the buffer pointer is stale */
	} else {
		error = 0;
	}
	return(error);
}
/*
 * Release a reference on the buffer.  On the 1->0 transition the
 * underlying IO will be released but the data reference is left
 * cached.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int locked)
{
	hammer_mount_t hmp;
	hammer_volume_t volume;
	struct buf *bp = NULL;
	int freeme = 0;

	hmp = buffer->io.hmp;

	if (hammer_rel_interlock(&buffer->io.lock, locked) == 0)
		return;

	/*
	 * hammer_count_refedbufs accounting.  Decrement if we are in
	 * the error path or if CHECK is clear.
	 *
	 * If we are not in the error path and CHECK is set the caller
	 * probably just did a hammer_ref() and didn't account for it,
	 * so we don't account for the loss here.
	 */
	if (locked || (buffer->io.lock.refs & HAMMER_REFS_CHECK) == 0)
		--hammer_count_refedbufs;

	/*
	 * If the caller locked us or the normal released transitions
	 * from 1->0 (and acquired the lock) attempt to release the
	 * io.  If the caller locked us we tell hammer_io_release()
	 * to flush (which would be the unload or failure path).
	 */
	bp = hammer_io_release(&buffer->io, locked);

	/*
	 * If the buffer has no bp association and no refs we can destroy
	 * it.
	 *
	 * NOTE: It is impossible for any associated B-Tree nodes to have
	 *	 refs if the buffer has no additional refs.
	 */
	if (buffer->io.bp == NULL && hammer_norefs(&buffer->io.lock)) {
		RB_REMOVE(hammer_buf_rb_tree,
			  &buffer->io.hmp->rb_bufs_root,
			  buffer);
		volume = buffer->io.volume;
		buffer->io.volume = NULL;	/* sanity */
		hammer_rel_volume(volume, 0);
		hammer_io_clear_modlist(&buffer->io);
		hammer_flush_buffer_nodes(buffer);
		KKASSERT(TAILQ_EMPTY(&buffer->clist));
		freeme = 1;
	}

	/*
	 * Cleanup
	 */
	hammer_rel_interlock_done(&buffer->io.lock, locked);
	if (bp)
		brelse(bp);
	if (freeme) {
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
	}
}
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 *	 passed cached *bufferp to match against either zoneX or zone2.
 */
static void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	      int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}
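
/*
 * Editorial sketch of the caching pattern the *bufferp cookie enables
 * (hypothetical caller, not part of this file):
 *
 *	hammer_buffer_t buffer = NULL;
 *	void *p1 = hammer_bread(hmp, off1, &error, &buffer);
 *	void *p2 = hammer_bread(hmp, off2, &error, &buffer);
 *	...
 *	if (buffer)
 *		hammer_rel_buffer(buffer, 0);
 *
 * Sequential offsets falling in the same 16KB buffer reuse the cached ref
 * instead of performing a new lookup.
 */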
void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
	     int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}
void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		 int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	     int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}
void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
	    int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}
void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}
/************************************************************************
 *				NODES					*
 ************************************************************************/

/*
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the other structure.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_node_t node;
	int doload;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), hmp->m_misc, M_WAITOK|M_ZERO|M_USE_RESERVE);
		node->node_offset = node_offset;
		node->hmp = hmp;
		TAILQ_INIT(&node->cursor_list);
		TAILQ_INIT(&node->cache_list);
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, hmp->m_misc);
			goto again;
		}
		doload = hammer_ref_interlock_true(&node->lock);
	} else {
		doload = hammer_ref_interlock(&node->lock);
	}
	if (doload) {
		*errorp = hammer_load_node(trans, node, isnew);
		trans->flags |= HAMMER_TRANSF_DIDIO;
		if (*errorp)
			node = NULL;
	} else {
		KKASSERT(node->ondisk);
		*errorp = 0;
		hammer_io_advance(&node->buffer->io);
	}
	return(node);
}
/*
 * Reference an already-referenced node.  0->1 transitions should assert
 * so we do not have to deal with hammer_ref() setting CHECK.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(hammer_isactive(&node->lock) && node->ondisk != NULL);
	hammer_ref(&node->lock);
}
/*
 * Load a node's on-disk data reference.  Called with the node referenced
 * and the interlock held.
 *
 * On return the node interlock will be unlocked.  If a non-zero error code
 * is returned the node will also be dereferenced (and the caller's pointer
 * will be stale).
 */
static int
hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	hammer_off_t buf_offset;
	int error;

	error = 0;
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the jist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		} else {
			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
			buffer = hammer_get_buffer(node->hmp, buf_offset,
						   HAMMER_BUFSIZE, 0, &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error)
			goto failed;
		node->ondisk = (void *)((char *)buffer->ondisk +
					(node->node_offset & HAMMER_BUFMASK));

		/*
		 * Check CRC.  NOTE: Neither flag is set and the CRC is not
		 * generated on new B-Tree nodes.
		 */
		if (isnew == 0 &&
		    (node->flags & HAMMER_NODE_CRCANY) == 0) {
			if (hammer_crc_test_btree(node->ondisk) == 0) {
				if (hammer_debug_critical)
					Debugger("CRC FAILED: B-TREE NODE");
				node->flags |= HAMMER_NODE_CRCBAD;
			} else {
				node->flags |= HAMMER_NODE_CRCGOOD;
			}
		}
	}
	if (node->flags & HAMMER_NODE_CRCBAD) {
		if (trans->flags & HAMMER_TRANSF_CRCDOM)
			error = EDOM;
		else
			error = EIO;
	}
failed:
	if (error) {
		_hammer_rel_node(node, 1);
	} else {
		hammer_ref_interlock_done(&node->lock);
	}
	return (error);
}
/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
		     int *errorp)
{
	hammer_node_t node;
	int doload;

	node = cache->node;
	if (node != NULL) {
		doload = hammer_ref_interlock(&node->lock);
		if (doload) {
			*errorp = hammer_load_node(trans, node, 0);
			if (*errorp)
				node = NULL;
		} else {
			KKASSERT(node->ondisk);
			if (node->flags & HAMMER_NODE_CRCBAD) {
				if (trans->flags & HAMMER_TRANSF_CRCDOM)
					*errorp = EDOM;
				else
					*errorp = EIO;
				_hammer_rel_node(node, 0);
				node = NULL;
			} else {
				*errorp = 0;
			}
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}
/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 *
 * If locked is non-zero the passed node has been interlocked by the
 * caller and we are in the failure/unload path, otherwise it has not and
 * we are doing a normal release.
 *
 * This function will dispose of the interlock and the reference.
 * On return the node pointer is stale.
 */
void
_hammer_rel_node(hammer_node_t node, int locked)
{
	hammer_buffer_t buffer;

	/*
	 * Deref the node.  If this isn't the 1->0 transition we're basically
	 * done.  If locked is non-zero this function will just deref the
	 * locked node and return TRUE, otherwise it will deref the locked
	 * node and either lock and return TRUE on the 1->0 transition or
	 * not lock and return FALSE.
	 */
	if (hammer_rel_interlock(&node->lock, locked) == 0)
		return;

	/*
	 * Either locked was non-zero and we are interlocked, or the
	 * hammer_rel_interlock() call returned non-zero and we are
	 * interlocked.
	 *
	 * The ref-count must still be decremented if locked != 0 so
	 * the cleanup required still varies a bit.
	 *
	 * hammer_flush_node() when called with 1 or 2 will dispose of
	 * the lock and possible ref-count.
	 */
	if (node->ondisk == NULL) {
		hammer_flush_node(node, locked + 1);
		/* node is stale now */
		return;
	}

	/*
	 * Do not disassociate the node from the buffer if it represents
	 * a modified B-Tree node that still needs its crc to be generated.
	 */
	if (node->flags & HAMMER_NODE_NEEDSCRC) {
		hammer_rel_interlock_done(&node->lock, locked);
		return;
	}

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		/*
		 * Normal release.
		 */
		hammer_rel_interlock_done(&node->lock, locked);
	} else {
		/*
		 * Destroy the node.
		 */
		hammer_flush_node(node, locked + 1);
		/* node is stale */
	}
	hammer_rel_buffer(buffer, 0);
}
void
hammer_rel_node(hammer_node_t node)
{
	_hammer_rel_node(node, 0);
}
/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}
/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
	/*
	 * If the node doesn't exist, or is being deleted, don't cache it!
	 *
	 * The node can only ever be NULL in the I/O failure path.
	 */
	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
		return;
	if (cache->node == node)
		return;
	while (cache->node)
		hammer_uncache_node(cache);
	if (node->flags & HAMMER_NODE_DELETED)
		return;
	cache->node = node;
	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}
void
hammer_uncache_node(hammer_node_cache_t cache)
{
	hammer_node_t node;

	if ((node = cache->node) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
		if (TAILQ_EMPTY(&node->cache_list))
			hammer_flush_node(node, 0);
	}
}
/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 *
 * locked == 0	Normal unlocked operation
 * locked == 1	Call hammer_rel_interlock_done(..., 0);
 * locked == 2	Call hammer_rel_interlock_done(..., 1);
 *
 * XXX for now this isn't even close to being MPSAFE so the refs check
 *     is sufficient.
 */
void
hammer_flush_node(hammer_node_t node, int locked)
{
	hammer_node_cache_t cache;
	hammer_buffer_t buffer;
	hammer_mount_t hmp = node->hmp;
	int dofree;

	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
	}

	/*
	 * NOTE: refs is predisposed if another thread is blocking and
	 *	 will be larger than 0 in that case.  We aren't MPSAFE
	 *	 here.
	 */
	if (node->ondisk == NULL && hammer_norefs(&node->lock)) {
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		dofree = 1;
	} else {
		dofree = 0;
	}

	/*
	 * Deal with the interlock if locked == 1 or locked == 2.
	 */
	if (locked)
		hammer_rel_interlock_done(&node->lock, locked - 1);

	/*
	 * Destroy if requested
	 */
	if (dofree) {
		--hammer_count_nodes;
		kfree(node, hmp->m_misc);
	}
}
/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
		KKASSERT(node->ondisk == NULL);
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

		if (hammer_try_interlock_norefs(&node->lock)) {
			hammer_ref(&node->lock);
			node->flags |= HAMMER_NODE_FLUSH;
			_hammer_rel_node(node, 1);
		} else {
			KKASSERT(node->buffer != NULL);
			buffer = node->buffer;
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
	}
}
/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    hint, errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}
/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * returned data.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  u_int16_t rec_type, hammer_off_t *data_offsetp,
		  struct hammer_buffer **data_bufferp,
		  hammer_off_t hint, int *errorp)
{
	void *data;
	int zone;

	/*
	 * Allocate data directly from the blockmap.
	 */
	if (data_len) {
		switch(rec_type) {
		case HAMMER_RECTYPE_INODE:
		case HAMMER_RECTYPE_DIRENTRY:
		case HAMMER_RECTYPE_EXT:
		case HAMMER_RECTYPE_FIX:
		case HAMMER_RECTYPE_PFS:
		case HAMMER_RECTYPE_SNAPSHOT:
		case HAMMER_RECTYPE_CONFIG:
			zone = HAMMER_ZONE_META_INDEX;
			break;
		case HAMMER_RECTYPE_DATA:
		case HAMMER_RECTYPE_DB:
			if (data_len <= HAMMER_BUFSIZE / 2) {
				zone = HAMMER_ZONE_SMALL_DATA_INDEX;
			} else {
				data_len = (data_len + HAMMER_BUFMASK) &
					   ~HAMMER_BUFMASK;
				zone = HAMMER_ZONE_LARGE_DATA_INDEX;
			}
			break;
		default:
			panic("hammer_alloc_data: rec_type %04x unknown",
			      rec_type);
			zone = 0;	/* NOT REACHED */
			break;
		}
		*data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
						      hint, errorp);
	} else {
		*data_offsetp = 0;
	}
	if (*errorp == 0 && data_bufferp) {
		if (data_len) {
			data = hammer_bread_ext(trans->hmp, *data_offsetp,
						data_len, errorp, data_bufferp);
		} else {
			data = NULL;
		}
	} else {
		data = NULL;
	}
	return(data);
}
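
/*
 * Editorial note: the small/large split above keeps records that fit in
 * half a 16KB buffer in the small-data zone, while larger allocations are
 * rounded up to a HAMMER_BUFSIZE multiple and placed in the large-data
 * zone.  E.g. a 5000-byte record stays small-data; a 10000-byte record is
 * rounded to 16384 bytes and goes to large-data.
 */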
/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);
int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;
	if (waitfor == MNT_WAIT) {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	} else {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	return(info.error);
}
/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
		      hammer_sync_scan1, hammer_sync_scan2, &info);
	if (info.error == 0 && waitfor == MNT_WAIT) {
		info.waitfor = waitfor;
		vmntvnodescan(hmp->mp, VMSC_GETVP,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	if (waitfor == MNT_WAIT) {
		hammer_flusher_sync(hmp);
		hammer_flusher_sync(hmp);
	} else {
		hammer_flusher_async(hmp, NULL);
		hammer_flusher_async(hmp, NULL);
	}
	return(info.error);
}
static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_inode *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}
static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, MNT_NOWAIT, 0);
	if (error)
		info->error = error;
	return(0);
}