/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.69.2.1 2008/07/16 18:39:31 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */
#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>
static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_node_t node, int isnew);
static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zoneX_offset < buf2->zoneX_offset)
		return(-1);
	if (buf1->zoneX_offset > buf2->zoneX_offset)
		return(1);
	return(0);
}
static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);
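
/*
 * Example (illustrative sketch, not part of the original source):
 * RB_GENERATE2 emits a keyed RB_LOOKUP in addition to the usual
 * red-black tree operations, so a structure can be located directly
 * by its key without building a dummy node, e.g.:
 *
 *	hammer_volume_t vol;
 *
 *	vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
 */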
/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() during the mount are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), M_HAMMER, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, M_HAMMER);
	hammer_io_init(&volume->io, hmp, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;
	/*
	 * Get the device vnode
	 */
	error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
	nlookup_done(&nd);
	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 &&
	    count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
		error = EBUSY;
	}
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;
	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
	volume->maxraw_off = ondisk->vol_buf_end;

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}
	/*
	 * Set the root volume.  HAMMER special cases the root volume's
	 * structure.  We do not hold a ref because this would prevent
	 * related I/O from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->nvolumes = ondisk->vol_count;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		hammer_free_volume(volume);
	}
	return (error);
}
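
/*
 * Usage sketch (hypothetical caller, for illustration only): the mount
 * code is expected to install every volume named in the mount arguments
 * before touching any other HAMMER structures, roughly:
 *
 *	for (i = 0; error == 0 && i < nvolumes; ++i)
 *		error = hammer_install_volume(hmp, volnames[i]);
 *
 * where nvolumes and volnames[] are assumed to come from the mount
 * utility's argument structure.
 */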
/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
	if (volume->devvp) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		if (volume->io.hmp->ronly) {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		} else {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD);
		}
		vn_unlock(volume->devvp);
	}
	return(0);
}
/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue scan
 * so returns -1 on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	struct hammer_mount *hmp = volume->io.hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
	struct buf *bp;

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * Release our buffer and flush anything left in the buffer cache.
	 */
	volume->io.waitdep = 1;
	bp = hammer_io_release(&volume->io, 1);
	hammer_io_clear_modlist(&volume->io);

	/*
	 * There should be no references on the volume, no clusters, and
	 * no super-clusters.
	 */
	KKASSERT(volume->io.lock.refs == 0);
	if (bp)
		brelse(bp);

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp
		) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly) {
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD);
		} else {
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}
static
void
hammer_free_volume(hammer_volume_t volume)
{
	if (volume->vol_name) {
		kfree(volume->vol_name, M_HAMMER);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, M_HAMMER);
}
/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(volume);
}
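
/*
 * Illustrative pairing (sketch, not original source): every successful
 * hammer_get_volume() must eventually be balanced by hammer_rel_volume():
 *
 *	volume = hammer_get_volume(hmp, vol_no, &error);
 *	if (volume) {
 *		... access volume->ondisk ...
 *		hammer_rel_volume(volume, 0);
 *	}
 */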
int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		error = hammer_load_volume(volume);
		if (error)
			hammer_rel_volume(volume, 1);
	} else {
		error = 0;
	}
	return (error);
}
hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return (volume);
}
/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple get's.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	++volume->io.loading;
	hammer_lock_ex(&volume->io.lock);

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io,
				       volume->maxraw_off);
		if (error == 0)
			volume->ondisk = (void *)volume->io.bp->b_data;
	} else {
		error = 0;
	}
	--volume->io.loading;
	hammer_unlock(&volume->io.lock);
	return(error);
}
/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock to be held.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
	struct buf *bp = NULL;

	crit_enter();
	if (volume->io.lock.refs == 1) {
		++volume->io.loading;
		hammer_lock_ex(&volume->io.lock);
		if (volume->io.lock.refs == 1) {
			volume->ondisk = NULL;
			bp = hammer_io_release(&volume->io, flush);
		}
		--volume->io.loading;
		hammer_unlock(&volume->io.lock);
	}
	hammer_unref(&volume->io.lock);
	if (bp)
		brelse(bp);
	crit_exit();
}
int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
	hammer_volume_t vol;
	int i;

	for (i = 0; i < hmp->nvolumes; ++i) {
		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
		if (vol == NULL)
			return(EINVAL);
	}
	return(0);
}
/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are translated
 * to zone-2 buffer offsets.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int bytes, int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t	zone2_offset;
	hammer_io_type_t iotype;
	int vol_no;
	int zone;

	buf_offset &= ~HAMMER_BUFMASK64;
again:
	/*
	 * Shortcut if the buffer is already cached
	 */
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
	if (buffer) {
		if (buffer->io.lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&buffer->io.lock);

		/*
		 * Once refed the ondisk field will not be cleared by
		 * a disassociation.
		 */
		if (buffer->ondisk && buffer->io.loading == 0) {
			*errorp = 0;
			return(buffer);
		}

		/*
		 * The buffer is no longer loose if it has a ref, and
		 * cannot become loose once it gains a ref.  Loose
		 * buffers will never be in a modified state.  This should
		 * only occur on the 0->1 transition of refs.
		 *
		 * lose_list can be modified via a biodone() interrupt.
		 */
		if (buffer->io.mod_list == &hmp->lose_list) {
			crit_enter();	/* biodone race against list */
			TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
				     mod_entry);
			crit_exit();
			buffer->io.mod_list = NULL;
			KKASSERT(buffer->io.modified == 0);
		}
		goto found;
	}
	/*
	 * What is the buffer class?
	 */
	zone = HAMMER_ZONE_DECODE(buf_offset);

	switch(zone) {
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
		break;
	case HAMMER_ZONE_META_INDEX:
	default:
		/*
		 * NOTE: inode data and directory entries are placed in this
		 * zone.  inode atime/mtime is updated in-place and thus
		 * buffers containing inodes must be synchronized as
		 * meta-buffers, same as buffers containing B-Tree info.
		 */
		iotype = HAMMER_STRUCTURE_META_BUFFER;
		break;
	}
	/*
	 * Handle blockmap offset translations
	 */
	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
	} else {
		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
		zone2_offset = buf_offset;
		*errorp = 0;
	}
	if (*errorp)
		return(NULL);

	/*
	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
	 * specifications.
	 */
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	KKASSERT(zone2_offset < volume->maxbuf_off);
	/*
	 * Allocate a new buffer structure.  We will check for races later.
	 */
	++hammer_count_buffers;
	buffer = kmalloc(sizeof(*buffer), M_HAMMER,
			 M_WAITOK|M_ZERO|M_USE_RESERVE);
	buffer->zone2_offset = zone2_offset;
	buffer->zoneX_offset = buf_offset;
	buffer->volume = volume;

	hammer_io_init(&buffer->io, hmp, iotype);
	buffer->io.offset = volume->ondisk->vol_buf_beg +
			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
	buffer->io.bytes = bytes;
	TAILQ_INIT(&buffer->clist);
	hammer_ref(&buffer->io.lock);

	/*
	 * Insert the buffer into the RB tree and handle late collisions.
	 */
	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
		hammer_unref(&buffer->io.lock);
		--hammer_count_buffers;
		kfree(buffer, M_HAMMER);
		goto again;
	}
	++hammer_count_refedbufs;
found:
	/*
	 * Deal with on-disk info and loading races.
	 */
	if (buffer->ondisk == NULL || buffer->io.loading) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp) {
			hammer_rel_buffer(buffer, 1);
			buffer = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(buffer);
}
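
/*
 * Illustrative sketch (not part of the original source): most callers
 * reach this routine indirectly via hammer_bread()/hammer_bnew() below.
 * A direct call pairs with hammer_rel_buffer():
 *
 *	buffer = hammer_get_buffer(hmp, buf_offset, HAMMER_BUFSIZE,
 *				   0, &error);
 *	if (buffer) {
 *		... access buffer->ondisk ...
 *		hammer_rel_buffer(buffer, 0);
 *	}
 */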
/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty hammer
 * buffers must be fully synced to disk before we can issue the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
	hammer_buffer_t buffer;
	int error;

	KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer && buffer->io.modified) {
			error = hammer_ref_buffer(buffer);
			if (error == 0) {
				if (buffer->io.modified) {
					hammer_io_write_interlock(&buffer->io);
					hammer_io_flush(&buffer->io);
					hammer_io_done_interlock(&buffer->io);
					hammer_io_wait(&buffer->io);
				}
				hammer_rel_buffer(buffer, 0);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
}
/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 */
void
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
		   hammer_off_t zone2_offset, int bytes)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	int vol_no;
	int error;

	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	KKASSERT(error == 0);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer) {
			error = hammer_ref_buffer(buffer);
			if (error == 0) {
				KKASSERT(buffer->zone2_offset == zone2_offset);
				hammer_io_clear_modify(&buffer->io, 1);
				buffer->io.reclaim = 1;
				KKASSERT(buffer->volume == volume);
				hammer_rel_buffer(buffer, 0);
			}
		} else {
			hammer_io_inval(volume, zone2_offset);
		}
		base_offset += HAMMER_BUFSIZE;
		zone2_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
	hammer_rel_volume(volume, 0);
}
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->volume;
	++buffer->io.loading;
	hammer_lock_ex(&buffer->io.lock);

	if (hammer_debug_io & 0x0001) {
		kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
			buffer->zoneX_offset, buffer->zone2_offset, isnew,
			buffer->ondisk);
	}

	if (buffer->ondisk == NULL) {
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else {
			error = hammer_io_read(volume->devvp, &buffer->io,
					       volume->maxraw_off);
		}
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	--buffer->io.loading;
	hammer_unlock(&buffer->io.lock);
	return (error);
}
/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
	++hammer_count_refedbufs;
	hammer_ref(&buffer->io.lock);
	hammer_flush_buffer_nodes(buffer);
	KKASSERT(buffer->io.lock.refs == 1);
	hammer_rel_buffer(buffer, 2);
	return(0);
}
/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	int error;

	if (buffer->io.lock.refs == 0)
		++hammer_count_refedbufs;
	hammer_ref(&buffer->io.lock);

	/*
	 * At this point a biodone() will not touch the buffer other than
	 * incidental bits.  However, lose_list can be modified via
	 * a biodone() interrupt.
	 *
	 * No longer loose.
	 */
	if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
		crit_enter();
		TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
		buffer->io.mod_list = NULL;
		crit_exit();
	}

	if (buffer->ondisk == NULL || buffer->io.loading) {
		error = hammer_load_buffer(buffer, 0);
		if (error) {
			hammer_rel_buffer(buffer, 1);
			/*
			 * NOTE: buffer pointer can become stale after
			 * the above release.
			 */
		}
	} else {
		error = 0;
	}
	return(error);
}
/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
	hammer_volume_t volume;
	struct buf *bp = NULL;
	int freeme = 0;

	crit_enter();
	if (buffer->io.lock.refs == 1) {
		++buffer->io.loading;	/* force interlock check */
		hammer_lock_ex(&buffer->io.lock);
		if (buffer->io.lock.refs == 1) {
			bp = hammer_io_release(&buffer->io, flush);

			if (buffer->io.lock.refs == 1)
				--hammer_count_refedbufs;

			if (buffer->io.bp == NULL &&
			    buffer->io.lock.refs == 1) {
				/*
				 * Final cleanup
				 *
				 * NOTE: It is impossible for any associated
				 * B-Tree nodes to have refs if the buffer
				 * has no additional refs.
				 */
				RB_REMOVE(hammer_buf_rb_tree,
					  &buffer->io.hmp->rb_bufs_root,
					  buffer);
				volume = buffer->volume;
				buffer->volume = NULL; /* sanity */
				hammer_rel_volume(volume, 0);
				hammer_io_clear_modlist(&buffer->io);
				hammer_flush_buffer_nodes(buffer);
				KKASSERT(TAILQ_EMPTY(&buffer->clist));
				freeme = 1;
			}
		}
		--buffer->io.loading;
		hammer_unlock(&buffer->io.lock);
	}
	hammer_unref(&buffer->io.lock);
	crit_exit();
	if (bp)
		brelse(bp);
	if (freeme) {
		--hammer_count_buffers;
		kfree(buffer, M_HAMMER);
	}
}
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	      int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}
void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
	     int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}
void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		 int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}
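
/*
 * Typical read-path usage (illustrative sketch; rec_offset is a
 * hypothetical zone-X offset supplied by the caller):
 *
 *	struct hammer_buffer *buffer = NULL;
 *	void *data;
 *	int error;
 *
 *	data = hammer_bread(hmp, rec_offset, &error, &buffer);
 *	if (error == 0) {
 *		... examine data; further hammer_bread() calls with the
 *		    same &buffer reuse the cached buffer when possible ...
 *	}
 *	if (buffer)
 *		hammer_rel_buffer(buffer, 0);
 */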
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	     int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}
void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
	    int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}
void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}
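
/*
 * hammer_bnew() usage sketch (illustrative only, not original source):
 * because no media read occurs it is appropriate when the caller will
 * overwrite the entire buffer:
 *
 *	data = hammer_bnew(hmp, buf_offset, &error, &buffer);
 *	if (error == 0)
 *		bzero(data, HAMMER_BUFSIZE);
 */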
/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the referencing structure.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_node_t node;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), M_HAMMER, M_WAITOK|M_ZERO|M_USE_RESERVE);
		node->node_offset = node_offset;
		node->hmp = hmp;
		TAILQ_INIT(&node->cursor_list);
		TAILQ_INIT(&node->cache_list);
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, M_HAMMER);
			goto again;
		}
	}
	hammer_ref(&node->lock);
	if (node->ondisk) {
		*errorp = 0;
	} else {
		*errorp = hammer_load_node(node, isnew);
	}
	if (*errorp) {
		hammer_rel_node(node);
		node = NULL;
	}
	return(node);
}
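
/*
 * Illustrative pairing (sketch, not original source): a node obtained
 * via hammer_get_node() remains referenced until hammer_rel_node():
 *
 *	node = hammer_get_node(hmp, node_offset, 0, &error);
 *	if (node) {
 *		... access node->ondisk ...
 *		hammer_rel_node(node);
 *	}
 */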
/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
	hammer_ref(&node->lock);
}
/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	hammer_off_t buf_offset;
	int error;

	error = 0;
	++node->loading;
	hammer_lock_ex(&node->lock);
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		} else {
			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
			buffer = hammer_get_buffer(node->hmp, buf_offset,
						   HAMMER_BUFSIZE, 0, &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error)
			goto failed;
		node->ondisk = (void *)((char *)buffer->ondisk +
					(node->node_offset & HAMMER_BUFMASK));
		if (isnew == 0 &&
		    (node->flags & HAMMER_NODE_CRCGOOD) == 0) {
			if (hammer_crc_test_btree(node->ondisk) == 0)
				Debugger("CRC FAILED: B-TREE NODE");
			node->flags |= HAMMER_NODE_CRCGOOD;
		}
	}
failed:
	--node->loading;
	hammer_unlock(&node->lock);
	return (error);
}
/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(struct hammer_mount *hmp, hammer_node_cache_t cache,
		     int *errorp)
{
	hammer_node_t node;

	node = cache->node;
	if (node != NULL) {
		hammer_ref(&node->lock);
		if (node->ondisk) {
			*errorp = 0;
		} else {
			*errorp = hammer_load_node(node, 0);
		}
		if (*errorp) {
			hammer_rel_node(node);
			node = NULL;
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}
/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
	hammer_buffer_t buffer;

	/*
	 * If this isn't the last ref just decrement the ref count and
	 * return.
	 */
	if (node->lock.refs > 1) {
		hammer_unref(&node->lock);
		return;
	}

	/*
	 * If there is no ondisk info or no buffer the node failed to load,
	 * remove the last reference and destroy the node.
	 */
	if (node->ondisk == NULL) {
		hammer_unref(&node->lock);
		hammer_flush_node(node);
		/* node is stale now */
		return;
	}

	/*
	 * Do not disassociate the node from the buffer if it represents
	 * a modified B-Tree node that still needs its crc to be generated.
	 */
	if (node->flags & HAMMER_NODE_NEEDSCRC)
		return;

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		hammer_unref(&node->lock);
		hammer_rel_buffer(buffer, 0);
		return;
	}

	/*
	 * Destroy the node.
	 */
	hammer_unref(&node->lock);
	hammer_flush_node(node);
	/* node is stale */
	hammer_rel_buffer(buffer, 0);
}
/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}
/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
	/*
	 * If the node is being deleted, don't cache it!
	 */
	if (node->flags & HAMMER_NODE_DELETED)
		return;
	if (cache->node == node)
		return;
	while (cache->node)
		hammer_uncache_node(cache);
	if (node->flags & HAMMER_NODE_DELETED)
		return;
	cache->node = node;
	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}
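
/*
 * Passive-caching sketch (illustrative only; the ip->cache[] naming
 * follows the inode code and is assumed here): a structure embedding a
 * hammer_node_cache can remember a node with 0 references and cheaply
 * reacquire it later via hammer_ref_node_safe():
 *
 *	hammer_cache_node(&ip->cache[0], node);
 *	hammer_rel_node(node);
 *	...
 *	node = hammer_ref_node_safe(hmp, &ip->cache[0], &error);
 */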
void
hammer_uncache_node(hammer_node_cache_t cache)
{
	hammer_node_t node;

	if ((node = cache->node) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
		if (TAILQ_EMPTY(&node->cache_list))
			hammer_flush_node(node);
	}
}
/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
	hammer_node_cache_t cache;
	hammer_buffer_t buffer;

	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
	}
	if (node->lock.refs == 0 && node->ondisk == NULL) {
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		--hammer_count_nodes;
		kfree(node, M_HAMMER);
	}
}
/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
		KKASSERT(node->ondisk == NULL);
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

		if (node->lock.refs == 0) {
			hammer_ref(&node->lock);
			node->flags |= HAMMER_NODE_FLUSH;
			hammer_rel_node(node);
		} else {
			KKASSERT(node->loading != 0);
			KKASSERT(node->buffer != NULL);
			buffer = node->buffer;
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
	}
}
/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans->hmp, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}
/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * returned data.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  u_int16_t rec_type, hammer_off_t *data_offsetp,
		  struct hammer_buffer **data_bufferp, int *errorp)
{
	void *data;
	int zone;

	/*
	 * Allocate data
	 */
	if (data_len) {
		switch(rec_type) {
		case HAMMER_RECTYPE_INODE:
		case HAMMER_RECTYPE_DIRENTRY:
		case HAMMER_RECTYPE_EXT:
		case HAMMER_RECTYPE_FIX:
		case HAMMER_RECTYPE_PFS:
			zone = HAMMER_ZONE_META_INDEX;
			break;
		case HAMMER_RECTYPE_DATA:
		case HAMMER_RECTYPE_DB:
			if (data_len <= HAMMER_BUFSIZE / 2) {
				zone = HAMMER_ZONE_SMALL_DATA_INDEX;
			} else {
				data_len = (data_len + HAMMER_BUFMASK) &
					   ~HAMMER_BUFMASK;
				zone = HAMMER_ZONE_LARGE_DATA_INDEX;
			}
			break;
		default:
			panic("hammer_alloc_data: rec_type %04x unknown",
			      rec_type);
			zone = 0;	/* NOT REACHED */
			break;
		}
		*data_offsetp = hammer_blockmap_alloc(trans, zone,
						      data_len, errorp);
	} else {
		*data_offsetp = 0;
	}
	if (*errorp == 0 && data_bufferp) {
		if (data_len) {
			data = hammer_bread_ext(trans->hmp, *data_offsetp,
						data_len, errorp, data_bufferp);
			KKASSERT(*errorp == 0);
		} else {
			data = NULL;
		}
	} else {
		data = NULL;
	}
	KKASSERT(*errorp == 0);
	return(data);
}
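
/*
 * Allocation usage sketch (illustrative only): a record's data is
 * typically allocated, modified under the buffer modify interlock,
 * and the buffer released by the caller when finally done:
 *
 *	data_buffer = NULL;
 *	data = hammer_alloc_data(trans, data_len, rec_type,
 *				 &data_offset, &data_buffer, &error);
 *	if (error == 0) {
 *		hammer_modify_buffer(trans, data_buffer, NULL, 0);
 *		bcopy(src, data, data_len);
 *		hammer_modify_buffer_done(data_buffer);
 *	}
 *	if (data_buffer)
 *		hammer_rel_buffer(data_buffer, 0);
 */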
/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;
	if (waitfor == MNT_WAIT) {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	} else {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	return(info.error);
}
/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
		      hammer_sync_scan1, hammer_sync_scan2, &info);
	if (info.error == 0 && waitfor == MNT_WAIT) {
		info.waitfor = waitfor;
		vmntvnodescan(hmp->mp, VMSC_GETVP,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	if (waitfor == MNT_WAIT) {
		hammer_flusher_sync(hmp);
		hammer_flusher_sync(hmp);
	} else {
		hammer_flusher_async(hmp, NULL);
	}
	return(info.error);
}
static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_inode *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}
static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, MNT_NOWAIT);
	if (error)
		info->error = error;
	return(0);
}