/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.45 2008/05/15 03:36:40 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_node_t node, int isnew);
/*
 * Red-Black tree support for various structures
 */
static int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}
static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zone2_offset < buf2->zone2_offset)
		return(-1);
	if (buf1->zone2_offset > buf2->zone2_offset)
		return(1);
	return(0);
}
static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}
/*
 * Note: The lookup function for hammer_ino_rb_tree winds up being named
 * hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).  The other lookup
 * functions are normal, e.g. hammer_buf_rb_tree_RB_LOOKUP(root, zone2_offset).
 */
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zone2_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);
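
/*
 * Illustrative sketch (not part of the original source): the macros above
 * expand into per-tree lookup functions named as the comment describes,
 * e.g.
 *
 *	volume = hammer_vol_rb_tree_RB_LOOKUP(&hmp->rb_vols_root, vol_no);
 *	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &info);
 *
 * The RB_LOOKUP() convenience macro used throughout this file resolves
 * to the first form.
 */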
/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * Calls to hammer_load_volume() are single-threaded during mount.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), M_HAMMER, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, M_HAMMER);
	hammer_io_init(&volume->io, hmp, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;

	/*
	 * Get the device vnode
	 */
	error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
	nlookup_done(&nd);
	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 &&
	    count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
		error = EBUSY;
	}
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				ondisk->vol_buf_end - ondisk->vol_buf_beg);
	RB_INIT(&volume->rb_bufs_root);

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	/*
	 * Set the root volume.  HAMMER special-cases the root volume's
	 * structure.  We do not hold a ref because this would prevent
	 * related I/O from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->fsid_udev = dev2udev(vn_todev(volume->devvp));
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		hammer_free_volume(volume);
	}
	return (error);
}
/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue scan
 * so returns -1 on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	struct hammer_mount *hmp = volume->io.hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * Unload buffers.
	 */
	RB_SCAN(hammer_buf_rb_tree, &volume->rb_bufs_root, NULL,
			hammer_unload_buffer, NULL);

	/*
	 * Release our buffer and flush anything left in the buffer cache.
	 */
	volume->io.waitdep = 1;
	hammer_io_release(&volume->io, 1);

	/*
	 * There should be no references on the volume and no buffers left
	 * in its red-black tree.
	 */
	KKASSERT(volume->io.lock.refs == 0);
	KKASSERT(RB_EMPTY(&volume->rb_bufs_root));

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly) {
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD);
		} else {
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}
static
void
hammer_free_volume(hammer_volume_t volume)
{
	if (volume->vol_name) {
		kfree(volume->vol_name, M_HAMMER);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, M_HAMMER);
}
/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(volume);
}
int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		error = hammer_load_volume(volume);
		if (error)
			hammer_rel_volume(volume, 1);
	} else {
		error = 0;
	}
	return (error);
}
hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return (volume);
}
/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple get's.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	++volume->io.loading;
	hammer_lock_ex(&volume->io.lock);

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io);
		if (error == 0)
			volume->ondisk = (void *)volume->io.bp->b_data;
	} else {
		error = 0;
	}
	--volume->io.loading;
	hammer_unlock(&volume->io.lock);
	return(error);
}
/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an
 * exclusive lock to be held.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
	if (volume->io.lock.refs == 1) {
		++volume->io.loading;
		hammer_lock_ex(&volume->io.lock);
		if (volume->io.lock.refs == 1) {
			volume->ondisk = NULL;
			hammer_io_release(&volume->io, flush);
		}
		--volume->io.loading;
		hammer_unlock(&volume->io.lock);
	}
	hammer_unref(&volume->io.lock);
}
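
/*
 * Usage sketch (illustrative, not part of the original file): a volume
 * reference obtained through hammer_get_volume() is paired with
 * hammer_rel_volume():
 *
 *	int error;
 *	hammer_volume_t volume;
 *
 *	volume = hammer_get_volume(hmp, vol_no, &error);
 *	if (volume) {
 *		... access volume->ondisk ...
 *		hammer_rel_volume(volume, 0);
 *	}
 */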
/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are translated
 * to zone-2 buffer offsets.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t zoneX_offset;
	hammer_io_type_t iotype;
	int vol_no;
	int zone;

	zoneX_offset = buf_offset;
	zone = HAMMER_ZONE_DECODE(buf_offset);

	/*
	 * What is the buffer class?
	 */
	switch(zone) {
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
		break;
	default:
		iotype = HAMMER_STRUCTURE_META_BUFFER;
		break;
	}

	/*
	 * Handle blockmap offset translations
	 */
	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
		buf_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
		KKASSERT(*errorp == 0);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		buf_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
		KKASSERT(*errorp == 0);
	}

	/*
	 * Locate the buffer given its zone-2 offset.
	 */
	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_ZONE_RAW_BUFFER) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	/*
	 * NOTE: buf_offset and maxbuf_off are both full offset
	 * specifications.
	 */
	KKASSERT(buf_offset < volume->maxbuf_off);

	/*
	 * Locate and lock the buffer structure, creating one if necessary.
	 */
again:
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &volume->rb_bufs_root,
			   buf_offset);
	if (buffer == NULL) {
		++hammer_count_buffers;
		buffer = kmalloc(sizeof(*buffer), M_HAMMER, M_WAITOK|M_ZERO);
		buffer->zone2_offset = buf_offset;
		buffer->volume = volume;

		hammer_io_init(&buffer->io, hmp, iotype);
		buffer->io.offset = volume->ondisk->vol_buf_beg +
				    (buf_offset & HAMMER_OFF_SHORT_MASK);
		TAILQ_INIT(&buffer->clist);
		hammer_ref(&buffer->io.lock);

		/*
		 * Insert the buffer into the RB tree and handle late
		 * collisions.
		 */
		if (RB_INSERT(hammer_buf_rb_tree, &volume->rb_bufs_root,
			      buffer)) {
			hammer_unref(&buffer->io.lock);
			--hammer_count_buffers;
			kfree(buffer, M_HAMMER);
			goto again;
		}
		hammer_ref(&volume->io.lock);
	} else {
		hammer_ref(&buffer->io.lock);

		/*
		 * The buffer is no longer loose if it has a ref.
		 */
		if (buffer->io.mod_list == &hmp->lose_list) {
			TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
				     mod_entry);
			buffer->io.mod_list = NULL;
		}
		if (buffer->io.lock.refs == 1)
			hammer_io_reinit(&buffer->io, iotype);
		else
			KKASSERT(buffer->io.type == iotype);
	}

	/*
	 * Cache the blockmap translation
	 */
	if ((zoneX_offset & HAMMER_ZONE_RAW_BUFFER) != HAMMER_ZONE_RAW_BUFFER)
		buffer->zoneX_offset = zoneX_offset;

	/*
	 * Deal with on-disk info
	 */
	if (buffer->ondisk == NULL || buffer->io.loading) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp) {
			hammer_rel_buffer(buffer, 1);
			buffer = NULL;
		}
	} else {
		*errorp = 0;
	}
	hammer_rel_volume(volume, 0);
	return(buffer);
}
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->volume;
	++buffer->io.loading;
	hammer_lock_ex(&buffer->io.lock);

	if (buffer->ondisk == NULL) {
		if (isnew)
			error = hammer_io_new(volume->devvp, &buffer->io);
		else
			error = hammer_io_read(volume->devvp, &buffer->io);
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	--buffer->io.loading;
	hammer_unlock(&buffer->io.lock);
	return (error);
}
/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
	hammer_ref(&buffer->io.lock);
	hammer_flush_buffer_nodes(buffer);
	KKASSERT(buffer->io.lock.refs == 1);
	hammer_rel_buffer(buffer, 2);
	return(0);
}
/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	int error;

	hammer_ref(&buffer->io.lock);

	/*
	 * The buffer is no longer loose if it has a ref.
	 */
	if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
		TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
		buffer->io.mod_list = NULL;
	}

	if (buffer->ondisk == NULL || buffer->io.loading) {
		error = hammer_load_buffer(buffer, 0);
		if (error) {
			hammer_rel_buffer(buffer, 1);
			/*
			 * NOTE: buffer pointer can become stale after
			 * the above release.
			 */
		}
	} else {
		error = 0;
	}
	return(error);
}
/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
	hammer_volume_t volume;
	int freeme = 0;

	if (buffer->io.lock.refs == 1) {
		++buffer->io.loading;	/* force interlock check */
		hammer_lock_ex(&buffer->io.lock);
		if (buffer->io.lock.refs == 1) {
			hammer_io_release(&buffer->io, flush);
			hammer_flush_buffer_nodes(buffer);
			KKASSERT(TAILQ_EMPTY(&buffer->clist));

			if (buffer->io.bp == NULL &&
			    buffer->io.lock.refs == 1) {
				/*
				 * Final cleanup
				 */
				volume = buffer->volume;
				RB_REMOVE(hammer_buf_rb_tree,
					  &volume->rb_bufs_root, buffer);
				buffer->volume = NULL; /* sanity */
				hammer_rel_volume(volume, 0);
				freeme = 1;
			}
		}
		--buffer->io.loading;
		hammer_unlock(&buffer->io.lock);
	}
	hammer_unref(&buffer->io.lock);
	if (freeme) {
		KKASSERT(buffer->io.mod_list == NULL);
		--hammer_count_buffers;
		kfree(buffer, M_HAMMER);
	}
}
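
/*
 * Usage sketch (illustrative): hammer_get_buffer() and hammer_rel_buffer()
 * pair the same way the volume routines do:
 *
 *	int error;
 *	hammer_buffer_t buffer;
 *
 *	buffer = hammer_get_buffer(hmp, buf_offset, 0, &error);
 *	if (buffer) {
 *		... access buffer->ondisk ...
 *		hammer_rel_buffer(buffer, 0);
 *	}
 */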
/*
 * Remove the zoneX translation cache for a buffer given its zone-2 offset.
 */
void
hammer_uncache_buffer(hammer_mount_t hmp, hammer_off_t buf_offset)
{
	hammer_volume_t volume;
	hammer_buffer_t buffer;
	int vol_no;
	int error;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_ZONE_RAW_BUFFER) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(buf_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	KKASSERT(volume != NULL);
	KKASSERT(buf_offset < volume->maxbuf_off);

	buffer = RB_LOOKUP(hammer_buf_rb_tree, &volume->rb_bufs_root,
			   buf_offset);
	if (buffer)
		buffer->zoneX_offset = 0;
	hammer_rel_volume(volume, 0);
}
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 */
void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int *errorp,
	     struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}
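
/*
 * Usage sketch (illustrative): callers typically cache the buffer across
 * consecutive hammer_bread() calls and release it when done:
 *
 *	hammer_buffer_t buffer = NULL;
 *	void *ptr;
 *	int error;
 *
 *	ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
 *	... further hammer_bread() calls may reuse 'buffer' ...
 *	if (buffer)
 *		hammer_rel_buffer(buffer, 0);
 */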
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int *errorp,
	    struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}
/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
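
/*
 * Usage sketch (illustrative): a node reference is acquired and released
 * much like a buffer reference:
 *
 *	int error;
 *	hammer_node_t node;
 *
 *	node = hammer_get_node(hmp, node_offset, 0, &error);
 *	if (node) {
 *		... access node->ondisk ...
 *		hammer_rel_node(node);
 *	}
 */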
hammer_node_t
hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_node_t node;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), M_HAMMER, M_WAITOK|M_ZERO);
		node->node_offset = node_offset;
		node->hmp = hmp;
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, M_HAMMER);
			goto again;
		}
	}
	hammer_ref(&node->lock);
	if (node->ondisk) {
		*errorp = 0;
	} else {
		*errorp = hammer_load_node(node, isnew);
	}
	if (*errorp) {
		hammer_rel_node(node);
		node = NULL;
	}
	return(node);
}
/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
	hammer_ref(&node->lock);
}
/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	int error;

	error = 0;
	++node->loading;
	hammer_lock_ex(&node->lock);
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		} else {
			buffer = hammer_get_buffer(node->hmp,
						   node->node_offset, 0,
						   &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error == 0) {
			node->ondisk = (void *)((char *)buffer->ondisk +
				(node->node_offset & HAMMER_BUFMASK));
			if (isnew == 0 &&
			    hammer_crc_test_btree(node->ondisk) == 0) {
				Debugger("CRC FAILED: B-TREE NODE");
			}
		}
	}
	--node->loading;
	hammer_unlock(&node->lock);
	return (error);
}
/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(struct hammer_mount *hmp, struct hammer_node **cache,
		     int *errorp)
{
	hammer_node_t node;

	node = *cache;
	if (node != NULL) {
		hammer_ref(&node->lock);
		if (node->ondisk) {
			*errorp = 0;
		} else {
			*errorp = hammer_load_node(node, 0);
			if (*errorp) {
				hammer_rel_node(node);
				node = NULL;
			}
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}
/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
	hammer_buffer_t buffer;

	/*
	 * If this isn't the last ref just decrement the ref count and
	 * return.
	 */
	if (node->lock.refs > 1) {
		hammer_unref(&node->lock);
		return;
	}

	/*
	 * If there is no ondisk info or no buffer the node failed to load,
	 * remove the last reference and destroy the node.
	 */
	if (node->ondisk == NULL) {
		hammer_unref(&node->lock);
		hammer_flush_node(node);
		/* node is stale now */
		return;
	}

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		hammer_unref(&node->lock);
		hammer_rel_buffer(buffer, 0);
		return;
	}

	/*
	 * Destroy the node.
	 */
	hammer_unref(&node->lock);
	hammer_flush_node(node);
	/* node is stale */
	hammer_rel_buffer(buffer, 0);
}
/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}
/*
 * Passively cache a referenced hammer_node in *cache.  The caller may
 * release the node on return.
 */
void
hammer_cache_node(hammer_node_t node, struct hammer_node **cache)
{
	hammer_node_t old;

	/*
	 * If the node is being deleted, don't cache it!
	 */
	if (node->flags & HAMMER_NODE_DELETED)
		return;

	/*
	 * Cache the node.  If we previously cached a different node we
	 * have to give HAMMER a chance to destroy it.
	 */
again:
	if (node->cache1 != cache) {
		if (node->cache2 != cache) {
			if ((old = *cache) != NULL) {
				KKASSERT(node->lock.refs != 0);
				hammer_uncache_node(cache);
				goto again;
			}
			if (node->cache2)
				*node->cache2 = NULL;
			node->cache2 = node->cache1;
			node->cache1 = cache;
			*cache = node;
		} else {
			struct hammer_node **tmp;

			tmp = node->cache1;
			node->cache1 = node->cache2;
			node->cache2 = tmp;
		}
	}
}
void
hammer_uncache_node(struct hammer_node **cache)
{
	hammer_node_t node;

	if ((node = *cache) != NULL) {
		*cache = NULL;
		if (node->cache1 == cache) {
			node->cache1 = node->cache2;
			node->cache2 = NULL;
		} else if (node->cache2 == cache) {
			node->cache2 = NULL;
		} else {
			panic("hammer_uncache_node: missing cache linkage");
		}
		if (node->cache1 == NULL && node->cache2 == NULL)
			hammer_flush_node(node);
	}
}
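
/*
 * Usage sketch (illustrative): a passive cache pointer is typically a
 * field embedded in another structure, e.g. something like ip->cache[0]
 * (field name illustrative), maintained with the pair of calls:
 *
 *	hammer_cache_node(node, &ip->cache[0]);	  while holding a node ref
 *	...
 *	hammer_uncache_node(&ip->cache[0]);	  before destroying ip
 */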
/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
	hammer_buffer_t buffer;

	if (node->cache1)
		*node->cache1 = NULL;
	if (node->cache2)
		*node->cache2 = NULL;
	if (node->lock.refs == 0 && node->ondisk == NULL) {
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		--hammer_count_nodes;
		kfree(node, M_HAMMER);
	}
}
/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
		KKASSERT(node->ondisk == NULL);

		if (node->lock.refs == 0) {
			hammer_ref(&node->lock);
			node->flags |= HAMMER_NODE_FLUSH;
			hammer_rel_node(node);
		} else {
			KKASSERT(node->loading != 0);
			KKASSERT(node->buffer != NULL);
			buffer = node->buffer;
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
	}
}
/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans->hmp, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}
/*
 * The returned buffers are already appropriately marked as being modified.
 * If the caller marks them again unnecessary undo records may be generated.
 *
 * In-band data is indicated by data_bufferp == NULL.  Pass a data_len of 0
 * for zero-fill (caller modifies data_len afterwards).
 *
 * The caller is responsible for calling hammer_modify_*() prior to making
 * any additional modifications to either the returned record buffer or the
 * returned data buffer.
 */
hammer_record_ondisk_t
hammer_alloc_record(hammer_transaction_t trans,
		    hammer_off_t *rec_offp, u_int16_t rec_type,
		    struct hammer_buffer **rec_bufferp,
		    int32_t data_len, void **datap,
		    hammer_off_t *data_offp,
		    struct hammer_buffer **data_bufferp, int *errorp)
{
	hammer_record_ondisk_t rec;
	hammer_off_t rec_offset;
	hammer_off_t data_offset;
	int32_t reclen;

	if (datap)
		*datap = NULL;

	/*
	 * Allocate the record
	 */
	rec_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_RECORD_INDEX,
					   HAMMER_RECORD_SIZE, errorp);
	if (*errorp)
		return(NULL);
	if (data_offp)
		*data_offp = 0;

	/*
	 * Allocate data
	 */
	if (data_len) {
		if (data_bufferp == NULL) {
			switch(rec_type) {
			case HAMMER_RECTYPE_DATA:
				reclen = offsetof(struct hammer_data_record,
						  data[0]);
				break;
			case HAMMER_RECTYPE_DIRENTRY:
				reclen = offsetof(struct hammer_entry_record,
						  name[0]);
				break;
			default:
				panic("hammer_alloc_record: illegal "
				      "in-band data");
				/* NOT REACHED */
				reclen = 0;
				break;
			}
			KKASSERT(reclen + data_len <= HAMMER_RECORD_SIZE);
			data_offset = rec_offset + reclen;
		} else if (data_len < HAMMER_BUFSIZE) {
			data_offset = hammer_blockmap_alloc(trans,
						HAMMER_ZONE_SMALL_DATA_INDEX,
						data_len, errorp);
			*data_offp = data_offset;
		} else {
			data_offset = hammer_blockmap_alloc(trans,
						HAMMER_ZONE_LARGE_DATA_INDEX,
						data_len, errorp);
			*data_offp = data_offset;
		}
	} else {
		data_offset = 0;
	}
	if (*errorp) {
		hammer_blockmap_free(trans, rec_offset, HAMMER_RECORD_SIZE);
		return(NULL);
	}

	/*
	 * Basic return values.
	 *
	 * Note that because this is a 'new' buffer, there is no need to
	 * generate UNDO records for it.
	 */
	*rec_offp = rec_offset;
	rec = hammer_bread(trans->hmp, rec_offset, errorp, rec_bufferp);
	hammer_modify_buffer(trans, *rec_bufferp, NULL, 0);
	bzero(rec, sizeof(*rec));
	KKASSERT(*errorp == 0);
	rec->base.data_off = data_offset;
	rec->base.data_len = data_len;
	hammer_modify_buffer_done(*rec_bufferp);

	if (data_bufferp) {
		if (data_len) {
			*datap = hammer_bread(trans->hmp, data_offset, errorp,
					      data_bufferp);
			KKASSERT(*errorp == 0);
		} else if (datap) {
			*datap = NULL;
		}
	} else if (data_len) {
		KKASSERT(data_offset + data_len - rec_offset <=
			 HAMMER_RECORD_SIZE);
		if (datap) {
			*datap = (void *)((char *)rec +
					  (int32_t)(data_offset - rec_offset));
		}
	} else {
		KKASSERT(datap == NULL);
	}
	KKASSERT(*errorp == 0);
	return(rec);
}
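
/*
 * Usage sketch (illustrative): allocating a record with out-of-band data
 * might look like the following; rec_buffer and data_buffer must be
 * released by the caller when it is finished with them:
 *
 *	struct hammer_buffer *rec_buffer = NULL;
 *	struct hammer_buffer *data_buffer = NULL;
 *	hammer_off_t rec_off, data_off;
 *	void *data;
 *	int error;
 *
 *	rec = hammer_alloc_record(trans, &rec_off, rec_type, &rec_buffer,
 *				  data_len, &data, &data_off,
 *				  &data_buffer, &error);
 */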
/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  hammer_off_t *data_offsetp,
		  struct hammer_buffer **data_bufferp, int *errorp)
{
	void *data;

	/*
	 * Allocate data
	 */
	if (data_len) {
		if (data_len < HAMMER_BUFSIZE) {
			*data_offsetp = hammer_blockmap_alloc(trans,
						HAMMER_ZONE_SMALL_DATA_INDEX,
						data_len, errorp);
		} else {
			*data_offsetp = hammer_blockmap_alloc(trans,
						HAMMER_ZONE_LARGE_DATA_INDEX,
						data_len, errorp);
		}
	} else {
		*data_offsetp = 0;
	}
	if (*errorp == 0 && data_bufferp && data_len) {
		data = hammer_bread(trans->hmp, *data_offsetp, errorp,
				    data_bufferp);
		KKASSERT(*errorp == 0);
	} else {
		data = NULL;
	}
	KKASSERT(*errorp == 0);
	return(data);
}
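
/*
 * Usage sketch (illustrative):
 *
 *	struct hammer_buffer *data_buffer = NULL;
 *	hammer_off_t data_offset;
 *	void *data;
 *	int error;
 *
 *	data = hammer_alloc_data(trans, data_len, &data_offset,
 *				 &data_buffer, &error);
 *	... copy into data, with hammer_modify*() bracketing ...
 *	if (data_buffer)
 *		hammer_rel_buffer(data_buffer, 0);
 */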
/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;
	if (waitfor == MNT_WAIT) {
		vmntvnodescan(hmp->mp, VMSC_GETVP,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	} else {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	return(info.error);
}

int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;

	vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
		      hammer_sync_scan1, hammer_sync_scan2, &info);
	if (waitfor == MNT_WAIT)
		hammer_flusher_sync(hmp);
	else
		hammer_flusher_async(hmp);
	return(info.error);
}
static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_inode *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, info->waitfor);
	if (error)
		info->error = error;
	return(0);
}