HAMMER 56A/Many: Performance tuning - MEDIA STRUCTURES CHANGED!
[dragonfly.git] sys/vfs/hammer/hammer_ondisk.c
/*
 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.58 2008/06/17 04:02:38 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures. These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_node_t node, int isnew);

/*
 * Red-Black tree support for various structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}

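/*
 * Compare a lookup key (obj_id, obj_asof) against an in-memory inode.
 * This is the comparator used by the RB_LOOKUP_INFO variant generated
 * by RB_GENERATE_XLOOKUP() below.
 */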
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
        if (vol1->vol_no < vol2->vol_no)
                return(-1);
        if (vol1->vol_no > vol2->vol_no)
                return(1);
        return(0);
}

static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
        if (buf1->zoneX_offset < buf2->zoneX_offset)
                return(-1);
        if (buf1->zoneX_offset > buf2->zoneX_offset)
                return(1);
        return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
        if (node1->node_offset < node2->node_offset)
                return(-1);
        if (node1->node_offset > node2->node_offset)
                return(1);
        return(0);
}

/*
 * Note: The lookup function for hammer_ino_rb_tree winds up being named
 * hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).  The other lookup
 * functions are normal, e.g. hammer_buf_rb_tree_RB_LOOKUP(root, zone2_offset).
 */
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
             hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
             hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
             hammer_nod_rb_compare, hammer_off_t, node_offset);

/************************************************************************
 *                              VOLUMES                                 *
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname)
{
        struct mount *mp;
        hammer_volume_t volume;
        struct hammer_volume_ondisk *ondisk;
        struct nlookupdata nd;
        struct buf *bp = NULL;
        int error;
        int ronly;
        int setmp = 0;

        mp = hmp->mp;
        ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Allocate a volume structure
         */
        ++hammer_count_volumes;
        volume = kmalloc(sizeof(*volume), M_HAMMER, M_WAITOK|M_ZERO);
        volume->vol_name = kstrdup(volname, M_HAMMER);
        hammer_io_init(&volume->io, hmp, HAMMER_STRUCTURE_VOLUME);
        volume->io.offset = 0LL;

        /*
         * Get the device vnode
         */
        error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
        if (error == 0)
                error = nlookup(&nd);
        if (error == 0)
                error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
        nlookup_done(&nd);
        if (error == 0) {
                if (vn_isdisk(volume->devvp, &error)) {
                        error = vfs_mountedon(volume->devvp);
                }
        }
        if (error == 0 &&
            count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
                error = EBUSY;
        }
        if (error == 0) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                if (error == 0) {
                        error = VOP_OPEN(volume->devvp,
                                         (ronly ? FREAD : FREAD|FWRITE),
                                         FSCRED, NULL);
                }
                vn_unlock(volume->devvp);
        }
        if (error) {
                hammer_free_volume(volume);
                return(error);
        }
        volume->devvp->v_rdev->si_mountpoint = mp;
        setmp = 1;

        /*
         * Extract the volume number from the volume header and do various
         * sanity checks.
         */
        error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
        if (error)
                goto late_failure;
        ondisk = (void *)bp->b_data;
        if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
                kprintf("hammer_mount: volume %s has an invalid header\n",
                        volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }
        volume->vol_no = ondisk->vol_no;
        volume->buffer_base = ondisk->vol_buf_beg;
        volume->vol_flags = ondisk->vol_flags;
        volume->nblocks = ondisk->vol_nblocks;
        volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
                                ondisk->vol_buf_end - ondisk->vol_buf_beg);
        volume->maxraw_off = ondisk->vol_buf_end;

        if (RB_EMPTY(&hmp->rb_vols_root)) {
                hmp->fsid = ondisk->vol_fsid;
        } else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
                kprintf("hammer_mount: volume %s's fsid does not match "
                        "other volumes\n", volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }

        /*
         * Insert the volume structure into the red-black tree.
         */
        if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
                kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
                        volume->vol_name, volume->vol_no);
                error = EEXIST;
        }

        /*
         * Set the root volume.  HAMMER special-cases the rootvol structure.
         * We do not hold a ref because this would prevent related I/O
         * from being flushed.
         */
        if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
                hmp->rootvol = volume;
                if (bp) {
                        brelse(bp);
                        bp = NULL;
                }
                hmp->fsid_udev = dev2udev(vn_todev(volume->devvp));
                hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
                hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
        }
late_failure:
        if (bp)
                brelse(bp);
        if (error) {
                /*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
                if (setmp)
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
                hammer_free_volume(volume);
        }
        return (error);
}

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
        if (volume->devvp) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                if (volume->io.hmp->ronly) {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                } else {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD);
                }
                vn_unlock(volume->devvp);
        }
        return(0);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue scan
 * so returns -1 on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
        struct hammer_mount *hmp = volume->io.hmp;
        int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Clean up the root volume pointer, which is held unlocked in hmp.
         */
        if (hmp->rootvol == volume)
                hmp->rootvol = NULL;

        /*
         * Release our buffer and flush anything left in the buffer cache.
         */
        volume->io.waitdep = 1;
        hammer_io_release(&volume->io, 1);
        hammer_io_clear_modlist(&volume->io);

        /*
         * There should be no references on the volume, no clusters, and
         * no super-clusters.
         */
        KKASSERT(volume->io.lock.refs == 0);

        volume->ondisk = NULL;
        if (volume->devvp) {
                if (volume->devvp->v_rdev &&
                    volume->devvp->v_rdev->si_mountpoint == hmp->mp
                ) {
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                }
                if (ronly) {
                        vinvalbuf(volume->devvp, 0, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD);
                } else {
                        vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                }
        }

        /*
         * Destroy the structure
         */
        RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
        hammer_free_volume(volume);
        return(0);
}

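/*
 * Free an in-memory volume structure, releasing the device vnode
 * reference and the volume name string.
 */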
static
void
hammer_free_volume(hammer_volume_t volume)
{
        if (volume->vol_name) {
                kfree(volume->vol_name, M_HAMMER);
                volume->vol_name = NULL;
        }
        if (volume->devvp) {
                vrele(volume->devvp);
                volume->devvp = NULL;
        }
        --hammer_count_volumes;
        kfree(volume, M_HAMMER);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
        struct hammer_volume *volume;

        /*
         * Locate the volume structure
         */
        volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
        if (volume == NULL) {
                *errorp = ENOENT;
                return(NULL);
        }
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(volume);
}

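/*
 * Add a reference to a volume, loading its on-disk header if it is not
 * already resident.  Returns 0 on success; on failure the reference is
 * dropped before returning.
 */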
int
hammer_ref_volume(hammer_volume_t volume)
{
        int error;

        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                error = hammer_load_volume(volume);
                if (error)
                        hammer_rel_volume(volume, 1);
        } else {
                error = 0;
        }
        return (error);
}

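/*
 * Return a referenced root volume.  The root volume is cached in
 * hmp->rootvol at mount time and must exist.
 */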
hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
        hammer_volume_t volume;

        volume = hmp->rootvol;
        KKASSERT(volume != NULL);
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple get's.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
        int error;

        ++volume->io.loading;
        hammer_lock_ex(&volume->io.lock);

        if (volume->ondisk == NULL) {
                error = hammer_io_read(volume->devvp, &volume->io,
                                       volume->maxraw_off);
                if (error == 0)
                        volume->ondisk = (void *)volume->io.bp->b_data;
        } else {
                error = 0;
        }
        --volume->io.loading;
        hammer_unlock(&volume->io.lock);
        return(error);
}

/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock to be held.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
        crit_enter();
        if (volume->io.lock.refs == 1) {
                ++volume->io.loading;
                hammer_lock_ex(&volume->io.lock);
                if (volume->io.lock.refs == 1) {
                        volume->ondisk = NULL;
                        hammer_io_release(&volume->io, flush);
                }
                --volume->io.loading;
                hammer_unlock(&volume->io.lock);
        }
        hammer_unref(&volume->io.lock);
        crit_exit();
}

/************************************************************************
 *                              BUFFERS                                 *
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are translated
 * to zone-2 buffer offsets.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
                  int isnew, int *errorp)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        hammer_off_t zone2_offset;
        hammer_io_type_t iotype;
        int vol_no;
        int zone;

again:
        /*
         * Shortcut if the buffer is already cached
         */
        buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                           buf_offset & ~HAMMER_BUFMASK64);
        if (buffer) {
                if (buffer->io.lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&buffer->io.lock);

                /*
                 * Once refed the ondisk field will not be cleared by
                 * any other action.
                 */
                if (buffer->ondisk && buffer->io.loading == 0) {
                        *errorp = 0;
                        return(buffer);
                }

                /*
                 * The buffer is no longer loose if it has a ref, and
                 * cannot become loose once it gains a ref.  Loose
                 * buffers will never be in a modified state.  This should
                 * only occur on the 0->1 transition of refs.
                 *
                 * lose_list can be modified via a biodone() interrupt.
                 */
                if (buffer->io.mod_list == &hmp->lose_list) {
                        crit_enter();   /* biodone race against list */
                        TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
                                     mod_entry);
                        crit_exit();
                        buffer->io.mod_list = NULL;
                        KKASSERT(buffer->io.modified == 0);
                }
                goto found;
        }

        /*
         * What is the buffer class?
         */
        zone = HAMMER_ZONE_DECODE(buf_offset);

        switch(zone) {
        case HAMMER_ZONE_LARGE_DATA_INDEX:
        case HAMMER_ZONE_SMALL_DATA_INDEX:
        case HAMMER_ZONE_META_INDEX:    /* meta-data isn't a meta-buffer */
                iotype = HAMMER_STRUCTURE_DATA_BUFFER;
                break;
        case HAMMER_ZONE_UNDO_INDEX:
                iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
                break;
        default:
                iotype = HAMMER_STRUCTURE_META_BUFFER;
                break;
        }

        /*
         * Handle blockmap offset translations
         */
        if (zone >= HAMMER_ZONE_BTREE_INDEX) {
                zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
        } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
                zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
        } else {
                KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
                zone2_offset = buf_offset;
                *errorp = 0;
        }
        if (*errorp)
                return(NULL);

        /*
         * Calculate the base zone2-offset and acquire the volume
         *
         * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
         * specifications.
         */
        zone2_offset &= ~HAMMER_BUFMASK64;
        KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_RAW_BUFFER);
        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, errorp);
        if (volume == NULL)
                return(NULL);

        KKASSERT(zone2_offset < volume->maxbuf_off);

        /*
         * Allocate a new buffer structure.  We will check for races later.
         */
        ++hammer_count_buffers;
        buffer = kmalloc(sizeof(*buffer), M_HAMMER, M_WAITOK|M_ZERO);
        buffer->zone2_offset = zone2_offset;
        buffer->zoneX_offset = buf_offset;
        buffer->volume = volume;

        hammer_io_init(&buffer->io, hmp, iotype);
        buffer->io.offset = volume->ondisk->vol_buf_beg +
                            (zone2_offset & HAMMER_OFF_SHORT_MASK);
        TAILQ_INIT(&buffer->clist);
        hammer_ref(&buffer->io.lock);

        /*
         * Insert the buffer into the RB tree and handle late collisions.
         */
        if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
                hammer_unref(&buffer->io.lock);
                --hammer_count_buffers;
                kfree(buffer, M_HAMMER);
                goto again;
        }
        ++hammer_count_refedbufs;
found:

        /*
         * Deal with on-disk info and loading races.
         */
        if (buffer->ondisk == NULL || buffer->io.loading) {
                *errorp = hammer_load_buffer(buffer, isnew);
                if (*errorp) {
                        hammer_rel_buffer(buffer, 1);
                        buffer = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(buffer);
}

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 */
void
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
                   hammer_off_t zone2_offset, int bytes)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        int vol_no;
        int error;

        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, &error);
        KKASSERT(error == 0);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer) {
                        KKASSERT(buffer->zone2_offset == zone2_offset);
                        hammer_io_clear_modify(&buffer->io);
                        buffer->io.reclaim = 1;
                        KKASSERT(buffer->volume == volume);
                        if (buffer->io.lock.refs == 0)
                                hammer_unload_buffer(buffer, NULL);
                } else {
                        hammer_io_inval(volume, zone2_offset);
                }
                base_offset += HAMMER_BUFSIZE;
                zone2_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
        hammer_rel_volume(volume, 0);
}

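/*
 * Load the media content backing a buffer structure.  If isnew is set the
 * underlying buffer cache buffer is created without reading from the media.
 */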
static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
        hammer_volume_t volume;
        int error;

        /*
         * Load the buffer's on-disk info
         */
        volume = buffer->volume;
        ++buffer->io.loading;
        hammer_lock_ex(&buffer->io.lock);

        if (hammer_debug_io & 0x0001) {
                kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
                        buffer->zoneX_offset, buffer->zone2_offset, isnew,
                        buffer->ondisk);
        }

        if (buffer->ondisk == NULL) {
                if (isnew) {
                        error = hammer_io_new(volume->devvp, &buffer->io);
                } else {
                        error = hammer_io_read(volume->devvp, &buffer->io,
                                               volume->maxraw_off);
                }
                if (error == 0)
                        buffer->ondisk = (void *)buffer->io.bp->b_data;
        } else if (isnew) {
                error = hammer_io_new(volume->devvp, &buffer->io);
        } else {
                error = 0;
        }
        --buffer->io.loading;
        hammer_unlock(&buffer->io.lock);
        return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
        ++hammer_count_refedbufs;
        hammer_ref(&buffer->io.lock);
        hammer_flush_buffer_nodes(buffer);
        KKASSERT(buffer->io.lock.refs == 1);
        hammer_rel_buffer(buffer, 2);
        return(0);
}

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
        int error;

        if (buffer->io.lock.refs == 0)
                ++hammer_count_refedbufs;
        hammer_ref(&buffer->io.lock);

        /*
         * At this point a biodone() will not touch the buffer other than
         * incidental bits.  However, lose_list can be modified via
         * a biodone() interrupt.
         *
         * No longer loose
         */
        if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
                crit_enter();
                TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
                buffer->io.mod_list = NULL;
                crit_exit();
        }

        if (buffer->ondisk == NULL || buffer->io.loading) {
                error = hammer_load_buffer(buffer, 0);
                if (error) {
                        hammer_rel_buffer(buffer, 1);
                        /*
                         * NOTE: buffer pointer can become stale after
                         * the above release.
                         */
                }
        } else {
                error = 0;
        }
        return(error);
}

/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
        hammer_volume_t volume;
        int freeme = 0;

        crit_enter();
        if (buffer->io.lock.refs == 1) {
                ++buffer->io.loading;   /* force interlock check */
                hammer_lock_ex(&buffer->io.lock);
                if (buffer->io.lock.refs == 1) {
                        hammer_io_release(&buffer->io, flush);

                        if (buffer->io.bp == NULL &&
                            buffer->io.lock.refs == 1) {
                                /*
                                 * Final cleanup
                                 *
                                 * NOTE: It is impossible for any associated
                                 * B-Tree nodes to have refs if the buffer
                                 * has no additional refs.
                                 */
                                RB_REMOVE(hammer_buf_rb_tree,
                                          &buffer->io.hmp->rb_bufs_root,
                                          buffer);
                                volume = buffer->volume;
                                buffer->volume = NULL; /* sanity */
                                hammer_rel_volume(volume, 0);
                                hammer_io_clear_modlist(&buffer->io);
                                hammer_flush_buffer_nodes(buffer);
                                KKASSERT(TAILQ_EMPTY(&buffer->clist));
                                if (buffer->io.lock.refs == 1)
                                        --hammer_count_refedbufs;
                                freeme = 1;
                        }
                }
                --buffer->io.loading;
                hammer_unlock(&buffer->io.lock);
        }
        hammer_unref(&buffer->io.lock);
        crit_exit();
        if (freeme) {
                --hammer_count_buffers;
                kfree(buffer, M_HAMMER);
        }
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 */
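/*
 * Typical call pattern (illustrative sketch only, not from the original
 * source; error handling omitted).  The same *bufferp may be reused across
 * consecutive calls and must be released by the caller when finished:
 *
 *      struct hammer_buffer *buffer = NULL;
 *      int error;
 *      void *ptr;
 *
 *      ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
 *      ...
 *      if (buffer)
 *              hammer_rel_buffer(buffer, 0);
 */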
void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int *errorp,
             struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;
        KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, 0, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int *errorp,
            struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, 1, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

/************************************************************************
 *                              NODES                                   *
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset,
                int isnew, int *errorp)
{
        hammer_node_t node;

        KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

        /*
         * Locate the structure, allocating one if necessary.
         */
again:
        node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
        if (node == NULL) {
                ++hammer_count_nodes;
                node = kmalloc(sizeof(*node), M_HAMMER, M_WAITOK|M_ZERO);
                node->node_offset = node_offset;
                node->hmp = hmp;
                if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
                        --hammer_count_nodes;
                        kfree(node, M_HAMMER);
                        goto again;
                }
        }
        hammer_ref(&node->lock);
        if (node->ondisk)
                *errorp = 0;
        else
                *errorp = hammer_load_node(node, isnew);
        if (*errorp) {
                hammer_rel_node(node);
                node = NULL;
        }
        return(node);
}

/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
        KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
        hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_node_t node, int isnew)
{
        hammer_buffer_t buffer;
        hammer_off_t buf_offset;
        int error;

        error = 0;
        ++node->loading;
        hammer_lock_ex(&node->lock);
        if (node->ondisk == NULL) {
                /*
                 * This is a little confusing but the gist is that
                 * node->buffer determines whether the node is on
                 * the buffer's clist and node->ondisk determines
                 * whether the buffer is referenced.
                 *
                 * We could be racing a buffer release, in which case
                 * node->buffer may become NULL while we are blocked
                 * referencing the buffer.
                 */
                if ((buffer = node->buffer) != NULL) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && node->buffer == NULL) {
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                } else {
                        buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
                        buffer = hammer_get_buffer(node->hmp, buf_offset,
                                                   0, &error);
                        if (buffer) {
                                KKASSERT(error == 0);
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                }
                if (error == 0) {
                        node->ondisk = (void *)((char *)buffer->ondisk +
                                       (node->node_offset & HAMMER_BUFMASK));
                        if (isnew == 0 &&
                            hammer_crc_test_btree(node->ondisk) == 0) {
                                Debugger("CRC FAILED: B-TREE NODE");
                        }
                }
        }
        --node->loading;
        hammer_unlock(&node->lock);
        return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(struct hammer_mount *hmp, struct hammer_node **cache,
                     int *errorp)
{
        hammer_node_t node;

        node = *cache;
        if (node != NULL) {
                hammer_ref(&node->lock);
                if (node->ondisk)
                        *errorp = 0;
                else
                        *errorp = hammer_load_node(node, 0);
                if (*errorp) {
                        hammer_rel_node(node);
                        node = NULL;
                }
        } else {
                *errorp = ENOENT;
        }
        return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
        hammer_buffer_t buffer;

        /*
         * If this isn't the last ref just decrement the ref count and
         * return.
         */
        if (node->lock.refs > 1) {
                hammer_unref(&node->lock);
                return;
        }

        /*
         * If there is no ondisk info or no buffer the node failed to load,
         * remove the last reference and destroy the node.
         */
        if (node->ondisk == NULL) {
                hammer_unref(&node->lock);
                hammer_flush_node(node);
                /* node is stale now */
                return;
        }

        /*
         * Do final cleanups and then either destroy the node or leave it
         * passively cached.  The buffer reference is removed regardless.
         */
        buffer = node->buffer;
        node->ondisk = NULL;

        if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
                hammer_unref(&node->lock);
                hammer_rel_buffer(buffer, 0);
                return;
        }

        /*
         * Destroy the node.
         */
        hammer_unref(&node->lock);
        hammer_flush_node(node);
        /* node is stale */
        hammer_rel_buffer(buffer, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
        KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
        node->flags |= HAMMER_NODE_DELETED;
        hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node in *cache.  The caller may
 * release the node on return.
 */
void
hammer_cache_node(hammer_node_t node, struct hammer_node **cache)
{
        hammer_node_t old;

        /*
         * If the node is being deleted, don't cache it!
         */
        if (node->flags & HAMMER_NODE_DELETED)
                return;

        /*
         * Cache the node.  If we previously cached a different node we
         * have to give HAMMER a chance to destroy it.
         */
again:
        if (node->cache1 != cache) {
                if (node->cache2 != cache) {
                        if ((old = *cache) != NULL) {
                                KKASSERT(node->lock.refs != 0);
                                hammer_uncache_node(cache);
                                goto again;
                        }
                        if (node->cache2)
                                *node->cache2 = NULL;
                        node->cache2 = node->cache1;
                        node->cache1 = cache;
                        *cache = node;
                } else {
                        struct hammer_node **tmp;
                        tmp = node->cache1;
                        node->cache1 = node->cache2;
                        node->cache2 = tmp;
                }
        }
}

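/*
 * Clear the passive cache reference in *cache.  The node's matching
 * back-pointer is repaired and, if the node is no longer cached anywhere,
 * it is handed to hammer_flush_node() which destroys it when it also has
 * no other references or backing store.
 */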
void
hammer_uncache_node(struct hammer_node **cache)
{
        hammer_node_t node;

        if ((node = *cache) != NULL) {
                *cache = NULL;
                if (node->cache1 == cache) {
                        node->cache1 = node->cache2;
                        node->cache2 = NULL;
                } else if (node->cache2 == cache) {
                        node->cache2 = NULL;
                } else {
                        panic("hammer_uncache_node: missing cache linkage");
                }
                if (node->cache1 == NULL && node->cache2 == NULL) {
                        hammer_flush_node(node);
                }
        }
}

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
        hammer_buffer_t buffer;

        if (node->cache1)
                *node->cache1 = NULL;
        if (node->cache2)
                *node->cache2 = NULL;
        if (node->lock.refs == 0 && node->ondisk == NULL) {
                RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
                if ((buffer = node->buffer) != NULL) {
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
                --hammer_count_nodes;
                kfree(node, M_HAMMER);
        }
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
        hammer_node_t node;

        while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
                KKASSERT(node->ondisk == NULL);

                if (node->lock.refs == 0) {
                        hammer_ref(&node->lock);
                        node->flags |= HAMMER_NODE_FLUSH;
                        hammer_rel_node(node);
                } else {
                        KKASSERT(node->loading != 0);
                        KKASSERT(node->buffer != NULL);
                        buffer = node->buffer;
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
        }
}

/************************************************************************
 *                              ALLOCATORS                              *
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
{
        hammer_buffer_t buffer = NULL;
        hammer_node_t node = NULL;
        hammer_off_t node_offset;

        node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
                                            sizeof(struct hammer_node_ondisk),
                                            errorp);
        if (*errorp == 0) {
                node = hammer_get_node(trans->hmp, node_offset, 1, errorp);
                hammer_modify_node_noundo(trans, node);
                bzero(node->ondisk, sizeof(*node->ondisk));
                hammer_modify_node_done(node);
        }
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return(node);
}

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
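/*
 * Typical call pattern (illustrative sketch only, not from the original
 * source; error handling omitted):
 *
 *      struct hammer_buffer *data_buffer = NULL;
 *      hammer_off_t data_offset;
 *      void *data;
 *      int error;
 *
 *      data = hammer_alloc_data(trans, data_len, HAMMER_RECTYPE_DATA,
 *                               &data_offset, &data_buffer, &error);
 *      ...
 *      if (data_buffer)
 *              hammer_rel_buffer(data_buffer, 0);
 */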
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
                  u_int16_t rec_type, hammer_off_t *data_offsetp,
                  struct hammer_buffer **data_bufferp, int *errorp)
{
        void *data;
        int zone;

        /*
         * Allocate data
         */
        if (data_len) {
                switch(rec_type) {
                case HAMMER_RECTYPE_INODE:
                case HAMMER_RECTYPE_PSEUDO_INODE:
                case HAMMER_RECTYPE_DIRENTRY:
                case HAMMER_RECTYPE_EXT:
                case HAMMER_RECTYPE_FIX:
                        zone = HAMMER_ZONE_META_INDEX;
                        break;
                case HAMMER_RECTYPE_DATA:
                case HAMMER_RECTYPE_DB:
                        if (data_len <= HAMMER_BUFSIZE / 2)
                                zone = HAMMER_ZONE_SMALL_DATA_INDEX;
                        else
                                zone = HAMMER_ZONE_LARGE_DATA_INDEX;
                        break;
                default:
                        panic("hammer_alloc_data: rec_type %04x unknown",
                              rec_type);
                        zone = 0;       /* NOT REACHED */
                        break;
                }
                *data_offsetp = hammer_blockmap_alloc(trans, zone,
                                                      data_len, errorp);
        } else {
                *data_offsetp = 0;
        }
        if (*errorp == 0 && data_bufferp) {
                if (data_len) {
                        data = hammer_bread(trans->hmp, *data_offsetp, errorp,
                                            data_bufferp);
                        KKASSERT(*errorp == 0);
                } else {
                        data = NULL;
                }
        } else {
                data = NULL;
        }
        KKASSERT(*errorp == 0);
        return(data);
}

/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

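/*
 * fsync all dirty HAMMER inodes on the mount via a single vnode scan.
 * With MNT_WAIT the scan may block on vnode locks; otherwise VMSC_NOWAIT
 * is added so the pass does not block.
 */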
int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = waitfor;
        if (waitfor == MNT_WAIT) {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        } else {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        }
        return(info.error);
}

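/*
 * Sync the entire mount: fsync dirty vnodes and then run the flusher,
 * synchronously for MNT_WAIT and asynchronously otherwise.
 */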
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = waitfor;

        vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
                      hammer_sync_scan1, hammer_sync_scan2, &info);
        if (waitfor == MNT_WAIT)
                hammer_flusher_sync(hmp);
        else
                hammer_flusher_async(hmp);

        return(info.error);
}

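/*
 * Fast pre-filter callback for vmntvnodescan().  Returning -1 causes the
 * vnode to be skipped; inodes with no modified state and no dirty buffers
 * do not need to be flushed.
 */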
static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_inode *ip;

        ip = VTOI(vp);
        if (vp->v_type == VNON || ip == NULL ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(-1);
        }
        return(0);
}

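/*
 * Per-vnode callback for vmntvnodescan(): performs the actual VOP_FSYNC()
 * and records any error in the shared hammer_sync_info.
 */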
static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_sync_info *info = data;
        struct hammer_inode *ip;
        int error;

        ip = VTOI(vp);
        if (vp->v_type == VNON || vp->v_type == VBAD ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(0);
        }
        error = VOP_FSYNC(vp, info->waitfor);
        if (error)
                info->error = error;
        return(0);
}