/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.65 2008/07/05 18:59:27 dillon Exp $
 */
/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_node_t node, int isnew);
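
/*
 * Red-black tree comparison functions, following the usual -1/0/1
 * tree-compare convention: volumes sort by volume number, buffers by
 * zone-X offset, and B-Tree nodes by node offset.
 */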
static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
        if (vol1->vol_no < vol2->vol_no)
                return(-1);
        if (vol1->vol_no > vol2->vol_no)
                return(1);
        return(0);
}

static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
        if (buf1->zoneX_offset < buf2->zoneX_offset)
                return(-1);
        if (buf1->zoneX_offset > buf2->zoneX_offset)
                return(1);
        return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
        if (node1->node_offset < node2->node_offset)
                return(-1);
        if (node1->node_offset > node2->node_offset)
                return(1);
        return(0);
}

RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
             hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
             hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
             hammer_nod_rb_compare, hammer_off_t, node_offset);
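
/*
 * RB_GENERATE2 emits typed lookup functions keyed directly on the listed
 * field.  An illustrative sketch of the generated lookups (these exact
 * forms appear at the call sites later in this file):
 *
 *      volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
 *      buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
 *      node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
 */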

/************************************************************************
 *                              VOLUMES                                 *
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() during mount are single-threaded.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname)
{
        struct mount *mp;
        hammer_volume_t volume;
        struct hammer_volume_ondisk *ondisk;
        struct nlookupdata nd;
        struct buf *bp = NULL;
        int error;
        int ronly;
        int setmp = 0;

        mp = hmp->mp;
        ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

        /*
         * Allocate a volume structure
         */
        ++hammer_count_volumes;
        volume = kmalloc(sizeof(*volume), M_HAMMER, M_WAITOK|M_ZERO);
        volume->vol_name = kstrdup(volname, M_HAMMER);
        hammer_io_init(&volume->io, hmp, HAMMER_STRUCTURE_VOLUME);
        volume->io.offset = 0LL;
        volume->io.bytes = HAMMER_BUFSIZE;

        /*
         * Get the device vnode
         */
        error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
        if (error == 0)
                error = nlookup(&nd);
        if (error == 0)
                error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
        nlookup_done(&nd);
        if (error == 0) {
                if (vn_isdisk(volume->devvp, &error)) {
                        error = vfs_mountedon(volume->devvp);
                }
        }
        if (error == 0 &&
            count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
                error = EBUSY;
        }
        if (error == 0) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                if (error == 0) {
                        error = VOP_OPEN(volume->devvp,
                                         (ronly ? FREAD : FREAD|FWRITE),
                                         FSCRED, NULL);
                }
                vn_unlock(volume->devvp);
        }
        if (error) {
                hammer_free_volume(volume);
                return(error);
        }
        volume->devvp->v_rdev->si_mountpoint = mp;
        setmp = 1;

        /*
         * Extract the volume number from the volume header and do various
         * sanity checks.
         */
        error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
        if (error)
                goto late_failure;
        ondisk = (void *)bp->b_data;
        if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
                kprintf("hammer_mount: volume %s has an invalid header\n",
                        volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }
        volume->vol_no = ondisk->vol_no;
        volume->buffer_base = ondisk->vol_buf_beg;
        volume->vol_flags = ondisk->vol_flags;
        volume->nblocks = ondisk->vol_nblocks;
        volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
                                ondisk->vol_buf_end - ondisk->vol_buf_beg);
        volume->maxraw_off = ondisk->vol_buf_end;

        if (RB_EMPTY(&hmp->rb_vols_root)) {
                hmp->fsid = ondisk->vol_fsid;
        } else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
                kprintf("hammer_mount: volume %s's fsid does not match "
                        "other volumes\n", volume->vol_name);
                error = EFTYPE;
                goto late_failure;
        }

        /*
         * Insert the volume structure into the red-black tree.
         */
        if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
                kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
                        volume->vol_name, volume->vol_no);
                error = EEXIST;
        }

        /*
         * Set the root volume.  HAMMER special-cases the rootvol structure.
         * We do not hold a ref because this would prevent related I/O
         * from being flushed.
         */
        if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
                hmp->rootvol = volume;
                if (bp) {
                        brelse(bp);
                        bp = NULL;
                }
                hmp->fsid_udev = dev2udev(vn_todev(volume->devvp));
                hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
                hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
                        (HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
        }
late_failure:
        if (bp)
                brelse(bp);
        if (error) {
                /*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
                if (setmp)
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
                hammer_free_volume(volume);
        }
        return (error);
}
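
/*
 * Illustrative caller sketch (simplified; the real loop lives in the
 * mount code in hammer_vfsops.c and the variable names here are
 * placeholders):
 *
 *      for (i = 0; i < nvolumes && error == 0; ++i)
 *              error = hammer_install_volume(hmp, volnames[i]);
 */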

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
        if (volume->devvp) {
                vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
                if (volume->io.hmp->ronly) {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                } else {
                        /* do not call vinvalbuf */
                        VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
                        VOP_CLOSE(volume->devvp, FREAD);
                }
                vn_unlock(volume->devvp);
        }
        return(0);
}

/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue the
 * scan; returns -1 on failure to abort the scan.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
        struct hammer_mount *hmp = volume->io.hmp;
        int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
        struct buf *bp;

        /*
         * Clean up the root volume pointer, which is held unlocked in hmp.
         */
        if (hmp->rootvol == volume)
                hmp->rootvol = NULL;

        /*
         * Release our buffer and flush anything left in the buffer cache.
         */
        volume->io.waitdep = 1;
        bp = hammer_io_release(&volume->io, 1);
        hammer_io_clear_modlist(&volume->io);

        /*
         * There should be no references on the volume, no clusters, and
         * no super-clusters.
         */
        KKASSERT(volume->io.lock.refs == 0);
        if (bp)
                brelse(bp);

        volume->ondisk = NULL;
        if (volume->devvp) {
                if (volume->devvp->v_rdev &&
                    volume->devvp->v_rdev->si_mountpoint == hmp->mp
                ) {
                        volume->devvp->v_rdev->si_mountpoint = NULL;
                }
                if (ronly) {
                        vinvalbuf(volume->devvp, 0, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD);
                } else {
                        vinvalbuf(volume->devvp, V_SAVE, 0, 0);
                        VOP_CLOSE(volume->devvp, FREAD|FWRITE);
                }
        }

        /*
         * Destroy the structure
         */
        RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
        hammer_free_volume(volume);
        return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
        if (volume->vol_name) {
                kfree(volume->vol_name, M_HAMMER);
                volume->vol_name = NULL;
        }
        if (volume->devvp) {
                vrele(volume->devvp);
                volume->devvp = NULL;
        }
        --hammer_count_volumes;
        kfree(volume, M_HAMMER);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
        struct hammer_volume *volume;

        /*
         * Locate the volume structure
         */
        volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
        if (volume == NULL) {
                *errorp = ENOENT;
                return(NULL);
        }
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(volume);
}
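
/*
 * Reference a volume, loading its on-disk info if necessary.
 */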
int
hammer_ref_volume(hammer_volume_t volume)
{
        int error;

        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                error = hammer_load_volume(volume);
                if (error)
                        hammer_rel_volume(volume, 1);
        } else {
                error = 0;
        }
        return (error);
}
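
/*
 * Return a referenced root volume.  The root volume is cached in
 * hmp->rootvol and must exist.
 */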
hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
        hammer_volume_t volume;

        volume = hmp->rootvol;
        KKASSERT(volume != NULL);
        hammer_ref(&volume->io.lock);

        /*
         * Deal with on-disk info
         */
        if (volume->ondisk == NULL || volume->io.loading) {
                *errorp = hammer_load_volume(volume);
                if (*errorp) {
                        hammer_rel_volume(volume, 1);
                        volume = NULL;
                }
        } else {
                *errorp = 0;
        }
        return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple get's.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
        int error;

        ++volume->io.loading;
        hammer_lock_ex(&volume->io.lock);

        if (volume->ondisk == NULL) {
                error = hammer_io_read(volume->devvp, &volume->io,
                                       volume->maxraw_off);
                if (error == 0)
                        volume->ondisk = (void *)volume->io.bp->b_data;
        } else {
                error = 0;
        }
        --volume->io.loading;
        hammer_unlock(&volume->io.lock);
        return(error);
}

/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock to be held.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
        struct buf *bp = NULL;

        crit_enter();
        if (volume->io.lock.refs == 1) {
                ++volume->io.loading;
                hammer_lock_ex(&volume->io.lock);
                if (volume->io.lock.refs == 1) {
                        volume->ondisk = NULL;
                        bp = hammer_io_release(&volume->io, flush);
                }
                --volume->io.loading;
                hammer_unlock(&volume->io.lock);
        }
        hammer_unref(&volume->io.lock);
        if (bp)
                brelse(bp);
        crit_exit();
}

/************************************************************************
 *                              BUFFERS                                 *
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are translated
 * to zone-2 buffer offsets.
 */
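
/*
 * A sketch of the offset encoding (see hammer_off_t in hammer_disk.h):
 * a hammer_off_t carries its zone in the top 4 bits, and zone-2 (raw
 * buffer) offsets additionally carry the volume number in the next
 * 8 bits.  The decode macros used below extract those fields:
 *
 *      zone   = HAMMER_ZONE_DECODE(buf_offset);
 *      vol_no = HAMMER_VOL_DECODE(zone2_offset);
 */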
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
                  int bytes, int isnew, int *errorp)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        hammer_off_t zone2_offset;
        hammer_io_type_t iotype;
        int vol_no;
        int zone;

        buf_offset &= ~HAMMER_BUFMASK64;
again:
        /*
         * Shortcut if the buffer is already cached
         */
        buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
        if (buffer) {
                if (buffer->io.lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&buffer->io.lock);

                /*
                 * Once refed the ondisk field will not be cleared by
                 * any other action.
                 */
                if (buffer->ondisk && buffer->io.loading == 0) {
                        *errorp = 0;
                        return(buffer);
                }

                /*
                 * The buffer is no longer loose if it has a ref, and
                 * cannot become loose once it gains a ref.  Loose
                 * buffers will never be in a modified state.  This should
                 * only occur on the 0->1 transition of refs.
                 *
                 * lose_list can be modified via a biodone() interrupt.
                 */
                if (buffer->io.mod_list == &hmp->lose_list) {
                        crit_enter();   /* biodone race against list */
                        TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
                                     mod_entry);
                        crit_exit();
                        buffer->io.mod_list = NULL;
                        KKASSERT(buffer->io.modified == 0);
                }
                goto found;
        }

        /*
         * What is the buffer class?
         */
        zone = HAMMER_ZONE_DECODE(buf_offset);

        switch(zone) {
        case HAMMER_ZONE_LARGE_DATA_INDEX:
        case HAMMER_ZONE_SMALL_DATA_INDEX:
                iotype = HAMMER_STRUCTURE_DATA_BUFFER;
                break;
        case HAMMER_ZONE_UNDO_INDEX:
                iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
                break;
        case HAMMER_ZONE_META_INDEX:
        default:
                /*
                 * NOTE: inode data and directory entries are placed in this
                 * zone.  inode atime/mtime is updated in-place and thus
                 * buffers containing inodes must be synchronized as
                 * meta-buffers, same as buffers containing B-Tree info.
                 */
                iotype = HAMMER_STRUCTURE_META_BUFFER;
                break;
        }

        /*
         * Handle blockmap offset translations
         */
        if (zone >= HAMMER_ZONE_BTREE_INDEX) {
                zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
        } else if (zone == HAMMER_ZONE_UNDO_INDEX) {
                zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
        } else {
                KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
                zone2_offset = buf_offset;
                *errorp = 0;
        }
        if (*errorp)
                return(NULL);

        /*
         * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
         * specifications.
         */
        KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
                 HAMMER_ZONE_RAW_BUFFER);
        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, errorp);
        if (volume == NULL)
                return(NULL);

        KKASSERT(zone2_offset < volume->maxbuf_off);

        /*
         * Allocate a new buffer structure.  We will check for races later.
         */
        ++hammer_count_buffers;
        buffer = kmalloc(sizeof(*buffer), M_HAMMER,
                         M_WAITOK|M_ZERO|M_USE_RESERVE);
        buffer->zone2_offset = zone2_offset;
        buffer->zoneX_offset = buf_offset;
        buffer->volume = volume;

        hammer_io_init(&buffer->io, hmp, iotype);
        buffer->io.offset = volume->ondisk->vol_buf_beg +
                            (zone2_offset & HAMMER_OFF_SHORT_MASK);
        buffer->io.bytes = bytes;
        TAILQ_INIT(&buffer->clist);
        hammer_ref(&buffer->io.lock);

        /*
         * Insert the buffer into the RB tree and handle late collisions.
         */
        if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
                hammer_unref(&buffer->io.lock);
                --hammer_count_buffers;
                kfree(buffer, M_HAMMER);
                goto again;
        }
        ++hammer_count_refedbufs;
found:

        /*
         * Deal with on-disk info and loading races.
         */
        if (buffer->ondisk == NULL || buffer->io.loading) {
                *errorp = hammer_load_buffer(buffer, isnew);
                if (*errorp) {
                        hammer_rel_buffer(buffer, 1);
                        buffer = NULL;
                }
        } else {
                *errorp = 0;
        }
        return(buffer);
}

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 */
void
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
                   hammer_off_t zone2_offset, int bytes)
{
        hammer_buffer_t buffer;
        hammer_volume_t volume;
        int vol_no;
        int error;

        vol_no = HAMMER_VOL_DECODE(zone2_offset);
        volume = hammer_get_volume(hmp, vol_no, &error);
        KKASSERT(error == 0);

        while (bytes > 0) {
                buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
                                   base_offset);
                if (buffer) {
                        KKASSERT(buffer->zone2_offset == zone2_offset);
                        hammer_io_clear_modify(&buffer->io, 1);
                        buffer->io.reclaim = 1;
                        KKASSERT(buffer->volume == volume);
                        if (buffer->io.lock.refs == 0)
                                hammer_unload_buffer(buffer, NULL);
                } else {
                        hammer_io_inval(volume, zone2_offset);
                }
                base_offset += HAMMER_BUFSIZE;
                zone2_offset += HAMMER_BUFSIZE;
                bytes -= HAMMER_BUFSIZE;
        }
        hammer_rel_volume(volume, 0);
}

static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
        hammer_volume_t volume;
        int error;

        /*
         * Load the buffer's on-disk info
         */
        volume = buffer->volume;
        ++buffer->io.loading;
        hammer_lock_ex(&buffer->io.lock);

        if (hammer_debug_io & 0x0001) {
                kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
                        buffer->zoneX_offset, buffer->zone2_offset, isnew,
                        buffer->ondisk);
        }

        if (buffer->ondisk == NULL) {
                if (isnew) {
                        error = hammer_io_new(volume->devvp, &buffer->io);
                } else {
                        error = hammer_io_read(volume->devvp, &buffer->io,
                                               volume->maxraw_off);
                }
                if (error == 0)
                        buffer->ondisk = (void *)buffer->io.bp->b_data;
        } else if (isnew) {
                error = hammer_io_new(volume->devvp, &buffer->io);
        } else {
                error = 0;
        }
        --buffer->io.loading;
        hammer_unlock(&buffer->io.lock);
        return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
        ++hammer_count_refedbufs;
        hammer_ref(&buffer->io.lock);
        hammer_flush_buffer_nodes(buffer);
        KKASSERT(buffer->io.lock.refs == 1);
        hammer_rel_buffer(buffer, 2);
        return(0);
}

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
        int error;

        if (buffer->io.lock.refs == 0)
                ++hammer_count_refedbufs;
        hammer_ref(&buffer->io.lock);

        /*
         * At this point a biodone() will not touch the buffer other than
         * incidental bits.  However, lose_list can be modified via
         * a biodone() interrupt.
         *
         * No longer loose
         */
        if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
                crit_enter();
                TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
                buffer->io.mod_list = NULL;
                crit_exit();
        }

        if (buffer->ondisk == NULL || buffer->io.loading) {
                error = hammer_load_buffer(buffer, 0);
                if (error) {
                        hammer_rel_buffer(buffer, 1);
                        /*
                         * NOTE: buffer pointer can become stale after
                         * the above release.
                         */
                }
        } else {
                error = 0;
        }
        return(error);
}

/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
        hammer_volume_t volume;
        struct buf *bp = NULL;
        int freeme = 0;

        crit_enter();
        if (buffer->io.lock.refs == 1) {
                ++buffer->io.loading;   /* force interlock check */
                hammer_lock_ex(&buffer->io.lock);
                if (buffer->io.lock.refs == 1) {
                        bp = hammer_io_release(&buffer->io, flush);

                        if (buffer->io.lock.refs == 1)
                                --hammer_count_refedbufs;

                        if (buffer->io.bp == NULL &&
                            buffer->io.lock.refs == 1) {
                                /*
                                 * Final cleanup
                                 *
                                 * NOTE: It is impossible for any associated
                                 * B-Tree nodes to have refs if the buffer
                                 * has no additional refs.
                                 */
                                RB_REMOVE(hammer_buf_rb_tree,
                                          &buffer->io.hmp->rb_bufs_root,
                                          buffer);
                                volume = buffer->volume;
                                buffer->volume = NULL; /* sanity */
                                hammer_rel_volume(volume, 0);
                                hammer_io_clear_modlist(&buffer->io);
                                hammer_flush_buffer_nodes(buffer);
                                KKASSERT(TAILQ_EMPTY(&buffer->clist));
                                freeme = 1;
                        }
                }
                --buffer->io.loading;
                hammer_unlock(&buffer->io.lock);
        }
        hammer_unref(&buffer->io.lock);
        crit_exit();
        if (bp)
                brelse(bp);
        if (freeme) {
                --hammer_count_buffers;
                kfree(buffer, M_HAMMER);
        }
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
              int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;
        KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
             int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                 int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}
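
/*
 * Typical hammer_bread() usage pattern (an illustrative sketch; the
 * variable names are placeholders, but the cache-and-release idiom is
 * how callers hold *bufferp across successive reads):
 *
 *      hammer_buffer_t buffer = NULL;
 *      void *data;
 *      int error;
 *
 *      data = hammer_bread(hmp, buf_offset, &error, &buffer);
 *      data = hammer_bread(hmp, buf_offset2, &error, &buffer);
 *              (the cached buffer is reused when both offsets fall in it)
 *      if (buffer)
 *              hammer_rel_buffer(buffer, 0);
 */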

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
             int *errorp, struct hammer_buffer **bufferp)
{
        hammer_buffer_t buffer;
        int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

        buf_offset &= ~HAMMER_BUFMASK64;

        buffer = *bufferp;
        if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
                               buffer->zoneX_offset != buf_offset)) {
                if (buffer)
                        hammer_rel_buffer(buffer, 0);
                buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
                *bufferp = buffer;
        } else {
                *errorp = 0;
        }

        /*
         * Return a pointer to the buffer data.
         */
        if (buffer == NULL)
                return(NULL);
        else
                return((char *)buffer->ondisk + xoff);
}

void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
            int *errorp, struct hammer_buffer **bufferp)
{
        return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
                int *errorp, struct hammer_buffer **bufferp)
{
        bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
        return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}

/************************************************************************
 *                              NODES                                   *
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset,
                int isnew, int *errorp)
{
        hammer_node_t node;

        KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

        /*
         * Locate the structure, allocating one if necessary.
         */
again:
        node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
        if (node == NULL) {
                ++hammer_count_nodes;
                node = kmalloc(sizeof(*node), M_HAMMER, M_WAITOK|M_ZERO|M_USE_RESERVE);
                node->node_offset = node_offset;
                node->hmp = hmp;
                TAILQ_INIT(&node->cursor_list);
                TAILQ_INIT(&node->cache_list);
                if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
                        --hammer_count_nodes;
                        kfree(node, M_HAMMER);
                        goto again;
                }
        }
        hammer_ref(&node->lock);
        if (node->ondisk)
                *errorp = 0;
        else
                *errorp = hammer_load_node(node, isnew);
        if (*errorp) {
                hammer_rel_node(node);
                node = NULL;
        }
        return(node);
}

/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
        KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
        hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_node_t node, int isnew)
{
        hammer_buffer_t buffer;
        hammer_off_t buf_offset;
        int error;

        error = 0;
        ++node->loading;
        hammer_lock_ex(&node->lock);
        if (node->ondisk == NULL) {
                /*
                 * This is a little confusing but the gist is that
                 * node->buffer determines whether the node is on
                 * the buffer's clist and node->ondisk determines
                 * whether the buffer is referenced.
                 *
                 * We could be racing a buffer release, in which case
                 * node->buffer may become NULL while we are blocked
                 * referencing the buffer.
                 */
                if ((buffer = node->buffer) != NULL) {
                        error = hammer_ref_buffer(buffer);
                        if (error == 0 && node->buffer == NULL) {
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                } else {
                        buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
                        buffer = hammer_get_buffer(node->hmp, buf_offset,
                                                   HAMMER_BUFSIZE, 0, &error);
                        if (buffer) {
                                KKASSERT(error == 0);
                                TAILQ_INSERT_TAIL(&buffer->clist,
                                                  node, entry);
                                node->buffer = buffer;
                        }
                }
                if (error)
                        goto failed;
                node->ondisk = (void *)((char *)buffer->ondisk +
                                        (node->node_offset & HAMMER_BUFMASK));
                if (isnew == 0 &&
                    (node->flags & HAMMER_NODE_CRCGOOD) == 0) {
                        if (hammer_crc_test_btree(node->ondisk) == 0)
                                Debugger("CRC FAILED: B-TREE NODE");
                        node->flags |= HAMMER_NODE_CRCGOOD;
                }
        }
failed:
        --node->loading;
        hammer_unlock(&node->lock);
        return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(struct hammer_mount *hmp, hammer_node_cache_t cache,
                     int *errorp)
{
        hammer_node_t node;

        node = cache->node;
        if (node != NULL) {
                hammer_ref(&node->lock);
                if (node->ondisk)
                        *errorp = 0;
                else
                        *errorp = hammer_load_node(node, 0);
                if (*errorp) {
                        hammer_rel_node(node);
                        node = NULL;
                }
        } else {
                *errorp = ENOENT;
        }
        return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
        hammer_buffer_t buffer;

        /*
         * If this isn't the last ref just decrement the ref count and
         * return.
         */
        if (node->lock.refs > 1) {
                hammer_unref(&node->lock);
                return;
        }

        /*
         * If there is no ondisk info or no buffer the node failed to load,
         * remove the last reference and destroy the node.
         */
        if (node->ondisk == NULL) {
                hammer_unref(&node->lock);
                hammer_flush_node(node);
                /* node is stale now */
                return;
        }

        /*
         * Do not disassociate the node from the buffer if it represents
         * a modified B-Tree node that still needs its crc to be generated.
         */
        if (node->flags & HAMMER_NODE_NEEDSCRC)
                return;

        /*
         * Do final cleanups and then either destroy the node or leave it
         * passively cached.  The buffer reference is removed regardless.
         */
        buffer = node->buffer;
        node->ondisk = NULL;

        if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
                hammer_unref(&node->lock);
                hammer_rel_buffer(buffer, 0);
                return;
        }

        /*
         * Destroy the node.
         */
        hammer_unref(&node->lock);
        hammer_flush_node(node);
        /* node is stale */
        hammer_rel_buffer(buffer, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
        KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
        node->flags |= HAMMER_NODE_DELETED;
        hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
        /*
         * If the node is being deleted, don't cache it!
         */
        if (node->flags & HAMMER_NODE_DELETED)
                return;
        if (cache->node == node)
                return;
        while (cache->node)
                hammer_uncache_node(cache);
        if (node->flags & HAMMER_NODE_DELETED)
                return;
        cache->node = node;
        TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}
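
/*
 * Remove a passive cache association.  If this was the node's last
 * cache reference and the node is otherwise unused, it is flushed.
 */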
void
hammer_uncache_node(hammer_node_cache_t cache)
{
        hammer_node_t node;

        if ((node = cache->node) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
                if (TAILQ_EMPTY(&node->cache_list))
                        hammer_flush_node(node);
        }
}

/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
        hammer_node_cache_t cache;
        hammer_buffer_t buffer;

        while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
                TAILQ_REMOVE(&node->cache_list, cache, entry);
                cache->node = NULL;
        }
        if (node->lock.refs == 0 && node->ondisk == NULL) {
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
                RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
                if ((buffer = node->buffer) != NULL) {
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
                --hammer_count_nodes;
                kfree(node, M_HAMMER);
        }
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
        hammer_node_t node;

        while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
                KKASSERT(node->ondisk == NULL);
                KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

                if (node->lock.refs == 0) {
                        hammer_ref(&node->lock);
                        node->flags |= HAMMER_NODE_FLUSH;
                        hammer_rel_node(node);
                } else {
                        KKASSERT(node->loading != 0);
                        KKASSERT(node->buffer != NULL);
                        buffer = node->buffer;
                        node->buffer = NULL;
                        TAILQ_REMOVE(&buffer->clist, node, entry);
                        /* buffer is unreferenced because ondisk is NULL */
                }
        }
}


/************************************************************************
 *                              ALLOCATORS                              *
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, int *errorp)
{
        hammer_buffer_t buffer = NULL;
        hammer_node_t node = NULL;
        hammer_off_t node_offset;

        node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
                                            sizeof(struct hammer_node_ondisk),
                                            errorp);
        if (*errorp == 0) {
                node = hammer_get_node(trans->hmp, node_offset, 1, errorp);
                hammer_modify_node_noundo(trans, node);
                bzero(node->ondisk, sizeof(*node->ondisk));
                hammer_modify_node_done(node);
        }
        if (buffer)
                hammer_rel_buffer(buffer, 0);
        return(node);
}

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
                  u_int16_t rec_type, hammer_off_t *data_offsetp,
                  struct hammer_buffer **data_bufferp, int *errorp)
{
        void *data;
        int zone;

        /*
         * Allocate data
         */
        if (data_len) {
                switch(rec_type) {
                case HAMMER_RECTYPE_INODE:
                case HAMMER_RECTYPE_DIRENTRY:
                case HAMMER_RECTYPE_EXT:
                case HAMMER_RECTYPE_FIX:
                        zone = HAMMER_ZONE_META_INDEX;
                        break;
                case HAMMER_RECTYPE_DATA:
                case HAMMER_RECTYPE_DB:
                        if (data_len <= HAMMER_BUFSIZE / 2) {
                                zone = HAMMER_ZONE_SMALL_DATA_INDEX;
                        } else {
                                data_len = (data_len + HAMMER_BUFMASK) &
                                           ~HAMMER_BUFMASK;
                                zone = HAMMER_ZONE_LARGE_DATA_INDEX;
                        }
                        break;
                default:
                        panic("hammer_alloc_data: rec_type %04x unknown",
                              rec_type);
                        zone = 0;       /* NOT REACHED */
                        break;
                }
                *data_offsetp = hammer_blockmap_alloc(trans, zone,
                                                      data_len, errorp);
        } else {
                *data_offsetp = 0;
        }
        if (*errorp == 0 && data_bufferp) {
                if (data_len) {
                        data = hammer_bread_ext(trans->hmp, *data_offsetp,
                                                data_len, errorp, data_bufferp);
                        KKASSERT(*errorp == 0);
                } else {
                        data = NULL;
                }
        } else {
                data = NULL;
        }
        KKASSERT(*errorp == 0);
        return(data);
}
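
/*
 * Sizing note on the small/large data split above: with the standard
 * 16KB HAMMER_BUFSIZE, records of up to 8192 bytes (HAMMER_BUFSIZE / 2)
 * are placed in the small-data zone; anything larger is rounded up to
 * a multiple of HAMMER_BUFSIZE and placed in the large-data zone.
 */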

/*
 * Sync dirty buffers to the media and clean up any loose ends.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = waitfor;
        if (waitfor == MNT_WAIT) {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        } else {
                vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        }
        return(info.error);
}

/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
        struct hammer_sync_info info;

        info.error = 0;
        info.waitfor = MNT_NOWAIT;
        vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
                      hammer_sync_scan1, hammer_sync_scan2, &info);
        if (info.error == 0 && waitfor == MNT_WAIT) {
                info.waitfor = waitfor;
                vmntvnodescan(hmp->mp, VMSC_GETVP,
                              hammer_sync_scan1, hammer_sync_scan2, &info);
        }
        if (waitfor == MNT_WAIT) {
                hammer_flusher_sync(hmp);
                hammer_flusher_sync(hmp);
        } else {
                hammer_flusher_async(hmp);
        }
        return(info.error);
}
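
/*
 * Fast-path pre-check for the vnode scan: skip vnodes with no hammer
 * inode, no modified state, and no dirty buffers.  Returning -1 tells
 * vmntvnodescan() not to invoke the slow-path function for this vnode.
 */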
static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_inode *ip;

        ip = VTOI(vp);
        if (vp->v_type == VNON || ip == NULL ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(-1);
        }
        return(0);
}
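
/*
 * Slow-path scan function: fsync any vnode that still looks dirty,
 * recording the first error in the shared hammer_sync_info.
 */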
static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
        struct hammer_sync_info *info = data;
        struct hammer_inode *ip;
        int error;

        ip = VTOI(vp);
        if (vp->v_type == VNON || vp->v_type == VBAD ||
            ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
             RB_EMPTY(&vp->v_rbdirty_tree))) {
                return(0);
        }
        error = VOP_FSYNC(vp, info->waitfor);
        if (error)
                info->error = error;
        return(0);
}