/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ondisk.c,v 1.76 2008/08/29 20:19:08 dillon Exp $
 */

/*
 * Manage HAMMER's on-disk structures.  These routines are primarily
 * responsible for interfacing with the kernel's I/O subsystem and for
 * managing in-memory structures.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_free_volume(hammer_volume_t volume);
static int hammer_load_volume(hammer_volume_t volume);
static int hammer_load_buffer(hammer_buffer_t buffer, int isnew);
static int hammer_load_node(hammer_transaction_t trans,
			hammer_node_t node, int isnew);

static int
hammer_vol_rb_compare(hammer_volume_t vol1, hammer_volume_t vol2)
{
	if (vol1->vol_no < vol2->vol_no)
		return(-1);
	if (vol1->vol_no > vol2->vol_no)
		return(1);
	return(0);
}

/*
 * hammer_buffer structures are indexed via their zoneX_offset, not
 * their zone2_offset.
 */
static int
hammer_buf_rb_compare(hammer_buffer_t buf1, hammer_buffer_t buf2)
{
	if (buf1->zoneX_offset < buf2->zoneX_offset)
		return(-1);
	if (buf1->zoneX_offset > buf2->zoneX_offset)
		return(1);
	return(0);
}

static int
hammer_nod_rb_compare(hammer_node_t node1, hammer_node_t node2)
{
	if (node1->node_offset < node2->node_offset)
		return(-1);
	if (node1->node_offset > node2->node_offset)
		return(1);
	return(0);
}

RB_GENERATE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	     hammer_vol_rb_compare, int32_t, vol_no);
RB_GENERATE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	     hammer_buf_rb_compare, hammer_off_t, zoneX_offset);
RB_GENERATE2(hammer_nod_rb_tree, hammer_node, rb_node,
	     hammer_nod_rb_compare, hammer_off_t, node_offset);

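/*
 * Illustrative sketch (editor's addition, not part of the original file,
 * kept under #if 0 so it is never compiled): RB_GENERATE2 emits a keyed
 * lookup function for each tree, keyed on the last two arguments above,
 * so a volume can be located directly by its vol_no.  This is what the
 * RB_LOOKUP() calls later in this file rely on.  The helper name below
 * is hypothetical.
 */
#if 0
static hammer_volume_t
example_volume_by_no(hammer_mount_t hmp, int32_t vol_no)
{
	/* keyed red-black lookup generated by RB_GENERATE2 above */
	return(RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no));
}
#endif
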
/************************************************************************
 *				VOLUMES					*
 ************************************************************************
 *
 * Load a HAMMER volume by name.  Returns 0 on success or a positive error
 * code on failure.  Volumes must be loaded at mount time, get_volume() will
 * not load a new volume.
 *
 * Calls made to hammer_load_volume() are single-threaded at mount time.
 */
int
hammer_install_volume(struct hammer_mount *hmp, const char *volname,
		      struct vnode *devvp)
{
	struct mount *mp;
	hammer_volume_t volume;
	struct hammer_volume_ondisk *ondisk;
	struct nlookupdata nd;
	struct buf *bp = NULL;
	int error;
	int ronly;
	int setmp = 0;

	mp = hmp->mp;
	ronly = ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);

	/*
	 * Allocate a volume structure
	 */
	++hammer_count_volumes;
	volume = kmalloc(sizeof(*volume), hmp->m_misc, M_WAITOK|M_ZERO);
	volume->vol_name = kstrdup(volname, hmp->m_misc);
	volume->io.hmp = hmp;	/* bootstrap */
	hammer_io_init(&volume->io, volume, HAMMER_STRUCTURE_VOLUME);
	volume->io.offset = 0LL;
	volume->io.bytes = HAMMER_BUFSIZE;

	/*
	 * Get the device vnode
	 */
	if (devvp == NULL) {
		error = nlookup_init(&nd, volume->vol_name, UIO_SYSSPACE, NLC_FOLLOW);
		if (error == 0)
			error = nlookup(&nd);
		if (error == 0)
			error = cache_vref(&nd.nl_nch, nd.nl_cred, &volume->devvp);
		nlookup_done(&nd);
	} else {
		error = 0;
		volume->devvp = devvp;
	}

	if (error == 0) {
		if (vn_isdisk(volume->devvp, &error)) {
			error = vfs_mountedon(volume->devvp);
		}
	}
	if (error == 0 &&
	    count_udev(volume->devvp->v_umajor, volume->devvp->v_uminor) > 0) {
		error = EBUSY;
	}
	if (error == 0) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(volume->devvp, V_SAVE, 0, 0);
		if (error == 0) {
			error = VOP_OPEN(volume->devvp,
					 (ronly ? FREAD : FREAD|FWRITE),
					 FSCRED, NULL);
		}
		vn_unlock(volume->devvp);
	}
	if (error) {
		hammer_free_volume(volume);
		return(error);
	}
	volume->devvp->v_rdev->si_mountpoint = mp;
	setmp = 1;

	/*
	 * Extract the volume number from the volume header and do various
	 * sanity checks.
	 */
	error = bread(volume->devvp, 0LL, HAMMER_BUFSIZE, &bp);
	if (error)
		goto late_failure;
	ondisk = (void *)bp->b_data;
	if (ondisk->vol_signature != HAMMER_FSBUF_VOLUME) {
		kprintf("hammer_mount: volume %s has an invalid header\n",
			volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}
	volume->vol_no = ondisk->vol_no;
	volume->buffer_base = ondisk->vol_buf_beg;
	volume->vol_flags = ondisk->vol_flags;
	volume->nblocks = ondisk->vol_nblocks;
	volume->maxbuf_off = HAMMER_ENCODE_RAW_BUFFER(volume->vol_no,
				    ondisk->vol_buf_end - ondisk->vol_buf_beg);
	volume->maxraw_off = ondisk->vol_buf_end;

	if (RB_EMPTY(&hmp->rb_vols_root)) {
		hmp->fsid = ondisk->vol_fsid;
	} else if (bcmp(&hmp->fsid, &ondisk->vol_fsid, sizeof(uuid_t))) {
		kprintf("hammer_mount: volume %s's fsid does not match "
			"other volumes\n", volume->vol_name);
		error = EFTYPE;
		goto late_failure;
	}

	/*
	 * Insert the volume structure into the red-black tree.
	 */
	if (RB_INSERT(hammer_vol_rb_tree, &hmp->rb_vols_root, volume)) {
		kprintf("hammer_mount: volume %s has a duplicate vol_no %d\n",
			volume->vol_name, volume->vol_no);
		error = EEXIST;
	}

	/*
	 * Set the root volume.  HAMMER special cases the rootvol structure.
	 * We do not hold a ref because this would prevent related I/O
	 * from being flushed.
	 */
	if (error == 0 && ondisk->vol_rootvol == ondisk->vol_no) {
		hmp->rootvol = volume;
		hmp->nvolumes = ondisk->vol_count;
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		hmp->mp->mnt_stat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
		hmp->mp->mnt_vstat.f_blocks += ondisk->vol0_stat_bigblocks *
			(HAMMER_LARGEBLOCK_SIZE / HAMMER_BUFSIZE);
	}
late_failure:
	if (bp)
		brelse(bp);
	if (error) {
		/*vinvalbuf(volume->devvp, V_SAVE, 0, 0);*/
		if (setmp)
			volume->devvp->v_rdev->si_mountpoint = NULL;
		VOP_CLOSE(volume->devvp, ronly ? FREAD : FREAD|FWRITE);
		hammer_free_volume(volume);
	}
	return (error);
}

/*
 * This is called for each volume when updating the mount point from
 * read-write to read-only or vice-versa.
 */
int
hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused)
{
	if (volume->devvp) {
		vn_lock(volume->devvp, LK_EXCLUSIVE | LK_RETRY);
		if (volume->io.hmp->ronly) {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		} else {
			/* do not call vinvalbuf */
			VOP_OPEN(volume->devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(volume->devvp, FREAD);
		}
		vn_unlock(volume->devvp);
	}
	return(0);
}

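/*
 * Illustrative sketch (editor's addition, not part of the original file,
 * never compiled): this callback is intended to be driven across all
 * installed volumes via RB_SCAN() when the mount is remounted read-only
 * or read-write.  It assumes hmp->ronly has already been updated by the
 * caller; the wrapper name is hypothetical.
 */
#if 0
static void
example_remount_volumes(hammer_mount_t hmp)
{
	/* run hammer_adjust_volume_mode() on every installed volume */
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_adjust_volume_mode, NULL);
}
#endif
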
/*
 * Unload and free a HAMMER volume.  Must return >= 0 to continue scan
 * so returns -1 on failure.
 */
int
hammer_unload_volume(hammer_volume_t volume, void *data __unused)
{
	hammer_mount_t hmp = volume->io.hmp;
	int ronly = ((hmp->mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
	struct buf *bp;

	/*
	 * Clean up the root volume pointer, which is held unlocked in hmp.
	 */
	if (hmp->rootvol == volume)
		hmp->rootvol = NULL;

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 */
	hammer_io_clear_modify(&volume->io, 1);
	volume->io.waitdep = 1;
	bp = hammer_io_release(&volume->io, 1);

	/*
	 * Clean up the persistent ref ioerror might have on the volume.
	 */
	if (volume->io.ioerror) {
		volume->io.ioerror = 0;
		hammer_unref(&volume->io.lock);
	}

	/*
	 * There should be no references on the volume, no clusters, and
	 * no super-clusters.
	 */
	KKASSERT(volume->io.lock.refs == 0);
	if (bp)
		brelse(bp);

	volume->ondisk = NULL;
	if (volume->devvp) {
		if (volume->devvp->v_rdev &&
		    volume->devvp->v_rdev->si_mountpoint == hmp->mp
		) {
			volume->devvp->v_rdev->si_mountpoint = NULL;
		}
		if (ronly) {
			/*
			 * Make sure we don't sync anything to disk if we
			 * are in read-only mode (1) or critically-errored
			 * (2).  Note that there may be dirty buffers in
			 * normal read-only mode from crash recovery.
			 */
			vinvalbuf(volume->devvp, 0, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD);
		} else {
			/*
			 * Normal termination, save any dirty buffers
			 * (XXX there really shouldn't be any).
			 */
			vinvalbuf(volume->devvp, V_SAVE, 0, 0);
			VOP_CLOSE(volume->devvp, FREAD|FWRITE);
		}
	}

	/*
	 * Destroy the structure
	 */
	RB_REMOVE(hammer_vol_rb_tree, &hmp->rb_vols_root, volume);
	hammer_free_volume(volume);
	return(0);
}

static
void
hammer_free_volume(hammer_volume_t volume)
{
	hammer_mount_t hmp = volume->io.hmp;

	if (volume->vol_name) {
		kfree(volume->vol_name, hmp->m_misc);
		volume->vol_name = NULL;
	}
	if (volume->devvp) {
		vrele(volume->devvp);
		volume->devvp = NULL;
	}
	--hammer_count_volumes;
	kfree(volume, hmp->m_misc);
}

/*
 * Get a HAMMER volume.  The volume must already exist.
 */
hammer_volume_t
hammer_get_volume(struct hammer_mount *hmp, int32_t vol_no, int *errorp)
{
	struct hammer_volume *volume;

	/*
	 * Locate the volume structure
	 */
	volume = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, vol_no);
	if (volume == NULL) {
		*errorp = ENOENT;
		return(NULL);
	}
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(volume);
}

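/*
 * Illustrative sketch (editor's addition, not part of the original file,
 * never compiled): the typical get/release pairing for a volume
 * reference.  hammer_get_volume() loads the on-disk header on first use;
 * the matching release is hammer_rel_volume().  The helper name is
 * hypothetical.
 */
#if 0
static int
example_volume_ref(hammer_mount_t hmp, int32_t vol_no)
{
	hammer_volume_t volume;
	int error;

	volume = hammer_get_volume(hmp, vol_no, &error);
	if (volume) {
		/* ... inspect volume->ondisk here ... */
		hammer_rel_volume(volume, 0);
	}
	return(error);
}
#endif
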
int
hammer_ref_volume(hammer_volume_t volume)
{
	int error;

	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		error = hammer_load_volume(volume);
		if (error)
			hammer_rel_volume(volume, 1);
	} else {
		error = 0;
	}
	return (error);
}

hammer_volume_t
hammer_get_root_volume(struct hammer_mount *hmp, int *errorp)
{
	hammer_volume_t volume;

	volume = hmp->rootvol;
	KKASSERT(volume != NULL);
	hammer_ref(&volume->io.lock);

	/*
	 * Deal with on-disk info
	 */
	if (volume->ondisk == NULL || volume->io.loading) {
		*errorp = hammer_load_volume(volume);
		if (*errorp) {
			hammer_rel_volume(volume, 1);
			volume = NULL;
		}
	} else {
		*errorp = 0;
	}
	return (volume);
}

/*
 * Load a volume's on-disk information.  The volume must be referenced and
 * not locked.  We temporarily acquire an exclusive lock to interlock
 * against releases or multiple get's.
 */
static int
hammer_load_volume(hammer_volume_t volume)
{
	int error;

	++volume->io.loading;
	hammer_lock_ex(&volume->io.lock);

	if (volume->ondisk == NULL) {
		error = hammer_io_read(volume->devvp, &volume->io,
				       volume->maxraw_off);
		if (error == 0)
			volume->ondisk = (void *)volume->io.bp->b_data;
	} else {
		error = 0;
	}
	--volume->io.loading;
	hammer_unlock(&volume->io.lock);
	return(error);
}

/*
 * Release a volume.  Call hammer_io_release on the last reference.  We have
 * to acquire an exclusive lock to interlock against volume->ondisk tests
 * in hammer_load_volume(), and hammer_io_release() also expects an exclusive
 * lock to be held.
 *
 * Volumes are not unloaded from memory during normal operation.
 */
void
hammer_rel_volume(hammer_volume_t volume, int flush)
{
	struct buf *bp = NULL;

	crit_enter();
	if (volume->io.lock.refs == 1) {
		++volume->io.loading;
		hammer_lock_ex(&volume->io.lock);
		if (volume->io.lock.refs == 1) {
			volume->ondisk = NULL;
			bp = hammer_io_release(&volume->io, flush);
		}
		--volume->io.loading;
		hammer_unlock(&volume->io.lock);
	}
	hammer_unref(&volume->io.lock);
	if (bp)
		brelse(bp);
	crit_exit();
}

int
hammer_mountcheck_volumes(struct hammer_mount *hmp)
{
	hammer_volume_t vol;
	int i;

	for (i = 0; i < hmp->nvolumes; ++i) {
		vol = RB_LOOKUP(hammer_vol_rb_tree, &hmp->rb_vols_root, i);
		if (vol == NULL)
			return(EINVAL);
	}
	return(0);
}

/************************************************************************
 *				BUFFERS					*
 ************************************************************************
 *
 * Manage buffers.  Currently all blockmap-backed zones are direct-mapped
 * to zone-2 buffer offsets, without a translation stage.  However, the
 * hammer_buffer structure is indexed by its zoneX_offset, not its
 * zone2_offset.
 *
 * The proper zone must be maintained throughout the code-base all the way
 * through to the big-block allocator, or routines like hammer_del_buffers()
 * will not be able to locate all potentially conflicting buffers.
 */
hammer_buffer_t
hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
		  int bytes, int isnew, int *errorp)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	hammer_off_t zone2_offset;
	hammer_io_type_t iotype;
	int vol_no;
	int zone;

	buf_offset &= ~HAMMER_BUFMASK64;
again:
	/*
	 * Shortcut if the buffer is already cached
	 */
	buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root, buf_offset);
	if (buffer) {
		if (buffer->io.lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&buffer->io.lock);

		/*
		 * Once refed the ondisk field will not be cleared by
		 * any other action.
		 */
		if (buffer->ondisk && buffer->io.loading == 0) {
			*errorp = 0;
			return(buffer);
		}

		/*
		 * The buffer is no longer loose if it has a ref, and
		 * cannot become loose once it gains a ref.  Loose
		 * buffers will never be in a modified state.  This should
		 * only occur on the 0->1 transition of refs.
		 *
		 * lose_list can be modified via a biodone() interrupt.
		 */
		if (buffer->io.mod_list == &hmp->lose_list) {
			crit_enter();	/* biodone race against list */
			TAILQ_REMOVE(buffer->io.mod_list, &buffer->io,
				     mod_entry);
			crit_exit();
			buffer->io.mod_list = NULL;
			KKASSERT(buffer->io.modified == 0);
		}
		goto found;
	}

	/*
	 * What is the buffer class?
	 */
	zone = HAMMER_ZONE_DECODE(buf_offset);

	switch(zone) {
	case HAMMER_ZONE_LARGE_DATA_INDEX:
	case HAMMER_ZONE_SMALL_DATA_INDEX:
		iotype = HAMMER_STRUCTURE_DATA_BUFFER;
		break;
	case HAMMER_ZONE_UNDO_INDEX:
		iotype = HAMMER_STRUCTURE_UNDO_BUFFER;
		break;
	case HAMMER_ZONE_META_INDEX:
	default:
		/*
		 * NOTE: inode data and directory entries are placed in this
		 * zone.  inode atime/mtime is updated in-place and thus
		 * buffers containing inodes must be synchronized as
		 * meta-buffers, same as buffers containing B-Tree info.
		 */
		iotype = HAMMER_STRUCTURE_META_BUFFER;
		break;
	}

	/*
	 * Handle blockmap offset translations
	 */
	if (zone >= HAMMER_ZONE_BTREE_INDEX) {
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, errorp);
	} else if (zone == HAMMER_ZONE_UNDO_INDEX) {
		zone2_offset = hammer_undo_lookup(hmp, buf_offset, errorp);
	} else {
		KKASSERT(zone == HAMMER_ZONE_RAW_BUFFER_INDEX);
		zone2_offset = buf_offset;
		*errorp = 0;
	}
	if (*errorp)
		return(NULL);

	/*
	 * NOTE: zone2_offset and maxbuf_off are both full zone-2 offset
	 * specifications.
	 */
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, errorp);
	if (volume == NULL)
		return(NULL);

	KKASSERT(zone2_offset < volume->maxbuf_off);

	/*
	 * Allocate a new buffer structure.  We will check for races later.
	 */
	++hammer_count_buffers;
	buffer = kmalloc(sizeof(*buffer), hmp->m_misc,
			 M_WAITOK|M_ZERO|M_USE_RESERVE);
	buffer->zone2_offset = zone2_offset;
	buffer->zoneX_offset = buf_offset;

	hammer_io_init(&buffer->io, volume, iotype);
	buffer->io.offset = volume->ondisk->vol_buf_beg +
			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
	buffer->io.bytes = bytes;
	TAILQ_INIT(&buffer->clist);
	hammer_ref(&buffer->io.lock);

	/*
	 * Insert the buffer into the RB tree and handle late collisions.
	 */
	if (RB_INSERT(hammer_buf_rb_tree, &hmp->rb_bufs_root, buffer)) {
		hammer_unref(&buffer->io.lock); /* safety */
		--hammer_count_buffers;
		hammer_rel_volume(volume, 0);
		buffer->io.volume = NULL;	/* safety */
		kfree(buffer, hmp->m_misc);
		goto again;
	}
	++hammer_count_refedbufs;
found:

	/*
	 * Deal with on-disk info and loading races.
	 */
	if (buffer->ondisk == NULL || buffer->io.loading) {
		*errorp = hammer_load_buffer(buffer, isnew);
		if (*errorp) {
			hammer_rel_buffer(buffer, 1);
			buffer = NULL;
		}
	} else {
		*errorp = 0;
	}
	return(buffer);
}

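/*
 * Illustrative sketch (editor's addition, not part of the original file,
 * never compiled): buffers are looked up by their zone-X offset and the
 * zone-2 translation happens inside hammer_get_buffer().  A direct get
 * must be paired with hammer_rel_buffer().  The helper name is
 * hypothetical.
 */
#if 0
static int
example_touch_buffer(hammer_mount_t hmp, hammer_off_t zoneX_off)
{
	hammer_buffer_t buffer;
	int error;

	buffer = hammer_get_buffer(hmp, zoneX_off, HAMMER_BUFSIZE, 0, &error);
	if (buffer) {
		/* ... read via buffer->ondisk ... */
		hammer_rel_buffer(buffer, 0);
	}
	return(error);
}
#endif
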
/*
 * This is used by the direct-read code to deal with large-data buffers
 * created by the reblocker and mirror-write code.  The direct-read code
 * bypasses the HAMMER buffer subsystem and so any aliased dirty or write-
 * running hammer buffers must be fully synced to disk before we can issue
 * the direct-read.
 *
 * This code path is not considered critical as only the reblocker and
 * mirror-write code will create large-data buffers via the HAMMER buffer
 * subsystem.  They do that because they operate at the B-Tree level and
 * do not access the vnode/inode structures.
 */
void
hammer_sync_buffers(hammer_mount_t hmp, hammer_off_t base_offset, int bytes)
{
	hammer_buffer_t buffer;
	int error;

	KKASSERT((base_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer && (buffer->io.modified || buffer->io.running)) {
			error = hammer_ref_buffer(buffer);
			if (error == 0) {
				hammer_io_wait(&buffer->io);
				if (buffer->io.modified) {
					hammer_io_write_interlock(&buffer->io);
					hammer_io_flush(&buffer->io);
					hammer_io_done_interlock(&buffer->io);
					hammer_io_wait(&buffer->io);
				}
				hammer_rel_buffer(buffer, 0);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
}

/*
 * Destroy all buffers covering the specified zoneX offset range.  This
 * is called when the related blockmap layer2 entry is freed or when
 * a direct write bypasses our buffer/buffer-cache subsystem.
 *
 * The buffers may be referenced by the caller itself.  Setting reclaim
 * will cause the buffer to be destroyed when its ref count reaches zero.
 *
 * Return 0 on success, EAGAIN if some buffers could not be destroyed due
 * to additional references held by other threads, or some other (typically
 * fatal) error.
 */
int
hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
		   hammer_off_t zone2_offset, int bytes,
		   int report_conflicts)
{
	hammer_buffer_t buffer;
	hammer_volume_t volume;
	int vol_no;
	int error;
	int ret_error;

	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &ret_error);
	KKASSERT(ret_error == 0);

	while (bytes > 0) {
		buffer = RB_LOOKUP(hammer_buf_rb_tree, &hmp->rb_bufs_root,
				   base_offset);
		if (buffer) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && buffer->io.lock.refs != 1) {
				error = EAGAIN;
				hammer_rel_buffer(buffer, 0);
			}
			if (error == 0) {
				KKASSERT(buffer->zone2_offset == zone2_offset);
				hammer_io_clear_modify(&buffer->io, 1);
				buffer->io.reclaim = 1;
				buffer->io.waitdep = 1;
				KKASSERT(buffer->io.volume == volume);
				hammer_rel_buffer(buffer, 0);
			}
		} else {
			error = hammer_io_inval(volume, zone2_offset);
		}
		if (error) {
			ret_error = error;
			if (report_conflicts ||
			    (hammer_debug_general & 0x8000)) {
				kprintf("hammer_del_buffers: unable to "
					"invalidate %016llx buffer=%p "
					"rep=%d\n",
					base_offset, buffer,
					report_conflicts);
			}
		}
		base_offset += HAMMER_BUFSIZE;
		zone2_offset += HAMMER_BUFSIZE;
		bytes -= HAMMER_BUFSIZE;
	}
	hammer_rel_volume(volume, 0);
	return (ret_error);
}

static int
hammer_load_buffer(hammer_buffer_t buffer, int isnew)
{
	hammer_volume_t volume;
	int error;

	/*
	 * Load the buffer's on-disk info
	 */
	volume = buffer->io.volume;
	++buffer->io.loading;
	hammer_lock_ex(&buffer->io.lock);

	if (hammer_debug_io & 0x0001) {
		kprintf("load_buffer %016llx %016llx isnew=%d od=%p\n",
			buffer->zoneX_offset, buffer->zone2_offset, isnew,
			buffer->ondisk);
	}

	if (buffer->ondisk == NULL) {
		if (isnew) {
			error = hammer_io_new(volume->devvp, &buffer->io);
		} else {
			error = hammer_io_read(volume->devvp, &buffer->io,
					       volume->maxraw_off);
		}
		if (error == 0)
			buffer->ondisk = (void *)buffer->io.bp->b_data;
	} else if (isnew) {
		error = hammer_io_new(volume->devvp, &buffer->io);
	} else {
		error = 0;
	}
	--buffer->io.loading;
	hammer_unlock(&buffer->io.lock);
	return (error);
}

/*
 * NOTE: Called from RB_SCAN, must return >= 0 for scan to continue.
 * This routine is only called during unmount.
 */
int
hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused)
{
	/*
	 * Clean up the persistent ref ioerror might have on the buffer
	 * and acquire a ref (steal ioerror's if we can).
	 */
	if (buffer->io.ioerror) {
		buffer->io.ioerror = 0;
	} else {
		if (buffer->io.lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&buffer->io.lock);
	}

	/*
	 * We must not flush a dirty buffer to disk on umount.  It should
	 * have already been dealt with by the flusher, or we may be in
	 * catastrophic failure.
	 */
	hammer_io_clear_modify(&buffer->io, 1);
	hammer_flush_buffer_nodes(buffer);
	KKASSERT(buffer->io.lock.refs == 1);
	hammer_rel_buffer(buffer, 2);
	return(0);
}

/*
 * Reference a buffer that is either already referenced or via a specially
 * handled pointer (aka cursor->buffer).
 */
int
hammer_ref_buffer(hammer_buffer_t buffer)
{
	int error;

	if (buffer->io.lock.refs == 0)
		++hammer_count_refedbufs;
	hammer_ref(&buffer->io.lock);

	/*
	 * At this point a biodone() will not touch the buffer other than
	 * incidental bits.  However, lose_list can be modified via
	 * a biodone() interrupt.
	 *
	 * No longer loose
	 */
	if (buffer->io.mod_list == &buffer->io.hmp->lose_list) {
		crit_enter();
		TAILQ_REMOVE(buffer->io.mod_list, &buffer->io, mod_entry);
		buffer->io.mod_list = NULL;
		crit_exit();
	}

	if (buffer->ondisk == NULL || buffer->io.loading) {
		error = hammer_load_buffer(buffer, 0);
		if (error) {
			hammer_rel_buffer(buffer, 1);
			/*
			 * NOTE: buffer pointer can become stale after
			 * the above release.
			 */
		}
	} else {
		error = 0;
	}
	return(error);
}

/*
 * Release a buffer.  We have to deal with several places where
 * another thread can ref the buffer.
 *
 * Only destroy the structure itself if the related buffer cache buffer
 * was disassociated from it.  This ties the management of the structure
 * to the buffer cache subsystem.  buffer->ondisk determines whether the
 * embedded io is referenced or not.
 */
void
hammer_rel_buffer(hammer_buffer_t buffer, int flush)
{
	hammer_volume_t volume;
	hammer_mount_t hmp;
	struct buf *bp = NULL;
	int freeme = 0;

	hmp = buffer->io.hmp;

	crit_enter();
	if (buffer->io.lock.refs == 1) {
		++buffer->io.loading;	/* force interlock check */
		hammer_lock_ex(&buffer->io.lock);
		if (buffer->io.lock.refs == 1) {
			bp = hammer_io_release(&buffer->io, flush);

			if (buffer->io.lock.refs == 1)
				--hammer_count_refedbufs;

			if (buffer->io.bp == NULL &&
			    buffer->io.lock.refs == 1) {
				/*
				 * Final cleanup
				 *
				 * NOTE: It is impossible for any associated
				 * B-Tree nodes to have refs if the buffer
				 * has no additional refs.
				 */
				RB_REMOVE(hammer_buf_rb_tree,
					  &buffer->io.hmp->rb_bufs_root,
					  buffer);
				volume = buffer->io.volume;
				buffer->io.volume = NULL; /* sanity */
				hammer_rel_volume(volume, 0);
				hammer_io_clear_modlist(&buffer->io);
				hammer_flush_buffer_nodes(buffer);
				KKASSERT(TAILQ_EMPTY(&buffer->clist));
				freeme = 1;
			}
		}
		--buffer->io.loading;
		hammer_unlock(&buffer->io.lock);
	}
	hammer_unref(&buffer->io.lock);
	crit_exit();
	if (bp)
		brelse(bp);
	if (freeme) {
		--hammer_count_buffers;
		kfree(buffer, hmp->m_misc);
	}
}

/*
 * Access the filesystem buffer containing the specified hammer offset.
 * buf_offset is a conglomeration of the volume number and vol_buf_beg
 * relative buffer offset.  It must also have bit 55 set to be valid.
 * (see hammer_off_t in hammer_disk.h).
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * NOTE: The buffer is indexed via its zoneX_offset but we allow the
 * passed cached *bufferp to match against either zoneX or zone2.
 */
static __inline
void *
_hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	      int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) != 0);

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 0, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bread(hammer_mount_t hmp, hammer_off_t buf_offset,
	     int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bread(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bread_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		 int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bread(hmp, buf_offset, bytes, errorp, bufferp));
}

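/*
 * Illustrative sketch (editor's addition, not part of the original file,
 * never compiled): cursor-style use of hammer_bread().  The cached
 * *bufferp is reused by _hammer_bread() across calls that hit the same
 * buffer and must be released exactly once at the end.  The helper name
 * is hypothetical.
 */
#if 0
static void
example_bread_loop(hammer_mount_t hmp, hammer_off_t base, int count)
{
	struct hammer_buffer *buffer = NULL;	/* must start NULL */
	void *data;
	int error;
	int i;

	for (i = 0; i < count; ++i) {
		data = hammer_bread(hmp, base + i * HAMMER_BUFSIZE,
				    &error, &buffer);
		if (data == NULL)
			break;
		/* ... consume data ... */
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
}
#endif
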
/*
 * Access the filesystem buffer containing the specified hammer offset.
 * No disk read operation occurs.  The result buffer may contain garbage.
 *
 * Any prior buffer in *bufferp will be released and replaced by the
 * requested buffer.
 *
 * This function marks the buffer dirty but does not increment its
 * modify_refs count.
 */
static __inline
void *
_hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
	     int *errorp, struct hammer_buffer **bufferp)
{
	hammer_buffer_t buffer;
	int32_t xoff = (int32_t)buf_offset & HAMMER_BUFMASK;

	buf_offset &= ~HAMMER_BUFMASK64;

	buffer = *bufferp;
	if (buffer == NULL || (buffer->zone2_offset != buf_offset &&
			       buffer->zoneX_offset != buf_offset)) {
		if (buffer)
			hammer_rel_buffer(buffer, 0);
		buffer = hammer_get_buffer(hmp, buf_offset, bytes, 1, errorp);
		*bufferp = buffer;
	} else {
		*errorp = 0;
	}

	/*
	 * Return a pointer to the buffer data.
	 */
	if (buffer == NULL)
		return(NULL);
	else
		return((char *)buffer->ondisk + xoff);
}

void *
hammer_bnew(hammer_mount_t hmp, hammer_off_t buf_offset,
	    int *errorp, struct hammer_buffer **bufferp)
{
	return(_hammer_bnew(hmp, buf_offset, HAMMER_BUFSIZE, errorp, bufferp));
}

void *
hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t buf_offset, int bytes,
		int *errorp, struct hammer_buffer **bufferp)
{
	bytes = (bytes + HAMMER_BUFMASK) & ~HAMMER_BUFMASK;
	return(_hammer_bnew(hmp, buf_offset, bytes, errorp, bufferp));
}

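/*
 * Illustrative sketch (editor's addition, not part of the original file,
 * never compiled): hammer_bnew() is for buffers that will be completely
 * overwritten.  No disk read is issued and the returned memory may
 * contain garbage, so the caller must initialize the full extent it
 * asked for.  The helper name is hypothetical.
 */
#if 0
static void
example_bnew_init(hammer_mount_t hmp, hammer_off_t buf_offset)
{
	struct hammer_buffer *buffer = NULL;
	void *data;
	int error;

	data = hammer_bnew(hmp, buf_offset, &error, &buffer);
	if (data)
		bzero(data, HAMMER_BUFSIZE);	/* caller must fully init */
	if (buffer)
		hammer_rel_buffer(buffer, 0);
}
#endif
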
/************************************************************************
 *				NODES					*
 ************************************************************************
 *
 * Manage B-Tree nodes.  B-Tree nodes represent the primary indexing
 * method used by the HAMMER filesystem.
 *
 * Unlike other HAMMER structures, a hammer_node can be PASSIVELY
 * associated with its buffer, and will only reference the buffer while
 * the node itself is referenced.
 *
 * A hammer_node can also be passively associated with other HAMMER
 * structures, such as inodes, while retaining 0 references.  These
 * associations can be cleared backwards using a pointer-to-pointer in
 * the hammer_node.
 *
 * This allows the HAMMER implementation to cache hammer_nodes long-term
 * and short-cut a great deal of the infrastructure's complexity.  In
 * most cases a cached node can be reacquired without having to dip into
 * either the buffer or cluster management code.
 *
 * The caller must pass a referenced cluster on call and will retain
 * ownership of the reference on return.  The node will acquire its own
 * additional references, if necessary.
 */
hammer_node_t
hammer_get_node(hammer_transaction_t trans, hammer_off_t node_offset,
		int isnew, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_node_t node;

	KKASSERT((node_offset & HAMMER_OFF_ZONE_MASK) == HAMMER_ZONE_BTREE);

	/*
	 * Locate the structure, allocating one if necessary.
	 */
again:
	node = RB_LOOKUP(hammer_nod_rb_tree, &hmp->rb_nods_root, node_offset);
	if (node == NULL) {
		++hammer_count_nodes;
		node = kmalloc(sizeof(*node), hmp->m_misc,
			       M_WAITOK|M_ZERO|M_USE_RESERVE);
		node->node_offset = node_offset;
		node->hmp = hmp;
		TAILQ_INIT(&node->cursor_list);
		TAILQ_INIT(&node->cache_list);
		if (RB_INSERT(hammer_nod_rb_tree, &hmp->rb_nods_root, node)) {
			--hammer_count_nodes;
			kfree(node, hmp->m_misc);
			goto again;
		}
	}
	hammer_ref(&node->lock);
	if (node->ondisk) {
		*errorp = 0;
	} else {
		*errorp = hammer_load_node(trans, node, isnew);
		trans->flags |= HAMMER_TRANSF_DIDIO;
	}
	if (*errorp) {
		hammer_rel_node(node);
		node = NULL;
	}
	return(node);
}

/*
 * Reference an already-referenced node.
 */
void
hammer_ref_node(hammer_node_t node)
{
	KKASSERT(node->lock.refs > 0 && node->ondisk != NULL);
	hammer_ref(&node->lock);
}

/*
 * Load a node's on-disk data reference.
 */
static int
hammer_load_node(hammer_transaction_t trans, hammer_node_t node, int isnew)
{
	hammer_buffer_t buffer;
	hammer_off_t buf_offset;
	int error;

	error = 0;
	++node->loading;
	hammer_lock_ex(&node->lock);
	if (node->ondisk == NULL) {
		/*
		 * This is a little confusing but the gist is that
		 * node->buffer determines whether the node is on
		 * the buffer's clist and node->ondisk determines
		 * whether the buffer is referenced.
		 *
		 * We could be racing a buffer release, in which case
		 * node->buffer may become NULL while we are blocked
		 * referencing the buffer.
		 */
		if ((buffer = node->buffer) != NULL) {
			error = hammer_ref_buffer(buffer);
			if (error == 0 && node->buffer == NULL) {
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		} else {
			buf_offset = node->node_offset & ~HAMMER_BUFMASK64;
			buffer = hammer_get_buffer(node->hmp, buf_offset,
						   HAMMER_BUFSIZE, 0, &error);
			if (buffer) {
				KKASSERT(error == 0);
				TAILQ_INSERT_TAIL(&buffer->clist,
						  node, entry);
				node->buffer = buffer;
			}
		}
		if (error)
			goto failed;
		node->ondisk = (void *)((char *)buffer->ondisk +
					(node->node_offset & HAMMER_BUFMASK));

		/*
		 * Check CRC.  NOTE: Neither flag is set and the CRC is not
		 * generated on new B-Tree nodes.
		 */
		if (isnew == 0 &&
		    (node->flags & HAMMER_NODE_CRCANY) == 0) {
			if (hammer_crc_test_btree(node->ondisk) == 0) {
				if (hammer_debug_debug & 0x0002)
					Debugger("CRC FAILED: B-TREE NODE");
				node->flags |= HAMMER_NODE_CRCBAD;
			} else {
				node->flags |= HAMMER_NODE_CRCGOOD;
			}
		}
	}
	if (node->flags & HAMMER_NODE_CRCBAD) {
		if (trans->flags & HAMMER_TRANSF_CRCDOM)
			error = EDOM;
		else
			error = EIO;
	}
failed:
	--node->loading;
	hammer_unlock(&node->lock);
	return (error);
}

/*
 * Safely reference a node, interlock against flushes via the IO subsystem.
 */
hammer_node_t
hammer_ref_node_safe(hammer_transaction_t trans, hammer_node_cache_t cache,
		     int *errorp)
{
	hammer_node_t node;

	node = cache->node;
	if (node != NULL) {
		hammer_ref(&node->lock);
		if (node->ondisk) {
			if (node->flags & HAMMER_NODE_CRCBAD) {
				if (trans->flags & HAMMER_TRANSF_CRCDOM)
					*errorp = EDOM;
				else
					*errorp = EIO;
			} else {
				*errorp = 0;
			}
		} else {
			*errorp = hammer_load_node(trans, node, 0);
		}
		if (*errorp) {
			hammer_rel_node(node);
			node = NULL;
		}
	} else {
		*errorp = ENOENT;
	}
	return(node);
}

/*
 * Release a hammer_node.  On the last release the node dereferences
 * its underlying buffer and may or may not be destroyed.
 */
void
hammer_rel_node(hammer_node_t node)
{
	hammer_buffer_t buffer;

	/*
	 * If this isn't the last ref just decrement the ref count and
	 * return.
	 */
	if (node->lock.refs > 1) {
		hammer_unref(&node->lock);
		return;
	}

	/*
	 * If there is no ondisk info or no buffer the node failed to load,
	 * remove the last reference and destroy the node.
	 */
	if (node->ondisk == NULL) {
		hammer_unref(&node->lock);
		hammer_flush_node(node);
		/* node is stale now */
		return;
	}

	/*
	 * Do not disassociate the node from the buffer if it represents
	 * a modified B-Tree node that still needs its crc to be generated.
	 */
	if (node->flags & HAMMER_NODE_NEEDSCRC)
		return;

	/*
	 * Do final cleanups and then either destroy the node or leave it
	 * passively cached.  The buffer reference is removed regardless.
	 */
	buffer = node->buffer;
	node->ondisk = NULL;

	if ((node->flags & HAMMER_NODE_FLUSH) == 0) {
		hammer_unref(&node->lock);
		hammer_rel_buffer(buffer, 0);
		return;
	}

	/*
	 * Destroy the node.
	 */
	hammer_unref(&node->lock);
	hammer_flush_node(node);
	/* node is stale */
	hammer_rel_buffer(buffer, 0);
}

/*
 * Free space on-media associated with a B-Tree node.
 */
void
hammer_delete_node(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
	node->flags |= HAMMER_NODE_DELETED;
	hammer_blockmap_free(trans, node->node_offset, sizeof(*node->ondisk));
}

/*
 * Passively cache a referenced hammer_node.  The caller may release
 * the node on return.
 */
void
hammer_cache_node(hammer_node_cache_t cache, hammer_node_t node)
{
	/*
	 * If the node doesn't exist, or is being deleted, don't cache it!
	 *
	 * The node can only ever be NULL in the I/O failure path.
	 */
	if (node == NULL || (node->flags & HAMMER_NODE_DELETED))
		return;
	if (cache->node == node)
		return;
	while (cache->node)
		hammer_uncache_node(cache);
	if (node->flags & HAMMER_NODE_DELETED)
		return;
	cache->node = node;
	TAILQ_INSERT_TAIL(&node->cache_list, cache, entry);
}

void
hammer_uncache_node(hammer_node_cache_t cache)
{
	hammer_node_t node;

	if ((node = cache->node) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
		if (TAILQ_EMPTY(&node->cache_list))
			hammer_flush_node(node);
	}
}

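/*
 * Illustrative sketch (editor's addition, not part of the original file,
 * never compiled): a passive cache holds a node pointer with zero
 * references.  The holder later revalidates it with
 * hammer_ref_node_safe(), which reloads the node or fails gracefully if
 * it was flushed in the interim.  The helper name is hypothetical.
 */
#if 0
static hammer_node_t
example_cached_node(hammer_transaction_t trans, hammer_node_cache_t cache,
		    hammer_node_t node, int *errorp)
{
	hammer_cache_node(cache, node);	/* passive, no ref held */
	/* ... time passes, node may be flushed ... */
	return(hammer_ref_node_safe(trans, cache, errorp));
}
#endif
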
/*
 * Remove a node's cache references and destroy the node if it has no
 * other references or backing store.
 */
void
hammer_flush_node(hammer_node_t node)
{
	hammer_node_cache_t cache;
	hammer_buffer_t buffer;
	hammer_mount_t hmp = node->hmp;

	while ((cache = TAILQ_FIRST(&node->cache_list)) != NULL) {
		TAILQ_REMOVE(&node->cache_list, cache, entry);
		cache->node = NULL;
	}
	if (node->lock.refs == 0 && node->ondisk == NULL) {
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);
		RB_REMOVE(hammer_nod_rb_tree, &node->hmp->rb_nods_root, node);
		if ((buffer = node->buffer) != NULL) {
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
		--hammer_count_nodes;
		kfree(node, hmp->m_misc);
	}
}

/*
 * Flush passively cached B-Tree nodes associated with this buffer.
 * This is only called when the buffer is about to be destroyed, so
 * none of the nodes should have any references.  The buffer is locked.
 *
 * We may be interlocked with the buffer.
 */
void
hammer_flush_buffer_nodes(hammer_buffer_t buffer)
{
	hammer_node_t node;

	while ((node = TAILQ_FIRST(&buffer->clist)) != NULL) {
		KKASSERT(node->ondisk == NULL);
		KKASSERT((node->flags & HAMMER_NODE_NEEDSCRC) == 0);

		if (node->lock.refs == 0) {
			hammer_ref(&node->lock);
			node->flags |= HAMMER_NODE_FLUSH;
			hammer_rel_node(node);
		} else {
			KKASSERT(node->loading != 0);
			KKASSERT(node->buffer != NULL);
			buffer = node->buffer;
			node->buffer = NULL;
			TAILQ_REMOVE(&buffer->clist, node, entry);
			/* buffer is unreferenced because ondisk is NULL */
		}
	}
}

/************************************************************************
 *				ALLOCATORS				*
 ************************************************************************/

/*
 * Allocate a B-Tree node.
 */
hammer_node_t
hammer_alloc_btree(hammer_transaction_t trans, hammer_off_t hint, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	hammer_node_t node = NULL;
	hammer_off_t node_offset;

	node_offset = hammer_blockmap_alloc(trans, HAMMER_ZONE_BTREE_INDEX,
					    sizeof(struct hammer_node_ondisk),
					    hint, errorp);
	if (*errorp == 0) {
		node = hammer_get_node(trans, node_offset, 1, errorp);
		hammer_modify_node_noundo(trans, node);
		bzero(node->ondisk, sizeof(*node->ondisk));
		hammer_modify_node_done(node);
	}
	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(node);
}

/*
 * Allocate data.  If the address of a data buffer is supplied then
 * any prior non-NULL *data_bufferp will be released and *data_bufferp
 * will be set to the related buffer.  The caller must release it when
 * finally done.  The initial *data_bufferp should be set to NULL by
 * the caller.
 *
 * The caller is responsible for making hammer_modify*() calls on the
 * *data_bufferp.
 */
void *
hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
		  u_int16_t rec_type, hammer_off_t *data_offsetp,
		  struct hammer_buffer **data_bufferp,
		  hammer_off_t hint, int *errorp)
{
	void *data;
	int zone;

	/*
	 * Allocate data
	 */
	if (data_len) {
		switch(rec_type) {
		case HAMMER_RECTYPE_INODE:
		case HAMMER_RECTYPE_DIRENTRY:
		case HAMMER_RECTYPE_EXT:
		case HAMMER_RECTYPE_FIX:
		case HAMMER_RECTYPE_PFS:
			zone = HAMMER_ZONE_META_INDEX;
			break;
		case HAMMER_RECTYPE_DATA:
		case HAMMER_RECTYPE_DB:
			if (data_len <= HAMMER_BUFSIZE / 2) {
				zone = HAMMER_ZONE_SMALL_DATA_INDEX;
			} else {
				data_len = (data_len + HAMMER_BUFMASK) &
					   ~HAMMER_BUFMASK;
				zone = HAMMER_ZONE_LARGE_DATA_INDEX;
			}
			break;
		default:
			panic("hammer_alloc_data: rec_type %04x unknown",
			      rec_type);
			zone = 0;	/* NOT REACHED */
			break;
		}
		*data_offsetp = hammer_blockmap_alloc(trans, zone, data_len,
						      hint, errorp);
	} else {
		*data_offsetp = 0;
	}
	if (*errorp == 0 && data_bufferp) {
		if (data_len) {
			data = hammer_bread_ext(trans->hmp, *data_offsetp,
						data_len, errorp, data_bufferp);
		} else {
			data = NULL;
		}
	} else {
		data = NULL;
	}
	return(data);
}

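/*
 * Illustrative sketch (editor's addition, not part of the original file,
 * never compiled): allocating record data.  *data_bufferp must start
 * NULL, is filled in by the allocator, and must be released by the
 * caller when finished.  The helper name is hypothetical.
 */
#if 0
static void
example_alloc_record_data(hammer_transaction_t trans, int32_t data_len,
			  hammer_off_t hint, int *errorp)
{
	struct hammer_buffer *data_buffer = NULL;	/* must start NULL */
	hammer_off_t data_offset;
	void *data;

	data = hammer_alloc_data(trans, data_len, HAMMER_RECTYPE_DATA,
				 &data_offset, &data_buffer, hint, errorp);
	if (data) {
		/* hammer_modify*() calls bracket the payload copy here */
	}
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
}
#endif
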
/*
 * Sync dirty buffers to the media and clean-up any loose ends.
 *
 * These functions do not start the flusher going, they simply
 * queue everything up to the flusher.
 */
static int hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

int
hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = waitfor;
	if (waitfor == MNT_WAIT) {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	} else {
		vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_ONEPASS|VMSC_NOWAIT,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	return(info.error);
}

/*
 * Filesystem sync.  If doing a synchronous sync make a second pass on
 * the vnodes in case any were already flushing during the first pass,
 * and activate the flusher twice (the second time brings the UNDO FIFO's
 * start position up to the end position after the first call).
 */
int
hammer_sync_hmp(hammer_mount_t hmp, int waitfor)
{
	struct hammer_sync_info info;

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	vmntvnodescan(hmp->mp, VMSC_GETVP|VMSC_NOWAIT,
		      hammer_sync_scan1, hammer_sync_scan2, &info);
	if (info.error == 0 && waitfor == MNT_WAIT) {
		info.waitfor = waitfor;
		vmntvnodescan(hmp->mp, VMSC_GETVP,
			      hammer_sync_scan1, hammer_sync_scan2, &info);
	}
	if (waitfor == MNT_WAIT) {
		hammer_flusher_sync(hmp);
		hammer_flusher_sync(hmp);
	} else {
		hammer_flusher_async(hmp, NULL);
		hammer_flusher_async(hmp, NULL);
	}
	return(info.error);
}

static int
hammer_sync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_inode *ip;

	ip = VTOI(vp);
	if (vp->v_type == VNON || ip == NULL ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(-1);
	}
	return(0);
}

static int
hammer_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer_sync_info *info = data;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(vp);
	if (vp->v_type == VNON || vp->v_type == VBAD ||
	    ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	     RB_EMPTY(&vp->v_rbdirty_tree))) {
		return(0);
	}
	error = VOP_FSYNC(vp, MNT_NOWAIT);
	if (error)
		info->error = error;
	return(0);
}