/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.63.2.3 2008/07/19 18:46:20 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/nlookup.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>

#include "hammer.h"
int hammer_debug_general;
int hammer_debug_io;
int hammer_debug_debug = 1;		/* medium-error panics */
int hammer_debug_inode;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;		/* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_cluster_enable = 1;		/* enable read clustering by default */
int hammer_count_fsyncs;
int hammer_count_inodes;
int hammer_count_iqueued;
int hammer_count_reclaiming;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int64_t hammer_count_extra_space_used;
int64_t hammer_stats_btree_lookups;
int64_t hammer_stats_btree_searches;
int64_t hammer_stats_btree_inserts;
int64_t hammer_stats_btree_deletes;
int64_t hammer_stats_btree_elements;
int64_t hammer_stats_btree_splits;
int64_t hammer_stats_btree_iterations;
int64_t hammer_stats_record_iterations;
int64_t hammer_stats_file_read;
int64_t hammer_stats_file_write;
int64_t hammer_stats_file_iopsr;
int64_t hammer_stats_file_iopsw;
int64_t hammer_stats_disk_read;
int64_t hammer_stats_disk_write;
int64_t hammer_stats_inode_flushes;
int64_t hammer_stats_commits;
int hammer_count_dirtybufspace;		/* global */
int hammer_count_refedbufs;		/* global */
int hammer_count_reservations;
int hammer_count_io_running_read;
int hammer_count_io_running_write;
int hammer_count_io_locked;
int hammer_limit_dirtybufspace;		/* per-mount */
int hammer_limit_recs;			/* as a whole XXX */
int hammer_limit_iqueued;		/* per-mount */
int hammer_verify_zone;
int hammer_verify_data = 1;
int hammer_write_mode;
int64_t hammer_contention_count;
int64_t hammer_zone_limit;
SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
	   &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
	   &hammer_debug_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
	   &hammer_debug_debug, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
	   &hammer_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
	   &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
	   &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
	   &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
	   &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
	   &hammer_debug_recover_faults, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, cluster_enable, CTLFLAG_RW,
	   &hammer_cluster_enable, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW,
	   &hammer_limit_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
	   &hammer_limit_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_iqueued, CTLFLAG_RW,
	   &hammer_limit_iqueued, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_fsyncs, CTLFLAG_RD,
	   &hammer_count_fsyncs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
	   &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
	   &hammer_count_iqueued, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaiming, CTLFLAG_RD,
	   &hammer_count_reclaiming, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
	   &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
	   &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
	   &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
	   &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
	   &hammer_count_nodes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, count_extra_space_used, CTLFLAG_RD,
	   &hammer_count_extra_space_used, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD,
	   &hammer_stats_btree_searches, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD,
	   &hammer_stats_btree_lookups, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD,
	   &hammer_stats_btree_inserts, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD,
	   &hammer_stats_btree_deletes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD,
	   &hammer_stats_btree_elements, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD,
	   &hammer_stats_btree_splits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
	   &hammer_stats_btree_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
	   &hammer_stats_record_iterations, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_read, CTLFLAG_RD,
	   &hammer_stats_file_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_write, CTLFLAG_RD,
	   &hammer_stats_file_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsr, CTLFLAG_RD,
	   &hammer_stats_file_iopsr, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsw, CTLFLAG_RD,
	   &hammer_stats_file_iopsw, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_read, CTLFLAG_RD,
	   &hammer_stats_disk_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_write, CTLFLAG_RD,
	   &hammer_stats_disk_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_inode_flushes, CTLFLAG_RD,
	   &hammer_stats_inode_flushes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_commits, CTLFLAG_RD,
	   &hammer_stats_commits, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD,
	   &hammer_count_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
	   &hammer_count_refedbufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
	   &hammer_count_reservations, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
	   &hammer_count_io_running_read, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
	   &hammer_count_io_locked, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
	   &hammer_count_io_running_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
	   &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
	   &hammer_contention_count, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW,
	   &hammer_verify_zone, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW,
	   &hammer_verify_data, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, write_mode, CTLFLAG_RW,
	   &hammer_write_mode, 0, "");
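/*
 * Note: everything above is exported under the vfs.hammer sysctl tree.
 * The CTLFLAG_RW knobs may be adjusted at run-time, e.g.
 * "sysctl vfs.hammer.cluster_enable=0", while the CTLFLAG_RD entries are
 * read-only statistics counters intended for monitoring.
 */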
KTR_INFO_MASTER(hammer);

static void	hammer_free_hmp(struct mount *mp);

static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_sync(struct mount *mp, int waitfor);
static int	hammer_vfs_vget(struct mount *mp, ino_t ino,
				struct vnode **vpp);
static int	hammer_vfs_init(struct vfsconf *conf);
static int	hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp,
				struct vnode **vpp);
static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);
static struct vfsops hammer_vfsops = {
	.vfs_mount	= hammer_vfs_mount,
	.vfs_unmount	= hammer_vfs_unmount,
	.vfs_root	= hammer_vfs_root,
	.vfs_statfs	= hammer_vfs_statfs,
	.vfs_statvfs	= hammer_vfs_statvfs,
	.vfs_sync	= hammer_vfs_sync,
	.vfs_vget	= hammer_vfs_vget,
	.vfs_init	= hammer_vfs_init,
	.vfs_vptofh	= hammer_vfs_vptofh,
	.vfs_fhtovp	= hammer_vfs_fhtovp,
	.vfs_checkexp	= hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "hammer-mount", "hammer mount");

VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);
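/*
 * VFS_SET() registers hammer_vfsops under the filesystem name "hammer",
 * making it mountable with "mount -t hammer" (or mount_hammer(8)), and
 * MODULE_VERSION() stamps the module so the kernel can track it.
 */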
static int
hammer_vfs_init(struct vfsconf *conf)
{
	int n;

	/*
	 * Establish default limits the first time the module initializes,
	 * scaled from the system's buffer and vnode resources.
	 */
	if (hammer_limit_recs == 0) {
		hammer_limit_recs = nbuf * 25;
		n = kmalloc_limit(M_HAMMER) / 512;
		if (hammer_limit_recs > n)
			hammer_limit_recs = n;
	}
	if (hammer_limit_dirtybufspace == 0) {
		hammer_limit_dirtybufspace = hidirtybufspace / 2;
		if (hammer_limit_dirtybufspace < 100)
			hammer_limit_dirtybufspace = 100;
	}
	if (hammer_limit_iqueued == 0)
		hammer_limit_iqueued = desiredvnodes / 5;
	return (0);
}
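/*
 * Mount or update-mount a HAMMER filesystem.  The volume list is copied in
 * from userspace, each volume is installed, UNDO recovery is run against
 * the root volume, and the root directory (obj_id 1) is looked up to
 * validate the mount.  An update mount only handles read-only <->
 * read-write transitions and flag changes.
 */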
static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;
	int master_id;

	if ((error = copyin(data, &info, sizeof(info))) != 0)
		return (error);
	/*
	 * updating or new mount
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		hmp = NULL;
	}

	if (info.nvolumes <= 0 || info.nvolumes >= 32768)
		return (EINVAL);

	/*
	 * master-id validation.  The master id may not be changed by a
	 * mount update.
	 */
	if (info.hflags & HMNT_MASTERID) {
		if (hmp && hmp->master_id != info.master_id) {
			kprintf("hammer: cannot change master id "
				"with mount update\n");
			return (EINVAL);
		}
		master_id = info.master_id;
		if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
			return (EINVAL);
	} else {
		if (hmp)
			master_id = hmp->master_id;
		else
			master_id = 0;
	}
	/*
	 * Internal mount data structure
	 */
	if (hmp == NULL) {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;
		hmp->namekey_iterator = mycpu->gd_time_seconds;
		/*TAILQ_INIT(&hmp->recycle_list);*/

		hmp->root_btree_beg.localization = 0x00000000U;
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;

		hmp->root_btree_end.localization = 0xFFFFFFFFU;
		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;	/* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;
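		/*
		 * Note: root_btree_beg/end initialized above bracket the
		 * entire B-Tree key space (minimum to maximum localization,
		 * obj_id and key), so they can serve as catch-all bounds
		 * for scans of the root B-Tree.
		 */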
		hmp->krate.freq = 1;	/* maximum reporting rate (hz) */
		hmp->krate.count = -16;	/* initial burst */

		hmp->sync_lock.refs = 1;
		hmp->free_lock.refs = 1;
		hmp->undo_lock.refs = 1;
		hmp->blkmap_lock.refs = 1;

		TAILQ_INIT(&hmp->delay_list);
		TAILQ_INIT(&hmp->flush_group_list);
		TAILQ_INIT(&hmp->objid_cache_list);
		TAILQ_INIT(&hmp->undo_lru_list);
		TAILQ_INIT(&hmp->reclaim_list);
	}
	hmp->hflags &= ~HMNT_USERFLAGS;
	hmp->hflags |= info.hflags & HMNT_USERFLAGS;

	hmp->master_id = master_id;
	if (info.asof) {
		/* an as-of (historical) mount is forced read-only */
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}
	/*
	 * Re-open read-write if originally read-only, or vice-versa.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		error = 0;
		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			kprintf("HAMMER read-only -> read-write\n");
			hmp->ronly = 0;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
			rootvol = hammer_get_root_volume(hmp, &error);
			if (rootvol) {
				hammer_recover_flush_buffers(hmp, rootvol, 1);
				bcopy(rootvol->ondisk->vol0_blockmap,
				      hmp->blockmap,
				      sizeof(hmp->blockmap));
				hammer_rel_volume(rootvol, 0);
			}
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			/* kernel clears MNT_RDONLY */
		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER read-write -> read-only\n");
			hmp->ronly = 1;	/* messy */
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
		}
		return (error);
	}
	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_nods_root);
	RB_INIT(&hmp->rb_undo_root);
	RB_INIT(&hmp->rb_resv_root);
	RB_INIT(&hmp->rb_bufs_root);
	RB_INIT(&hmp->rb_pfsm_root);

	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	TAILQ_INIT(&hmp->volu_list);
	TAILQ_INIT(&hmp->undo_list);
	TAILQ_INIT(&hmp->data_list);
	TAILQ_INIT(&hmp->meta_list);
	TAILQ_INIT(&hmp->lose_list);
	/*
	 * Load the volumes named in the mount info.
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	for (i = 0; i < info.nvolumes; ++i) {
		error = copyin(&info.volumes[i], &upath, sizeof(char *));
		if (error == 0)
			error = copyinstr(upath, path, MAXPATHLEN, NULL);
		if (error == 0)
			error = hammer_install_volume(hmp, path);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);

	/*
	 * Make sure we found a root volume
	 */
	if (error == 0 && hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
	}

	/*
	 * Check that all required volumes are available
	 */
	if (error == 0 && hammer_mountcheck_volumes(hmp)) {
		kprintf("hammer_mount: Missing volumes, cannot mount!\n");
		error = EINVAL;
	}
	if (error) {
		hammer_free_hmp(mp);
		return (error);
	}
	/*
	 * No errors, setup enough of the mount point so we can lookup the
	 * root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;

	/*
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its VOP_BMAP call.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);
	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto failed;

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then re-copy it again after recovery is complete.
	 *
	 * If this is a read-only mount the UNDO information is retained
	 * in memory in the form of dirty buffer cache buffers, and not
	 * written back to the media.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	/*
	 * The undo_rec_limit limits the size of flush groups to avoid
	 * blowing out the UNDO FIFO.  This calculation is typically in
	 * the tens of thousands and matters primarily when small
	 * HAMMER filesystems are created.
	 */
	hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
	if (hammer_debug_general & 0x0001)
		kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit);

	error = hammer_recover(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}
	/*
	 * Finish setup now that we have a good root volume.
	 *
	 * The top 16 bits of fsid.val[1] hold a PFS id.
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
	mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF;

	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
				     sizeof(mp->mnt_vstat.f_fsid_uuid));

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 *
	 * Recopy after recovery.  This also has the side effect of
	 * setting our cached undo FIFO's first_offset, which serves to
	 * placemark the FIFO start for the NEXT flush cycle while the
	 * on-disk first_offset represents the LAST flush cycle.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;

	hammer_flusher_create(hmp);
	/*
	 * Locate the root directory using the root cluster's B-Tree as a
	 * starting point.  The root directory uses an obj_id of 1.
	 *
	 * FUTURE: Leave the root directory cached referenced but unlocked
	 * in hmp->rootvp (need to flush it on unmount).
	 */
	error = hammer_vfs_vget(mp, 1, &rootvp);
	if (error)
		goto done;
	vput(rootvp);
	/*vn_unlock(hmp->rootvp);*/

done:
	hammer_rel_volume(rootvol, 0);
failed:
	/*
	 * Cleanup and return.
	 */
	if (error)
		hammer_free_hmp(mp);
	return (error);
}
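/*
 * Unmount a HAMMER filesystem: flush any remaining vnodes (honoring
 * MNT_FORCE) and then tear down the internal mount structure.
 */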
static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int flags;
	int error;

	/*
	 * Clean out the vnodes
	 */
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if ((error = vflush(mp, 0, flags)) != 0)
		return (error);

	/*
	 * Clean up the internal mount structure and related entities.  This
	 * may issue I/O.
	 */
	hammer_free_hmp(mp);
	return (0);
}
/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 */
static void
hammer_free_hmp(struct mount *mp)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_flush_group_t flg;
	int count;

	/*
	 * Flush anything dirty.  This won't even run if the
	 * filesystem errored-out.
	 */
	count = 0;
	while (hammer_flusher_haswork(hmp)) {
		hammer_flusher_sync(hmp);
		++count;
		if (count >= 5) {
			if (count == 5)
				kprintf("HAMMER: umount flushing.");
			else
				kprintf(".");
			tsleep(hmp, 0, "hmrufl", hz);
		}
		if (count == 30) {
			kprintf("giving up\n");
			break;
		}
	}
	if (count >= 5 && count < 30)
		kprintf("\n");

	/*
	 * If the mount had a critical error we have to destroy any
	 * remaining inodes before we can finish cleaning up the flusher.
	 */
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
			hammer_destroy_inode_callback, NULL);
	}

	/*
	 * There shouldn't be any inodes left now and any left over
	 * flush groups should now be empty.
	 */
	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
		TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
		KKASSERT(TAILQ_EMPTY(&flg->flush_list));
		if (flg->refs) {
			kprintf("HAMMER: Warning, flush_group %p was "
				"not empty on umount!\n", flg);
		}
		kfree(flg, M_HAMMER);
	}

	/*
	 * We can finally destroy the flusher
	 */
	hammer_flusher_destroy(hmp);

	/*
	 * Unload buffers and then volumes
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, NULL);
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);

	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	hammer_destroy_objid_cache(hmp);
	kfree(hmp, M_HAMMER);
}
/*
 * Report critical errors.  ip may be NULL.
 */
void
hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
		      int error, const char *msg)
{
	hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR;
	krateprintf(&hmp->krate,
		    "HAMMER(%s): Critical error inode=%lld %s\n",
		    hmp->mp->mnt_stat.f_mntfromname,
		    (ip ? ip->obj_id : -1), msg);
	if (hmp->ronly == 0) {
		hmp->ronly = 2;		/* special errored read-only mode */
		hmp->mp->mnt_flag |= MNT_RDONLY;
		kprintf("HAMMER(%s): Forcing read-only mode\n",
			hmp->mp->mnt_stat.f_mntfromname);
	}
	hmp->error = error;
}
/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
static int
hammer_vfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	struct hammer_inode *ip;
	int error;

	hammer_simple_transaction(&trans, hmp);

	/*
	 * Lookup the requested HAMMER inode.  The structure must be
	 * left unlocked while we manipulate the related vnode to avoid
	 * a deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, ino,
			      hmp->asof, HAMMER_DEF_LOCALIZATION,
			      0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		hammer_done_transaction(&trans);
		return (error);
	}
	error = hammer_get_vnode(ip, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}
/*
 * Return the root vnode for the filesystem.
 *
 * HAMMER stores the root vnode in the hammer_mount structure so
 * getting it is easy.
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int error;

	error = hammer_vfs_vget(mp, 1, vpp);
	return (error);
}
static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;

	volume = hammer_get_root_volume(hmp, &error);
	if (error)
		return (error);
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_stat.f_bfree = bfree / HAMMER_BUFSIZE;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
	if (mp->mnt_stat.f_files < 0)
		mp->mnt_stat.f_files = 0;

	*sbp = mp->mnt_stat;
	return (0);
}
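/*
 * Note: free space is tracked on-disk in 8MB big-blocks
 * (HAMMER_LARGEBLOCK_SIZE) and converted above to f_bsize-sized blocks
 * (HAMMER_BUFSIZE, 16KB) for statfs, so each free big-block accounts for
 * 512 reported blocks.  hammer_vfs_statvfs() below performs the same
 * conversion for the vstat structure.
 */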
static int
hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;

	volume = hammer_get_root_volume(hmp, &error);
	if (error)
		return (error);
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_vstat.f_bfree = bfree / HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
	if (mp->mnt_vstat.f_files < 0)
		mp->mnt_vstat.f_files = 0;
	*sbp = mp->mnt_vstat;
	return (0);
}
/*
 * Sync the filesystem.  Currently we have to run it twice, the second
 * one will advance the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
 *
 * We do not sync the filesystem if we are called from a panic.  If we did
 * we might end up blowing up a sync that was already in progress.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int error;

	if (panicstr == NULL) {
		error = hammer_sync_hmp(hmp, waitfor);
		if (error == 0)
			error = hammer_sync_hmp(hmp, waitfor);
	} else {
		error = EIO;
	}
	return (error);
}
/*
 * Convert a vnode to a file handle.
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer_inode_t ip;

	KKASSERT(MAXFIDSZ >= 16);

	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_ext = ip->obj_localization >> 16;
	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
	return (0);
}
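/*
 * The resulting file handle packs the 64-bit obj_id into fid_data[0..7],
 * the 64-bit obj_asof (as-of TID) into fid_data[8..15], and the upper 16
 * bits of the localization field into fid_ext.  hammer_vfs_fhtovp() below
 * unpacks the same layout to re-acquire the inode, e.g. for NFS file
 * handles.
 */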
/*
 * Convert a file handle back to a vnode.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_inode_info info;
	int error;
	u_int32_t localization;

	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));
	localization = (u_int32_t)fhp->fid_ext << 16;

	hammer_simple_transaction(&trans, (void *)mp->mnt_data);

	/*
	 * Get/allocate the hammer_inode structure.  The structure must be
	 * unlocked while we manipulate the related vnode to avoid a
	 * deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, info.obj_id,
			      info.obj_asof, localization, 0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		hammer_done_transaction(&trans);
		return (error);
	}
	error = hammer_get_vnode(ip, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}
/*
 * Check whether an NFS client is permitted to access this filesystem,
 * using the mount's export list.
 */
static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		    int *exflagsp, struct ucred **credanonp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct netcred *np;
	int error;

	np = vfs_export_lookup(mp, &hmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	return (error);
}
/*
 * Set NFS export options for the mount.
 */
int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int error;

	switch (op) {
	case MOUNTCTL_SET_EXPORT:
		error = vfs_export(mp, &hmp->export, export);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}