/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.74 2008/11/13 02:18:43 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/nlookup.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/buf2.h>
#include "hammer.h"

/*
 * NOTE!  Global statistics may not be MPSAFE so HAMMER never uses them
 *	  in conditionals.
 */
int hammer_supported_version = HAMMER_VOL_VERSION_DEFAULT;
int hammer_debug_io;
int hammer_debug_general;
int hammer_debug_debug = 1;		/* medium-error panics */
int hammer_debug_inode;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;		/* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_debug_critical;		/* non-zero enter debugger on error */
int hammer_cluster_enable = 1;		/* enable read clustering by default */
int hammer_count_fsyncs;
int hammer_count_inodes;
int hammer_count_iqueued;
int hammer_count_reclaiming;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int64_t hammer_count_extra_space_used;
int64_t hammer_stats_btree_lookups;
int64_t hammer_stats_btree_searches;
int64_t hammer_stats_btree_inserts;
int64_t hammer_stats_btree_deletes;
int64_t hammer_stats_btree_elements;
int64_t hammer_stats_btree_splits;
int64_t hammer_stats_btree_iterations;
int64_t hammer_stats_btree_root_iterations;
int64_t hammer_stats_record_iterations;

int64_t hammer_stats_file_read;
int64_t hammer_stats_file_write;
int64_t hammer_stats_file_iopsr;
int64_t hammer_stats_file_iopsw;
int64_t hammer_stats_disk_read;
int64_t hammer_stats_disk_write;
int64_t hammer_stats_inode_flushes;
int64_t hammer_stats_commits;
int64_t hammer_stats_undo;

int hammer_count_dirtybufspace;		/* global */
int hammer_count_refedbufs;		/* global */
int hammer_count_reservations;
int hammer_count_io_running_read;
int hammer_count_io_running_write;
int hammer_count_io_locked;
int hammer_limit_dirtybufspace;		/* per-mount */
int hammer_limit_recs;			/* as a whole XXX */
int hammer_limit_inode_recs = 1024;	/* per inode */
int hammer_limit_reclaim = HAMMER_RECLAIM_WAIT;
int hammer_autoflush = 2000;		/* auto flush */
int hammer_bio_count;
int hammer_verify_zone;
int hammer_verify_data = 1;
int hammer_write_mode;
int hammer_yield_check = 16;
int hammer_fsync_mode;
int64_t hammer_contention_count;
int64_t hammer_zone_limit;

SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
SYSCTL_INT(_vfs_hammer, OID_AUTO, supported_version, CTLFLAG_RD,
	   &hammer_supported_version, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
	   &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
	   &hammer_debug_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
	   &hammer_debug_debug, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
	   &hammer_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
	   &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
	   &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
	   &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
	   &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
	   &hammer_debug_recover_faults, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_critical, CTLFLAG_RW,
	   &hammer_debug_critical, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, cluster_enable, CTLFLAG_RW,
	   &hammer_cluster_enable, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW,
	   &hammer_limit_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
	   &hammer_limit_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_inode_recs, CTLFLAG_RW,
	   &hammer_limit_inode_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_reclaim, CTLFLAG_RW,
	   &hammer_limit_reclaim, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_fsyncs, CTLFLAG_RD,
	   &hammer_count_fsyncs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
	   &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
	   &hammer_count_iqueued, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaiming, CTLFLAG_RD,
	   &hammer_count_reclaiming, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
	   &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
	   &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
	   &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
	   &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
	   &hammer_count_nodes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, count_extra_space_used, CTLFLAG_RD,
	   &hammer_count_extra_space_used, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD,
	   &hammer_stats_btree_searches, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD,
	   &hammer_stats_btree_lookups, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD,
	   &hammer_stats_btree_inserts, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD,
	   &hammer_stats_btree_deletes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD,
	   &hammer_stats_btree_elements, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD,
	   &hammer_stats_btree_splits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
	   &hammer_stats_btree_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_root_iterations, CTLFLAG_RD,
	   &hammer_stats_btree_root_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
	   &hammer_stats_record_iterations, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_read, CTLFLAG_RD,
	   &hammer_stats_file_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_write, CTLFLAG_RD,
	   &hammer_stats_file_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsr, CTLFLAG_RD,
	   &hammer_stats_file_iopsr, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsw, CTLFLAG_RD,
	   &hammer_stats_file_iopsw, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_read, CTLFLAG_RD,
	   &hammer_stats_disk_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_write, CTLFLAG_RD,
	   &hammer_stats_disk_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_inode_flushes, CTLFLAG_RD,
	   &hammer_stats_inode_flushes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_commits, CTLFLAG_RD,
	   &hammer_stats_commits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_undo, CTLFLAG_RD,
	   &hammer_stats_undo, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD,
	   &hammer_count_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
	   &hammer_count_refedbufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
	   &hammer_count_reservations, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
	   &hammer_count_io_running_read, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
	   &hammer_count_io_locked, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
	   &hammer_count_io_running_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
	   &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
	   &hammer_contention_count, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, autoflush, CTLFLAG_RW,
	   &hammer_autoflush, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW,
	   &hammer_verify_zone, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW,
	   &hammer_verify_data, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, write_mode, CTLFLAG_RW,
	   &hammer_write_mode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, yield_check, CTLFLAG_RW,
	   &hammer_yield_check, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, fsync_mode, CTLFLAG_RW,
	   &hammer_fsync_mode, 0, "");
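
/*
 * Illustrative usage note (not part of the original source): the knobs and
 * counters declared above appear under the vfs.hammer sysctl tree, e.g.
 *
 *	sysctl vfs.hammer.stats_commits
 *	sysctl vfs.hammer.cluster_enable=0
 *
 * CTLFLAG_RD entries are read-only counters; CTLFLAG_RW entries may be set
 * at run time or from /etc/sysctl.conf.
 */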

KTR_INFO_MASTER(hammer);

/*
 * VFS ABI
 */
static void	hammer_free_hmp(struct mount *mp);

static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_sync(struct mount *mp, int waitfor);
static int	hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
				ino_t ino, struct vnode **vpp);
static int	hammer_vfs_init(struct vfsconf *conf);
static int	hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);

static struct vfsops hammer_vfsops = {
	.vfs_mount	= hammer_vfs_mount,
	.vfs_unmount	= hammer_vfs_unmount,
	.vfs_root	= hammer_vfs_root,
	.vfs_statfs	= hammer_vfs_statfs,
	.vfs_statvfs	= hammer_vfs_statvfs,
	.vfs_sync	= hammer_vfs_sync,
	.vfs_vget	= hammer_vfs_vget,
	.vfs_init	= hammer_vfs_init,
	.vfs_vptofh	= hammer_vfs_vptofh,
	.vfs_fhtovp	= hammer_vfs_fhtovp,
	.vfs_checkexp	= hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "HAMMER-mount", "");

VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);

static int
hammer_vfs_init(struct vfsconf *conf)
{
	int n;

	if (hammer_limit_recs == 0) {
		hammer_limit_recs = nbuf * 25;
		n = kmalloc_limit(M_HAMMER) / 512;
		if (hammer_limit_recs > n)
			hammer_limit_recs = n;
	}
	if (hammer_limit_dirtybufspace == 0) {
		hammer_limit_dirtybufspace = hidirtybufspace / 2;
		if (hammer_limit_dirtybufspace < 100)
			hammer_limit_dirtybufspace = 100;
	}
	return(0);
}
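
/*
 * Illustrative arithmetic (not part of the original source): with, say,
 * nbuf = 10000 buffer-cache buffers the record limit starts at 250000,
 * but it is then clamped so in-memory records cannot exceed
 * kmalloc_limit(M_HAMMER) at an assumed ~512 bytes per record.  The dirty
 * buffer limit defaults to half of hidirtybufspace with a small floor.
 */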

static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	struct vnode *devvp = NULL;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;
	int master_id;
	int maxinodes;
	char *next_volume_ptr = NULL;

	/*
	 * Accept hammer_mount_info.  mntpt is NULL for root mounts at boot.
	 */
	if (mntpt == NULL) {
		bzero(&info, sizeof(info));
		info.asof = 0;
		info.hflags = 0;
		info.nvolumes = 1;

		next_volume_ptr = mp->mnt_stat.f_mntfromname;
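		/*
		 * Illustrative note (not part of the original source):
		 * for a root mount f_mntfromname holds the ':'-separated
		 * volume list supplied for the root device, e.g.
		 * "da0s1a:da1s1a"; names without a leading '/' are
		 * prefixed with "/dev/" further below.
		 */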

		/* Count number of volumes separated by ':' */
		for (char *p = next_volume_ptr; *p != '\0'; ++p) {
			if (*p == ':') {
				++info.nvolumes;
			}
		}

		mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
	} else {
		if ((error = copyin(data, &info, sizeof(info))) != 0)
			return (error);
	}

	/*
	 * updating or new mount
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		if (info.nvolumes <= 0 || info.nvolumes >= 32768)
			return (EINVAL);
		hmp = NULL;
	}

	/*
	 * master-id validation.  The master id may not be changed by a
	 * mount update.
	 */
	if (info.hflags & HMNT_MASTERID) {
		if (hmp && hmp->master_id != info.master_id) {
			kprintf("hammer: cannot change master id "
				"with mount update\n");
			return(EINVAL);
		}
		master_id = info.master_id;
		if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
			return (EINVAL);
	} else {
		if (hmp)
			master_id = hmp->master_id;
		else
			master_id = 0;
	}

	/*
	 * Internal mount data structure
	 */
	if (hmp == NULL) {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;
		/*TAILQ_INIT(&hmp->recycle_list);*/

		/*
		 * Make sure kmalloc type limits are set appropriately.
		 * If root increases the vnode limit you may have to do a
		 * dummy remount to adjust the HAMMER inode limit.
		 */
		kmalloc_create(&hmp->m_misc, "HAMMER-others");
		kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");

		maxinodes = desiredvnodes + desiredvnodes / 5 +
			    hammer_limit_reclaim * 2;
		kmalloc_raise_limit(hmp->m_inodes,
				    maxinodes * sizeof(struct hammer_inode));

		hmp->root_btree_beg.localization = 0x00000000U;
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;

		hmp->root_btree_end.localization = 0xFFFFFFFFU;
		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;   /* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;
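
		/*
		 * Illustrative note (not part of the original source):
		 * root_btree_beg and root_btree_end are the minimum and
		 * maximum possible B-Tree keys, so scans bounded by them
		 * cover the entire filesystem B-Tree.
		 */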

		hmp->krate.freq = 1;	/* maximum reporting rate (hz) */
		hmp->krate.count = -16;	/* initial burst */

		hmp->sync_lock.refs = 1;
		hmp->free_lock.refs = 1;
		hmp->undo_lock.refs = 1;
		hmp->blkmap_lock.refs = 1;
		hmp->snapshot_lock.refs = 1;
		hmp->volume_lock.refs = 1;

		TAILQ_INIT(&hmp->delay_list);
		TAILQ_INIT(&hmp->flush_group_list);
		TAILQ_INIT(&hmp->objid_cache_list);
		TAILQ_INIT(&hmp->undo_lru_list);
		TAILQ_INIT(&hmp->reclaim_list);
	}
	hmp->hflags &= ~HMNT_USERFLAGS;
	hmp->hflags |= info.hflags & HMNT_USERFLAGS;

	hmp->master_id = master_id;

	if (info.asof) {
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	hmp->volume_to_remove = -1;

	/*
	 * Re-open read-write if originally read-only, or vice-versa.
	 *
	 * When going from read-only to read-write execute the stage2
	 * recovery if it has not already been run.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		error = 0;
		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			kprintf("HAMMER read-only -> read-write\n");
			hmp->ronly = 0;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
			rootvol = hammer_get_root_volume(hmp, &error);
			if (rootvol) {
				hammer_recover_flush_buffers(hmp, rootvol, 1);
				error = hammer_recover_stage2(hmp, rootvol);
				bcopy(rootvol->ondisk->vol0_blockmap,
				      hmp->blockmap,
				      sizeof(hmp->blockmap));
				hammer_rel_volume(rootvol, 0);
			}
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			/* kernel clears MNT_RDONLY */
		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER read-write -> read-only\n");
			hmp->ronly = 1;	/* messy */
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			hmp->ronly = 0;
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hmp->ronly = 1;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
		}
		return(error);
	}

	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_nods_root);
	RB_INIT(&hmp->rb_undo_root);
	RB_INIT(&hmp->rb_resv_root);
	RB_INIT(&hmp->rb_bufs_root);
	RB_INIT(&hmp->rb_pfsm_root);

	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	TAILQ_INIT(&hmp->volu_list);
	TAILQ_INIT(&hmp->undo_list);
	TAILQ_INIT(&hmp->data_list);
	TAILQ_INIT(&hmp->meta_list);
	TAILQ_INIT(&hmp->lose_list);

	/*
	 * Load volumes
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	hmp->nvolumes = -1;
	for (i = 0; i < info.nvolumes; ++i) {
		if (mntpt == NULL) {
			/*
			 * Root mount.
			 */
			KKASSERT(next_volume_ptr != NULL);
			strcpy(path, "");
			if (*next_volume_ptr != '/') {
				/* relative path */
				strcpy(path, "/dev/");
			}
			int k;
			for (k = strlen(path); k < MAXPATHLEN-1; ++k) {
				if (*next_volume_ptr == '\0') {
					break;
				} else if (*next_volume_ptr == ':') {
					++next_volume_ptr;
					break;
				} else {
					path[k] = *next_volume_ptr;
					++next_volume_ptr;
				}
			}
			path[k] = '\0';

			error = 0;
			cdev_t dev = kgetdiskbyname(path);
			error = bdevvp(dev, &devvp);
			if (error) {
				kprintf("hammer_mountroot: can't find devvp\n");
			}
		} else {
			error = copyin(&info.volumes[i], &upath,
				       sizeof(char *));
			if (error == 0)
				error = copyinstr(upath, path,
						  MAXPATHLEN, NULL);
		}
		if (error == 0)
			error = hammer_install_volume(hmp, path, devvp);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);

	/*
	 * Make sure we found a root volume
	 */
	if (error == 0 && hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
	}

	/*
	 * Check that all required volumes are available
	 */
	if (error == 0 && hammer_mountcheck_volumes(hmp)) {
		kprintf("hammer_mount: Missing volumes, cannot mount!\n");
		error = EINVAL;
	}

	if (error) {
		hammer_free_hmp(mp);
		return (error);
	}

	/*
	 * No errors, setup enough of the mount point so we can lookup the
	 * root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;

	/*
	 * MPSAFE code.  Note that VOPs and VFSops which are not MPSAFE
	 * will acquire a per-mount token prior to entry and release it
	 * on return, so even if we do not specify it we no longer get
	 * the BGL regardless of how we are flagged.
	 */
	mp->mnt_kern_flag |= MNTK_RD_MPSAFE | MNTK_GA_MPSAFE |
			     MNTK_IN_MPSAFE;

	/*
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its VOP_BMAP call.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto failed;

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then re-copy it again after recovery is complete.
	 *
	 * If this is a read-only mount the UNDO information is retained
	 * in memory in the form of dirty buffer cache buffers, and not
	 * written back to the media.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	/*
	 * Check filesystem version
	 */
	hmp->version = rootvol->ondisk->vol_version;
	if (hmp->version < HAMMER_VOL_VERSION_MIN ||
	    hmp->version > HAMMER_VOL_VERSION_MAX) {
		kprintf("HAMMER: mount unsupported fs version %d\n",
			hmp->version);
		error = ERANGE;
		goto done;
	}

	/*
	 * The undo_rec_limit limits the size of flush groups to avoid
	 * blowing out the UNDO FIFO.  This calculation is typically in
	 * the tens of thousands and matters primarily when small HAMMER
	 * filesystems are created.
	 */
	hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
	if (hammer_debug_general & 0x0001)
		kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit);
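
	/*
	 * Illustrative arithmetic (not part of the original source),
	 * assuming hammer_undo_max() reports the UNDO FIFO size in bytes:
	 * with a 128MB FIFO, undo_rec_limit = 134217728 / 8192 + 100
	 * = 16484 records per flush group.
	 */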

	/*
	 * NOTE: Recover stage1 not only handles meta-data recovery, it
	 *	 also sets hmp->undo_seqno for HAMMER VERSION 4+ filesystems.
	 */
	error = hammer_recover_stage1(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}

	/*
	 * Finish setup now that we have a good root volume.
	 *
	 * The top 16 bits of fsid.val[1] are reserved for a PFS id.
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
	mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF;

	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
				     sizeof(mp->mnt_vstat.f_fsid_uuid));
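
	/*
	 * Illustrative note (not part of the original source): f_fsid is
	 * derived by CRCing the two 8-byte halves of the volume's fsid
	 * UUID; masking val[1] with 0x0000FFFF leaves its upper 16 bits
	 * free so a PFS id can be folded in when pseudo-filesystems are
	 * exported.
	 */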

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 *
	 * Recopy after recovery.  This also has the side effect of
	 * setting our cached undo FIFO's first_offset, which serves to
	 * placemark the FIFO start for the NEXT flush cycle while the
	 * on-disk first_offset represents the LAST flush cycle.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	hmp->flush_tid1 = hmp->next_tid;
	hmp->flush_tid2 = hmp->next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;

	hammer_flusher_create(hmp);

	/*
	 * Locate the root directory using the root cluster's B-Tree as a
	 * starting point.  The root directory uses an obj_id of 1.
	 *
	 * FUTURE: Leave the root directory cached referenced but unlocked
	 * in hmp->rootvp (need to flush it on unmount).
	 */
	error = hammer_vfs_vget(mp, NULL, 1, &rootvp);
	if (error)
		goto done;
	vput(rootvp);
	/*vn_unlock(hmp->rootvp);*/
	if (hmp->ronly == 0)
		error = hammer_recover_stage2(hmp, rootvol);

done:
	hammer_rel_volume(rootvol, 0);
failed:
	/*
	 * Cleanup and return.
	 */
	if (error)
		hammer_free_hmp(mp);
	return (error);
}

static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
#if 0
	struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
	int flags;
	int error;

	/*
	 * Clean out the vnodes
	 */
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if ((error = vflush(mp, 0, flags)) != 0)
		return (error);

	/*
	 * Clean up the internal mount structure and related entities.  This
	 * may issue I/O.
	 */
	hammer_free_hmp(mp);
	return(0);
}

/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 */
static void
hammer_free_hmp(struct mount *mp)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_flush_group_t flg;
	int count;
	int dummy;

	/*
	 * Flush anything dirty.  This won't even run if the
	 * filesystem errored-out.
	 */
	count = 0;
	while (hammer_flusher_haswork(hmp)) {
		hammer_flusher_sync(hmp);
		++count;
		if (count >= 5) {
			if (count == 5)
				kprintf("HAMMER: umount flushing.");
			else
				kprintf(".");
			tsleep(&dummy, 0, "hmrufl", hz);
		}
		if (count == 30) {
			kprintf("giving up\n");
			break;
		}
	}
	if (count >= 5 && count < 30)
		kprintf("\n");

	/*
	 * If the mount had a critical error we have to destroy any
	 * remaining inodes before we can finish cleaning up the flusher.
	 */
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
			hammer_destroy_inode_callback, NULL);
	}

	/*
	 * There shouldn't be any inodes left now and any left over
	 * flush groups should now be empty.
	 */
	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
		TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
		KKASSERT(RB_EMPTY(&flg->flush_tree));
		if (flg->refs) {
			kprintf("HAMMER: Warning, flush_group %p was "
				"not empty on umount!\n", flg);
		}
		kfree(flg, hmp->m_misc);
	}

	/*
	 * We can finally destroy the flusher
	 */
	hammer_flusher_destroy(hmp);

	/*
	 * We may have held recovered buffers due to a read-only mount.
	 * These must be discarded.
	 */
	if (hmp->ronly)
		hammer_recover_flush_buffers(hmp, NULL, -1);

	/*
	 * Unload buffers and then volumes
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, NULL);
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);

	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	hmp->mp = NULL;
	hammer_destroy_objid_cache(hmp);
	kmalloc_destroy(&hmp->m_misc);
	kmalloc_destroy(&hmp->m_inodes);
	kfree(hmp, M_HAMMER);
}

/*
 * Report critical errors.  ip may be NULL.
 */
void
hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
		      int error, const char *msg)
{
	hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR;

	krateprintf(&hmp->krate,
		    "HAMMER(%s): Critical error inode=%jd error=%d %s\n",
		    hmp->mp->mnt_stat.f_mntfromname,
		    (intmax_t)(ip ? ip->obj_id : -1),
		    error, msg);

	if (hmp->ronly == 0) {
		hmp->ronly = 2;		/* special errored read-only mode */
		hmp->mp->mnt_flag |= MNT_RDONLY;
		kprintf("HAMMER(%s): Forcing read-only mode\n",
			hmp->mp->mnt_stat.f_mntfromname);
	}
	hmp->error = error;
	if (hammer_debug_critical)
		Debugger("Entering debugger");
}

/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
static int
hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
		ino_t ino, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	struct hammer_inode *ip;
	int error;
	u_int32_t localization;

	hammer_simple_transaction(&trans, hmp);

	/*
	 * If a directory vnode is supplied (mainly NFS) then we can acquire
	 * the PFS domain from it.  Otherwise we would only be able to vget
	 * inodes in the root PFS.
	 */
	if (dvp) {
		localization = HAMMER_DEF_LOCALIZATION +
			       VTOI(dvp)->obj_localization;
	} else {
		localization = HAMMER_DEF_LOCALIZATION;
	}

	/*
	 * Lookup the requested HAMMER inode.  The structure must be
	 * left unlocked while we manipulate the related vnode to avoid
	 * a deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, ino,
			      hmp->asof, localization,
			      0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		hammer_done_transaction(&trans);
		return(error);
	}
	error = hammer_get_vnode(ip, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * Return the root vnode for the filesystem.
 *
 * HAMMER stores the root vnode in the hammer_mount structure so
 * getting it is easy.
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
#if 0
	struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
	int error;

	error = hammer_vfs_vget(mp, NULL, 1, vpp);
	return (error);
}

static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;
	int64_t breserved;

	volume = hammer_get_root_volume(hmp, &error);
	if (error)
		return(error);
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_stat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
	if (mp->mnt_stat.f_files < 0)
		mp->mnt_stat.f_files = 0;

	*sbp = mp->mnt_stat;
	return(0);
}
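
/*
 * Illustrative arithmetic (not part of the original source): free space is
 * tracked in big-blocks but reported in f_bsize units.  Assuming the usual
 * 8MB HAMMER_LARGEBLOCK_SIZE and 16KB HAMMER_BUFSIZE, each free big-block
 * contributes 512 blocks to f_bfree after the write reserve (breserved)
 * has been subtracted.
 */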

static int
hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;
	int64_t breserved;

	volume = hammer_get_root_volume(hmp, &error);
	if (error)
		return(error);
	ondisk = volume->ondisk;

	/*
	 * Basic stats
	 */
	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
	mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_vstat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
	if (mp->mnt_vstat.f_files < 0)
		mp->mnt_vstat.f_files = 0;
	*sbp = mp->mnt_vstat;
	return(0);
}

/*
 * Sync the filesystem.  Currently we have to run it twice, the second
 * one will advance the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
 *
 * We do not sync the filesystem if we are called from a panic.  If we did
 * we might end up blowing up a sync that was already in progress.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int error;

	if (panicstr == NULL) {
		error = hammer_sync_hmp(hmp, waitfor);
	} else {
		error = EIO;
	}
	return (error);
}

/*
 * Convert a vnode to a file handle.
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer_inode_t ip;

	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_ext = ip->obj_localization >> 16;
	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
	return(0);
}
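
/*
 * Illustrative note (not part of the original source): the 16-byte opaque
 * fid_data is laid out as
 *
 *	fid_data[0..7]	 obj_id   (64-bit inode number)
 *	fid_data[8..15]	 obj_asof (transaction id for as-of access)
 *
 * while fid_ext carries the upper 16 bits of the inode's localization
 * (its PFS id).  hammer_vfs_fhtovp() below reverses this encoding.
 */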

/*
 * Convert a file handle back to a vnode.
 *
 * Use rootvp to enforce PFS isolation when a PFS is exported via a
 * null mount.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
		  struct fid *fhp, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_inode_info info;
	int error;
	u_int32_t localization;

	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));
	if (rootvp)
		localization = VTOI(rootvp)->obj_localization;
	else
		localization = (u_int32_t)fhp->fid_ext << 16;

	hammer_simple_transaction(&trans, (void *)mp->mnt_data);

	/*
	 * Get/allocate the hammer_inode structure.  The structure must be
	 * unlocked while we manipulate the related vnode to avoid a
	 * deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, info.obj_id,
			      info.obj_asof, localization, 0, &error);
	if (ip == NULL) {
		*vpp = NULL;
		hammer_done_transaction(&trans);
		return(error);
	}
	error = hammer_get_vnode(ip, vpp);
	hammer_rel_inode(ip, 0);
	hammer_done_transaction(&trans);
	return (error);
}

static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		    int *exflagsp, struct ucred **credanonp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct netcred *np;
	int error;

	np = vfs_export_lookup(mp, &hmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	return (error);
}

int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int error;

	switch(op) {
	case MOUNTCTL_SET_EXPORT:
		error = vfs_export(mp, &hmp->export, export);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return(error);
}
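
/*
 * Illustrative note (not part of the original source): hammer_vfs_export()
 * is not wired into hammer_vfsops above; it is reached through the mountctl
 * path (MOUNTCTL_SET_EXPORT) when export information is pushed down, e.g.
 * by mountd(8) processing /etc/exports, and records the export data later
 * consulted by hammer_vfs_checkexp().
 */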