sys/vfs/hammer/hammer_vfsops.c
1 /*
2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
34 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.74 2008/11/13 02:18:43 dillon Exp $
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/vnode.h>
41 #include <sys/mount.h>
42 #include <sys/malloc.h>
43 #include <sys/nlookup.h>
44 #include <sys/fcntl.h>
45 #include <sys/sysctl.h>
46 #include <sys/buf.h>
47 #include <sys/buf2.h>
48 #include "hammer.h"
50 int hammer_supported_version = HAMMER_VOL_VERSION_TWO;
51 int hammer_debug_io;
52 int hammer_debug_general;
53 int hammer_debug_debug = 1; /* medium-error panics */
54 int hammer_debug_inode;
55 int hammer_debug_locks;
56 int hammer_debug_btree;
57 int hammer_debug_tid;
58 int hammer_debug_recover; /* -1 will disable, +1 will force */
59 int hammer_debug_recover_faults;
60 int hammer_cluster_enable = 1; /* enable read clustering by default */
61 int hammer_count_fsyncs;
62 int hammer_count_inodes;
63 int hammer_count_iqueued;
64 int hammer_count_reclaiming;
65 int hammer_count_records;
66 int hammer_count_record_datas;
67 int hammer_count_volumes;
68 int hammer_count_buffers;
69 int hammer_count_nodes;
70 int64_t hammer_count_extra_space_used;
71 int64_t hammer_stats_btree_lookups;
72 int64_t hammer_stats_btree_searches;
73 int64_t hammer_stats_btree_inserts;
74 int64_t hammer_stats_btree_deletes;
75 int64_t hammer_stats_btree_elements;
76 int64_t hammer_stats_btree_splits;
77 int64_t hammer_stats_btree_iterations;
78 int64_t hammer_stats_btree_root_iterations;
79 int64_t hammer_stats_record_iterations;
81 int64_t hammer_stats_file_read;
82 int64_t hammer_stats_file_write;
83 int64_t hammer_stats_file_iopsr;
84 int64_t hammer_stats_file_iopsw;
85 int64_t hammer_stats_disk_read;
86 int64_t hammer_stats_disk_write;
87 int64_t hammer_stats_inode_flushes;
88 int64_t hammer_stats_commits;
90 int hammer_count_dirtybufspace; /* global */
91 int hammer_count_refedbufs; /* global */
92 int hammer_count_reservations;
93 int hammer_count_io_running_read;
94 int hammer_count_io_running_write;
95 int hammer_count_io_locked;
96 int hammer_limit_dirtybufspace; /* per-mount */
97 int hammer_limit_recs; /* as a whole XXX */
98 int hammer_limit_inode_recs = 1024; /* per inode */
99 int hammer_autoflush = 2000; /* auto flush */
100 int hammer_bio_count;
101 int hammer_verify_zone;
102 int hammer_verify_data = 1;
103 int hammer_write_mode;
104 int hammer_yield_check = 16;
105 int hammer_fsync_mode;
106 int64_t hammer_contention_count;
107 int64_t hammer_zone_limit;
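/*
 * The tunables and statistics counters declared above are exported
 * below under the vfs.hammer sysctl tree; counters are read-only
 * (CTLFLAG_RD) and tunables are read-write (CTLFLAG_RW).
 */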
109 SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
110 SYSCTL_INT(_vfs_hammer, OID_AUTO, supported_version, CTLFLAG_RD,
111 &hammer_supported_version, 0, "");
112 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
113 &hammer_debug_general, 0, "");
114 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
115 &hammer_debug_io, 0, "");
116 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
117 &hammer_debug_debug, 0, "");
118 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
119 &hammer_debug_inode, 0, "");
120 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
121 &hammer_debug_locks, 0, "");
122 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
123 &hammer_debug_btree, 0, "");
124 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
125 &hammer_debug_tid, 0, "");
126 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
127 &hammer_debug_recover, 0, "");
128 SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
129 &hammer_debug_recover_faults, 0, "");
130 SYSCTL_INT(_vfs_hammer, OID_AUTO, cluster_enable, CTLFLAG_RW,
131 &hammer_cluster_enable, 0, "");
133 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW,
134 &hammer_limit_dirtybufspace, 0, "");
135 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
136 &hammer_limit_recs, 0, "");
137 SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_inode_recs, CTLFLAG_RW,
138 &hammer_limit_inode_recs, 0, "");
140 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_fsyncs, CTLFLAG_RD,
141 &hammer_count_fsyncs, 0, "");
142 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
143 &hammer_count_inodes, 0, "");
144 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
145 &hammer_count_iqueued, 0, "");
146 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaiming, CTLFLAG_RD,
147 &hammer_count_reclaiming, 0, "");
148 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
149 &hammer_count_records, 0, "");
150 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
151 &hammer_count_record_datas, 0, "");
152 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
153 &hammer_count_volumes, 0, "");
154 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
155 &hammer_count_buffers, 0, "");
156 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
157 &hammer_count_nodes, 0, "");
158 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, count_extra_space_used, CTLFLAG_RD,
159 &hammer_count_extra_space_used, 0, "");
161 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD,
162 &hammer_stats_btree_searches, 0, "");
163 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD,
164 &hammer_stats_btree_lookups, 0, "");
165 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD,
166 &hammer_stats_btree_inserts, 0, "");
167 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD,
168 &hammer_stats_btree_deletes, 0, "");
169 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD,
170 &hammer_stats_btree_elements, 0, "");
171 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD,
172 &hammer_stats_btree_splits, 0, "");
173 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
174 &hammer_stats_btree_iterations, 0, "");
175 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_root_iterations, CTLFLAG_RD,
176 &hammer_stats_btree_root_iterations, 0, "");
177 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
178 &hammer_stats_record_iterations, 0, "");
180 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_read, CTLFLAG_RD,
181 &hammer_stats_file_read, 0, "");
182 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_write, CTLFLAG_RD,
183 &hammer_stats_file_write, 0, "");
184 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsr, CTLFLAG_RD,
185 &hammer_stats_file_iopsr, 0, "");
186 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsw, CTLFLAG_RD,
187 &hammer_stats_file_iopsw, 0, "");
188 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_read, CTLFLAG_RD,
189 &hammer_stats_disk_read, 0, "");
190 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_write, CTLFLAG_RD,
191 &hammer_stats_disk_write, 0, "");
192 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_inode_flushes, CTLFLAG_RD,
193 &hammer_stats_inode_flushes, 0, "");
194 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_commits, CTLFLAG_RD,
195 &hammer_stats_commits, 0, "");
197 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD,
198 &hammer_count_dirtybufspace, 0, "");
199 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
200 &hammer_count_refedbufs, 0, "");
201 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
202 &hammer_count_reservations, 0, "");
203 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
204 &hammer_count_io_running_read, 0, "");
205 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
206 &hammer_count_io_locked, 0, "");
207 SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
208 &hammer_count_io_running_write, 0, "");
209 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
210 &hammer_zone_limit, 0, "");
211 SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
212 &hammer_contention_count, 0, "");
213 SYSCTL_INT(_vfs_hammer, OID_AUTO, autoflush, CTLFLAG_RW,
214 &hammer_autoflush, 0, "");
215 SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW,
216 &hammer_verify_zone, 0, "");
217 SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW,
218 &hammer_verify_data, 0, "");
219 SYSCTL_INT(_vfs_hammer, OID_AUTO, write_mode, CTLFLAG_RW,
220 &hammer_write_mode, 0, "");
221 SYSCTL_INT(_vfs_hammer, OID_AUTO, yield_check, CTLFLAG_RW,
222 &hammer_yield_check, 0, "");
223 SYSCTL_INT(_vfs_hammer, OID_AUTO, fsync_mode, CTLFLAG_RW,
224 &hammer_fsync_mode, 0, "");
226 KTR_INFO_MASTER(hammer);
229 * VFS ABI
231 static void hammer_free_hmp(struct mount *mp);
233 static int hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
234 struct ucred *cred);
235 static int hammer_vfs_unmount(struct mount *mp, int mntflags);
236 static int hammer_vfs_root(struct mount *mp, struct vnode **vpp);
237 static int hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
238 struct ucred *cred);
239 static int hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
240 struct ucred *cred);
241 static int hammer_vfs_sync(struct mount *mp, int waitfor);
242 static int hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
243 ino_t ino, struct vnode **vpp);
244 static int hammer_vfs_init(struct vfsconf *conf);
245 static int hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
246 struct fid *fhp, struct vnode **vpp);
247 static int hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
248 static int hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
249 int *exflagsp, struct ucred **credanonp);
252 static struct vfsops hammer_vfsops = {
253 .vfs_mount = hammer_vfs_mount,
254 .vfs_unmount = hammer_vfs_unmount,
255 .vfs_root = hammer_vfs_root,
256 .vfs_statfs = hammer_vfs_statfs,
257 .vfs_statvfs = hammer_vfs_statvfs,
258 .vfs_sync = hammer_vfs_sync,
259 .vfs_vget = hammer_vfs_vget,
260 .vfs_init = hammer_vfs_init,
261 .vfs_vptofh = hammer_vfs_vptofh,
262 .vfs_fhtovp = hammer_vfs_fhtovp,
263 .vfs_checkexp = hammer_vfs_checkexp
266 MALLOC_DEFINE(M_HAMMER, "HAMMER-mount", "");
268 VFS_SET(hammer_vfsops, hammer, 0);
269 MODULE_VERSION(hammer, 1);
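/*
 * hammer_vfs_init() runs when the VFS is registered.  It derives
 * defaults for limits not set by tunables: limit_recs is scaled from
 * nbuf and clamped so that roughly 512 bytes per record fit within the
 * M_HAMMER kmalloc limit, and limit_dirtybufspace defaults to half of
 * hidirtybufspace with a small floor.
 */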
271 static int
272 hammer_vfs_init(struct vfsconf *conf)
274 int n;
276 if (hammer_limit_recs == 0) {
277 hammer_limit_recs = nbuf * 25;
278 n = kmalloc_limit(M_HAMMER) / 512;
279 if (hammer_limit_recs > n)
280 hammer_limit_recs = n;
282 if (hammer_limit_dirtybufspace == 0) {
283 hammer_limit_dirtybufspace = hidirtybufspace / 2;
284 if (hammer_limit_dirtybufspace < 100)
285 hammer_limit_dirtybufspace = 100;
287 return(0);
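/*
 * Mount or update-mount a HAMMER filesystem.  This handles root mounts
 * at boot (mntpt == NULL), ordinary mounts, and MNT_UPDATE remounts.
 * It loads all volumes, runs UNDO recovery against the root volume,
 * and finally looks up the root directory vnode (obj_id 1).
 */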
290 static int
291 hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
292 struct ucred *cred)
294 struct hammer_mount_info info;
295 hammer_mount_t hmp;
296 hammer_volume_t rootvol;
297 struct vnode *rootvp;
298 struct vnode *devvp = NULL;
299 const char *upath; /* volume name in userspace */
300 char *path; /* volume name in system space */
301 int error;
302 int i;
303 int master_id;
304 int maxinodes;
307 * Accept hammer_mount_info. mntpt is NULL for root mounts at boot.
309 if (mntpt == NULL) {
310 if ((error = bdevvp(rootdev, &devvp))) {
311 kprintf("hammer_mountroot: can't find devvp\n");
312 return (error);
314 mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
315 bzero(&info, sizeof(info));
316 info.asof = 0;
317 info.hflags = 0;
318 info.nvolumes = 1;
319 } else {
320 if ((error = copyin(data, &info, sizeof(info))) != 0)
321 return (error);
325 * updating or new mount
327 if (mp->mnt_flag & MNT_UPDATE) {
328 hmp = (void *)mp->mnt_data;
329 KKASSERT(hmp != NULL);
330 } else {
331 if (info.nvolumes <= 0 || info.nvolumes >= 32768)
332 return (EINVAL);
333 hmp = NULL;
337 * master-id validation. The master id may not be changed by a
338 * mount update.
340 if (info.hflags & HMNT_MASTERID) {
341 if (hmp && hmp->master_id != info.master_id) {
342 kprintf("hammer: cannot change master id "
343 "with mount update\n");
344 return(EINVAL);
346 master_id = info.master_id;
347 if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
348 return (EINVAL);
349 } else {
350 if (hmp)
351 master_id = hmp->master_id;
352 else
353 master_id = 0;
357 * Internal mount data structure
359 if (hmp == NULL) {
360 hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
361 mp->mnt_data = (qaddr_t)hmp;
362 hmp->mp = mp;
363 /*TAILQ_INIT(&hmp->recycle_list);*/
366 * Make sure kmalloc type limits are set appropriately. If root
367 * increases the vnode limit you may have to do a dummy remount
368 * to adjust the HAMMER inode limit.
370 kmalloc_create(&hmp->m_misc, "HAMMER-others");
371 kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");
373 maxinodes = desiredvnodes + desiredvnodes / 5 +
374 HAMMER_RECLAIM_WAIT;
375 kmalloc_raise_limit(hmp->m_inodes,
376 maxinodes * sizeof(struct hammer_inode));
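/*
 * root_btree_beg/end describe the lowest and highest possible B-Tree
 * key values (localization, obj_id, key, tids, rec_type), giving
 * whole-filesystem B-Tree scans well-defined bounds.
 */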
378 hmp->root_btree_beg.localization = 0x00000000U;
379 hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
380 hmp->root_btree_beg.key = -0x8000000000000000LL;
381 hmp->root_btree_beg.create_tid = 1;
382 hmp->root_btree_beg.delete_tid = 1;
383 hmp->root_btree_beg.rec_type = 0;
384 hmp->root_btree_beg.obj_type = 0;
386 hmp->root_btree_end.localization = 0xFFFFFFFFU;
387 hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
388 hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
389 hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
390 hmp->root_btree_end.delete_tid = 0; /* special case */
391 hmp->root_btree_end.rec_type = 0xFFFFU;
392 hmp->root_btree_end.obj_type = 0;
394 hmp->krate.freq = 1; /* maximum reporting rate (hz) */
395 hmp->krate.count = -16; /* initial burst */
397 hmp->sync_lock.refs = 1;
398 hmp->free_lock.refs = 1;
399 hmp->undo_lock.refs = 1;
400 hmp->blkmap_lock.refs = 1;
402 TAILQ_INIT(&hmp->delay_list);
403 TAILQ_INIT(&hmp->flush_group_list);
404 TAILQ_INIT(&hmp->objid_cache_list);
405 TAILQ_INIT(&hmp->undo_lru_list);
406 TAILQ_INIT(&hmp->reclaim_list);
408 hmp->hflags &= ~HMNT_USERFLAGS;
409 hmp->hflags |= info.hflags & HMNT_USERFLAGS;
411 hmp->master_id = master_id;
413 if (info.asof) {
414 mp->mnt_flag |= MNT_RDONLY;
415 hmp->asof = info.asof;
416 } else {
417 hmp->asof = HAMMER_MAX_TID;
421 * Re-open read-write if originally read-only, or vice versa.
423 if (mp->mnt_flag & MNT_UPDATE) {
424 error = 0;
425 if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
426 kprintf("HAMMER read-only -> read-write\n");
427 hmp->ronly = 0;
428 RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
429 hammer_adjust_volume_mode, NULL);
430 rootvol = hammer_get_root_volume(hmp, &error);
431 if (rootvol) {
432 hammer_recover_flush_buffers(hmp, rootvol, 1);
433 bcopy(rootvol->ondisk->vol0_blockmap,
434 hmp->blockmap,
435 sizeof(hmp->blockmap));
436 hammer_rel_volume(rootvol, 0);
438 RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
439 hammer_reload_inode, NULL);
440 /* kernel clears MNT_RDONLY */
441 } else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
442 kprintf("HAMMER read-write -> read-only\n");
443 hmp->ronly = 1; /* messy */
444 RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
445 hammer_reload_inode, NULL);
446 hmp->ronly = 0;
447 hammer_flusher_sync(hmp);
448 hammer_flusher_sync(hmp);
449 hammer_flusher_sync(hmp);
450 hmp->ronly = 1;
451 RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
452 hammer_adjust_volume_mode, NULL);
454 return(error);
457 RB_INIT(&hmp->rb_vols_root);
458 RB_INIT(&hmp->rb_inos_root);
459 RB_INIT(&hmp->rb_nods_root);
460 RB_INIT(&hmp->rb_undo_root);
461 RB_INIT(&hmp->rb_resv_root);
462 RB_INIT(&hmp->rb_bufs_root);
463 RB_INIT(&hmp->rb_pfsm_root);
465 hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);
467 TAILQ_INIT(&hmp->volu_list);
468 TAILQ_INIT(&hmp->undo_list);
469 TAILQ_INIT(&hmp->data_list);
470 TAILQ_INIT(&hmp->meta_list);
471 TAILQ_INIT(&hmp->lose_list);
474 * Load volumes
476 path = objcache_get(namei_oc, M_WAITOK);
477 hmp->nvolumes = -1;
478 for (i = 0; i < info.nvolumes; ++i) {
479 if (mntpt == NULL) {
481 * Root mount.
482 * Only one volume; and no need for copyin.
484 KKASSERT(info.nvolumes == 1);
485 ksnprintf(path, MAXPATHLEN, "/dev/%s",
486 mp->mnt_stat.f_mntfromname);
487 error = 0;
488 } else {
489 error = copyin(&info.volumes[i], &upath,
490 sizeof(char *));
491 if (error == 0)
492 error = copyinstr(upath, path,
493 MAXPATHLEN, NULL);
495 if (error == 0)
496 error = hammer_install_volume(hmp, path, devvp);
497 if (error)
498 break;
500 objcache_put(namei_oc, path);
503 * Make sure we found a root volume
505 if (error == 0 && hmp->rootvol == NULL) {
506 kprintf("hammer_mount: No root volume found!\n");
507 error = EINVAL;
511 * Check that all required volumes are available
513 if (error == 0 && hammer_mountcheck_volumes(hmp)) {
514 kprintf("hammer_mount: Missing volumes, cannot mount!\n");
515 error = EINVAL;
518 if (error) {
519 hammer_free_hmp(mp);
520 return (error);
524 * No errors; set up enough of the mount point so we can look up the
525 * root vnode.
527 mp->mnt_iosize_max = MAXPHYS;
528 mp->mnt_kern_flag |= MNTK_FSMID;
531 * note: f_iosize is used by vnode_pager_haspage() when constructing
532 * its VOP_BMAP call.
534 mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
535 mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;
537 mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
538 mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;
540 mp->mnt_maxsymlinklen = 255;
541 mp->mnt_flag |= MNT_LOCAL;
543 vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
544 vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
545 vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);
548 * The root volume's ondisk pointer is only valid if we hold a
549 * reference to it.
551 rootvol = hammer_get_root_volume(hmp, &error);
552 if (error)
553 goto failed;
556 * Perform any necessary UNDO operations. The recovery code does
557 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
558 * and then re-copy it after recovery is complete.
560 * If this is a read-only mount the UNDO information is retained
561 * in memory in the form of dirty buffer cache buffers, and not
562 * written back to the media.
564 bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
565 sizeof(hmp->blockmap));
568 * Check filesystem version
570 hmp->version = rootvol->ondisk->vol_version;
571 if (hmp->version < HAMMER_VOL_VERSION_MIN ||
572 hmp->version > HAMMER_VOL_VERSION_MAX) {
573 kprintf("HAMMER: mount unsupported fs version %d\n",
574 hmp->version);
575 error = ERANGE;
576 goto done;
580 * The undo_rec_limit limits the size of flush groups to avoid
581 * blowing out the UNDO FIFO. The computed limit is typically in
582 * the tens of thousands and matters primarily for small
583 * HAMMER filesystems.
585 hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
586 if (hammer_debug_general & 0x0001)
587 kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit);
589 error = hammer_recover(hmp, rootvol);
590 if (error) {
591 kprintf("Failed to recover HAMMER filesystem on mount\n");
592 goto done;
596 * Finish setup now that we have a good root volume.
598 * The top 16 bits of fsid.val[1] are a pfs id.
600 ksnprintf(mp->mnt_stat.f_mntfromname,
601 sizeof(mp->mnt_stat.f_mntfromname), "%s",
602 rootvol->ondisk->vol_name);
603 mp->mnt_stat.f_fsid.val[0] =
604 crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
605 mp->mnt_stat.f_fsid.val[1] =
606 crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
607 mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF;
609 mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
610 mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
611 sizeof(mp->mnt_vstat.f_fsid_uuid));
614 * Certain often-modified fields in the root volume are cached in
615 * the hammer_mount structure so we do not have to generate lots
616 * of little UNDO structures for them.
618 * Recopy after recovery. This also has the side effect of
619 * setting our cached undo FIFO's first_offset, which serves to
620 * placemark the FIFO start for the NEXT flush cycle while the
621 * on-disk first_offset represents the LAST flush cycle.
623 hmp->next_tid = rootvol->ondisk->vol0_next_tid;
624 hmp->flush_tid1 = hmp->next_tid;
625 hmp->flush_tid2 = hmp->next_tid;
626 bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
627 sizeof(hmp->blockmap));
628 hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;
630 hammer_flusher_create(hmp);
633 * Locate the root directory using the root cluster's B-Tree as a
634 * starting point. The root directory uses an obj_id of 1.
636 * FUTURE: Leave the root directory cached referenced but unlocked
637 * in hmp->rootvp (need to flush it on unmount).
639 error = hammer_vfs_vget(mp, NULL, 1, &rootvp);
640 if (error)
641 goto done;
642 vput(rootvp);
643 /*vn_unlock(hmp->rootvp);*/
645 done:
646 hammer_rel_volume(rootvol, 0);
647 failed:
649 * Cleanup and return.
651 if (error)
652 hammer_free_hmp(mp);
653 return (error);
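/*
 * Unmount a HAMMER filesystem: flush and discard vnodes (honoring
 * MNT_FORCE) and then tear down the internal mount structure via
 * hammer_free_hmp(), which may issue I/O.
 */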
656 static int
657 hammer_vfs_unmount(struct mount *mp, int mntflags)
659 #if 0
660 struct hammer_mount *hmp = (void *)mp->mnt_data;
661 #endif
662 int flags;
663 int error;
666 * Clean out the vnodes
668 flags = 0;
669 if (mntflags & MNT_FORCE)
670 flags |= FORCECLOSE;
671 if ((error = vflush(mp, 0, flags)) != 0)
672 return (error);
675 * Clean up the internal mount structure and related entities. This
676 * may issue I/O.
678 hammer_free_hmp(mp);
679 return(0);
683 * Clean up the internal mount structure and disassociate it from the mount.
684 * This may issue I/O.
686 static void
687 hammer_free_hmp(struct mount *mp)
689 struct hammer_mount *hmp = (void *)mp->mnt_data;
690 hammer_flush_group_t flg;
691 int count;
692 int dummy;
695 * Flush anything dirty. This won't even run if the
696 * filesystem errored-out.
698 count = 0;
699 while (hammer_flusher_haswork(hmp)) {
700 hammer_flusher_sync(hmp);
701 ++count;
702 if (count >= 5) {
703 if (count == 5)
704 kprintf("HAMMER: umount flushing.");
705 else
706 kprintf(".");
707 tsleep(&dummy, 0, "hmrufl", hz);
709 if (count == 30) {
710 kprintf("giving up\n");
711 break;
714 if (count >= 5 && count < 30)
715 kprintf("\n");
718 * If the mount had a critical error we have to destroy any
719 * remaining inodes before we can finish cleaning up the flusher.
721 if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
722 RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
723 hammer_destroy_inode_callback, NULL);
727 * There shouldn't be any inodes left now and any left over
728 * flush groups should now be empty.
730 KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
731 while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
732 TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
733 KKASSERT(TAILQ_EMPTY(&flg->flush_list));
734 if (flg->refs) {
735 kprintf("HAMMER: Warning, flush_group %p was "
736 "not empty on umount!\n", flg);
738 kfree(flg, hmp->m_misc);
742 * We can finally destroy the flusher
744 hammer_flusher_destroy(hmp);
747 * We may have held recovered buffers due to a read-only mount.
748 * These must be discarded.
750 if (hmp->ronly)
751 hammer_recover_flush_buffers(hmp, NULL, -1);
754 * Unload buffers and then volumes
756 RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
757 hammer_unload_buffer, NULL);
758 RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
759 hammer_unload_volume, NULL);
761 mp->mnt_data = NULL;
762 mp->mnt_flag &= ~MNT_LOCAL;
763 hmp->mp = NULL;
764 hammer_destroy_objid_cache(hmp);
765 kmalloc_destroy(&hmp->m_misc);
766 kmalloc_destroy(&hmp->m_inodes);
767 kfree(hmp, M_HAMMER);
771 * Report critical errors. ip may be NULL.
773 void
774 hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
775 int error, const char *msg)
777 hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR;
778 krateprintf(&hmp->krate,
779 "HAMMER(%s): Critical error inode=%lld %s\n",
780 hmp->mp->mnt_stat.f_mntfromname,
781 (long long)(ip ? ip->obj_id : -1), msg);
782 if (hmp->ronly == 0) {
783 hmp->ronly = 2; /* special errored read-only mode */
784 hmp->mp->mnt_flag |= MNT_RDONLY;
785 kprintf("HAMMER(%s): Forcing read-only mode\n",
786 hmp->mp->mnt_stat.f_mntfromname);
788 hmp->error = error;
793 * Obtain a vnode for the specified inode number. An exclusively locked
794 * vnode is returned.
797 hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
798 ino_t ino, struct vnode **vpp)
800 struct hammer_transaction trans;
801 struct hammer_mount *hmp = (void *)mp->mnt_data;
802 struct hammer_inode *ip;
803 int error;
804 u_int32_t localization;
806 hammer_simple_transaction(&trans, hmp);
809 * If a directory vnode is supplied (mainly NFS) then we can acquire
810 * the PFS domain from it. Otherwise we would only be able to vget
811 * inodes in the root PFS.
813 if (dvp) {
814 localization = HAMMER_DEF_LOCALIZATION +
815 VTOI(dvp)->obj_localization;
816 } else {
817 localization = HAMMER_DEF_LOCALIZATION;
821 * Lookup the requested HAMMER inode. The structure must be
822 * left unlocked while we manipulate the related vnode to avoid
823 * a deadlock.
825 ip = hammer_get_inode(&trans, NULL, ino,
826 hmp->asof, localization,
827 0, &error);
828 if (ip == NULL) {
829 *vpp = NULL;
830 hammer_done_transaction(&trans);
831 return(error);
833 error = hammer_get_vnode(ip, vpp);
834 hammer_rel_inode(ip, 0);
835 hammer_done_transaction(&trans);
836 return (error);
840 * Return the root vnode for the filesystem.
842 * The root directory always has obj_id 1, so the root vnode is
843 * obtained with a simple hammer_vfs_vget().
845 static int
846 hammer_vfs_root(struct mount *mp, struct vnode **vpp)
848 #if 0
849 struct hammer_mount *hmp = (void *)mp->mnt_data;
850 #endif
851 int error;
853 error = hammer_vfs_vget(mp, NULL, 1, vpp);
854 return (error);
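/*
 * Report filesystem statistics.  Free space is derived from the root
 * volume's free big-block count minus the reservation returned by
 * _hammer_checkspace() for writes, expressed in HAMMER_BUFSIZE blocks;
 * f_files comes from vol0_stat_inodes.
 */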
857 static int
858 hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
860 struct hammer_mount *hmp = (void *)mp->mnt_data;
861 hammer_volume_t volume;
862 hammer_volume_ondisk_t ondisk;
863 int error;
864 int64_t bfree;
865 int64_t breserved;
867 volume = hammer_get_root_volume(hmp, &error);
868 if (error)
869 return(error);
870 ondisk = volume->ondisk;
873 * Basic stats
875 _hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
876 mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
877 bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
878 hammer_rel_volume(volume, 0);
880 mp->mnt_stat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
881 mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
882 if (mp->mnt_stat.f_files < 0)
883 mp->mnt_stat.f_files = 0;
885 *sbp = mp->mnt_stat;
886 return(0);
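/*
 * statvfs variant of the above; fills mnt_vstat using the same
 * free-space calculation.
 */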
889 static int
890 hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
892 struct hammer_mount *hmp = (void *)mp->mnt_data;
893 hammer_volume_t volume;
894 hammer_volume_ondisk_t ondisk;
895 int error;
896 int64_t bfree;
897 int64_t breserved;
899 volume = hammer_get_root_volume(hmp, &error);
900 if (error)
901 return(error);
902 ondisk = volume->ondisk;
905 * Basic stats
907 _hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
908 mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
909 bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
910 hammer_rel_volume(volume, 0);
912 mp->mnt_vstat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
913 mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
914 if (mp->mnt_vstat.f_files < 0)
915 mp->mnt_vstat.f_files = 0;
916 *sbp = mp->mnt_vstat;
917 return(0);
921 * Sync the filesystem. Currently we have to run it twice; the second
922 * pass advances the undo start index to the end index, so if a crash
923 * occurs no undos will be run on mount.
925 * We do not sync the filesystem if we are called from a panic. If we did
926 * we might end up blowing up a sync that was already in progress.
928 static int
929 hammer_vfs_sync(struct mount *mp, int waitfor)
931 struct hammer_mount *hmp = (void *)mp->mnt_data;
932 int error;
934 if (panicstr == NULL) {
935 error = hammer_sync_hmp(hmp, waitfor);
936 } else {
937 error = EIO;
939 return (error);
943 * Convert a vnode to a file handle.
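 *
 * The 16-byte fid_data encodes the inode's obj_id (bytes 0-7) and
 * obj_asof (bytes 8-15); the upper 16 bits of the PFS localization are
 * carried in fid_ext.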
945 static int
946 hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
948 hammer_inode_t ip;
950 KKASSERT(MAXFIDSZ >= 16);
951 ip = VTOI(vp);
952 fhp->fid_len = offsetof(struct fid, fid_data[16]);
953 fhp->fid_ext = ip->obj_localization >> 16;
954 bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
955 bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
956 return(0);
961 * Convert a file handle back to a vnode.
963 * Use rootvp to enforce PFS isolation when a PFS is exported via a
964 * null mount.
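 *
 * The PFS localization is taken from the exported root vnode when one
 * is supplied, otherwise it is recovered from fid_ext; the inode is
 * then looked up by the obj_id/asof pair stored in fid_data.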
966 static int
967 hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
968 struct fid *fhp, struct vnode **vpp)
970 struct hammer_transaction trans;
971 struct hammer_inode *ip;
972 struct hammer_inode_info info;
973 int error;
974 u_int32_t localization;
976 bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
977 bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));
978 if (rootvp)
979 localization = VTOI(rootvp)->obj_localization;
980 else
981 localization = (u_int32_t)fhp->fid_ext << 16;
983 hammer_simple_transaction(&trans, (void *)mp->mnt_data);
986 * Get/allocate the hammer_inode structure. The structure must be
987 * unlocked while we manipulate the related vnode to avoid a
988 * deadlock.
990 ip = hammer_get_inode(&trans, NULL, info.obj_id,
991 info.obj_asof, localization, 0, &error);
992 if (ip == NULL) {
993 *vpp = NULL;
hammer_done_transaction(&trans);
994 return(error);
996 error = hammer_get_vnode(ip, vpp);
997 hammer_rel_inode(ip, 0);
998 hammer_done_transaction(&trans);
999 return (error);
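/*
 * Check an NFS export request against the mount's export list and
 * return the matching export flags and anonymous credential, or EACCES
 * if the client is not permitted.
 */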
1002 static int
1003 hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
1004 int *exflagsp, struct ucred **credanonp)
1006 hammer_mount_t hmp = (void *)mp->mnt_data;
1007 struct netcred *np;
1008 int error;
1010 np = vfs_export_lookup(mp, &hmp->export, nam);
1011 if (np) {
1012 *exflagsp = np->netc_exflags;
1013 *credanonp = &np->netc_anon;
1014 error = 0;
1015 } else {
1016 error = EACCES;
1018 return (error);
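/*
 * Set the NFS export configuration (MOUNTCTL_SET_EXPORT); any other
 * operation is rejected with EOPNOTSUPP.
 */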
1023 hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
1025 hammer_mount_t hmp = (void *)mp->mnt_data;
1026 int error;
1028 switch(op) {
1029 case MOUNTCTL_SET_EXPORT:
1030 error = vfs_export(mp, &hmp->export, export);
1031 break;
1032 default:
1033 error = EOPNOTSUPP;
1034 break;
1036 return(error);