/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.74 2008/11/13 02:18:43 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/nlookup.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>

#include "hammer.h"
/*
 * NOTE!  Global statistics may not be MPSAFE so HAMMER never uses them
 */
int hammer_supported_version = HAMMER_VOL_VERSION_DEFAULT;
int hammer_debug_io;
int hammer_debug_general;
int hammer_debug_debug = 1;		/* medium-error panics */
int hammer_debug_inode;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;		/* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_debug_critical;		/* non-zero enter debugger on error */
int hammer_cluster_enable = 1;		/* enable read clustering by default */
int hammer_live_dedup = 0;
int hammer_count_fsyncs;
int hammer_count_inodes;
int hammer_count_iqueued;
int hammer_count_reclaiming;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int64_t hammer_count_extra_space_used;
int64_t hammer_stats_btree_lookups;
int64_t hammer_stats_btree_searches;
int64_t hammer_stats_btree_inserts;
int64_t hammer_stats_btree_deletes;
int64_t hammer_stats_btree_elements;
int64_t hammer_stats_btree_splits;
int64_t hammer_stats_btree_iterations;
int64_t hammer_stats_btree_root_iterations;
int64_t hammer_stats_record_iterations;

int64_t hammer_stats_file_read;
int64_t hammer_stats_file_write;
int64_t hammer_stats_file_iopsr;
int64_t hammer_stats_file_iopsw;
int64_t hammer_stats_disk_read;
int64_t hammer_stats_disk_write;
int64_t hammer_stats_inode_flushes;
int64_t hammer_stats_commits;
int64_t hammer_stats_undo;
int64_t hammer_stats_redo;

int hammer_count_dirtybufspace;		/* global */
int hammer_count_refedbufs;		/* global */
int hammer_count_reservations;
int hammer_count_io_running_read;
int hammer_count_io_running_write;
int hammer_count_io_locked;
int hammer_limit_dirtybufspace;		/* per-mount */
int hammer_limit_running_io;		/* per-mount */
int hammer_limit_recs;			/* as a whole XXX */
int hammer_limit_inode_recs = 1024;	/* per inode */
int hammer_limit_reclaim = HAMMER_RECLAIM_WAIT;
int hammer_live_dedup_cache_size = DEDUP_CACHE_SIZE;
int hammer_limit_redo = 4096 * 1024;	/* per inode */
int hammer_autoflush = 2000;		/* auto flush */
int hammer_bio_count;
int hammer_verify_zone;
int hammer_verify_data = 1;
int hammer_write_mode;
int hammer_yield_check = 16;
int hammer_fsync_mode = 3;
int64_t hammer_contention_count;
int64_t hammer_zone_limit;
/*
 * Live dedup debug counters (sysctls are writable so that counters
 * can be reset from userspace).
 */
int64_t hammer_live_dedup_vnode_bcmps = 0;
int64_t hammer_live_dedup_device_bcmps = 0;
int64_t hammer_live_dedup_findblk_failures = 0;
int64_t hammer_live_dedup_bmap_saves = 0;
SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
SYSCTL_INT(_vfs_hammer, OID_AUTO, supported_version, CTLFLAG_RD,
           &hammer_supported_version, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
           &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
           &hammer_debug_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
           &hammer_debug_debug, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
           &hammer_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
           &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
           &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
           &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
           &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
           &hammer_debug_recover_faults, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_critical, CTLFLAG_RW,
           &hammer_debug_critical, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, cluster_enable, CTLFLAG_RW,
           &hammer_cluster_enable, 0, "");
/*
 * 0 - live dedup is disabled
 * 1 - dedup cache is populated on reads only
 * 2 - dedup cache is populated on both reads and writes
 */
SYSCTL_INT(_vfs_hammer, OID_AUTO, live_dedup, CTLFLAG_RW,
           &hammer_live_dedup, 0, "Enable live dedup");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW,
           &hammer_limit_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_running_io, CTLFLAG_RW,
           &hammer_limit_running_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
           &hammer_limit_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_inode_recs, CTLFLAG_RW,
           &hammer_limit_inode_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_reclaim, CTLFLAG_RW,
           &hammer_limit_reclaim, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, live_dedup_cache_size, CTLFLAG_RW,
           &hammer_live_dedup_cache_size, 0,
           "Number of cache entries");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_redo, CTLFLAG_RW,
           &hammer_limit_redo, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_fsyncs, CTLFLAG_RD,
           &hammer_count_fsyncs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
           &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
           &hammer_count_iqueued, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaiming, CTLFLAG_RD,
           &hammer_count_reclaiming, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
           &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
           &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
           &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
           &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
           &hammer_count_nodes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, count_extra_space_used, CTLFLAG_RD,
           &hammer_count_extra_space_used, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD,
           &hammer_stats_btree_searches, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD,
           &hammer_stats_btree_lookups, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD,
           &hammer_stats_btree_inserts, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD,
           &hammer_stats_btree_deletes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD,
           &hammer_stats_btree_elements, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD,
           &hammer_stats_btree_splits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
           &hammer_stats_btree_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_root_iterations, CTLFLAG_RD,
           &hammer_stats_btree_root_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
           &hammer_stats_record_iterations, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_read, CTLFLAG_RD,
           &hammer_stats_file_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_write, CTLFLAG_RD,
           &hammer_stats_file_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsr, CTLFLAG_RD,
           &hammer_stats_file_iopsr, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsw, CTLFLAG_RD,
           &hammer_stats_file_iopsw, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_read, CTLFLAG_RD,
           &hammer_stats_disk_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_write, CTLFLAG_RD,
           &hammer_stats_disk_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_inode_flushes, CTLFLAG_RD,
           &hammer_stats_inode_flushes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_commits, CTLFLAG_RD,
           &hammer_stats_commits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_undo, CTLFLAG_RD,
           &hammer_stats_undo, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_redo, CTLFLAG_RD,
           &hammer_stats_redo, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_vnode_bcmps, CTLFLAG_RW,
            &hammer_live_dedup_vnode_bcmps, 0,
            "successful vnode buffer comparisons");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_device_bcmps, CTLFLAG_RW,
            &hammer_live_dedup_device_bcmps, 0,
            "successful device buffer comparisons");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_findblk_failures, CTLFLAG_RW,
            &hammer_live_dedup_findblk_failures, 0,
            "block lookup failures for comparison");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, live_dedup_bmap_saves, CTLFLAG_RW,
            &hammer_live_dedup_bmap_saves, 0,
            "useful physical block lookups");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD,
           &hammer_count_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
           &hammer_count_refedbufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
           &hammer_count_reservations, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
           &hammer_count_io_running_read, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
           &hammer_count_io_locked, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
           &hammer_count_io_running_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
           &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
           &hammer_contention_count, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, autoflush, CTLFLAG_RW,
           &hammer_autoflush, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW,
           &hammer_verify_zone, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW,
           &hammer_verify_data, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, write_mode, CTLFLAG_RW,
           &hammer_write_mode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, yield_check, CTLFLAG_RW,
           &hammer_yield_check, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, fsync_mode, CTLFLAG_RW,
           &hammer_fsync_mode, 0, "");
KTR_INFO_MASTER(hammer);
static void	hammer_free_hmp(struct mount *mp);

static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_sync(struct mount *mp, int waitfor);
static int	hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
				ino_t ino, struct vnode **vpp);
static int	hammer_vfs_init(struct vfsconf *conf);
static int	hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);
static struct vfsops hammer_vfsops = {
	.vfs_mount	= hammer_vfs_mount,
	.vfs_unmount	= hammer_vfs_unmount,
	.vfs_root	= hammer_vfs_root,
	.vfs_statfs	= hammer_vfs_statfs,
	.vfs_statvfs	= hammer_vfs_statvfs,
	.vfs_sync	= hammer_vfs_sync,
	.vfs_vget	= hammer_vfs_vget,
	.vfs_init	= hammer_vfs_init,
	.vfs_vptofh	= hammer_vfs_vptofh,
	.vfs_fhtovp	= hammer_vfs_fhtovp,
	.vfs_checkexp	= hammer_vfs_checkexp
};
MALLOC_DEFINE(M_HAMMER, "HAMMER-mount", "");

VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);
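/*
 * VFS_SET() above registers hammer_vfsops under the filesystem name
 * "hammer".  In normal operation a mount request (typically issued by
 * mount_hammer(8)) reaches hammer_vfs_mount() below with a
 * struct hammer_mount_info describing the volume list in its data
 * argument.
 */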
static int
hammer_vfs_init(struct vfsconf *conf)
{
	int n;

	if (hammer_limit_recs == 0) {
		hammer_limit_recs = nbuf * 25;
		n = kmalloc_limit(M_HAMMER) / 512;
		if (hammer_limit_recs > n)
			hammer_limit_recs = n;
	}
	if (hammer_limit_dirtybufspace == 0) {
		hammer_limit_dirtybufspace = hidirtybufspace / 2;
		if (hammer_limit_dirtybufspace < 100)
			hammer_limit_dirtybufspace = 100;
	}

	/*
	 * Set reasonable limits to maintain an I/O pipeline.  This is
	 * used by the flush code which explicitly initiates I/O, and
	 * is per-mount.
	 *
	 * The system-driven buffer cache uses vfs.lorunningspace and
	 * vfs.hirunningspace globally.
	 */
	if (hammer_limit_running_io == 0)
		hammer_limit_running_io = hammer_limit_dirtybufspace;
	if (hammer_limit_running_io > 10 * 1024 * 1024)
		hammer_limit_running_io = 10 * 1024 * 1024;
	return(0);
}
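/*
 * Worked example (hypothetical numbers): with nbuf = 10000 buffer cache
 * buffers, hammer_limit_recs starts at 250000 and is then clamped to
 * kmalloc_limit(M_HAMMER) / 512 if the malloc zone is smaller.  If
 * hidirtybufspace were 64MB, hammer_limit_dirtybufspace would default
 * to 32MB and hammer_limit_running_io would inherit that value before
 * being clamped to the 10MB cap above.
 */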
static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	struct vnode *devvp = NULL;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;
	int k;
	int master_id;
	char *next_volume_ptr = NULL;

	/*
	 * Accept hammer_mount_info.  mntpt is NULL for root mounts at boot.
	 */
	bzero(&info, sizeof(info));
	if (mntpt == NULL) {
		/*
		 * Root mount: the volume list comes from f_mntfromname.
		 */
		next_volume_ptr = mp->mnt_stat.f_mntfromname;

		/* Count number of volumes separated by ':' */
		info.nvolumes = 1;
		for (char *p = next_volume_ptr; *p != '\0'; ++p) {
			if (*p == ':')
				++info.nvolumes;
		}

		mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
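		/*
		 * Example (hypothetical device names): for a two-volume root
		 * mount f_mntfromname might look like
		 * "/dev/da0s1a:/dev/da1s1a"; the ':' separators are counted
		 * above to determine info.nvolumes.
		 */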
	} else {
		if ((error = copyin(data, &info, sizeof(info))) != 0)
			return(error);
	}

	/*
	 * updating or new mount
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		hmp = NULL;
	}
	if (info.nvolumes <= 0 || info.nvolumes >= 32768)
		return(EINVAL);

	/*
	 * master-id validation.  The master id may not be changed by a
	 * mount update.
	 */
	if (info.hflags & HMNT_MASTERID) {
		if (hmp && hmp->master_id != info.master_id) {
			kprintf("hammer: cannot change master id "
				"with mount update\n");
			return(EINVAL);
		}
		master_id = info.master_id;
		if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
			return(EINVAL);
	} else {
		if (hmp)
			master_id = hmp->master_id;
		else
			master_id = 0;
	}
	/*
	 * Internal mount data structure
	 */
	if (hmp == NULL) {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;
		/*TAILQ_INIT(&hmp->recycle_list);*/

		/*
		 * Make sure kmalloc type limits are set appropriately.
		 *
		 * Our inode kmalloc group is sized based on maxvnodes
		 * (controlled by the system, not us).
		 */
		kmalloc_create(&hmp->m_misc, "HAMMER-others");
		kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");
		kmalloc_raise_limit(hmp->m_inodes, 0);	/* unlimited */

		hmp->root_btree_beg.localization = 0x00000000U;
		hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
		hmp->root_btree_beg.key = -0x8000000000000000LL;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = 0;
		hmp->root_btree_beg.obj_type = 0;

		hmp->root_btree_end.localization = 0xFFFFFFFFU;
		hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
		hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
		hmp->root_btree_end.delete_tid = 0;	/* special case */
		hmp->root_btree_end.rec_type = 0xFFFFU;
		hmp->root_btree_end.obj_type = 0;
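		/*
		 * Note: root_btree_beg/root_btree_end as set above span the
		 * entire B-Tree key space (localization 0..0xFFFFFFFF and
		 * the full signed 64 bit obj_id/key range), so scans bounded
		 * by them cover every possible element in the filesystem.
		 */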
		hmp->krate.freq = 1;	/* maximum reporting rate (hz) */
		hmp->krate.count = -16;	/* initial burst */

		hmp->sync_lock.refs = 1;
		hmp->free_lock.refs = 1;
		hmp->undo_lock.refs = 1;
		hmp->blkmap_lock.refs = 1;
		hmp->snapshot_lock.refs = 1;
		hmp->volume_lock.refs = 1;

		TAILQ_INIT(&hmp->delay_list);
		TAILQ_INIT(&hmp->flush_group_list);
		TAILQ_INIT(&hmp->objid_cache_list);
		TAILQ_INIT(&hmp->undo_lru_list);
		TAILQ_INIT(&hmp->reclaim_list);

		RB_INIT(&hmp->rb_dedup_crc_root);
		RB_INIT(&hmp->rb_dedup_off_root);
		TAILQ_INIT(&hmp->dedup_lru_list);
	}
	hmp->hflags &= ~HMNT_USERFLAGS;
	hmp->hflags |= info.hflags & HMNT_USERFLAGS;

	hmp->master_id = master_id;

	if (info.asof) {
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	hmp->volume_to_remove = -1;
	/*
	 * Re-open read-write if originally read-only, or vice-versa.
	 *
	 * When going from read-only to read-write execute the stage2
	 * recovery if it has not already been run.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		lwkt_gettoken(&hmp->fs_token);
		error = 0;
		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			kprintf("HAMMER read-only -> read-write\n");
			hmp->ronly = 0;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
			rootvol = hammer_get_root_volume(hmp, &error);
			if (rootvol) {
				hammer_recover_flush_buffers(hmp, rootvol, 1);
				error = hammer_recover_stage2(hmp, rootvol);
				bcopy(rootvol->ondisk->vol0_blockmap,
				      hmp->blockmap,
				      sizeof(hmp->blockmap));
				hammer_rel_volume(rootvol, 0);
			}
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			/* kernel clears MNT_RDONLY */
		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			kprintf("HAMMER read-write -> read-only\n");
			hmp->ronly = 1;	/* messy */
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
		}
		lwkt_reltoken(&hmp->fs_token);
		return(error);
	}
	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_redo_root);
	RB_INIT(&hmp->rb_nods_root);
	RB_INIT(&hmp->rb_undo_root);
	RB_INIT(&hmp->rb_resv_root);
	RB_INIT(&hmp->rb_bufs_root);
	RB_INIT(&hmp->rb_pfsm_root);

	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	TAILQ_INIT(&hmp->volu_list);
	TAILQ_INIT(&hmp->undo_list);
	TAILQ_INIT(&hmp->data_list);
	TAILQ_INIT(&hmp->meta_list);
	TAILQ_INIT(&hmp->lose_list);
	TAILQ_INIT(&hmp->iorun_list);

	lwkt_token_init(&hmp->fs_token, 1, "hammerfs");
	lwkt_token_init(&hmp->io_token, 1, "hammerio");

	lwkt_gettoken(&hmp->fs_token);
	/*
	 * Load volumes
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	for (i = 0; i < info.nvolumes; ++i) {
		if (mntpt == NULL) {
			/*
			 * Root mount: pull the next volume name out of the
			 * ':' separated list in f_mntfromname.
			 */
			KKASSERT(next_volume_ptr != NULL);

			if (*next_volume_ptr != '/') {
				/* relative path */
				strcpy(path, "/dev/");
			} else {
				path[0] = 0;
			}
			for (k = strlen(path); k < MAXPATHLEN-1; ++k) {
				if (*next_volume_ptr == '\0') {
					break;
				} else if (*next_volume_ptr == ':') {
					++next_volume_ptr;
					break;
				}
				path[k] = *next_volume_ptr;
				++next_volume_ptr;
			}
			path[k] = 0;

			cdev_t dev = kgetdiskbyname(path);
			error = bdevvp(dev, &devvp);
			if (error) {
				kprintf("hammer_mountroot: can't find devvp\n");
			}
		} else {
			error = copyin(&info.volumes[i], &upath,
				       sizeof(char *));
			if (error == 0)
				error = copyinstr(upath, path,
						  MAXPATHLEN, NULL);
		}
		if (error == 0)
			error = hammer_install_volume(hmp, path, devvp);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);
	/*
	 * Make sure we found a root volume
	 */
	if (error == 0 && hmp->rootvol == NULL) {
		kprintf("hammer_mount: No root volume found!\n");
		error = EINVAL;
	}

	/*
	 * Check that all required volumes are available
	 */
	if (error == 0 && hammer_mountcheck_volumes(hmp)) {
		kprintf("hammer_mount: Missing volumes, cannot mount!\n");
		error = EINVAL;
	}

	if (error) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
		return(error);
	}
	/*
	 * No errors, setup enough of the mount point so we can lookup the
	 * root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_FSMID;

	/*
	 * MPSAFE code.  Note that VOPs and VFSops which are not MPSAFE
	 * will acquire a per-mount token prior to entry and release it
	 * on return, so even if we do not specify it we no longer get
	 * the BGL regardless of how we are flagged.
	 */
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;
	/*MNTK_RD_MPSAFE | MNTK_GA_MPSAFE | MNTK_IN_MPSAFE;*/
	/*
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);
	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then re-copy it again after recovery is complete.
	 *
	 * If this is a read-only mount the UNDO information is retained
	 * in memory in the form of dirty buffer cache buffers, and not
	 * written back to the media.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	/*
	 * Check filesystem version
	 */
	hmp->version = rootvol->ondisk->vol_version;
	if (hmp->version < HAMMER_VOL_VERSION_MIN ||
	    hmp->version > HAMMER_VOL_VERSION_MAX) {
		kprintf("HAMMER: mount unsupported fs version %d\n",
			hmp->version);
		error = ERANGE;
	}
	/*
	 * The undo_rec_limit limits the size of flush groups to avoid
	 * blowing out the UNDO FIFO.  This calculation is typically in
	 * the tens of thousands and is designed primarily when small
	 * HAMMER filesystems are created.
	 */
	hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
	if (hammer_debug_general & 0x0001)
		kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit);
	/*
	 * NOTE: Recover stage1 not only handles meta-data recovery, it
	 *	 also sets hmp->undo_seqno for HAMMER VERSION 4+ filesystems.
	 */
	error = hammer_recover_stage1(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
	}
	/*
	 * Finish setup now that we have a good root volume.
	 *
	 * The top 16 bits of fsid.val[1] is a pfs id.
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_name);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
	mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF;
	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
				     sizeof(mp->mnt_vstat.f_fsid_uuid));
	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 *
	 * Recopy after recovery.  This also has the side effect of
	 * setting our cached undo FIFO's first_offset, which serves to
	 * placemark the FIFO start for the NEXT flush cycle while the
	 * on-disk first_offset represents the LAST flush cycle.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	hmp->flush_tid1 = hmp->next_tid;
	hmp->flush_tid2 = hmp->next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;

	hammer_flusher_create(hmp);
	/*
	 * Locate the root directory using the root cluster's B-Tree as a
	 * starting point.  The root directory uses an obj_id of 1.
	 *
	 * FUTURE: Leave the root directory cached referenced but unlocked
	 * in hmp->rootvp (need to flush it on unmount).
	 */
	error = hammer_vfs_vget(mp, NULL, 1, &rootvp);
	if (error == 0)
		vput(rootvp);
	/*vn_unlock(hmp->rootvp);*/
	if (hmp->ronly == 0)
		error = hammer_recover_stage2(hmp, rootvol);

	/*
	 * If the stage2 recovery fails be sure to clean out all cached
	 * vnodes before throwing away the mount structure or bad things
	 * will happen.
	 */
	if (error)
		vflush(mp, 0, 0);

	hammer_rel_volume(rootvol, 0);

	/*
	 * Cleanup and return.
	 */
	if (error) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
	} else {
		lwkt_reltoken(&hmp->fs_token);
	}
	return(error);
}
static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int flags;
	int error;

	/*
	 * Clean out the vnodes
	 */
	lwkt_gettoken(&hmp->fs_token);
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	error = vflush(mp, 0, flags);

	/*
	 * Clean up the internal mount structure and related entities.  This
	 * may issue I/O.
	 */
	if (error == 0) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
	} else {
		lwkt_reltoken(&hmp->fs_token);
	}
	return(error);
}
/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 *
 * Called with fs_token held.
 */
static void
hammer_free_hmp(struct mount *mp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	hammer_flush_group_t flg;
	int count;
	int dummy;

	/*
	 * Flush anything dirty.  This won't even run if the
	 * filesystem errored-out.
	 */
	count = 0;
	while (hammer_flusher_haswork(hmp)) {
		hammer_flusher_sync(hmp);
		++count;
		if (count >= 5) {
			if (count == 5)
				kprintf("HAMMER: umount flushing.");
			else
				kprintf(".");
			tsleep(&dummy, 0, "hmrufl", hz);
		}
		if (count == 30) {
			kprintf("giving up\n");
			break;
		}
	}
	if (count >= 5 && count < 30)
		kprintf("\n");
	/*
	 * If the mount had a critical error we have to destroy any
	 * remaining inodes before we can finish cleaning up the flusher.
	 */
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
			hammer_destroy_inode_callback, NULL);
	}

	/*
	 * There shouldn't be any inodes left now and any left over
	 * flush groups should now be empty.
	 */
	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
		TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
		KKASSERT(RB_EMPTY(&flg->flush_tree));
		if (flg->refs) {
			kprintf("HAMMER: Warning, flush_group %p was "
				"not empty on umount!\n", flg);
		}
		kfree(flg, hmp->m_misc);
	}
	/*
	 * We can finally destroy the flusher
	 */
	hammer_flusher_destroy(hmp);

	/*
	 * We may have held recovered buffers due to a read-only mount.
	 * These must be discarded.
	 */
	hammer_recover_flush_buffers(hmp, NULL, -1);

	/*
	 * Unload buffers and then volumes
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, NULL);
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);

	mp->mnt_flag &= ~MNT_LOCAL;

	hammer_destroy_objid_cache(hmp);
	hammer_destroy_dedup_cache(hmp);
	if (hmp->dedup_free_cache != NULL) {
		kfree(hmp->dedup_free_cache, hmp->m_misc);
		hmp->dedup_free_cache = NULL;
	}
	kmalloc_destroy(&hmp->m_misc);
	kmalloc_destroy(&hmp->m_inodes);
	lwkt_reltoken(&hmp->fs_token);
	kfree(hmp, M_HAMMER);
}
/*
 * Report critical errors.  ip may be NULL.
 */
void
hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
		      int error, const char *msg)
{
	hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR;

	krateprintf(&hmp->krate,
		    "HAMMER(%s): Critical error inode=%jd error=%d %s\n",
		    hmp->mp->mnt_stat.f_mntfromname,
		    (intmax_t)(ip ? ip->obj_id : -1),
		    error, msg);

	if (hmp->ronly == 0) {
		hmp->ronly = 2;		/* special errored read-only mode */
		hmp->mp->mnt_flag |= MNT_RDONLY;
		kprintf("HAMMER(%s): Forcing read-only mode\n",
			hmp->mp->mnt_stat.f_mntfromname);
	}
	if (hammer_debug_critical)
		Debugger("Entering debugger");
}
/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
static int
hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
		ino_t ino, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	struct hammer_inode *ip;
	int error;
	u_int32_t localization;

	lwkt_gettoken(&hmp->fs_token);
	hammer_simple_transaction(&trans, hmp);

	/*
	 * If a directory vnode is supplied (mainly NFS) then we can acquire
	 * the PFS domain from it.  Otherwise we would only be able to vget
	 * inodes in the root PFS.
	 */
	if (dvp) {
		localization = HAMMER_DEF_LOCALIZATION +
			       VTOI(dvp)->obj_localization;
	} else {
		localization = HAMMER_DEF_LOCALIZATION;
	}

	/*
	 * Lookup the requested HAMMER inode.  The structure must be
	 * left unlocked while we manipulate the related vnode to avoid
	 * a deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, ino,
			      hmp->asof, localization, 0, &error);
	if (ip == NULL) {
		*vpp = NULL;
	} else {
		error = hammer_get_vnode(ip, vpp);
		hammer_rel_inode(ip, 0);
	}
	hammer_done_transaction(&trans);
	lwkt_reltoken(&hmp->fs_token);
	return(error);
}
/*
 * Return the root vnode for the filesystem.
 *
 * HAMMER stores the root vnode in the hammer_mount structure so
 * getting it is easy.
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
	int error;

	error = hammer_vfs_vget(mp, NULL, 1, vpp);
	return(error);
}
static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;
	int64_t breserved;

	lwkt_gettoken(&hmp->fs_token);
	volume = hammer_get_root_volume(hmp, &error);
	if (error) {
		lwkt_reltoken(&hmp->fs_token);
		return(error);
	}
	ondisk = volume->ondisk;

	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_stat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
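	/*
	 * Note on units: bfree above is a byte count (free big-blocks times
	 * HAMMER_LARGEBLOCK_SIZE, nominally 8MB each), so dividing by
	 * HAMMER_BUFSIZE (16KB) converts it into the f_bsize-sized blocks
	 * reported to statfs; breserved accounts for space reserved for
	 * writes by _hammer_checkspace() above.
	 */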
	if (mp->mnt_stat.f_files < 0)
		mp->mnt_stat.f_files = 0;

	*sbp = mp->mnt_stat;
	lwkt_reltoken(&hmp->fs_token);
	return(0);
}
static int
hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;
	int64_t breserved;

	lwkt_gettoken(&hmp->fs_token);
	volume = hammer_get_root_volume(hmp, &error);
	if (error) {
		lwkt_reltoken(&hmp->fs_token);
		return(error);
	}
	ondisk = volume->ondisk;

	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
	mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	mp->mnt_vstat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
	if (mp->mnt_vstat.f_files < 0)
		mp->mnt_vstat.f_files = 0;
	*sbp = mp->mnt_vstat;
	lwkt_reltoken(&hmp->fs_token);
	return(0);
}
/*
 * Sync the filesystem.  Currently we have to run it twice, the second
 * one will advance the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
 *
 * We do not sync the filesystem if we are called from a panic.  If we did
 * we might end up blowing up a sync that was already in progress.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int error;

	lwkt_gettoken(&hmp->fs_token);
	if (panicstr == NULL) {
		error = hammer_sync_hmp(hmp, waitfor);
	} else {
		error = EIO;
	}
	lwkt_reltoken(&hmp->fs_token);
	return(error);
}
/*
 * Convert a vnode to a file handle.
 *
 * Accesses read-only fields on already-referenced structures so
 * no token is needed.
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer_inode_t ip;

	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_ext = ip->obj_localization >> 16;
	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
	return(0);
}
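/*
 * File handle layout produced above (and decoded again in
 * hammer_vfs_fhtovp() below): fid_data bytes 0-7 hold the inode's obj_id,
 * bytes 8-15 hold obj_asof, and fid_ext carries the PFS portion of the
 * localization (obj_localization >> 16).
 */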
/*
 * Convert a file handle back to a vnode.
 *
 * Use rootvp to enforce PFS isolation when a PFS is exported via a
 * null mount.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
		  struct fid *fhp, struct vnode **vpp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_inode_info info;
	int error;
	u_int32_t localization;

	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));
	if (rootvp)
		localization = VTOI(rootvp)->obj_localization;
	else
		localization = (u_int32_t)fhp->fid_ext << 16;

	lwkt_gettoken(&hmp->fs_token);
	hammer_simple_transaction(&trans, hmp);

	/*
	 * Get/allocate the hammer_inode structure.  The structure must be
	 * unlocked while we manipulate the related vnode to avoid a
	 * deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, info.obj_id,
			      info.obj_asof, localization, 0, &error);
	if (ip == NULL) {
		*vpp = NULL;
	} else {
		error = hammer_get_vnode(ip, vpp);
		hammer_rel_inode(ip, 0);
	}
	hammer_done_transaction(&trans);
	lwkt_reltoken(&hmp->fs_token);
	return(error);
}
static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		    int *exflagsp, struct ucred **credanonp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct netcred *np;
	int error;

	lwkt_gettoken(&hmp->fs_token);
	np = vfs_export_lookup(mp, &hmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	lwkt_reltoken(&hmp->fs_token);
	return(error);
}
int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int error;

	lwkt_gettoken(&hmp->fs_token);

	switch (op) {
	case MOUNTCTL_SET_EXPORT:
		error = vfs_export(mp, &hmp->export, export);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	lwkt_reltoken(&hmp->fs_token);

	return(error);
}