/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/mountctl.h>

/*
 * NOTE!  Global statistics may not be MPSAFE so HAMMER never uses them
 *	  in conditionals.
 */
int hammer_supported_version = HAMMER_VOL_VERSION_DEFAULT;
int hammer_debug_io;
int hammer_debug_general;
int hammer_debug_inode;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;		/* -1 will disable, +1 will force */
int hammer_debug_critical;		/* non-zero enter debugger on error */
int hammer_cluster_enable = 2;		/* ena cluster_read, scale x 2 */
int hammer_tdmux_ticks;
int hammer_count_fsyncs;
int hammer_count_inodes;
int hammer_count_iqueued;
int hammer_count_reclaims;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int64_t hammer_stats_btree_lookups;
int64_t hammer_stats_btree_searches;
int64_t hammer_stats_btree_inserts;
int64_t hammer_stats_btree_deletes;
int64_t hammer_stats_btree_elements;
int64_t hammer_stats_btree_splits;
int64_t hammer_stats_btree_iterations;
int64_t hammer_stats_btree_root_iterations;
int64_t hammer_stats_record_iterations;

int64_t hammer_stats_file_read;
int64_t hammer_stats_file_write;
int64_t hammer_stats_disk_read;
int64_t hammer_stats_disk_write;
int64_t hammer_stats_inode_flushes;
int64_t hammer_stats_commits;
int64_t hammer_stats_undo;
int64_t hammer_stats_redo;

long hammer_count_dirtybufspace;	/* global */
int hammer_count_refedbufs;		/* global */
int hammer_count_reservations;
long hammer_count_io_running_read;
long hammer_count_io_running_write;
int hammer_count_io_locked;
long hammer_limit_dirtybufspace;	/* per-mount */
int hammer_limit_recs;			/* as a whole XXX */
int hammer_limit_inode_recs = 2048;	/* per inode */
int hammer_limit_reclaims;
int hammer_live_dedup_cache_size = 4096;
int hammer_limit_redo = 4096 * 1024;	/* per inode */
int hammer_autoflush = 500;		/* auto flush (typ on reclaim) */
int hammer_verify_zone;
int hammer_verify_data = 1;
int hammer_double_buffer;
int hammer_btree_full_undo = 1;
int hammer_yield_check = 16;
int hammer_fsync_mode = 3;
int64_t hammer_contention_count;

int hammer_noatime = 1;
TUNABLE_INT("vfs.hammer.noatime", &hammer_noatime);
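
/*
 * Usage sketch (illustrative only; the exact knob names and access modes
 * follow the TUNABLE_INT() above and the SYSCTL_*() definitions below):
 *
 *	# /boot/loader.conf
 *	vfs.hammer.noatime="0"
 *
 *	# at runtime
 *	sysctl vfs.hammer.cluster_enable=2
 *	sysctl vfs.hammer.stats_btree_lookups
 */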

SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
SYSCTL_INT(_vfs_hammer, OID_AUTO, supported_version, CTLFLAG_RD,
	   &hammer_supported_version, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
	   &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
	   &hammer_debug_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
	   &hammer_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
	   &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
	   &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
	   &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
	   &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_critical, CTLFLAG_RW,
	   &hammer_debug_critical, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, cluster_enable, CTLFLAG_RW,
	   &hammer_cluster_enable, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, tdmux_ticks, CTLFLAG_RW,
	   &hammer_tdmux_ticks, 0, "Hammer tdmux ticks");

SYSCTL_LONG(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW,
	   &hammer_limit_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
	   &hammer_limit_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_inode_recs, CTLFLAG_RW,
	   &hammer_limit_inode_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_reclaims, CTLFLAG_RW,
	   &hammer_limit_reclaims, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, live_dedup_cache_size, CTLFLAG_RW,
	   &hammer_live_dedup_cache_size, 0,
	   "Number of cache entries");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_redo, CTLFLAG_RW,
	   &hammer_limit_redo, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_fsyncs, CTLFLAG_RD,
	   &hammer_count_fsyncs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
	   &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
	   &hammer_count_iqueued, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaims, CTLFLAG_RD,
	   &hammer_count_reclaims, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
	   &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
	   &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
	   &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
	   &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
	   &hammer_count_nodes, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD,
	   &hammer_stats_btree_searches, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD,
	   &hammer_stats_btree_lookups, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD,
	   &hammer_stats_btree_inserts, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD,
	   &hammer_stats_btree_deletes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD,
	   &hammer_stats_btree_elements, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD,
	   &hammer_stats_btree_splits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
	   &hammer_stats_btree_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_root_iterations, CTLFLAG_RD,
	   &hammer_stats_btree_root_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
	   &hammer_stats_record_iterations, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_read, CTLFLAG_RD,
	   &hammer_stats_file_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_write, CTLFLAG_RD,
	   &hammer_stats_file_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_read, CTLFLAG_RD,
	   &hammer_stats_disk_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_write, CTLFLAG_RD,
	   &hammer_stats_disk_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_inode_flushes, CTLFLAG_RD,
	   &hammer_stats_inode_flushes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_commits, CTLFLAG_RD,
	   &hammer_stats_commits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_undo, CTLFLAG_RD,
	   &hammer_stats_undo, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_redo, CTLFLAG_RD,
	   &hammer_stats_redo, 0, "");

SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD,
	   &hammer_count_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
	   &hammer_count_refedbufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
	   &hammer_count_reservations, 0, "");
SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
	   &hammer_count_io_running_read, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
	   &hammer_count_io_locked, 0, "");
SYSCTL_LONG(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
	   &hammer_count_io_running_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
	   &hammer_contention_count, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, autoflush, CTLFLAG_RW,
	   &hammer_autoflush, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW,
	   &hammer_verify_zone, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW,
	   &hammer_verify_data, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, double_buffer, CTLFLAG_RW,
	   &hammer_double_buffer, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, btree_full_undo, CTLFLAG_RW,
	   &hammer_btree_full_undo, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, yield_check, CTLFLAG_RW,
	   &hammer_yield_check, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, fsync_mode, CTLFLAG_RW,
	   &hammer_fsync_mode, 0, "");

/* KTR_INFO_MASTER(hammer); */

static void	hammer_free_hmp(struct mount *mp);

static int	hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int	hammer_vfs_unmount(struct mount *mp, int mntflags);
static int	hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int	hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int	hammer_vfs_sync(struct mount *mp, int waitfor);
static int	hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
				ino_t ino, struct vnode **vpp);
static int	hammer_vfs_init(struct vfsconf *conf);
static int	hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int	hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int	hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);

static struct vfsops hammer_vfsops = {
	.vfs_mount	= hammer_vfs_mount,
	.vfs_unmount	= hammer_vfs_unmount,
	.vfs_root	= hammer_vfs_root,
	.vfs_statfs	= hammer_vfs_statfs,
	.vfs_statvfs	= hammer_vfs_statvfs,
	.vfs_sync	= hammer_vfs_sync,
	.vfs_vget	= hammer_vfs_vget,
	.vfs_init	= hammer_vfs_init,
	.vfs_vptofh	= hammer_vfs_vptofh,
	.vfs_fhtovp	= hammer_vfs_fhtovp,
	.vfs_checkexp	= hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "HAMMER-mount", "");

VFS_SET(hammer_vfsops, hammer, VFCF_MPSAFE);
MODULE_VERSION(hammer, 1);

static int
hammer_vfs_init(struct vfsconf *conf)
{
	long n;

	/*
	 * Wait up to this long for an exclusive deadlock to clear
	 * before acquiring a new shared lock on the ip.  The deadlock
	 * may have occured on a b-tree node related to the ip.
	 */
	if (hammer_tdmux_ticks == 0)
		hammer_tdmux_ticks = hz / 5;

	/*
	 * Autosize, but be careful because a hammer filesystem's
	 * reserve is partially calculated based on dirtybufspace,
	 * so we simply cannot allow it to get too large.
	 */
	if (hammer_limit_recs == 0) {
		n = nbuf * 25;
		if (n > kmalloc_limit(M_HAMMER) / 512)
			n = kmalloc_limit(M_HAMMER) / 512;
		if (n > 2 * 1024 * 1024)
			n = 2 * 1024 * 1024;
		hammer_limit_recs = (int)n;
	}
	if (hammer_limit_dirtybufspace == 0) {
		hammer_limit_dirtybufspace = hidirtybufspace / 2;
		if (hammer_limit_dirtybufspace < 1L * 1024 * 1024)
			hammer_limit_dirtybufspace = 1024L * 1024;
		if (hammer_limit_dirtybufspace > 1024L * 1024 * 1024)
			hammer_limit_dirtybufspace = 1024L * 1024 * 1024;
	}

	/*
	 * The hammer_inode structure detaches from the vnode on reclaim.
	 * This limits the number of inodes in this state to prevent a
	 * memory pool blowout.
	 */
	if (hammer_limit_reclaims == 0) {
		hammer_limit_reclaims = maxvnodes / 10;
		if (hammer_limit_reclaims > HAMMER_LIMIT_RECLAIMS)
			hammer_limit_reclaims = HAMMER_LIMIT_RECLAIMS;
	}

	return (0);
}
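
/*
 * Illustrative arithmetic for the hammer_limit_recs autosize above (example
 * numbers only, not measured from a real system): with a 1 GB kmalloc limit
 * for M_HAMMER the record limit clamps to 1073741824 / 512 = 2097152, which
 * happens to equal the hard 2M ceiling applied by the final check before
 * the assignment.
 */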

static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
		 struct ucred *cred)
{
	struct hammer_mount_info info;
	hammer_mount_t hmp;
	hammer_volume_t rootvol;
	struct vnode *rootvp;
	struct vnode *devvp = NULL;
	const char *upath;	/* volume name in userspace */
	char *path;		/* volume name in system space */
	int error;
	int i;
	int k;
	int master_id;
	int nvolumes;
	char *next_volume_ptr = NULL;

	if (hammer_noatime) {
		mp->mnt_flag |= MNT_NOATIME;
	}

	/*
	 * Accept hammer_mount_info.  mntpt is NULL for root mounts at boot.
	 */
	bzero(&info, sizeof(info));
	if (mntpt == NULL) {
		/*
		 * Root mount; the volume names were placed in
		 * f_mntfromname by the boot code.
		 */
		next_volume_ptr = mp->mnt_stat.f_mntfromname;
		info.nvolumes = 1;

		/* Count number of volumes separated by ':' */
		for (char *p = next_volume_ptr; *p != '\0'; ++p) {
			if (*p == ':')
				++info.nvolumes;
		}

		mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
	} else {
		if ((error = copyin(data, &info, sizeof(info))) != 0)
			return (error);
	}

	/*
	 * updating or new mount
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		hmp = (void *)mp->mnt_data;
		KKASSERT(hmp != NULL);
	} else {
		if (info.nvolumes <= 0 || info.nvolumes > HAMMER_MAX_VOLUMES)
			return (EINVAL);
		hmp = NULL;
	}

	/*
	 * master-id validation.  The master id may not be changed by a
	 * mount update.
	 */
	if (info.hflags & HMNT_MASTERID || info.hflags & HMNT_NOMIRROR) {
		if (hmp && hmp->master_id != info.master_id) {
			hkprintf("cannot change master id with mount update\n");
			return (EINVAL);
		}
		master_id = info.master_id;
		if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
			return (EINVAL);
	} else {
		if (hmp)
			master_id = hmp->master_id;
		else
			master_id = 0;
	}

	/*
	 * Internal mount data structure
	 */
	if (hmp == NULL) {
		hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
		mp->mnt_data = (qaddr_t)hmp;
		hmp->mp = mp;

		/*
		 * Make sure kmalloc type limits are set appropriately.
		 *
		 * Our inode kmalloc group is sized based on maxvnodes
		 * (controlled by the system, not us).
		 */
		kmalloc_create(&hmp->m_misc, "HAMMER-others");
		kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");

		kmalloc_raise_limit(hmp->m_inodes, 0);	/* unlimited */

		hmp->root_btree_beg.localization =
			HAMMER_MIN_ONDISK_LOCALIZATION;
		hmp->root_btree_beg.obj_id = HAMMER_MIN_OBJID;
		hmp->root_btree_beg.key = HAMMER_MIN_KEY;
		hmp->root_btree_beg.create_tid = 1;
		hmp->root_btree_beg.delete_tid = 1;
		hmp->root_btree_beg.rec_type = HAMMER_MIN_RECTYPE;
		hmp->root_btree_beg.obj_type = 0;
		hmp->root_btree_beg.btype = HAMMER_BTREE_TYPE_NONE;

		hmp->root_btree_end.localization =
			HAMMER_MAX_ONDISK_LOCALIZATION;
		hmp->root_btree_end.obj_id = HAMMER_MAX_OBJID;
		hmp->root_btree_end.key = HAMMER_MAX_KEY;
		hmp->root_btree_end.create_tid = HAMMER_MAX_TID;
		hmp->root_btree_end.delete_tid = 0;	/* special case */
		hmp->root_btree_end.rec_type = HAMMER_MAX_RECTYPE;
		hmp->root_btree_end.obj_type = 0;
		hmp->root_btree_end.btype = HAMMER_BTREE_TYPE_NONE;

		hmp->krate.freq = 1;	/* maximum reporting rate (hz) */
		hmp->krate.count = -16;	/* initial burst */
		hmp->kdiag.freq = 1;	/* maximum reporting rate (hz) */
		hmp->kdiag.count = -16;	/* initial burst */

		hmp->sync_lock.refs = 1;
		hmp->undo_lock.refs = 1;
		hmp->blkmap_lock.refs = 1;
		hmp->snapshot_lock.refs = 1;
		hmp->volume_lock.refs = 1;

		TAILQ_INIT(&hmp->delay_list);
		TAILQ_INIT(&hmp->flush_group_list);
		TAILQ_INIT(&hmp->objid_cache_list);
		TAILQ_INIT(&hmp->undo_lru_list);
		TAILQ_INIT(&hmp->reclaim_list);
	}

	hmp->hflags &= ~HMNT_USERFLAGS;
	hmp->hflags |= info.hflags & HMNT_USERFLAGS;

	hmp->master_id = master_id;

	if (info.asof) {
		mp->mnt_flag |= MNT_RDONLY;
		hmp->asof = info.asof;
	} else {
		hmp->asof = HAMMER_MAX_TID;
	}

	hmp->volume_to_remove = -1;

	/*
	 * Re-open read-write if originally read-only, or vise-versa.
	 *
	 * When going from read-only to read-write execute the stage2
	 * recovery if it has not already been run.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		lwkt_gettoken(&hmp->fs_token);
		error = 0;
		if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			hkprintf("read-only -> read-write\n");
			hmp->ronly = 0;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
			rootvol = hammer_get_root_volume(hmp, &error);
			if (rootvol) {
				hammer_recover_flush_buffers(hmp, rootvol, 1);
				error = hammer_recover_stage2(hmp, rootvol);
				bcopy(rootvol->ondisk->vol0_blockmap,
				      hmp->blockmap,
				      sizeof(hmp->blockmap));
				hammer_rel_volume(rootvol, 0);
			}
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			/* kernel clears MNT_RDONLY */
		} else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			hkprintf("read-write -> read-only\n");
			hmp->ronly = 1;	/* messy */
			RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
				hammer_reload_inode, NULL);
			hmp->ronly = 0;
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hammer_flusher_sync(hmp);
			hmp->ronly = 1;
			RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
				hammer_adjust_volume_mode, NULL);
		}
		lwkt_reltoken(&hmp->fs_token);
		return (error);
	}

	RB_INIT(&hmp->rb_vols_root);
	RB_INIT(&hmp->rb_inos_root);
	RB_INIT(&hmp->rb_redo_root);
	RB_INIT(&hmp->rb_nods_root);
	RB_INIT(&hmp->rb_undo_root);
	RB_INIT(&hmp->rb_resv_root);
	RB_INIT(&hmp->rb_bufs_root);
	RB_INIT(&hmp->rb_pfsm_root);

	hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

	RB_INIT(&hmp->volu_root);
	RB_INIT(&hmp->undo_root);
	RB_INIT(&hmp->data_root);
	RB_INIT(&hmp->meta_root);
	RB_INIT(&hmp->lose_root);
	TAILQ_INIT(&hmp->iorun_list);

	lwkt_token_init(&hmp->fs_token, "hammerfs");
	lwkt_token_init(&hmp->io_token, "hammerio");

	lwkt_gettoken(&hmp->fs_token);

	/*
	 * Load volumes
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	for (i = 0; i < info.nvolumes; ++i) {
		if (mntpt == NULL) {
			/*
			 * Root mount: parse the next volume name directly
			 * out of f_mntfromname, no copyin is required.
			 */
			KKASSERT(next_volume_ptr != NULL);
			if (*next_volume_ptr != '/') {
				/* relative path */
				strcpy(path, "/dev/");
			} else {
				path[0] = '\0';
			}
			for (k = strlen(path); k < MAXPATHLEN-1; ++k) {
				if (*next_volume_ptr == '\0') {
					break;
				} else if (*next_volume_ptr == ':') {
					++next_volume_ptr;
					break;
				} else {
					path[k] = *next_volume_ptr;
					++next_volume_ptr;
				}
			}
			path[k] = '\0';

			cdev_t dev = kgetdiskbyname(path);
			error = bdevvp(dev, &devvp);
			if (error) {
				hdkprintf("can't find devvp\n");
			}
		} else {
			error = copyin(&info.volumes[i], &upath,
				       sizeof(char *));
			if (error == 0)
				error = copyinstr(upath, path,
						  MAXPATHLEN, NULL);
		}
		if (error == 0)
			error = hammer_install_volume(hmp, path, devvp, NULL);
		if (error)
			break;
	}
	objcache_put(namei_oc, path);

	/*
	 * Make sure we found a root volume
	 */
	if (hmp->rootvol == NULL) {
		if (error == EBUSY) {
			hdkprintf("The volumes are probably mounted\n");
		} else {
			hdkprintf("No root volume found!\n");
			error = EINVAL;
		}
		goto failed;
	}

	/*
	 * Check that all required volumes are available
	 */
	if (error == 0 && hammer_mountcheck_volumes(hmp)) {
		hdkprintf("Missing volumes, cannot mount!\n");
		error = EINVAL;
		goto failed;
	}

	if (error) {
		hdkprintf("Failed to load volumes!\n");
		goto failed;
	}

	nvolumes = hammer_get_installed_volumes(hmp);
	if (hmp->nvolumes != nvolumes) {
		hdkprintf("volume header says %d volumes, but %d installed\n",
			hmp->nvolumes, nvolumes);
		error = EINVAL;
		goto failed;
	}

	/*
	 * No errors, setup enough of the mount point so we can lookup the
	 * root vnode.
	 */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_kern_flag |= MNTK_THR_SYNC;	/* new vsyncscan semantics */

	/*
	 * MPSAFE code.  Note that VOPs and VFSops which are not MPSAFE
	 * will acquire a per-mount token prior to entry and release it
	 * on return.
	 */
	mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;

	/*
	 * note: f_iosize is used by vnode_pager_haspage() when constructing
	 * its arguments.
	 */
	mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
	mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

	mp->mnt_maxsymlinklen = 255;
	mp->mnt_flag |= MNT_LOCAL;

	vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

	/*
	 * The root volume's ondisk pointer is only valid if we hold a
	 * reference to it.
	 */
	rootvol = hammer_get_root_volume(hmp, &error);
	if (error)
		goto failed;

	/*
	 * Perform any necessary UNDO operations.  The recovery code does
	 * call hammer_undo_lookup() so we have to pre-cache the blockmap,
	 * and then re-copy it again after recovery is complete.
	 *
	 * If this is a read-only mount the UNDO information is retained
	 * in memory in the form of dirty buffer cache buffers, and not
	 * written back to the media.
	 */
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));

	/*
	 * Check filesystem version
	 */
	hmp->version = rootvol->ondisk->vol_version;
	if (hmp->version < HAMMER_VOL_VERSION_MIN ||
	    hmp->version > HAMMER_VOL_VERSION_MAX) {
		hkprintf("mount unsupported fs version %d\n", hmp->version);
		error = ERANGE;
		goto done;
	}

	/*
	 * The undo_rec_limit limits the size of flush groups to avoid
	 * blowing out the UNDO FIFO.  This calculation is typically in
	 * the tens of thousands and is designed primarily when small
	 * HAMMER filesystems are created.
	 */
	hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
	if (hammer_debug_general & 0x0001)
		hkprintf("undo_rec_limit %d\n", hmp->undo_rec_limit);

	/*
	 * NOTE: Recover stage1 not only handles meta-data recovery, it
	 *	 also sets hmp->undo_seqno for HAMMER VERSION 4+ filesystems.
	 */
	error = hammer_recover_stage1(hmp, rootvol);
	if (error) {
		kprintf("Failed to recover HAMMER filesystem on mount\n");
		goto done;
	}

	/*
	 * Finish setup now that we have a good root volume.
	 */
	ksnprintf(mp->mnt_stat.f_mntfromname,
		  sizeof(mp->mnt_stat.f_mntfromname), "%s",
		  rootvol->ondisk->vol_label);
	mp->mnt_stat.f_fsid.val[0] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
	mp->mnt_stat.f_fsid.val[1] =
		crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
	mp->mnt_stat.f_fsid.val[1] &= HAMMER_LOCALIZE_MASK;

	mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
	mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
				     sizeof(mp->mnt_vstat.f_fsid_uuid));

	/*
	 * Certain often-modified fields in the root volume are cached in
	 * the hammer_mount structure so we do not have to generate lots
	 * of little UNDO structures for them.
	 *
	 * Recopy after recovery.  This also has the side effect of
	 * setting our cached undo FIFO's first_offset, which serves to
	 * placemark the FIFO start for the NEXT flush cycle while the
	 * on-disk first_offset represents the LAST flush cycle.
	 */
	hmp->next_tid = rootvol->ondisk->vol0_next_tid;
	hmp->flush_tid1 = hmp->next_tid;
	hmp->flush_tid2 = hmp->next_tid;
	bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
	      sizeof(hmp->blockmap));
	hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;

	hammer_flusher_create(hmp);

	/*
	 * Locate the root directory with an obj_id of 1.
	 */
	error = hammer_vfs_root(mp, &rootvp);
	if (error)
		goto done;
	vput(rootvp);

	error = hammer_recover_stage2(hmp, rootvol);

	/*
	 * If the stage2 recovery fails be sure to clean out all cached
	 * vnodes before throwing away the mount structure or bad things
	 * will happen.
	 */
	if (error)
		vflush(mp, 0, 0);

done:
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		/* Populate info for mount point (NULL pad)*/
		bzero(mp->mnt_stat.f_mntonname, MNAMELEN);
		if (mntpt) {
			copyinstr(mntpt, mp->mnt_stat.f_mntonname,
				  MNAMELEN - 1, 0);
		} else { /* Root mount */
			mp->mnt_stat.f_mntonname[0] = '/';
		}
	}
	(void)VFS_STATFS(mp, &mp->mnt_stat, cred);
	hammer_rel_volume(rootvol, 0);
failed:
	/*
	 * Cleanup and return.
	 */
	if (error) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
	} else {
		lwkt_reltoken(&hmp->fs_token);
	}
	return (error);
}

static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int flags;
	int error;

	/*
	 * Clean out the vnodes
	 */
	lwkt_gettoken(&hmp->fs_token);
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	error = vflush(mp, 0, flags);

	/*
	 * Clean up the internal mount structure and related entities.  This
	 * may issue I/O.
	 */
	if (error == 0) {
		/* called with fs_token held */
		hammer_free_hmp(mp);
	} else {
		lwkt_reltoken(&hmp->fs_token);
	}
	return (error);
}

/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 *
 * Called with fs_token held.
 */
static void
hammer_free_hmp(struct mount *mp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	hammer_flush_group_t flg;

	/*
	 * Flush anything dirty.  This won't even run if the
	 * filesystem errored-out.
	 */
	hammer_flush_dirty(hmp, 30);

	/*
	 * If the mount had a critical error we have to destroy any
	 * remaining inodes before we can finish cleaning up the flusher.
	 */
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
			hammer_destroy_inode_callback, NULL);
	}

	/*
	 * There shouldn't be any inodes left now and any left over
	 * flush groups should now be empty.
	 */
	KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
		TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
		KKASSERT(RB_EMPTY(&flg->flush_tree));
		if (flg->refs) {
			hkprintf("Warning, flush_group %p was "
				"not empty on umount!\n", flg);
		}
		kfree(flg, hmp->m_misc);
	}

	/*
	 * We can finally destroy the flusher
	 */
	hammer_flusher_destroy(hmp);

	/*
	 * We may have held recovered buffers due to a read-only mount.
	 * These must be discarded.
	 */
	hammer_recover_flush_buffers(hmp, NULL, -1);

	/*
	 * Unload buffers and then volumes
	 */
	RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
		hammer_unload_buffer, NULL);
	RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
		hammer_unload_volume, NULL);

	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	hmp->mp = NULL;
	hammer_destroy_objid_cache(hmp);
	kmalloc_destroy(&hmp->m_misc);
	kmalloc_destroy(&hmp->m_inodes);
	lwkt_reltoken(&hmp->fs_token);
	kfree(hmp, M_HAMMER);
}

/*
 * Report critical errors.  ip may be NULL.
 */
void
hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
		      int error, const char *msg)
{
	hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR;

	hmkrateprintf(&hmp->krate, hmp,
		      "Critical error inode=%jd error=%d %s\n",
		      (intmax_t)(ip ? ip->obj_id : -1),
		      error, msg);

	if (hmp->ronly == 0) {
		hmp->ronly = 2;		/* special errored read-only mode */
		hmp->mp->mnt_flag |= MNT_RDONLY;
		RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
			hammer_adjust_volume_mode, NULL);
		hmkprintf(hmp, "Forcing read-only mode\n");
	}
	hmp->error = error;
	if (hammer_debug_critical)
		Debugger("Entering debugger");
}

/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
static int
hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
		ino_t ino, struct vnode **vpp)
{
	struct hammer_transaction trans;
	hammer_mount_t hmp = (void *)mp->mnt_data;
	hammer_inode_t ip;
	int error;
	uint32_t localization;

	lwkt_gettoken(&hmp->fs_token);
	hammer_simple_transaction(&trans, hmp);

	/*
	 * If a directory vnode is supplied (mainly NFS) then we can acquire
	 * the PFS domain from it.  Otherwise we would only be able to vget
	 * inodes in the root PFS.
	 */
	if (dvp) {
		localization = HAMMER_DEF_LOCALIZATION |
			       VTOI(dvp)->obj_localization;
	} else {
		localization = HAMMER_DEF_LOCALIZATION;
	}

	/*
	 * Lookup the requested HAMMER inode.  The structure must be
	 * left unlocked while we manipulate the related vnode to avoid
	 * a deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, ino,
			      hmp->asof, localization, 0, &error);
	if (ip == NULL) {
		*vpp = NULL;
	} else {
		error = hammer_get_vnode(ip, vpp);
		hammer_rel_inode(ip, 0);
	}
	hammer_done_transaction(&trans);
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}

/*
 * Return the root vnode for the filesystem.
 *
 * HAMMER stores the root vnode in the hammer_mount structure so
 * getting it is easy.
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
	int error;

	error = hammer_vfs_vget(mp, NULL, HAMMER_OBJID_ROOT, vpp);
	return (error);
}

static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;
	int64_t breserved;

	lwkt_gettoken(&hmp->fs_token);
	volume = hammer_get_root_volume(hmp, &error);
	if (error) {
		lwkt_reltoken(&hmp->fs_token);
		return (error);
	}
	ondisk = volume->ondisk;

	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
	mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_BIGBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	if (breserved > bfree)
		breserved = bfree;
	mp->mnt_stat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
	mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
	if (mp->mnt_stat.f_files < 0)
		mp->mnt_stat.f_files = 0;

	*sbp = mp->mnt_stat;
	lwkt_reltoken(&hmp->fs_token);
	return (0);
}

static int
hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	hammer_volume_t volume;
	hammer_volume_ondisk_t ondisk;
	int error;
	int64_t bfree;
	int64_t breserved;

	lwkt_gettoken(&hmp->fs_token);
	volume = hammer_get_root_volume(hmp, &error);
	if (error) {
		lwkt_reltoken(&hmp->fs_token);
		return (error);
	}
	ondisk = volume->ondisk;

	_hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
	mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
	bfree = ondisk->vol0_stat_freebigblocks * HAMMER_BIGBLOCK_SIZE;
	hammer_rel_volume(volume, 0);

	if (breserved > bfree)
		breserved = bfree;
	mp->mnt_vstat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
	mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
	if (mp->mnt_vstat.f_files < 0)
		mp->mnt_vstat.f_files = 0;
	*sbp = mp->mnt_vstat;
	lwkt_reltoken(&hmp->fs_token);
	return (0);
}

/*
 * Sync the filesystem.  Currently we have to run it twice, the second
 * one will advance the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
 *
 * We do not sync the filesystem if we are called from a panic.  If we did
 * we might end up blowing up a sync that was already in progress.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int error;

	lwkt_gettoken(&hmp->fs_token);
	if (panicstr == NULL) {
		error = hammer_sync_hmp(hmp, waitfor);
	} else {
		error = EIO;
	}
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}

/*
 * Convert a vnode to a file handle.
 *
 * Accesses read-only fields on already-referenced structures so
 * no token is needed.
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer_inode_t ip;

	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);
	fhp->fid_ext = lo_to_pfs(ip->obj_localization);
	bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
	bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
	return (0);
}
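
/*
 * File handle layout, for reference (derived from the bcopy() calls above
 * and the matching decode in hammer_vfs_fhtovp() below, not from any
 * separate on-disk specification):
 *
 *	fid_data[0..7]	- ip->obj_id	(64 bit object id)
 *	fid_data[8..15]	- ip->obj_asof	(64 bit as-of TID)
 *	fid_ext		- PFS id encoded from ip->obj_localization
 */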

/*
 * Convert a file handle back to a vnode.
 *
 * Use rootvp to enforce PFS isolation when a PFS is exported via a
 * null mount.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
		  struct fid *fhp, struct vnode **vpp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct hammer_transaction trans;
	hammer_inode_t ip;
	struct hammer_inode_info info;
	int error;
	uint32_t localization;

	bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
	bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));
	if (rootvp)
		localization = VTOI(rootvp)->obj_localization;
	else
		localization = pfs_to_lo(fhp->fid_ext);

	lwkt_gettoken(&hmp->fs_token);
	hammer_simple_transaction(&trans, hmp);

	/*
	 * Get/allocate the hammer_inode structure.  The structure must be
	 * unlocked while we manipulate the related vnode to avoid a
	 * deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, info.obj_id,
			      info.obj_asof, localization, 0, &error);
	if (ip == NULL) {
		*vpp = NULL;
	} else {
		error = hammer_get_vnode(ip, vpp);
		hammer_rel_inode(ip, 0);
	}
	hammer_done_transaction(&trans);
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}

static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		    int *exflagsp, struct ucred **credanonp)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	struct netcred *np;
	int error;

	lwkt_gettoken(&hmp->fs_token);
	np = vfs_export_lookup(mp, &hmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}

int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
	hammer_mount_t hmp = (void *)mp->mnt_data;
	int error;

	lwkt_gettoken(&hmp->fs_token);

	switch(op) {
	case MOUNTCTL_SET_EXPORT:
		error = vfs_export(mp, &hmp->export, export);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	lwkt_reltoken(&hmp->fs_token);

	return (error);
}