/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/vfsops.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/objcache.h>
#include <sys/namei.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/mutex.h>
#include <sys/mutex2.h>

#include "hammer2_disk.h"
#include "hammer2_mount.h"
#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"
#define REPORT_REFS_ERRORS 1	/* XXX remove me */

MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache");

struct hammer2_sync_info {

TAILQ_HEAD(hammer2_mntlist, hammer2_dev);
static struct hammer2_mntlist hammer2_mntlist;

struct hammer2_pfslist hammer2_pfslist;
struct hammer2_pfslist hammer2_spmplist;
struct lock hammer2_mntlk;

int hammer2_debug;
int hammer2_supported_version = HAMMER2_VOL_VERSION_DEFAULT;
int hammer2_cluster_meta_read = 1;	/* physical read-ahead */
int hammer2_cluster_data_read = 4;	/* physical read-ahead */
int hammer2_dedup_enable = 1;
int hammer2_always_compress = 0;	/* always try to compress */
int hammer2_inval_enable = 0;
int hammer2_flush_pipe = 100;
int hammer2_dio_count;
int hammer2_dio_limit = 256;
int hammer2_bulkfree_tps = 5000;
long hammer2_chain_allocs;
long hammer2_chain_frees;
long hammer2_limit_dirty_chains;
long hammer2_count_modified_chains;
long hammer2_iod_invals;
long hammer2_iod_file_read;
long hammer2_iod_meta_read;
long hammer2_iod_indr_read;
long hammer2_iod_fmap_read;
long hammer2_iod_volu_read;
long hammer2_iod_file_write;
long hammer2_iod_file_wembed;
long hammer2_iod_file_wzero;
long hammer2_iod_file_wdedup;
long hammer2_iod_meta_write;
long hammer2_iod_indr_write;
long hammer2_iod_fmap_write;
long hammer2_iod_volu_write;
MALLOC_DECLARE(M_HAMMER2_CBUFFER);
MALLOC_DEFINE(M_HAMMER2_CBUFFER, "HAMMER2-compbuffer",
		"Buffer used for compression.");

MALLOC_DECLARE(M_HAMMER2_DEBUFFER);
MALLOC_DEFINE(M_HAMMER2_DEBUFFER, "HAMMER2-decompbuffer",
		"Buffer used for decompression.");
SYSCTL_NODE(_vfs, OID_AUTO, hammer2, CTLFLAG_RW, 0, "HAMMER2 filesystem");

SYSCTL_INT(_vfs_hammer2, OID_AUTO, supported_version, CTLFLAG_RD,
	   &hammer2_supported_version, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, debug, CTLFLAG_RW,
	   &hammer2_debug, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_meta_read, CTLFLAG_RW,
	   &hammer2_cluster_meta_read, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, cluster_data_read, CTLFLAG_RW,
	   &hammer2_cluster_data_read, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dedup_enable, CTLFLAG_RW,
	   &hammer2_dedup_enable, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, always_compress, CTLFLAG_RW,
	   &hammer2_always_compress, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, inval_enable, CTLFLAG_RW,
	   &hammer2_inval_enable, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, flush_pipe, CTLFLAG_RW,
	   &hammer2_flush_pipe, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, bulkfree_tps, CTLFLAG_RW,
	   &hammer2_bulkfree_tps, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, chain_allocs, CTLFLAG_RW,
	   &hammer2_chain_allocs, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, chain_frees, CTLFLAG_RW,
	   &hammer2_chain_frees, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, limit_dirty_chains, CTLFLAG_RW,
	   &hammer2_limit_dirty_chains, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, count_modified_chains, CTLFLAG_RW,
	   &hammer2_count_modified_chains, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_count, CTLFLAG_RD,
	   &hammer2_dio_count, 0, "");
SYSCTL_INT(_vfs_hammer2, OID_AUTO, dio_limit, CTLFLAG_RW,
	   &hammer2_dio_limit, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_invals, CTLFLAG_RW,
	   &hammer2_iod_invals, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_read, CTLFLAG_RW,
	   &hammer2_iod_file_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_read, CTLFLAG_RW,
	   &hammer2_iod_meta_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_read, CTLFLAG_RW,
	   &hammer2_iod_indr_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_read, CTLFLAG_RW,
	   &hammer2_iod_fmap_read, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_read, CTLFLAG_RW,
	   &hammer2_iod_volu_read, 0, "");

SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_write, CTLFLAG_RW,
	   &hammer2_iod_file_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wembed, CTLFLAG_RW,
	   &hammer2_iod_file_wembed, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wzero, CTLFLAG_RW,
	   &hammer2_iod_file_wzero, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_file_wdedup, CTLFLAG_RW,
	   &hammer2_iod_file_wdedup, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_meta_write, CTLFLAG_RW,
	   &hammer2_iod_meta_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_indr_write, CTLFLAG_RW,
	   &hammer2_iod_indr_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_fmap_write, CTLFLAG_RW,
	   &hammer2_iod_fmap_write, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, iod_volu_write, CTLFLAG_RW,
	   &hammer2_iod_volu_write, 0, "");

long hammer2_process_icrc32;
long hammer2_process_xxhash64;
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, process_icrc32, CTLFLAG_RW,
	   &hammer2_process_icrc32, 0, "");
SYSCTL_LONG(_vfs_hammer2, OID_AUTO, process_xxhash64, CTLFLAG_RW,
	   &hammer2_process_xxhash64, 0, "");
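/*
 * Usage sketch (illustrative; not part of the original source): the knobs
 * registered above are exported under the vfs.hammer2 sysctl tree, so they
 * can be inspected or tuned from userland with the standard sysctl(8) tool,
 * for example:
 *
 *	sysctl vfs.hammer2.dedup_enable		(read the current value)
 *	sysctl vfs.hammer2.flush_pipe=200	(adjust a CTLFLAG_RW knob)
 */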
static int hammer2_vfs_init(struct vfsconf *conf);
static int hammer2_vfs_uninit(struct vfsconf *vfsp);
static int hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
				struct ucred *cred);
static int hammer2_remount(hammer2_dev_t *, struct mount *, char *,
				struct vnode *, struct ucred *);
static int hammer2_recovery(hammer2_dev_t *hmp);
static int hammer2_vfs_unmount(struct mount *mp, int mntflags);
static int hammer2_vfs_root(struct mount *mp, struct vnode **vpp);
static int hammer2_vfs_statfs(struct mount *mp, struct statfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
				struct ucred *cred);
static int hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
				struct fid *fhp, struct vnode **vpp);
static int hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
				int *exflagsp, struct ucred **credanonp);
static int hammer2_install_volume_header(hammer2_dev_t *hmp);
static int hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data);

static void hammer2_update_pmps(hammer2_dev_t *hmp);

static void hammer2_mount_helper(struct mount *mp, hammer2_pfs_t *pmp);
static void hammer2_unmount_helper(struct mount *mp, hammer2_pfs_t *pmp,
				hammer2_dev_t *hmp);
static int hammer2_fixup_pfses(hammer2_dev_t *hmp);
/*
 * HAMMER2 vfs operations.
 */
static struct vfsops hammer2_vfsops = {
	.vfs_init	= hammer2_vfs_init,
	.vfs_uninit	= hammer2_vfs_uninit,
	.vfs_sync	= hammer2_vfs_sync,
	.vfs_mount	= hammer2_vfs_mount,
	.vfs_unmount	= hammer2_vfs_unmount,
	.vfs_root	= hammer2_vfs_root,
	.vfs_statfs	= hammer2_vfs_statfs,
	.vfs_statvfs	= hammer2_vfs_statvfs,
	.vfs_vget	= hammer2_vfs_vget,
	.vfs_vptofh	= hammer2_vfs_vptofh,
	.vfs_fhtovp	= hammer2_vfs_fhtovp,
	.vfs_checkexp	= hammer2_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER2, "HAMMER2-mount", "");

VFS_SET(hammer2_vfsops, hammer2, VFCF_MPSAFE);
MODULE_VERSION(hammer2, 1);
static int
hammer2_vfs_init(struct vfsconf *conf)
{
	static struct objcache_malloc_args margs_read;
	static struct objcache_malloc_args margs_write;
	static struct objcache_malloc_args margs_vop;
	int error;

	error = 0;

	/*
	 * A large DIO cache is needed to retain dedup enablement masks.
	 * The bulkfree code clears related masks as part of the disk block
	 * recycling algorithm, preventing it from being used for a later
	 * dedup.
	 *
	 * NOTE: A large buffer cache can actually interfere with dedup
	 *	 operation because we dedup based on media physical buffers
	 *	 and not logical buffers.  Try to make the DIO case large
	 *	 enough to avoid this problem, but also cap it.
	 */
	hammer2_dio_limit = nbuf * 2;
	if (hammer2_dio_limit > 100000)
		hammer2_dio_limit = 100000;
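	/*
	 * Worked example (illustrative numbers, not from the original
	 * source): on a system whose buffer cache provides nbuf = 30000
	 * buffers, the computation above yields an initial DIO limit of
	 * 60000; with nbuf = 80000 the computed 160000 is clamped to the
	 * 100000 cap.
	 */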
	if (HAMMER2_BLOCKREF_BYTES != sizeof(struct hammer2_blockref))
		error = -1;
	if (HAMMER2_INODE_BYTES != sizeof(struct hammer2_inode_data))
		error = -1;
	if (HAMMER2_VOLUME_BYTES != sizeof(struct hammer2_volume_data))
		error = -1;

	if (error)
		kprintf("HAMMER2 structure size mismatch; cannot continue.\n");

	margs_read.objsize = 65536;
	margs_read.mtype = M_HAMMER2_DEBUFFER;

	margs_write.objsize = 32768;
	margs_write.mtype = M_HAMMER2_CBUFFER;

	margs_vop.objsize = sizeof(hammer2_xop_t);
	margs_vop.mtype = M_HAMMER2;

	/*
	 * Note that for the XOPS cache we want backing store allocations
	 * to use M_ZERO.  This is not allowed in objcache_get() (to avoid
	 * confusion), so use the backing store function that does it.  This
	 * means that initial XOPS objects are zeroed but REUSED objects are
	 * not.  So we are responsible for cleaning the object up sufficiently
	 * for our needs before objcache_put()ing it back (typically just the
	 * FIFO indices).
	 */
	cache_buffer_read = objcache_create(margs_read.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc,
				objcache_malloc_free,
				&margs_read);
	cache_buffer_write = objcache_create(margs_write.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc,
				objcache_malloc_free,
				&margs_write);
	cache_xops = objcache_create(margs_vop.mtype->ks_shortdesc,
				0, 1, NULL, NULL, NULL,
				objcache_malloc_alloc_zero,
				objcache_malloc_free,
				&margs_vop);

	lockinit(&hammer2_mntlk, "mntlk", 0, 0);
	TAILQ_INIT(&hammer2_mntlist);
	TAILQ_INIT(&hammer2_pfslist);
	TAILQ_INIT(&hammer2_spmplist);

	hammer2_limit_dirty_chains = maxvnodes / 10;
	if (hammer2_limit_dirty_chains > HAMMER2_LIMIT_DIRTY_CHAINS)
		hammer2_limit_dirty_chains = HAMMER2_LIMIT_DIRTY_CHAINS;

	return (error);
}

static int
hammer2_vfs_uninit(struct vfsconf *vfsp __unused)
{
	objcache_destroy(cache_buffer_read);
	objcache_destroy(cache_buffer_write);
	objcache_destroy(cache_xops);
	return 0;
}
/*
 * Core PFS allocator.  Used to allocate or reference the pmp structure
 * for PFS cluster mounts and the spmp structure for media (hmp) structures.
 * The pmp can be passed in or loaded by this function using the chain and
 * inode data.
 *
 * pmp->modify_tid tracks new modify_tid transaction ids for front-end
 * transactions.  Note that synchronization does not use this field
 * (typically frontend operations and synchronization cannot run on the
 * same PFS node at the same time).
 */
hammer2_pfs_t *
hammer2_pfsalloc(hammer2_chain_t *chain,
		 const hammer2_inode_data_t *ripdata,
		 hammer2_tid_t modify_tid, hammer2_dev_t *force_local)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *iroot;
	/*
	 * Locate or create the PFS based on the cluster id.  If ripdata
	 * is NULL this is a spmp which is unique and is always allocated.
	 *
	 * If the device is mounted in local mode all PFSs are considered
	 * independent and not part of any cluster (for debugging only).
	 */
	TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
		if (force_local != pmp->force_local)
			continue;
		if (force_local == NULL &&
		    bcmp(&pmp->pfs_clid, &ripdata->meta.pfs_clid,
			 sizeof(pmp->pfs_clid)) == 0) {
			break;
		} else if (force_local && pmp->pfs_names[0] &&
			   strcmp(pmp->pfs_names[0], ripdata->filename) == 0) {
			break;
		}
	}
	pmp = kmalloc(sizeof(*pmp), M_HAMMER2, M_WAITOK | M_ZERO);
	pmp->force_local = force_local;
	hammer2_trans_manage_init(pmp);
	kmalloc_create(&pmp->minode, "HAMMER2-inodes");
	kmalloc_create(&pmp->mmsg, "HAMMER2-pfsmsg");
	lockinit(&pmp->lock, "pfslk", 0, 0);
	lockinit(&pmp->lock_nlink, "h2nlink", 0, 0);
	spin_init(&pmp->inum_spin, "hm2pfsalloc_inum");
	spin_init(&pmp->xop_spin, "h2xop");
	spin_init(&pmp->lru_spin, "h2lru");
	RB_INIT(&pmp->inum_tree);
	TAILQ_INIT(&pmp->sideq);
	TAILQ_INIT(&pmp->lru_list);
	spin_init(&pmp->list_spin, "hm2pfsalloc_list");

	/*
	 * Distribute backend operations to threads
	 */
	for (i = 0; i < HAMMER2_XOPGROUPS; ++i)
		hammer2_xop_group_init(pmp, &pmp->xop_groups[i]);
407 * Save the last media transaction id for the flusher. Set
411 pmp
->pfs_clid
= ripdata
->meta
.pfs_clid
;
412 TAILQ_INSERT_TAIL(&hammer2_pfslist
, pmp
, mntentry
);
414 pmp
->flags
|= HAMMER2_PMPF_SPMP
;
415 TAILQ_INSERT_TAIL(&hammer2_spmplist
, pmp
, mntentry
);
419 * The synchronization thread may start too early, make
420 * sure it stays frozen until we are ready to let it go.
424 pmp->primary_thr.flags = HAMMER2_THREAD_FROZEN |
425 HAMMER2_THREAD_REMASTER;
430 * Create the PFS's root inode and any missing XOP helper threads.
432 if ((iroot
= pmp
->iroot
) == NULL
) {
433 iroot
= hammer2_inode_get(pmp
, NULL
, NULL
, -1);
435 iroot
->meta
= ripdata
->meta
;
437 hammer2_inode_ref(iroot
);
438 hammer2_inode_unlock(iroot
);
	/*
	 * Stop here if no chain is passed in.
	 */

	/*
	 * When a chain is passed in we must add it to the PFS's root
	 * inode, update pmp->pfs_types[], and update the synchronization
	 * threads.
	 *
	 * When forcing local mode, mark the PFS as a MASTER regardless.
	 *
	 * At the moment empty spots can develop due to removals or failures.
	 * Ultimately we want to re-fill these spots but doing so might
	 * confuse running code.  XXX
	 */
458 hammer2_inode_ref(iroot
);
459 hammer2_mtx_ex(&iroot
->lock
);
460 j
= iroot
->cluster
.nchains
;
462 if (j
== HAMMER2_MAXCLUSTER
) {
463 kprintf("hammer2_mount: cluster full!\n");
464 /* XXX fatal error? */
466 KKASSERT(chain
->pmp
== NULL
);
468 hammer2_chain_ref(chain
);
469 iroot
->cluster
.array
[j
].chain
= chain
;
471 pmp
->pfs_types
[j
] = HAMMER2_PFSTYPE_MASTER
;
473 pmp
->pfs_types
[j
] = ripdata
->meta
.pfs_type
;
474 pmp
->pfs_names
[j
] = kstrdup(ripdata
->filename
, M_HAMMER2
);
475 pmp
->pfs_hmps
[j
] = chain
->hmp
;
478 * If the PFS is already mounted we must account
479 * for the mount_count here.
482 ++chain
->hmp
->mount_count
;
485 * May have to fixup dirty chain tracking. Previous
486 * pmp was NULL so nothing to undo.
488 if (chain
->flags
& HAMMER2_CHAIN_MODIFIED
)
489 hammer2_pfs_memory_inc(pmp
);
492 iroot
->cluster
.nchains
= j
;
495 * Update nmasters from any PFS inode which is part of the cluster.
496 * It is possible that this will result in a value which is too
497 * high. MASTER PFSs are authoritative for pfs_nmasters and will
498 * override this value later on.
500 * (This informs us of masters that might not currently be
501 * discoverable by this mount).
503 if (ripdata
&& pmp
->pfs_nmasters
< ripdata
->meta
.pfs_nmasters
) {
504 pmp
->pfs_nmasters
= ripdata
->meta
.pfs_nmasters
;
508 * Count visible masters. Masters are usually added with
509 * ripdata->meta.pfs_nmasters set to 1. This detects when there
510 * are more (XXX and must update the master inodes).
513 for (i
= 0; i
< iroot
->cluster
.nchains
; ++i
) {
514 if (pmp
->pfs_types
[i
] == HAMMER2_PFSTYPE_MASTER
)
517 if (pmp
->pfs_nmasters
< count
)
518 pmp
->pfs_nmasters
= count
;
521 * Create missing synchronization and support threads.
523 * Single-node masters (including snapshots) have nothing to
524 * synchronize and do not require this thread.
526 * Multi-node masters or any number of soft masters, slaves, copy,
527 * or other PFS types need the thread.
529 * Each thread is responsible for its particular cluster index.
530 * We use independent threads so stalls or mismatches related to
531 * any given target do not affect other targets.
533 for (i
= 0; i
< iroot
->cluster
.nchains
; ++i
) {
535 * Single-node masters (including snapshots) have nothing
536 * to synchronize and will make direct xops support calls,
537 * thus they do not require this thread.
539 * Note that there can be thousands of snapshots. We do not
540 * want to create thousands of threads.
542 if (pmp
->pfs_nmasters
<= 1 &&
543 pmp
->pfs_types
[i
] == HAMMER2_PFSTYPE_MASTER
) {
548 * Sync support thread
550 if (pmp
->sync_thrs
[i
].td
== NULL
) {
551 hammer2_thr_create(&pmp
->sync_thrs
[i
], pmp
, NULL
,
553 hammer2_primary_sync_thread
);
558 * Create missing Xop threads
560 * NOTE: We create helper threads for all mounted PFSs or any
561 * PFSs with 2+ nodes (so the sync thread can update them,
562 * even if not mounted).
564 if (pmp
->mp
|| iroot
->cluster
.nchains
>= 2)
565 hammer2_xop_helper_create(pmp
);
567 hammer2_mtx_unlock(&iroot
->lock
);
568 hammer2_inode_drop(iroot
);
574 * Deallocate an element of a probed PFS. If destroying and this is a
575 * MASTER, adjust nmasters.
577 * This function does not physically destroy the PFS element in its device
578 * under the super-root (see hammer2_ioctl_pfs_delete()).
581 hammer2_pfsdealloc(hammer2_pfs_t
*pmp
, int clindex
, int destroying
)
583 hammer2_inode_t
*iroot
;
584 hammer2_chain_t
*chain
;
588 * Cleanup our reference on iroot. iroot is (should) not be needed
596 * XXX flush after acquiring the iroot lock.
597 * XXX clean out the cluster index from all inode structures.
599 hammer2_thr_delete(&pmp
->sync_thrs
[clindex
]);
602 * Remove the cluster index from the group. If destroying
603 * the PFS and this is a master, adjust pfs_nmasters.
605 hammer2_mtx_ex(&iroot
->lock
);
606 chain
= iroot
->cluster
.array
[clindex
].chain
;
607 iroot
->cluster
.array
[clindex
].chain
= NULL
;
609 switch(pmp
->pfs_types
[clindex
]) {
610 case HAMMER2_PFSTYPE_MASTER
:
611 if (destroying
&& pmp
->pfs_nmasters
> 0)
613 /* XXX adjust ripdata->meta.pfs_nmasters */
618 pmp
->pfs_types
[clindex
] = HAMMER2_PFSTYPE_NONE
;
620 hammer2_mtx_unlock(&iroot
->lock
);
626 atomic_set_int(&chain
->flags
, HAMMER2_CHAIN_RELEASE
);
627 hammer2_chain_drop(chain
);
631 * Terminate all XOP threads for the cluster index.
633 for (j
= 0; j
< HAMMER2_XOPGROUPS
; ++j
)
634 hammer2_thr_delete(&pmp
->xop_groups
[j
].thrs
[clindex
]);
639 * Destroy a PFS, typically only occurs after the last mount on a device
643 hammer2_pfsfree(hammer2_pfs_t
*pmp
)
645 hammer2_inode_t
*iroot
;
646 hammer2_chain_t
*chain
;
651 * Cleanup our reference on iroot. iroot is (should) not be needed
654 if (pmp
->flags
& HAMMER2_PMPF_SPMP
)
655 TAILQ_REMOVE(&hammer2_spmplist
, pmp
, mntentry
);
657 TAILQ_REMOVE(&hammer2_pfslist
, pmp
, mntentry
);
661 for (i
= 0; i
< iroot
->cluster
.nchains
; ++i
) {
662 hammer2_thr_delete(&pmp
->sync_thrs
[i
]);
663 for (j
= 0; j
< HAMMER2_XOPGROUPS
; ++j
)
664 hammer2_thr_delete(&pmp
->xop_groups
[j
].thrs
[i
]);
666 #if REPORT_REFS_ERRORS
667 if (pmp
->iroot
->refs
!= 1)
668 kprintf("PMP->IROOT %p REFS WRONG %d\n",
669 pmp
->iroot
, pmp
->iroot
->refs
);
671 KKASSERT(pmp
->iroot
->refs
== 1);
673 /* ref for pmp->iroot */
674 hammer2_inode_drop(pmp
->iroot
);
679 * Cleanup chains remaining on LRU list.
681 hammer2_spin_ex(&pmp
->lru_spin
);
682 while ((chain
= TAILQ_FIRST(&pmp
->lru_list
)) != NULL
) {
683 KKASSERT(chain
->flags
& HAMMER2_CHAIN_ONLRU
);
684 atomic_add_int(&pmp
->lru_count
, -1);
685 atomic_clear_int(&chain
->flags
, HAMMER2_CHAIN_ONLRU
);
686 TAILQ_REMOVE(&pmp
->lru_list
, chain
, lru_node
);
687 hammer2_chain_ref(chain
);
688 hammer2_spin_unex(&pmp
->lru_spin
);
689 atomic_set_int(&chain
->flags
, HAMMER2_CHAIN_RELEASE
);
690 hammer2_chain_drop(chain
);
691 hammer2_spin_ex(&pmp
->lru_spin
);
693 hammer2_spin_unex(&pmp
->lru_spin
);
696 * Free remaining pmp resources
698 kmalloc_destroy(&pmp
->mmsg
);
699 kmalloc_destroy(&pmp
->minode
);
701 kfree(pmp
, M_HAMMER2
);
705 * Remove all references to hmp from the pfs list. Any PFS which becomes
706 * empty is terminated and freed.
711 hammer2_pfsfree_scan(hammer2_dev_t
*hmp
, int which
)
714 hammer2_inode_t
*iroot
;
715 hammer2_chain_t
*rchain
;
719 struct hammer2_pfslist
*wlist
;
722 wlist
= &hammer2_pfslist
;
724 wlist
= &hammer2_spmplist
;
726 TAILQ_FOREACH(pmp
, wlist
, mntentry
) {
727 if ((iroot
= pmp
->iroot
) == NULL
)
729 hammer2_trans_init(pmp
, HAMMER2_TRANS_ISFLUSH
);
730 hammer2_inode_run_sideq(pmp
, 1);
731 hammer2_bioq_sync(pmp
);
732 hammer2_trans_done(pmp
);
735 * Determine if this PFS is affected. If it is we must
736 * freeze all management threads and lock its iroot.
738 * Freezing a management thread forces it idle, operations
739 * in-progress will be aborted and it will have to start
740 * over again when unfrozen, or exit if told to exit.
742 for (i
= 0; i
< HAMMER2_MAXCLUSTER
; ++i
) {
743 if (pmp
->pfs_hmps
[i
] == hmp
)
746 if (i
!= HAMMER2_MAXCLUSTER
) {
748 * Make sure all synchronization threads are locked
751 for (i
= 0; i
< HAMMER2_MAXCLUSTER
; ++i
) {
752 if (pmp
->pfs_hmps
[i
] == NULL
)
754 hammer2_thr_freeze_async(&pmp
->sync_thrs
[i
]);
755 for (j
= 0; j
< HAMMER2_XOPGROUPS
; ++j
) {
756 hammer2_thr_freeze_async(
757 &pmp
->xop_groups
[j
].thrs
[i
]);
760 for (i
= 0; i
< HAMMER2_MAXCLUSTER
; ++i
) {
761 if (pmp
->pfs_hmps
[i
] == NULL
)
763 hammer2_thr_freeze(&pmp
->sync_thrs
[i
]);
764 for (j
= 0; j
< HAMMER2_XOPGROUPS
; ++j
) {
766 &pmp
->xop_groups
[j
].thrs
[i
]);
771 * Lock the inode and clean out matching chains.
772 * Note that we cannot use hammer2_inode_lock_*()
773 * here because that would attempt to validate the
774 * cluster that we are in the middle of ripping
777 * WARNING! We are working directly on the inodes
780 hammer2_mtx_ex(&iroot
->lock
);
783 * Remove the chain from matching elements of the PFS.
785 for (i
= 0; i
< HAMMER2_MAXCLUSTER
; ++i
) {
786 if (pmp
->pfs_hmps
[i
] != hmp
)
788 hammer2_thr_delete(&pmp
->sync_thrs
[i
]);
789 for (j
= 0; j
< HAMMER2_XOPGROUPS
; ++j
) {
791 &pmp
->xop_groups
[j
].thrs
[i
]);
793 rchain
= iroot
->cluster
.array
[i
].chain
;
794 iroot
->cluster
.array
[i
].chain
= NULL
;
795 pmp
->pfs_types
[i
] = 0;
796 if (pmp
->pfs_names
[i
]) {
797 kfree(pmp
->pfs_names
[i
], M_HAMMER2
);
798 pmp
->pfs_names
[i
] = NULL
;
801 hammer2_chain_drop(rchain
);
803 if (iroot
->cluster
.focus
== rchain
)
804 iroot
->cluster
.focus
= NULL
;
806 pmp
->pfs_hmps
[i
] = NULL
;
808 hammer2_mtx_unlock(&iroot
->lock
);
809 didfreeze
= 1; /* remaster, unfreeze down below */
815 * Cleanup trailing chains. Gaps may remain.
817 for (i
= HAMMER2_MAXCLUSTER
- 1; i
>= 0; --i
) {
818 if (pmp
->pfs_hmps
[i
])
821 iroot
->cluster
.nchains
= i
+ 1;
824 * If the PMP has no elements remaining we can destroy it.
825 * (this will transition management threads from frozen->exit).
827 if (iroot
->cluster
.nchains
== 0) {
829 * If this was the hmp's spmp, we need to clean
830 * a little more stuff out.
832 if (hmp
->spmp
== pmp
) {
834 hmp
->vchain
.pmp
= NULL
;
835 hmp
->fchain
.pmp
= NULL
;
839 * Free the pmp and restart the loop
841 hammer2_pfsfree(pmp
);
846 * If elements still remain we need to set the REMASTER
847 * flag and unfreeze it.
850 for (i
= 0; i
< HAMMER2_MAXCLUSTER
; ++i
) {
851 if (pmp
->pfs_hmps
[i
] == NULL
)
853 hammer2_thr_remaster(&pmp
->sync_thrs
[i
]);
854 hammer2_thr_unfreeze(&pmp
->sync_thrs
[i
]);
855 for (j
= 0; j
< HAMMER2_XOPGROUPS
; ++j
) {
856 hammer2_thr_remaster(
857 &pmp
->xop_groups
[j
].thrs
[i
]);
858 hammer2_thr_unfreeze(
859 &pmp
->xop_groups
[j
].thrs
[i
]);
/*
 * Mount or remount HAMMER2 filesystem from physical media
 *
 *	mountroot
 *		mp		mount point structure
 *
 *	mount
 *		mp		mount point structure
 *		path		path to mount point
 *		data		pointer to argument structure in user space
 *			volume	volume path (device@LABEL form)
 *			hflags	user mount flags
 *		cred		user credentials
 */
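/*
 * Usage sketch (illustrative; the device and mount point names below are
 * made up): a typical first mount names the device and PFS label together
 * in the device@LABEL form described above, e.g.
 *
 *	mount_hammer2 /dev/da0s1a@DATA /mnt
 *
 * Subsequent mounts of other labels residing on an already-mounted device
 * may specify just @LABEL, per the label-handling notes further below.
 */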
888 hammer2_vfs_mount(struct mount
*mp
, char *path
, caddr_t data
,
891 struct hammer2_mount_info info
;
895 hammer2_dev_t
*force_local
;
896 hammer2_key_t key_next
;
897 hammer2_key_t key_dummy
;
900 struct nlookupdata nd
;
901 hammer2_chain_t
*parent
;
902 hammer2_chain_t
*chain
;
903 hammer2_cluster_t
*cluster
;
904 const hammer2_inode_data_t
*ripdata
;
905 hammer2_blockref_t bref
;
907 char devstr
[MNAMELEN
];
922 kprintf("hammer2_mount\n");
928 bzero(&info
, sizeof(info
));
929 info
.cluster_fd
= -1;
930 ksnprintf(devstr
, sizeof(devstr
), "%s",
931 mp
->mnt_stat
.f_mntfromname
);
932 kprintf("hammer2_mount: root '%s'\n", devstr
);
935 * Non-root mount or updating a mount
937 error
= copyin(data
, &info
, sizeof(info
));
941 error
= copyinstr(info
.volume
, devstr
, MNAMELEN
- 1, &done
);
947 * Extract device and label, automatically mount @BOOT, @ROOT, or @DATA
948 * if no label specified, based on the partition id. Error out if no
949 * label or device (with partition id) is specified. This is strictly
950 * a convenience to match the default label created by newfs_hammer2,
951 * our preference is that a label always be specified.
953 * NOTE: We allow 'mount @LABEL <blah>'... that is, a mount command
954 * that does not specify a device, as long as some H2 label
955 * has already been mounted from that device. This makes
956 * mounting snapshots a lot easier.
959 label
= strchr(devstr
, '@');
960 if (label
&& ((label
+ 1) - dev
) > done
)
962 if (label
== NULL
|| label
[1] == 0) {
966 label
= devstr
+ strlen(devstr
);
984 kprintf("hammer2_mount: dev=\"%s\" label=\"%s\" rdonly=%d\n",
985 dev
, label
, (mp
->mnt_flag
& MNT_RDONLY
));
987 if (mp
->mnt_flag
& MNT_UPDATE
) {
989 * Update mount. Note that pmp->iroot->cluster is
990 * an inode-embedded cluster and thus cannot be
993 * XXX HAMMER2 needs to implement NFS export via
997 pmp
->hflags
= info
.hflags
;
998 cluster
= &pmp
->iroot
->cluster
;
999 for (i
= 0; i
< cluster
->nchains
; ++i
) {
1000 if (cluster
->array
[i
].chain
== NULL
)
1002 hmp
= cluster
->array
[i
].chain
->hmp
;
1004 error
= hammer2_remount(hmp
, mp
, path
,
	/*
	 * If a path is specified and dev is not an empty string, lookup the
	 * name and verify that it refers to a block device.
	 *
	 * If a path is specified and dev is an empty string we fall through
	 * and locate the label in the hmp search.
	 */
1022 if (path
&& *dev
!= 0) {
1023 error
= nlookup_init(&nd
, dev
, UIO_SYSSPACE
, NLC_FOLLOW
);
1025 error
= nlookup(&nd
);
1027 error
= cache_vref(&nd
.nl_nch
, nd
.nl_cred
, &devvp
);
1029 } else if (path
== NULL
) {
1031 cdev_t cdev
= kgetdiskbyname(dev
);
1032 error
= bdevvp(cdev
, &devvp
);
1034 kprintf("hammer2: cannot find '%s'\n", dev
);
1037 * We will locate the hmp using the label in the hmp loop.
	/*
	 * Make sure it's a block device.  Do not check to see if it is
	 * already mounted until we determine that it's a fresh H2 device.
	 */
1046 if (error
== 0 && devvp
) {
1047 vn_isdisk(devvp
, &error
);
1051 * Determine if the device has already been mounted. After this
1052 * check hmp will be non-NULL if we are doing the second or more
1053 * hammer2 mounts from the same device.
1055 lockmgr(&hammer2_mntlk
, LK_EXCLUSIVE
);
1058 * Match the device. Due to the way devfs works,
1059 * we may not be able to directly match the vnode pointer,
1060 * so also check to see if the underlying device matches.
1062 TAILQ_FOREACH(hmp
, &hammer2_mntlist
, mntentry
) {
1063 if (hmp
->devvp
== devvp
)
1065 if (devvp
->v_rdev
&&
1066 hmp
->devvp
->v_rdev
== devvp
->v_rdev
) {
1072 * If no match this may be a fresh H2 mount, make sure
1073 * the device is not mounted on anything else.
1076 error
= vfs_mountedon(devvp
);
1077 } else if (error
== 0) {
1079 * Match the label to a pmp already probed.
1081 TAILQ_FOREACH(pmp
, &hammer2_pfslist
, mntentry
) {
1082 for (i
= 0; i
< HAMMER2_MAXCLUSTER
; ++i
) {
1083 if (pmp
->pfs_names
[i
] &&
1084 strcmp(pmp
->pfs_names
[i
], label
) == 0) {
1085 hmp
= pmp
->pfs_hmps
[i
];
1097 * Open the device if this isn't a secondary mount and construct
1098 * the H2 device mount (hmp).
1101 hammer2_chain_t
*schain
;
1104 if (error
== 0 && vcount(devvp
) > 0) {
1105 kprintf("Primary device already has references\n");
1110 * Now open the device
1113 ronly
= ((mp
->mnt_flag
& MNT_RDONLY
) != 0);
1114 vn_lock(devvp
, LK_EXCLUSIVE
| LK_RETRY
);
1115 error
= vinvalbuf(devvp
, V_SAVE
, 0, 0);
1117 error
= VOP_OPEN(devvp
,
1118 (ronly
? FREAD
: FREAD
| FWRITE
),
1123 if (error
&& devvp
) {
1128 lockmgr(&hammer2_mntlk
, LK_RELEASE
);
1131 hmp
= kmalloc(sizeof(*hmp
), M_HAMMER2
, M_WAITOK
| M_ZERO
);
1132 ksnprintf(hmp
->devrepname
, sizeof(hmp
->devrepname
), "%s", dev
);
1135 hmp
->hflags
= info
.hflags
& HMNT2_DEVFLAGS
;
1136 kmalloc_create(&hmp
->mchain
, "HAMMER2-chains");
1137 TAILQ_INSERT_TAIL(&hammer2_mntlist
, hmp
, mntentry
);
1138 RB_INIT(&hmp
->iotree
);
1139 spin_init(&hmp
->io_spin
, "hm2mount_io");
1140 spin_init(&hmp
->list_spin
, "hm2mount_list");
1141 TAILQ_INIT(&hmp
->flushq
);
1143 lockinit(&hmp
->vollk
, "h2vol", 0, 0);
1144 lockinit(&hmp
->bulklk
, "h2bulk", 0, 0);
1145 lockinit(&hmp
->bflock
, "h2bflk", 0, 0);
1148 * vchain setup. vchain.data is embedded.
1149 * vchain.refs is initialized and will never drop to 0.
1151 * NOTE! voldata is not yet loaded.
1153 hmp
->vchain
.hmp
= hmp
;
1154 hmp
->vchain
.refs
= 1;
1155 hmp
->vchain
.data
= (void *)&hmp
->voldata
;
1156 hmp
->vchain
.bref
.type
= HAMMER2_BREF_TYPE_VOLUME
;
1157 hmp
->vchain
.bref
.data_off
= 0 | HAMMER2_PBUFRADIX
;
1158 hmp
->vchain
.bref
.mirror_tid
= hmp
->voldata
.mirror_tid
;
1160 hammer2_chain_core_init(&hmp
->vchain
);
1161 /* hmp->vchain.u.xxx is left NULL */
1164 * fchain setup. fchain.data is embedded.
1165 * fchain.refs is initialized and will never drop to 0.
1167 * The data is not used but needs to be initialized to
1168 * pass assertion muster. We use this chain primarily
1169 * as a placeholder for the freemap's top-level RBTREE
1170 * so it does not interfere with the volume's topology
1173 hmp
->fchain
.hmp
= hmp
;
1174 hmp
->fchain
.refs
= 1;
1175 hmp
->fchain
.data
= (void *)&hmp
->voldata
.freemap_blockset
;
1176 hmp
->fchain
.bref
.type
= HAMMER2_BREF_TYPE_FREEMAP
;
1177 hmp
->fchain
.bref
.data_off
= 0 | HAMMER2_PBUFRADIX
;
1178 hmp
->fchain
.bref
.mirror_tid
= hmp
->voldata
.freemap_tid
;
1179 hmp
->fchain
.bref
.methods
=
1180 HAMMER2_ENC_CHECK(HAMMER2_CHECK_FREEMAP
) |
1181 HAMMER2_ENC_COMP(HAMMER2_COMP_NONE
);
1183 hammer2_chain_core_init(&hmp
->fchain
);
1184 /* hmp->fchain.u.xxx is left NULL */
1187 * Install the volume header and initialize fields from
1190 error
= hammer2_install_volume_header(hmp
);
1192 hammer2_unmount_helper(mp
, NULL
, hmp
);
1193 lockmgr(&hammer2_mntlk
, LK_RELEASE
);
1194 hammer2_vfs_unmount(mp
, MNT_FORCE
);
1199 * Really important to get these right or flush will get
1202 hmp
->spmp
= hammer2_pfsalloc(NULL
, NULL
, 0, NULL
);
1206 * Dummy-up vchain and fchain's modify_tid. mirror_tid
1207 * is inherited from the volume header.
1210 hmp
->vchain
.bref
.mirror_tid
= hmp
->voldata
.mirror_tid
;
1211 hmp
->vchain
.bref
.modify_tid
= hmp
->vchain
.bref
.mirror_tid
;
1212 hmp
->vchain
.pmp
= spmp
;
1213 hmp
->fchain
.bref
.mirror_tid
= hmp
->voldata
.freemap_tid
;
1214 hmp
->fchain
.bref
.modify_tid
= hmp
->fchain
.bref
.mirror_tid
;
1215 hmp
->fchain
.pmp
= spmp
;
1218 * First locate the super-root inode, which is key 0
1219 * relative to the volume header's blockset.
1221 * Then locate the root inode by scanning the directory keyspace
1222 * represented by the label.
1224 parent
= hammer2_chain_lookup_init(&hmp
->vchain
, 0);
1225 schain
= hammer2_chain_lookup(&parent
, &key_dummy
,
1226 HAMMER2_SROOT_KEY
, HAMMER2_SROOT_KEY
,
1228 hammer2_chain_lookup_done(parent
);
1229 if (schain
== NULL
) {
1230 kprintf("hammer2_mount: invalid super-root\n");
1231 hammer2_unmount_helper(mp
, NULL
, hmp
);
1232 lockmgr(&hammer2_mntlk
, LK_RELEASE
);
1233 hammer2_vfs_unmount(mp
, MNT_FORCE
);
1236 if (schain
->error
) {
1237 kprintf("hammer2_mount: error %s reading super-root\n",
1238 hammer2_error_str(schain
->error
));
1239 hammer2_chain_unlock(schain
);
1240 hammer2_chain_drop(schain
);
1242 hammer2_unmount_helper(mp
, NULL
, hmp
);
1243 lockmgr(&hammer2_mntlk
, LK_RELEASE
);
1244 hammer2_vfs_unmount(mp
, MNT_FORCE
);
1249 * The super-root always uses an inode_tid of 1 when
1252 spmp
->inode_tid
= 1;
1253 spmp
->modify_tid
= schain
->bref
.modify_tid
+ 1;
1256 * Sanity-check schain's pmp and finish initialization.
1257 * Any chain belonging to the super-root topology should
1258 * have a NULL pmp (not even set to spmp).
1260 ripdata
= &hammer2_chain_rdata(schain
)->ipdata
;
1261 KKASSERT(schain
->pmp
== NULL
);
1262 spmp
->pfs_clid
= ripdata
->meta
.pfs_clid
;
1265 * Replace the dummy spmp->iroot with a real one. It's
1266 * easier to just do a wholesale replacement than to try
1267 * to update the chain and fixup the iroot fields.
1269 * The returned inode is locked with the supplied cluster.
1271 cluster
= hammer2_cluster_from_chain(schain
);
1272 hammer2_inode_drop(spmp
->iroot
);
1274 spmp
->iroot
= hammer2_inode_get(spmp
, NULL
, cluster
, -1);
1275 spmp
->spmp_hmp
= hmp
;
1276 spmp
->pfs_types
[0] = ripdata
->meta
.pfs_type
;
1277 spmp
->pfs_hmps
[0] = hmp
;
1278 hammer2_inode_ref(spmp
->iroot
);
1279 hammer2_inode_unlock(spmp
->iroot
);
1280 hammer2_cluster_unlock(cluster
);
1281 hammer2_cluster_drop(cluster
);
1283 /* leave spmp->iroot with one ref */
1285 if ((mp
->mnt_flag
& MNT_RDONLY
) == 0) {
1286 error
= hammer2_recovery(hmp
);
1288 error
|= hammer2_fixup_pfses(hmp
);
1289 /* XXX do something with error */
1291 hammer2_update_pmps(hmp
);
1292 hammer2_iocom_init(hmp
);
1293 hammer2_bulkfree_init(hmp
);
1296 * Ref the cluster management messaging descriptor. The mount
1297 * program deals with the other end of the communications pipe.
1299 * Root mounts typically do not supply one.
1301 if (info
.cluster_fd
>= 0) {
1302 fp
= holdfp(curproc
->p_fd
, info
.cluster_fd
, -1);
1304 hammer2_cluster_reconnect(hmp
, fp
);
1306 kprintf("hammer2_mount: bad cluster_fd!\n");
1311 if (info
.hflags
& HMNT2_DEVFLAGS
) {
1312 kprintf("hammer2: Warning: mount flags pertaining "
1313 "to the whole device may only be specified "
1314 "on the first mount of the device: %08x\n",
1315 info
.hflags
& HMNT2_DEVFLAGS
);
1320 * Force local mount (disassociate all PFSs from their clusters).
1321 * Used primarily for debugging.
1323 force_local
= (hmp
->hflags
& HMNT2_LOCAL
) ? hmp
: NULL
;
1326 * Lookup the mount point under the media-localized super-root.
1327 * Scanning hammer2_pfslist doesn't help us because it represents
1328 * PFS cluster ids which can aggregate several named PFSs together.
1330 * cluster->pmp will incorrectly point to spmp and must be fixed
1333 hammer2_inode_lock(spmp
->iroot
, 0);
1334 parent
= hammer2_inode_chain(spmp
->iroot
, 0, HAMMER2_RESOLVE_ALWAYS
);
1335 lhc
= hammer2_dirhash(label
, strlen(label
));
1336 chain
= hammer2_chain_lookup(&parent
, &key_next
,
1337 lhc
, lhc
+ HAMMER2_DIRHASH_LOMASK
,
1340 if (chain
->bref
.type
== HAMMER2_BREF_TYPE_INODE
&&
1341 strcmp(label
, chain
->data
->ipdata
.filename
) == 0) {
1344 chain
= hammer2_chain_next(&parent
, chain
, &key_next
,
1346 lhc
+ HAMMER2_DIRHASH_LOMASK
,
1350 hammer2_chain_unlock(parent
);
1351 hammer2_chain_drop(parent
);
1353 hammer2_inode_unlock(spmp
->iroot
);
1356 * PFS could not be found?
1358 if (chain
== NULL
) {
1360 kprintf("hammer2_mount: PFS label I/O error\n");
1362 kprintf("hammer2_mount: PFS label not found\n");
1363 hammer2_unmount_helper(mp
, NULL
, hmp
);
1364 lockmgr(&hammer2_mntlk
, LK_RELEASE
);
1365 hammer2_vfs_unmount(mp
, MNT_FORCE
);
1371 * Acquire the pmp structure (it should have already been allocated
1372 * via hammer2_update_pmps() so do not pass cluster in to add to
1373 * available chains).
1375 * Check if the cluster has already been mounted. A cluster can
1376 * only be mounted once, use null mounts to mount additional copies.
1379 kprintf("hammer2_mount: PFS label I/O error\n");
1381 ripdata
= &chain
->data
->ipdata
;
1383 pmp
= hammer2_pfsalloc(NULL
, ripdata
,
1384 bref
.modify_tid
, force_local
);
1386 hammer2_chain_unlock(chain
);
1387 hammer2_chain_drop(chain
);
1392 kprintf("hammer2_mount hmp=%p pmp=%p\n", hmp
, pmp
);
1395 kprintf("hammer2_mount: PFS already mounted!\n");
1396 hammer2_unmount_helper(mp
, NULL
, hmp
);
1397 lockmgr(&hammer2_mntlk
, LK_RELEASE
);
1398 hammer2_vfs_unmount(mp
, MNT_FORCE
);
1403 pmp
->hflags
= info
.hflags
;
1404 mp
->mnt_flag
|= MNT_LOCAL
;
1405 mp
->mnt_kern_flag
|= MNTK_ALL_MPSAFE
; /* all entry pts are SMP */
1406 mp
->mnt_kern_flag
|= MNTK_THR_SYNC
; /* new vsyncscan semantics */
1409 * required mount structure initializations
1411 mp
->mnt_stat
.f_iosize
= HAMMER2_PBUFSIZE
;
1412 mp
->mnt_stat
.f_bsize
= HAMMER2_PBUFSIZE
;
1414 mp
->mnt_vstat
.f_frsize
= HAMMER2_PBUFSIZE
;
1415 mp
->mnt_vstat
.f_bsize
= HAMMER2_PBUFSIZE
;
1420 mp
->mnt_iosize_max
= MAXPHYS
;
1423 * Connect up mount pointers.
1425 hammer2_mount_helper(mp
, pmp
);
1427 lockmgr(&hammer2_mntlk
, LK_RELEASE
);
1433 vfs_add_vnodeops(mp
, &hammer2_vnode_vops
, &mp
->mnt_vn_norm_ops
);
1434 vfs_add_vnodeops(mp
, &hammer2_spec_vops
, &mp
->mnt_vn_spec_ops
);
1435 vfs_add_vnodeops(mp
, &hammer2_fifo_vops
, &mp
->mnt_vn_fifo_ops
);
1438 copyinstr(info
.volume
, mp
->mnt_stat
.f_mntfromname
,
1439 MNAMELEN
- 1, &size
);
1440 bzero(mp
->mnt_stat
.f_mntfromname
+ size
, MNAMELEN
- size
);
1441 } /* else root mount, already in there */
1443 bzero(mp
->mnt_stat
.f_mntonname
, sizeof(mp
->mnt_stat
.f_mntonname
));
1445 copyinstr(path
, mp
->mnt_stat
.f_mntonname
,
1446 sizeof(mp
->mnt_stat
.f_mntonname
) - 1,
1450 mp
->mnt_stat
.f_mntonname
[0] = '/';
1454 * Initial statfs to prime mnt_stat.
1456 hammer2_vfs_statfs(mp
, &mp
->mnt_stat
, cred
);
1462 * Scan PFSs under the super-root and create hammer2_pfs structures.
1466 hammer2_update_pmps(hammer2_dev_t
*hmp
)
1468 const hammer2_inode_data_t
*ripdata
;
1469 hammer2_chain_t
*parent
;
1470 hammer2_chain_t
*chain
;
1471 hammer2_blockref_t bref
;
1472 hammer2_dev_t
*force_local
;
1473 hammer2_pfs_t
*spmp
;
1475 hammer2_key_t key_next
;
1479 * Force local mount (disassociate all PFSs from their clusters).
1480 * Used primarily for debugging.
1482 force_local
= (hmp
->hflags
& HMNT2_LOCAL
) ? hmp
: NULL
;
1485 * Lookup mount point under the media-localized super-root.
1487 * cluster->pmp will incorrectly point to spmp and must be fixed
1491 hammer2_inode_lock(spmp
->iroot
, 0);
1492 parent
= hammer2_inode_chain(spmp
->iroot
, 0, HAMMER2_RESOLVE_ALWAYS
);
1493 chain
= hammer2_chain_lookup(&parent
, &key_next
,
1494 HAMMER2_KEY_MIN
, HAMMER2_KEY_MAX
,
1497 if (chain
->bref
.type
!= HAMMER2_BREF_TYPE_INODE
)
1500 kprintf("I/O error scanning PFS labels\n");
1502 ripdata
= &chain
->data
->ipdata
;
1505 pmp
= hammer2_pfsalloc(chain
, ripdata
,
1506 bref
.modify_tid
, force_local
);
1508 chain
= hammer2_chain_next(&parent
, chain
, &key_next
,
1509 key_next
, HAMMER2_KEY_MAX
,
1513 hammer2_chain_unlock(parent
);
1514 hammer2_chain_drop(parent
);
1516 hammer2_inode_unlock(spmp
->iroot
);
1521 hammer2_remount(hammer2_dev_t
*hmp
, struct mount
*mp
, char *path __unused
,
1522 struct vnode
*devvp
, struct ucred
*cred
)
1526 if (hmp
->ronly
&& (mp
->mnt_kern_flag
& MNTK_WANTRDWR
)) {
1527 vn_lock(devvp
, LK_EXCLUSIVE
| LK_RETRY
);
1528 VOP_OPEN(devvp
, FREAD
| FWRITE
, FSCRED
, NULL
);
1530 error
= hammer2_recovery(hmp
);
1532 error
|= hammer2_fixup_pfses(hmp
);
1533 vn_lock(devvp
, LK_EXCLUSIVE
| LK_RETRY
);
1535 VOP_CLOSE(devvp
, FREAD
, NULL
);
1538 VOP_CLOSE(devvp
, FREAD
| FWRITE
, NULL
);
1549 hammer2_vfs_unmount(struct mount
*mp
, int mntflags
)
1560 lockmgr(&hammer2_mntlk
, LK_EXCLUSIVE
);
1563 * If mount initialization proceeded far enough we must flush
1564 * its vnodes and sync the underlying mount points. Three syncs
1565 * are required to fully flush the filesystem (freemap updates lag
1566 * by one flush, and one extra for safety).
1568 if (mntflags
& MNT_FORCE
)
1573 error
= vflush(mp
, 0, flags
);
1576 hammer2_vfs_sync(mp
, MNT_WAIT
);
1577 hammer2_vfs_sync(mp
, MNT_WAIT
);
1578 hammer2_vfs_sync(mp
, MNT_WAIT
);
1582 * Cleanup the frontend support XOPS threads
1584 hammer2_xop_helper_cleanup(pmp
);
1587 hammer2_unmount_helper(mp
, pmp
, NULL
);
1591 lockmgr(&hammer2_mntlk
, LK_RELEASE
);
1597 * Mount helper, hook the system mount into our PFS.
1598 * The mount lock is held.
1600 * We must bump the mount_count on related devices for any
1605 hammer2_mount_helper(struct mount
*mp
, hammer2_pfs_t
*pmp
)
1607 hammer2_cluster_t
*cluster
;
1608 hammer2_chain_t
*rchain
;
1611 mp
->mnt_data
= (qaddr_t
)pmp
;
1615 * After pmp->mp is set we have to adjust hmp->mount_count.
1617 cluster
= &pmp
->iroot
->cluster
;
1618 for (i
= 0; i
< cluster
->nchains
; ++i
) {
1619 rchain
= cluster
->array
[i
].chain
;
1622 ++rchain
->hmp
->mount_count
;
1626 * Create missing Xop threads
1628 hammer2_xop_helper_create(pmp
);
1632 * Mount helper, unhook the system mount from our PFS.
1633 * The mount lock is held.
1635 * If hmp is supplied a mount responsible for being the first to open
1636 * the block device failed and the block device and all PFSs using the
1637 * block device must be cleaned up.
1639 * If pmp is supplied multiple devices might be backing the PFS and each
1640 * must be disconnected. This might not be the last PFS using some of the
1641 * underlying devices. Also, we have to adjust our hmp->mount_count
1642 * accounting for the devices backing the pmp which is now undergoing an
1647 hammer2_unmount_helper(struct mount
*mp
, hammer2_pfs_t
*pmp
, hammer2_dev_t
*hmp
)
1649 hammer2_cluster_t
*cluster
;
1650 hammer2_chain_t
*rchain
;
1651 struct vnode
*devvp
;
	/*
	 * If no device supplied this is a high-level unmount and we have
	 * to disconnect the mount, adjust mount_count, and locate devices
	 * that might now have no mounts.
	 */
1662 KKASSERT(hmp
== NULL
);
1663 KKASSERT((void *)(intptr_t)mp
->mnt_data
== pmp
);
1665 mp
->mnt_data
= NULL
;
1668 * After pmp->mp is cleared we have to account for
1671 cluster
= &pmp
->iroot
->cluster
;
1672 for (i
= 0; i
< cluster
->nchains
; ++i
) {
1673 rchain
= cluster
->array
[i
].chain
;
1676 --rchain
->hmp
->mount_count
;
1677 /* scrapping hmp now may invalidate the pmp */
1680 TAILQ_FOREACH(hmp
, &hammer2_mntlist
, mntentry
) {
1681 if (hmp
->mount_count
== 0) {
1682 hammer2_unmount_helper(NULL
, NULL
, hmp
);
1690 * Try to terminate the block device. We can't terminate it if
1691 * there are still PFSs referencing it.
1693 if (hmp
->mount_count
)
	/*
	 * Decommission the network before we start messing with the
	 * block device.
	 */
1700 hammer2_iocom_uninit(hmp
);
1702 hammer2_bulkfree_uninit(hmp
);
1703 hammer2_pfsfree_scan(hmp
, 0);
1704 hammer2_dev_exlock(hmp
); /* XXX order */
1707 * Cycle the volume data lock as a safety (probably not needed any
1708 * more). To ensure everything is out we need to flush at least
1709 * three times. (1) The running of the sideq can dirty the
1710 * filesystem, (2) A normal flush can dirty the freemap, and
1711 * (3) ensure that the freemap is fully synchronized.
1713 * The next mount's recovery scan can clean everything up but we want
1714 * to leave the filesystem in a 100% clean state on a normal unmount.
1717 hammer2_voldata_lock(hmp
);
1718 hammer2_voldata_unlock(hmp
);
1722 * Flush whatever is left. Unmounted but modified PFS's might still
1723 * have some dirty chains on them.
1725 hammer2_chain_lock(&hmp
->vchain
, HAMMER2_RESOLVE_ALWAYS
);
1726 hammer2_chain_lock(&hmp
->fchain
, HAMMER2_RESOLVE_ALWAYS
);
1728 if (hmp
->fchain
.flags
& HAMMER2_CHAIN_FLUSH_MASK
) {
1729 hammer2_voldata_modify(hmp
);
1730 hammer2_flush(&hmp
->fchain
, HAMMER2_FLUSH_TOP
|
1733 hammer2_chain_unlock(&hmp
->fchain
);
1735 if (hmp
->vchain
.flags
& HAMMER2_CHAIN_FLUSH_MASK
) {
1736 hammer2_flush(&hmp
->vchain
, HAMMER2_FLUSH_TOP
|
1739 hammer2_chain_unlock(&hmp
->vchain
);
1741 if ((hmp
->vchain
.flags
| hmp
->fchain
.flags
) &
1742 HAMMER2_CHAIN_FLUSH_MASK
) {
1743 kprintf("hammer2_unmount: chains left over "
1744 "after final sync\n");
1745 kprintf(" vchain %08x\n", hmp
->vchain
.flags
);
1746 kprintf(" fchain %08x\n", hmp
->fchain
.flags
);
1748 if (hammer2_debug
& 0x0010)
1749 Debugger("entered debugger");
1752 hammer2_pfsfree_scan(hmp
, 1);
1754 KKASSERT(hmp
->spmp
== NULL
);
1757 * Finish up with the device vnode
1759 if ((devvp
= hmp
->devvp
) != NULL
) {
1761 vn_lock(devvp
, LK_EXCLUSIVE
| LK_RETRY
);
1762 kprintf("hammer2_unmount(A): devvp %s rbdirty %p ronly=%d\n",
1763 hmp
->devrepname
, RB_ROOT(&devvp
->v_rbdirty_tree
),
1765 vinvalbuf(devvp
, (ronly
? 0 : V_SAVE
), 0, 0);
1766 kprintf("hammer2_unmount(B): devvp %s rbdirty %p\n",
1767 hmp
->devrepname
, RB_ROOT(&devvp
->v_rbdirty_tree
));
1769 VOP_CLOSE(devvp
, (ronly
? FREAD
: FREAD
|FWRITE
), NULL
);
1776 * Clear vchain/fchain flags that might prevent final cleanup
1779 if (hmp
->vchain
.flags
& HAMMER2_CHAIN_MODIFIED
) {
1780 atomic_add_long(&hammer2_count_modified_chains
, -1);
1781 atomic_clear_int(&hmp
->vchain
.flags
, HAMMER2_CHAIN_MODIFIED
);
1782 hammer2_pfs_memory_wakeup(hmp
->vchain
.pmp
);
1784 if (hmp
->vchain
.flags
& HAMMER2_CHAIN_UPDATE
) {
1785 atomic_clear_int(&hmp
->vchain
.flags
, HAMMER2_CHAIN_UPDATE
);
1788 if (hmp
->fchain
.flags
& HAMMER2_CHAIN_MODIFIED
) {
1789 atomic_add_long(&hammer2_count_modified_chains
, -1);
1790 atomic_clear_int(&hmp
->fchain
.flags
, HAMMER2_CHAIN_MODIFIED
);
1791 hammer2_pfs_memory_wakeup(hmp
->fchain
.pmp
);
1793 if (hmp
->fchain
.flags
& HAMMER2_CHAIN_UPDATE
) {
1794 atomic_clear_int(&hmp
->fchain
.flags
, HAMMER2_CHAIN_UPDATE
);
1798 * Final drop of embedded freemap root chain to
1799 * clean up fchain.core (fchain structure is not
1800 * flagged ALLOCATED so it is cleaned out and then
1803 hammer2_chain_drop(&hmp
->fchain
);
1806 * Final drop of embedded volume root chain to clean
1807 * up vchain.core (vchain structure is not flagged
1808 * ALLOCATED so it is cleaned out and then left to
1812 hammer2_dump_chain(&hmp
->vchain
, 0, &dumpcnt
, 'v', (u_int
)-1);
1814 hammer2_dump_chain(&hmp
->fchain
, 0, &dumpcnt
, 'f', (u_int
)-1);
1815 hammer2_dev_unlock(hmp
);
1816 hammer2_chain_drop(&hmp
->vchain
);
1818 hammer2_io_cleanup(hmp
, &hmp
->iotree
);
1819 if (hmp
->iofree_count
) {
1820 kprintf("io_cleanup: %d I/O's left hanging\n",
1824 TAILQ_REMOVE(&hammer2_mntlist
, hmp
, mntentry
);
1825 kmalloc_destroy(&hmp
->mchain
);
1826 kfree(hmp
, M_HAMMER2
);
1830 hammer2_vfs_vget(struct mount
*mp
, struct vnode
*dvp
,
1831 ino_t ino
, struct vnode
**vpp
)
1833 hammer2_xop_lookup_t
*xop
;
1835 hammer2_inode_t
*ip
;
1839 inum
= (hammer2_tid_t
)ino
& HAMMER2_DIRHASH_USERMSK
;
1845 * Easy if we already have it cached
1847 ip
= hammer2_inode_lookup(pmp
, inum
);
1849 hammer2_inode_lock(ip
, HAMMER2_RESOLVE_SHARED
);
1850 *vpp
= hammer2_igetv(ip
, &error
);
1851 hammer2_inode_unlock(ip
);
1852 hammer2_inode_drop(ip
); /* from lookup */
1858 * Otherwise we have to find the inode
1860 xop
= hammer2_xop_alloc(pmp
->iroot
, 0);
1862 hammer2_xop_start(&xop
->head
, hammer2_xop_lookup
);
1863 error
= hammer2_xop_collect(&xop
->head
, 0);
1866 if (hammer2_cluster_rdata(&xop
->head
.cluster
) == NULL
) {
1867 kprintf("vget: no collect error but also no rdata\n");
1868 kprintf("xop %p\n", xop
);
1869 while ((hammer2_debug
& 0x80000) == 0) {
1870 tsleep(xop
, PCATCH
, "wait", hz
* 10);
1874 ip
= hammer2_inode_get(pmp
, NULL
, &xop
->head
.cluster
, -1);
1877 hammer2_xop_retire(&xop
->head
, HAMMER2_XOPMASK_VOP
);
1880 *vpp
= hammer2_igetv(ip
, &error
);
1881 hammer2_inode_unlock(ip
);
1891 hammer2_vfs_root(struct mount
*mp
, struct vnode
**vpp
)
1898 if (pmp
->iroot
== NULL
) {
1904 hammer2_inode_lock(pmp
->iroot
, HAMMER2_RESOLVE_SHARED
);
1906 while (pmp
->inode_tid
== 0) {
1907 hammer2_xop_ipcluster_t
*xop
;
1908 hammer2_inode_meta_t
*meta
;
1910 xop
= hammer2_xop_alloc(pmp
->iroot
, HAMMER2_XOP_MODIFYING
);
1911 hammer2_xop_start(&xop
->head
, hammer2_xop_ipcluster
);
1912 error
= hammer2_xop_collect(&xop
->head
, 0);
1915 meta
= &xop
->head
.cluster
.focus
->data
->ipdata
.meta
;
1916 pmp
->iroot
->meta
= *meta
;
1917 pmp
->inode_tid
= meta
->pfs_inum
+ 1;
1918 if (pmp
->inode_tid
< HAMMER2_INODE_START
)
1919 pmp
->inode_tid
= HAMMER2_INODE_START
;
1921 xop
->head
.cluster
.focus
->bref
.modify_tid
+ 1;
1923 kprintf("PFS: Starting inode %jd\n",
1924 (intmax_t)pmp
->inode_tid
);
1925 kprintf("PMP focus good set nextino=%ld mod=%016jx\n",
1926 pmp
->inode_tid
, pmp
->modify_tid
);
1928 wakeup(&pmp
->iroot
);
1930 hammer2_xop_retire(&xop
->head
, HAMMER2_XOPMASK_VOP
);
1933 * Prime the mount info.
1935 hammer2_vfs_statfs(mp
, &mp
->mnt_stat
, NULL
);
1942 hammer2_xop_retire(&xop
->head
, HAMMER2_XOPMASK_VOP
);
1943 hammer2_inode_unlock(pmp
->iroot
);
1944 error
= tsleep(&pmp
->iroot
, PCATCH
, "h2root", hz
);
1945 hammer2_inode_lock(pmp
->iroot
, HAMMER2_RESOLVE_SHARED
);
1951 hammer2_inode_unlock(pmp
->iroot
);
1954 vp
= hammer2_igetv(pmp
->iroot
, &error
);
1955 hammer2_inode_unlock(pmp
->iroot
);
1965 * XXX incorporate ipdata->meta.inode_quota and data_quota
1969 hammer2_vfs_statfs(struct mount
*mp
, struct statfs
*sbp
, struct ucred
*cred
)
1973 hammer2_blockref_t bref
;
1978 * NOTE: iroot might not have validated the cluster yet.
1982 bzero(&tmp
, sizeof(tmp
));
1984 for (i
= 0; i
< pmp
->iroot
->cluster
.nchains
; ++i
) {
1985 hmp
= pmp
->pfs_hmps
[i
];
1988 if (pmp
->iroot
->cluster
.array
[i
].chain
)
1989 bref
= pmp
->iroot
->cluster
.array
[i
].chain
->bref
;
1991 bzero(&bref
, sizeof(bref
));
1993 tmp
.f_files
= bref
.embed
.stats
.inode_count
;
1995 tmp
.f_blocks
= hmp
->voldata
.allocator_size
/
1996 mp
->mnt_vstat
.f_bsize
;
1997 tmp
.f_bfree
= hmp
->voldata
.allocator_free
/
1998 mp
->mnt_vstat
.f_bsize
;
1999 tmp
.f_bavail
= tmp
.f_bfree
;
2001 if (cred
&& cred
->cr_uid
!= 0) {
2005 adj
= hmp
->free_reserved
/ mp
->mnt_vstat
.f_bsize
;
2006 tmp
.f_blocks
-= adj
;
2008 tmp
.f_bavail
-= adj
;
2011 mp
->mnt_stat
.f_blocks
= tmp
.f_blocks
;
2012 mp
->mnt_stat
.f_bfree
= tmp
.f_bfree
;
2013 mp
->mnt_stat
.f_bavail
= tmp
.f_bavail
;
2014 mp
->mnt_stat
.f_files
= tmp
.f_files
;
2015 mp
->mnt_stat
.f_ffree
= tmp
.f_ffree
;
2017 *sbp
= mp
->mnt_stat
;
2024 hammer2_vfs_statvfs(struct mount
*mp
, struct statvfs
*sbp
, struct ucred
*cred
)
2028 hammer2_blockref_t bref
;
2033 * NOTE: iroot might not have validated the cluster yet.
2036 bzero(&tmp
, sizeof(tmp
));
2038 for (i
= 0; i
< pmp
->iroot
->cluster
.nchains
; ++i
) {
2039 hmp
= pmp
->pfs_hmps
[i
];
2042 if (pmp
->iroot
->cluster
.array
[i
].chain
)
2043 bref
= pmp
->iroot
->cluster
.array
[i
].chain
->bref
;
2045 bzero(&bref
, sizeof(bref
));
2047 tmp
.f_files
= bref
.embed
.stats
.inode_count
;
2049 tmp
.f_blocks
= hmp
->voldata
.allocator_size
/
2050 mp
->mnt_vstat
.f_bsize
;
2051 tmp
.f_bfree
= hmp
->voldata
.allocator_free
/
2052 mp
->mnt_vstat
.f_bsize
;
2053 tmp
.f_bavail
= tmp
.f_bfree
;
2055 if (cred
&& cred
->cr_uid
!= 0) {
2059 adj
= hmp
->free_reserved
/ mp
->mnt_vstat
.f_bsize
;
2060 tmp
.f_blocks
-= adj
;
2062 tmp
.f_bavail
-= adj
;
2065 mp
->mnt_vstat
.f_blocks
= tmp
.f_blocks
;
2066 mp
->mnt_vstat
.f_bfree
= tmp
.f_bfree
;
2067 mp
->mnt_vstat
.f_bavail
= tmp
.f_bavail
;
2068 mp
->mnt_vstat
.f_files
= tmp
.f_files
;
2069 mp
->mnt_vstat
.f_ffree
= tmp
.f_ffree
;
2071 *sbp
= mp
->mnt_vstat
;
2077 * Mount-time recovery (RW mounts)
2079 * Updates to the free block table are allowed to lag flushes by one
2080 * transaction. In case of a crash, then on a fresh mount we must do an
2081 * incremental scan of the last committed transaction id and make sure that
2082 * all related blocks have been marked allocated.
2084 * The super-root topology and each PFS has its own transaction id domain,
2085 * so we must track PFS boundary transitions.
2087 struct hammer2_recovery_elm
{
2088 TAILQ_ENTRY(hammer2_recovery_elm
) entry
;
2089 hammer2_chain_t
*chain
;
2090 hammer2_tid_t sync_tid
;
2093 TAILQ_HEAD(hammer2_recovery_list
, hammer2_recovery_elm
);
2095 struct hammer2_recovery_info
{
2096 struct hammer2_recovery_list list
;
2101 static int hammer2_recovery_scan(hammer2_dev_t
*hmp
,
2102 hammer2_chain_t
*parent
,
2103 struct hammer2_recovery_info
*info
,
2104 hammer2_tid_t sync_tid
);
2106 #define HAMMER2_RECOVERY_MAXDEPTH 10
2110 hammer2_recovery(hammer2_dev_t
*hmp
)
2112 struct hammer2_recovery_info info
;
2113 struct hammer2_recovery_elm
*elm
;
2114 hammer2_chain_t
*parent
;
2115 hammer2_tid_t sync_tid
;
2116 hammer2_tid_t mirror_tid
;
2119 hammer2_trans_init(hmp
->spmp
, 0);
2121 sync_tid
= hmp
->voldata
.freemap_tid
;
2122 mirror_tid
= hmp
->voldata
.mirror_tid
;
2124 kprintf("hammer2 mount \"%s\": ", hmp
->devrepname
);
2125 if (sync_tid
>= mirror_tid
) {
2126 kprintf(" no recovery needed\n");
2128 kprintf(" freemap recovery %016jx-%016jx\n",
2129 sync_tid
+ 1, mirror_tid
);
2132 TAILQ_INIT(&info
.list
);
2134 parent
= hammer2_chain_lookup_init(&hmp
->vchain
, 0);
2135 error
= hammer2_recovery_scan(hmp
, parent
, &info
, sync_tid
);
2136 hammer2_chain_lookup_done(parent
);
2138 while ((elm
= TAILQ_FIRST(&info
.list
)) != NULL
) {
2139 TAILQ_REMOVE(&info
.list
, elm
, entry
);
2140 parent
= elm
->chain
;
2141 sync_tid
= elm
->sync_tid
;
2142 kfree(elm
, M_HAMMER2
);
2144 hammer2_chain_lock(parent
, HAMMER2_RESOLVE_ALWAYS
);
2145 error
|= hammer2_recovery_scan(hmp
, parent
, &info
,
2146 hmp
->voldata
.freemap_tid
);
2147 hammer2_chain_unlock(parent
);
2148 hammer2_chain_drop(parent
); /* drop elm->chain ref */
2151 hammer2_trans_done(hmp
->spmp
);
static int
hammer2_recovery_scan(hammer2_dev_t *hmp, hammer2_chain_t *parent,
		      struct hammer2_recovery_info *info,
		      hammer2_tid_t sync_tid)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_chain_t *chain;
	hammer2_blockref_t bref;
	int tmp_error;
	int rup_error;
	int error;
	int first;

	/*
	 * Adjust freemap to ensure that the block(s) are marked allocated.
	 */
	if (parent->bref.type != HAMMER2_BREF_TYPE_VOLUME) {
		hammer2_freemap_adjust(hmp, &parent->bref,
				       HAMMER2_FREEMAP_DORECOVER);
	}

	/*
	 * Check type for recursive scan
	 */
	switch(parent->bref.type) {
	case HAMMER2_BREF_TYPE_VOLUME:
		/* data already instantiated */
		break;
	case HAMMER2_BREF_TYPE_INODE:
		/*
		 * Must instantiate data for DIRECTDATA test and also
		 * for recursion.
		 */
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		ripdata = &hammer2_chain_rdata(parent)->ipdata;
		if (ripdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
			/* not applicable to recovery scan */
			hammer2_chain_unlock(parent);
			return 0;
		}
		hammer2_chain_unlock(parent);
		break;
	case HAMMER2_BREF_TYPE_INDIRECT:
		/*
		 * Must instantiate data for recursion
		 */
		hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
		hammer2_chain_unlock(parent);
		break;
	case HAMMER2_BREF_TYPE_DIRENT:
	case HAMMER2_BREF_TYPE_DATA:
	case HAMMER2_BREF_TYPE_FREEMAP:
	case HAMMER2_BREF_TYPE_FREEMAP_NODE:
	case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
		/* not applicable to recovery scan */
		return 0;
	default:
		return HAMMER2_ERROR_BADBREF;
	}

	/*
	 * Defer operation if depth limit reached or if we are crossing a
	 * PFS boundary.
	 */
	if (info->depth >= HAMMER2_RECOVERY_MAXDEPTH) {
		struct hammer2_recovery_elm *elm;

		elm = kmalloc(sizeof(*elm), M_HAMMER2, M_ZERO | M_WAITOK);
		elm->chain = parent;
		elm->sync_tid = sync_tid;
		hammer2_chain_ref(parent);
		TAILQ_INSERT_TAIL(&info->list, elm, entry);
		/* unlocked by caller */
		return 0;
	}

	/*
	 * Recursive scan of the last flushed transaction only.  We are
	 * doing this without pmp assignments so don't leave the chains
	 * hanging around after we are done with them.
	 *
	 * error	Cumulative error this level only
	 * rup_error	Cumulative error for recursion
	 * tmp_error	Specific non-cumulative recursion error
	 */
	chain = NULL;
	rup_error = 0;
	error = 0;
	first = 1;

	for (;;) {
		error |= hammer2_chain_scan(parent, &chain, &bref,
					    &first, HAMMER2_LOOKUP_NODATA);

		/*
		 * Problem during scan or EOF
		 */
		if (error)
			break;

		/*
		 * If the bref was not instantiated as a chain (leaf data),
		 * adjust the freemap for it directly.
		 */
		if (chain == NULL) {
			if (bref.mirror_tid > sync_tid) {
				hammer2_freemap_adjust(hmp, &bref,
						    HAMMER2_FREEMAP_DORECOVER);
			}
			continue;
		}

		/*
		 * This may or may not be a recursive node.
		 */
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
		if (bref.mirror_tid > sync_tid) {
			++info->depth;
			tmp_error = hammer2_recovery_scan(hmp, chain,
							  info, sync_tid);
			--info->depth;
		} else {
			tmp_error = 0;
		}

		/*
		 * Flush the recovery at the PFS boundary to stage it for
		 * the final flush of the super-root topology.
		 */
		if (tmp_error == 0 &&
		    (bref.flags & HAMMER2_BREF_FLAG_PFSROOT) &&
		    (chain->flags & HAMMER2_CHAIN_ONFLUSH)) {
			hammer2_flush(chain, HAMMER2_FLUSH_TOP |
					     HAMMER2_FLUSH_ALL);
		}
		rup_error |= tmp_error;
	}
	return ((error | rup_error) & ~HAMMER2_ERROR_EOF);
}
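
/*
 * Note on the deferral mechanism above: the recursion in
 * hammer2_recovery_scan() is capped at HAMMER2_RECOVERY_MAXDEPTH (10)
 * levels.  Any sub-tree deeper than that is queued on info->list and
 * hammer2_recovery() restarts the scan from the queued chain, so a very
 * deep topology is processed as a sequence of bounded scans rather than
 * one unbounded recursion on the kernel stack.
 */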
/*
 * This fixes up an error introduced in earlier H2 implementations where
 * moving a PFS inode into an indirect block wound up causing the
 * HAMMER2_BREF_FLAG_PFSROOT flag in the bref to get cleared.
 */
static int
hammer2_fixup_pfses(hammer2_dev_t *hmp)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_next;
	hammer2_pfs_t *spmp;
	int error;
	int error2;

	error = 0;

	/*
	 * Lookup mount point under the media-localized super-root.
	 *
	 * cluster->pmp will incorrectly point to spmp and must be fixed
	 * up later on.
	 */
	spmp = hmp->spmp;
	hammer2_inode_lock(spmp->iroot, 0);
	parent = hammer2_inode_chain(spmp->iroot, 0, HAMMER2_RESOLVE_ALWAYS);
	chain = hammer2_chain_lookup(&parent, &key_next,
				     HAMMER2_KEY_MIN, HAMMER2_KEY_MAX,
				     &error, 0);
	while (chain) {
		if (chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
			/* the super-root should only contain PFS inodes */
		} else if (chain->error) {
			kprintf("I/O error scanning PFS labels\n");
			error |= chain->error;
		} else if ((chain->bref.flags &
			    HAMMER2_BREF_FLAG_PFSROOT) == 0) {
			ripdata = &chain->data->ipdata;
			hammer2_trans_init(hmp->spmp, 0);
			error2 = hammer2_chain_modify(chain,
						      chain->bref.modify_tid,
						      0, 0);
			if (error2 == 0) {
				kprintf("hammer2: Correct mis-flagged PFS %s\n",
					ripdata->filename);
				chain->bref.flags |= HAMMER2_BREF_FLAG_PFSROOT;
			} else {
				error |= error2;
			}
			hammer2_flush(chain, HAMMER2_FLUSH_TOP |
					     HAMMER2_FLUSH_ALL);
			hammer2_trans_done(hmp->spmp);
		}
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, HAMMER2_KEY_MAX,
					   &error, 0);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	hammer2_inode_unlock(spmp->iroot);

	return error;
}
/*
 * Sync a mount point; this is called periodically on a per-mount basis from
 * the filesystem syncer, and whenever a user issues a sync.
 */
static int
hammer2_vfs_sync(struct mount *mp, int waitfor)
{
	hammer2_xop_flush_t *xop;
	struct hammer2_sync_info info;
	hammer2_inode_t *iroot;
	hammer2_pfs_t *pmp;
	int flags;
	int error;

	pmp = MPTOPMP(mp);
	iroot = pmp->iroot;
	KKASSERT(iroot->pmp == pmp);

	/*
	 * We can't acquire locks on existing vnodes while in a transaction
	 * without risking a deadlock.  This assumes that vfsync() can be
	 * called without the vnode locked (which it can in DragonFly).
	 * Otherwise we'd have to implement a multi-pass or flag the lock
	 * failures and retry.
	 *
	 * The reclamation code interlocks with the sync list's token
	 * (by removing the vnode from the scan list) before unlocking
	 * the inode, giving us time to ref the inode.
	 */
	/*flags = VMSC_GETVP;*/
	flags = VMSC_NOWAIT;
	if (waitfor & MNT_LAZY)
		flags |= VMSC_ONEPASS;

	/*
	 * Flush vnodes individually using a normal transaction to avoid
	 * stalling any concurrent operations.  This will flush the related
	 * buffer cache buffers and inodes to the media.
	 *
	 * For efficiency do an async pass before making sure with a
	 * synchronous pass on all related buffer cache buffers.
	 */
	hammer2_trans_init(pmp, 0);

	info.error = 0;
	info.waitfor = MNT_NOWAIT;
	info.pass = 1;
	vsyncscan(mp, flags | VMSC_NOWAIT, hammer2_sync_scan2, &info);

	/*
	 * Now do two passes making sure we get everything.  The first pass
	 * vfsync()s dirty vnodes.  The second pass waits for their I/O's
	 * to finish and cleans up the dirty flag on the vnode.
	 */
	info.pass = 1;
	info.waitfor = MNT_WAIT;
	vsyncscan(mp, flags, hammer2_sync_scan2, &info);

	info.pass = 2;
	info.waitfor = MNT_WAIT;
	vsyncscan(mp, flags, hammer2_sync_scan2, &info);

	/*
	 * We must also run the sideq to handle any disconnected inodes
	 * as the vnode scan will not see these.
	 */
	hammer2_inode_run_sideq(pmp, 1);
	hammer2_trans_done(pmp);

	/*
	 * Start our flush transaction and flush the root topology down to
	 * the inodes, but not the inodes themselves (which we already flushed
	 * above).  Any concurrent activity affecting inode contents will not
	 * be part of this flush cycle.
	 *
	 * NOTE! It is still possible for the paging code to push pages
	 *	 out via a UIO_NOCOPY hammer2_vop_write() during the main
	 *	 flush.
	 */
	hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);

	/*
	 * Sync dirty vnodes again while in the flush transaction.  This is
	 * currently an expensive shim to make sure the logical topology is
	 * completely consistent before we flush the volume header.
	 */
	info.pass = 1;
	info.waitfor = MNT_WAIT;
	vsyncscan(mp, flags, hammer2_sync_scan2, &info);

	info.pass = 2;
	info.waitfor = MNT_WAIT;
	vsyncscan(mp, flags, hammer2_sync_scan2, &info);

	/*
	 * Use the XOP interface to concurrently flush all nodes to
	 * synchronize the PFSROOT subtopology to the media.  A standard
	 * end-of-scan ENOENT error indicates cluster sufficiency.
	 *
	 * Note that this flush will not be visible on crash recovery until
	 * we flush the super-root topology in the next loop.
	 *
	 * XXX For now wait for all flushes to complete.
	 */
	/*
	 * If unmounting try to flush everything including any
	 * sub-trees under inodes, just in case there is dangling
	 * modified data, as a safety.  Otherwise just flush up to
	 * the inodes in this stage.
	 */
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		xop = hammer2_xop_alloc(iroot, HAMMER2_XOP_MODIFYING |
					       HAMMER2_XOP_VOLHDR);
	} else {
		xop = hammer2_xop_alloc(iroot, HAMMER2_XOP_MODIFYING |
					       HAMMER2_XOP_INODE_STOP |
					       HAMMER2_XOP_VOLHDR);
	}
	hammer2_xop_start(&xop->head, hammer2_inode_xop_flush);
	error = hammer2_xop_collect(&xop->head,
				    HAMMER2_XOP_COLLECT_WAITALL);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == HAMMER2_ERROR_ENOENT)
		error = 0;
	else
		error = hammer2_error_to_errno(error);
	hammer2_trans_done(pmp);

	return error;
}
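
/*
 * Summary of the sequence above: dirty vnodes and their buffers are first
 * flushed under a normal transaction, then re-synced under the
 * HAMMER2_TRANS_ISFLUSH transaction, and finally the XOP flush propagates
 * the PFSROOT subtopology (plus the volume header, via HAMMER2_XOP_VOLHDR)
 * to the media.
 */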
/*
 * Note that we ignore the transaction mtid we got above.  Instead,
 * each vfsync below will ultimately get its own via TRANS_BUFCACHE
 * transactions.
 *
 * WARNING! The frontend might be waiting on chnmem (limit_dirty_chains)
 *	    while holding a vnode locked.  When this situation occurs we cannot
 *	    safely test whether it is ok to clear the dirty bit on the vnode.
 *	    However, we can still flush the inode's topology.
 */
static int
hammer2_sync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	struct hammer2_sync_info *info = data;
	hammer2_inode_t *ip;
	int error;

	/*
	 * Degenerate cases.  Note that ip == NULL typically means the
	 * syncer vnode itself and we don't want to vclrisdirty() in that
	 * case.
	 */
	ip = VTOI(vp);
	if (ip == NULL)
		return 0;
	if (vp->v_type == VNON || vp->v_type == VBAD) {
		vclrisdirty(vp);
		return 0;
	}

	/*
	 * Synchronize the buffer cache and inode meta-data to the backing
	 * topology.
	 *
	 * vfsync is not necessarily synchronous, so it is best NOT to try
	 * to flush the backing topology to media at this point.
	 */
	hammer2_inode_ref(ip);
	if ((ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MODIFIED)) ||
	    !RB_EMPTY(&vp->v_rbdirty_tree)) {
		if (info->pass == 1)
			vfsync(vp, info->waitfor, 1, NULL, NULL);
		else
			bio_track_wait(&vp->v_track_write, 0, 0);
	}
	if (info->pass == 2 && (vp->v_flag & VISDIRTY)) {
		/*
		 * v_token is needed to interlock v_rbdirty_tree.
		 */
		lwkt_gettoken(&vp->v_token);
		hammer2_inode_lock(ip, 0);
		hammer2_inode_chain_sync(ip);
		hammer2_inode_chain_flush(ip);
		if ((ip->flags & (HAMMER2_INODE_MODIFIED |
				  HAMMER2_INODE_RESIZED |
				  HAMMER2_INODE_DIRTYDATA)) == 0 &&
		    RB_EMPTY(&vp->v_rbdirty_tree) &&
		    !bio_track_active(&vp->v_track_write)) {
			vclrisdirty(vp);
		}
		hammer2_inode_unlock(ip);
		lwkt_reltoken(&vp->v_token);
	}
	hammer2_inode_drop(ip);

	/*
	 * Errors from the vnode flush are not currently propagated to
	 * the caller.
	 */
	error = 0;
	if (error)
		info->error = error;

	return 0;
}
static int
hammer2_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
	hammer2_inode_t *ip;

	KKASSERT(MAXFIDSZ >= 16);
	ip = VTOI(vp);
	fhp->fid_len = offsetof(struct fid, fid_data[16]);

	((hammer2_tid_t *)fhp->fid_data)[0] = ip->meta.inum;
	((hammer2_tid_t *)fhp->fid_data)[1] = 0;

	return 0;
}
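
/*
 * Layout note: the 16-byte NFS file handle payload generated above is
 * simply two 64-bit words, the inode number followed by a zero pad.
 * hammer2_vfs_fhtovp() below recovers the inode number from the first
 * word and masks it with HAMMER2_DIRHASH_USERMSK.
 */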
static int
hammer2_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
		   struct fid *fhp, struct vnode **vpp)
{
	hammer2_tid_t inum;
	int error;

	inum = ((hammer2_tid_t *)fhp->fid_data)[0] & HAMMER2_DIRHASH_USERMSK;
	if (vpp) {
		if (inum == 1)
			error = hammer2_vfs_root(mp, vpp);
		else
			error = hammer2_vfs_vget(mp, NULL, inum, vpp);
	} else {
		error = 0;
	}
	if (error)
		kprintf("fhtovp: %016jx -> %p, %d\n", inum, *vpp, error);

	return error;
}
static int
hammer2_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
		     int *exflagsp, struct ucred **credanonp)
{
	hammer2_pfs_t *pmp;
	struct netcred *np;
	int error;

	pmp = MPTOPMP(mp);
	np = vfs_export_lookup(mp, &pmp->export, nam);
	if (np) {
		*exflagsp = np->netc_exflags;
		*credanonp = &np->netc_anon;
		error = 0;
	} else {
		error = EACCES;
	}

	return error;
}
/*
 * Support code for hammer2_vfs_mount().  Read, verify, and install the volume
 * header into the HMP.
 *
 * XXX read four volhdrs and use the one with the highest TID whose CRC
 *     matches.
 *
 * XXX For filesystems w/ less than 4 volhdrs, make sure to not write to
 *     nonexistent locations.
 *
 * XXX Record selected volhdr and ring updates to each of 4 volhdrs
 */
static int
hammer2_install_volume_header(hammer2_dev_t *hmp)
{
	hammer2_volume_data_t *vd;
	struct buf *bp;
	hammer2_crc32_t crc0, crc, bcrc0, bcrc;
	int error_reported;
	int error;
	int valid;
	int i;

	error_reported = 0;
	error = 0;
	valid = 0;
	bp = NULL;

	/*
	 * There are up to 4 copies of the volume header (syncs iterate
	 * between them so there is no single master).  We don't trust the
	 * volu_size field so we don't know precisely how large the filesystem
	 * is, so depend on the OS to return an error if we go beyond the
	 * block device's EOF.
	 */
	for (i = 0; i < HAMMER2_NUM_VOLHDRS; i++) {
		error = bread(hmp->devvp, i * HAMMER2_ZONE_BYTES64,
			      HAMMER2_VOLUME_BYTES, &bp);
		if (error) {
			brelse(bp);
			bp = NULL;
			continue;
		}

		vd = (struct hammer2_volume_data *) bp->b_data;
		if ((vd->magic != HAMMER2_VOLUME_ID_HBO) &&
		    (vd->magic != HAMMER2_VOLUME_ID_ABO)) {
			brelse(bp);
			bp = NULL;
			continue;
		}

		if (vd->magic == HAMMER2_VOLUME_ID_ABO) {
			/* XXX: Reversed-endianness filesystem */
			kprintf("hammer2: reverse-endian filesystem detected");
			brelse(bp);
			bp = NULL;
			continue;
		}

		crc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT0];
		crc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC0_OFF,
				      HAMMER2_VOLUME_ICRC0_SIZE);
		bcrc = vd->icrc_sects[HAMMER2_VOL_ICRC_SECT1];
		bcrc0 = hammer2_icrc32(bp->b_data + HAMMER2_VOLUME_ICRC1_OFF,
				       HAMMER2_VOLUME_ICRC1_SIZE);
		if ((crc0 != crc) || (bcrc0 != bcrc)) {
			kprintf("hammer2 volume header crc "
				"mismatch copy #%d %08x/%08x\n",
				i, crc0, crc);
			error_reported = 1;
			brelse(bp);
			bp = NULL;
			continue;
		}
		if (valid == 0 || hmp->voldata.mirror_tid < vd->mirror_tid) {
			valid = 1;
			hmp->voldata = *vd;
			hmp->volhdrno = i;
		}
		brelse(bp);
		bp = NULL;
	}
	if (valid) {
		hmp->volsync = hmp->voldata;
		hmp->free_reserved = hmp->voldata.allocator_size / 20;
		error = 0;
		if (error_reported || bootverbose || 1) { /* 1/DEBUG */
			kprintf("hammer2: using volume header #%d\n",
				hmp->volhdrno);
		}
	} else {
		error = EINVAL;
		kprintf("hammer2: no valid volume headers found!\n");
	}

	return error;
}
/*
 * This handles hysteresis on regular file flushes.  Because the BIOs are
 * routed to a thread it is possible for an excessive number to build up
 * and cause long front-end stalls long before the runningbuffspace limit
 * is hit, so we implement hammer2_flush_pipe to control the
 * hysteresis.
 *
 * This is a particular problem when compression is used.
 */
void
hammer2_lwinprog_ref(hammer2_pfs_t *pmp)
{
	atomic_add_int(&pmp->count_lwinprog, 1);
}
void
hammer2_lwinprog_drop(hammer2_pfs_t *pmp)
{
	int lwinprog;

	lwinprog = atomic_fetchadd_int(&pmp->count_lwinprog, -1);
	if ((lwinprog & HAMMER2_LWINPROG_WAITING) &&
	    (lwinprog & HAMMER2_LWINPROG_MASK) <= hammer2_flush_pipe * 2 / 3) {
		atomic_clear_int(&pmp->count_lwinprog,
				 HAMMER2_LWINPROG_WAITING);
		wakeup(&pmp->count_lwinprog);
	}
	if ((lwinprog & HAMMER2_LWINPROG_WAITING0) &&
	    (lwinprog & HAMMER2_LWINPROG_MASK) <= 0) {
		atomic_clear_int(&pmp->count_lwinprog,
				 HAMMER2_LWINPROG_WAITING0);
		wakeup(&pmp->count_lwinprog);
	}
}
void
hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int flush_pipe)
{
	int lwinprog;
	int lwflag = (flush_pipe) ? HAMMER2_LWINPROG_WAITING :
				    HAMMER2_LWINPROG_WAITING0;

	for (;;) {
		lwinprog = pmp->count_lwinprog;
		cpu_ccfence();
		if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
			break;
		tsleep_interlock(&pmp->count_lwinprog, 0);
		atomic_set_int(&pmp->count_lwinprog, lwflag);
		lwinprog = pmp->count_lwinprog;
		if ((lwinprog & HAMMER2_LWINPROG_MASK) <= flush_pipe)
			break;
		tsleep(&pmp->count_lwinprog, PINTERLOCKED, "h2wpipe", hz);
	}
}
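
/*
 * Worked example of the hysteresis: with the default hammer2_flush_pipe
 * of 100, a writer calling hammer2_lwinprog_wait() with
 * flush_pipe = hammer2_flush_pipe blocks once more than 100 logical-write
 * BIOs are in progress, and hammer2_lwinprog_drop() wakes it only after
 * the count drains to 100 * 2 / 3 = 66, so the front end is released in
 * bursts instead of thrashing on every completed BIO.
 */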
/*
 * Manage excessive memory resource use for chain and related
 * structures.
 */
void
hammer2_pfs_memory_wait(hammer2_pfs_t *pmp)
{
	uint32_t waiting;
	uint32_t count;
	uint32_t limit;
#if 0
	static int zzticks;
#endif

	/*
	 * Atomic check condition and wait.  Also do an early speedup of
	 * the syncer to try to avoid hitting the wait.
	 */
	for (;;) {
		waiting = pmp->inmem_dirty_chains;
		cpu_ccfence();
		count = waiting & HAMMER2_DIRTYCHAIN_MASK;

		limit = pmp->mp->mnt_nvnodelistsize / 10;
		if (limit < hammer2_limit_dirty_chains)
			limit = hammer2_limit_dirty_chains;

#if 0
		if ((int)(ticks - zzticks) > hz) {
			zzticks = ticks;
			kprintf("count %u %u\n", count, limit);
		}
#endif

		/*
		 * Block if there are too many dirty chains present, wait
		 * for the flush to clean some out.
		 */
		if (count > limit) {
			tsleep_interlock(&pmp->inmem_dirty_chains, 0);
			if (atomic_cmpset_int(&pmp->inmem_dirty_chains,
					      waiting,
				      waiting | HAMMER2_DIRTYCHAIN_WAITING)) {
				speedup_syncer(pmp->mp);
				tsleep(&pmp->inmem_dirty_chains, PINTERLOCKED,
				       "chnmem", hz);
			}
			continue;	/* loop on success or fail */
		}

		/*
		 * Try to start an early flush before we are forced to block.
		 */
		if (count > limit * 5 / 10)
			speedup_syncer(pmp->mp);
		break;
	}
}
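
/*
 * Example of the limits above: for a mount with 50000 vnodes the dirty
 * chain limit is the larger of 50000 / 10 and hammer2_limit_dirty_chains,
 * i.e. at least 5000 chains.  The syncer is sped up once the count passes
 * half the limit, and callers block (waiting on "chnmem") only when the
 * limit itself is exceeded.
 */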
void
hammer2_pfs_memory_inc(hammer2_pfs_t *pmp)
{
	atomic_add_int(&pmp->inmem_dirty_chains, 1);
}
void
hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp)
{
	uint32_t waiting;

	waiting = atomic_fetchadd_int(&pmp->inmem_dirty_chains, -1);

	/* don't need --waiting to test flag */
	if (waiting & HAMMER2_DIRTYCHAIN_WAITING) {
		atomic_clear_int(&pmp->inmem_dirty_chains,
				 HAMMER2_DIRTYCHAIN_WAITING);
		wakeup(&pmp->inmem_dirty_chains);
	}
}
/*
 * Returns 0 if the filesystem has tons of free space
 * Returns 1 if the filesystem has less than 10% remaining
 * Returns 2 if the filesystem has less than 2%/5% (user/root) remaining.
 */
int
hammer2_vfs_enospace(hammer2_inode_t *ip, off_t bytes, struct ucred *cred)
{
	hammer2_pfs_t *pmp;
	hammer2_dev_t *hmp;
	hammer2_off_t free_reserved;
	hammer2_off_t free_nominal;
	int i;

	pmp = ip->pmp;

	if (pmp->free_ticks == 0 || pmp->free_ticks != ticks) {
		free_reserved = HAMMER2_SEGSIZE;
		free_nominal = 0x7FFFFFFFFFFFFFFFLLU;
		for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
			hmp = pmp->pfs_hmps[i];
			if (hmp == NULL)
				continue;
			if (pmp->pfs_types[i] != HAMMER2_PFSTYPE_MASTER &&
			    pmp->pfs_types[i] != HAMMER2_PFSTYPE_SOFT_MASTER)
				continue;

			if (free_nominal > hmp->voldata.allocator_free)
				free_nominal = hmp->voldata.allocator_free;
			if (free_reserved < hmp->free_reserved)
				free_reserved = hmp->free_reserved;
		}

		pmp->free_reserved = free_reserved;
		pmp->free_nominal = free_nominal;
		pmp->free_ticks = ticks;
	} else {
		free_reserved = pmp->free_reserved;
		free_nominal = pmp->free_nominal;
	}
	if (cred && cred->cr_uid != 0) {
		if ((int64_t)(free_nominal - bytes) <
		    (int64_t)free_reserved) {
			return 2;
		}
	} else {
		if ((int64_t)(free_nominal - bytes) <
		    (int64_t)free_reserved / 2) {
			return 2;
		}
	}
	if ((int64_t)(free_nominal - bytes) < (int64_t)free_reserved * 2)
		return 1;

	return 0;
}
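
/*
 * Worked example of the thresholds above: free_reserved is normally set to
 * allocator_size / 20 (about 5%) by hammer2_install_volume_header().  On a
 * 1TB volume that is roughly 50GB, so a non-root writer starts getting the
 * hard "2" result below ~50GB free, root below ~25GB, and both see the
 * soft "1" result below ~100GB free.
 */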
void
hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp, char pfx,
		   u_int flags)
{
	hammer2_chain_t *scan;
	hammer2_chain_t *parent;

	--*countp;
	if (*countp == 0) {
		kprintf("%*.*s...\n", tab, tab, "");
		return;
	}
	if (*countp < 0)
		return;
	kprintf("%*.*s%c-chain %p.%d %016jx/%d mir=%016jx\n",
		tab, tab, "", pfx,
		chain, chain->bref.type,
		chain->bref.key, chain->bref.keybits,
		chain->bref.mirror_tid);

	kprintf("%*.*s [%08x] (%s) refs=%d",
		tab, tab, "",
		chain->flags,
		((chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		  chain->data) ? (char *)chain->data->ipdata.filename : "?"),
		chain->refs);

	parent = chain->parent;
	if (parent) {
		kprintf("\n%*.*s p=%p [pflags %08x prefs %d",
			tab, tab, "",
			parent, parent->flags, parent->refs);
	}
	if (RB_EMPTY(&chain->core.rbtree)) {
		kprintf("\n");
	} else {
		kprintf(" {\n");
		RB_FOREACH(scan, hammer2_chain_tree, &chain->core.rbtree) {
			if ((scan->flags & flags) || flags == (u_int)-1) {
				hammer2_dump_chain(scan, tab + 4, countp, 'a',
						   flags);
			}
		}
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE && chain->data)
			kprintf("%*.*s}(%s)\n", tab, tab, "",
				chain->data->ipdata.filename);
		else
			kprintf("%*.*s}\n", tab, tab, "");
	}
}