1 /*
2 * Copyright (c) 2011-2017 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
17 * distribution.
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
37 * HAMMER2 IN-MEMORY CACHE OF MEDIA STRUCTURES
39 * This header file contains structures used internally by the HAMMER2
40 * implementation. See hammer2_disk.h for on-disk structures.
 * There is an in-memory representation of all on-media data structures.
43 * Almost everything is represented by a hammer2_chain structure in-memory.
44 * Other higher-level structures typically map to chains.
46 * A great deal of data is accessed simply via its buffer cache buffer,
47 * which is mapped for the duration of the chain's lock. Hammer2 must
48 * implement its own buffer cache layer on top of the system layer to
49 * allow for different threads to lock different sub-block-sized buffers.
51 * When modifications are made to a chain a new filesystem block must be
52 * allocated. Multiple modifications do not typically allocate new blocks
53 * until the current block has been flushed. Flushes do not block the
54 * front-end unless the front-end operation crosses the current inode being
55 * flushed.
57 * The in-memory representation may remain cached (for example in order to
58 * placemark clustering locks) even after the related data has been
59 * detached.
62 #ifndef _VFS_HAMMER2_HAMMER2_H_
63 #define _VFS_HAMMER2_HAMMER2_H_
65 #ifdef _KERNEL
66 #include <sys/param.h>
67 #endif
68 #include <sys/types.h>
69 #ifdef _KERNEL
70 #include <sys/kernel.h>
71 #endif
72 #include <sys/conf.h>
73 #ifdef _KERNEL
74 #include <sys/systm.h>
75 #endif
76 #include <sys/tree.h>
77 #include <sys/malloc.h>
78 #include <sys/mount.h>
79 #include <sys/vnode.h>
80 #include <sys/proc.h>
81 #include <sys/mountctl.h>
82 #include <sys/priv.h>
83 #include <sys/stat.h>
84 #include <sys/thread.h>
85 #include <sys/globaldata.h>
86 #include <sys/lockf.h>
87 #include <sys/buf.h>
88 #include <sys/queue.h>
89 #include <sys/limits.h>
90 #include <sys/dmsg.h>
91 #include <sys/mutex.h>
92 #ifdef _KERNEL
93 #include <sys/kern_syscall.h>
94 #endif
96 #ifdef _KERNEL
97 #include <sys/signal2.h>
98 #include <sys/buf2.h>
99 #include <sys/mutex2.h>
100 #include <sys/thread2.h>
101 #endif
103 #include "hammer2_xxhash.h"
104 #include "hammer2_disk.h"
105 #include "hammer2_mount.h"
106 #include "hammer2_ioctl.h"
108 struct hammer2_io;
109 struct hammer2_chain;
110 struct hammer2_cluster;
111 struct hammer2_inode;
112 struct hammer2_dev;
113 struct hammer2_pfs;
114 struct hammer2_span;
115 struct hammer2_state;
116 struct hammer2_msg;
117 struct hammer2_thread;
118 union hammer2_xop;
121 * Mutex and lock shims. Hammer2 requires support for asynchronous and
122 * abortable locks, and both exclusive and shared spinlocks. Normal
123 * synchronous non-abortable locks can be substituted for spinlocks.
125 typedef mtx_t hammer2_mtx_t;
126 typedef mtx_link_t hammer2_mtx_link_t;
127 typedef mtx_state_t hammer2_mtx_state_t;
129 typedef struct spinlock hammer2_spin_t;
131 #define hammer2_mtx_ex mtx_lock_ex_quick
132 #define hammer2_mtx_sh mtx_lock_sh_quick
133 #define hammer2_mtx_sh_again mtx_lock_sh_again
134 #define hammer2_mtx_unlock mtx_unlock
135 #define hammer2_mtx_downgrade mtx_downgrade
136 #define hammer2_mtx_owned mtx_owned
137 #define hammer2_mtx_init mtx_init
138 #define hammer2_mtx_temp_release mtx_lock_temp_release
139 #define hammer2_mtx_temp_restore mtx_lock_temp_restore
140 #define hammer2_mtx_refs mtx_lockrefs
142 #define hammer2_spin_init spin_init
143 #define hammer2_spin_sh spin_lock_shared
144 #define hammer2_spin_ex spin_lock
145 #define hammer2_spin_unsh spin_unlock_shared
146 #define hammer2_spin_unex spin_unlock
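/*
 * Illustrative sketch (hypothetical code, not part of this header): the
 * spinlock shims above follow the usual shared-read / exclusive-write
 * pattern when sampling or adjusting a field protected by a hammer2_spin_t.
 */
#if 0
	hammer2_spin_sh(&core->spin);		/* shared: read-only peek */
	count = core->live_count;
	hammer2_spin_unsh(&core->spin);

	hammer2_spin_ex(&core->spin);		/* exclusive: modification */
	++core->live_count;
	hammer2_spin_unex(&core->spin);
#endif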
148 TAILQ_HEAD(hammer2_xop_list, hammer2_xop_head);
149 TAILQ_HEAD(hammer2_chain_list, hammer2_chain);
151 typedef struct hammer2_xop_list hammer2_xop_list_t;
#ifdef _KERNEL

/*
 * General lock support
 */
static __inline
int
hammer2_mtx_upgrade_try(hammer2_mtx_t *mtx)
{
	return mtx_upgrade_try(mtx);
}

#endif
167 * The xid tracks internal transactional updates.
169 * XXX fix-me, really needs to be 64-bits
171 typedef uint32_t hammer2_xid_t;
173 #define HAMMER2_XID_MIN 0x00000000U
174 #define HAMMER2_XID_MAX 0x7FFFFFFFU
176 #define HAMMER2_LIMIT_DIRTY_CHAINS (65536)
179 * The chain structure tracks a portion of the media topology from the
180 * root (volume) down. Chains represent volumes, inodes, indirect blocks,
 * data blocks, and freemap nodes and leaves.
183 * The chain structure utilizes a simple singly-homed topology and the
184 * chain's in-memory topology will move around as the chains do, due mainly
185 * to renames and indirect block creation.
187 * Block Table Updates
189 * Block table updates for insertions and updates are delayed until the
190 * flush. This allows us to avoid having to modify the parent chain
191 * all the way to the root.
193 * Block table deletions are performed immediately (modifying the parent
194 * in the process) because the flush code uses the chain structure to
195 * track delayed updates and the chain will be (likely) gone or moved to
196 * another location in the topology after a deletion.
198 * A prior iteration of the code tried to keep the relationship intact
199 * on deletes by doing a delete-duplicate operation on the chain, but
200 * it added way too much complexity to the codebase.
202 * Flush Synchronization
204 * The flush code must flush modified chains bottom-up. Because chain
205 * structures can shift around and are NOT topologically stable,
206 * modified chains are independently indexed for the flush. As the flush
207 * runs it modifies (or further modifies) and updates the parents,
208 * propagating the flush all the way to the volume root.
210 * Modifying front-end operations can occur during a flush but will block
211 * in two cases: (1) when the front-end tries to operate on the inode
212 * currently in the midst of being flushed and (2) if the front-end
213 * crosses an inode currently being flushed (such as during a rename).
214 * So, for example, if you rename directory "x" to "a/b/c/d/e/f/g/x" and
215 * the flusher is currently working on "a/b/c", the rename will block
216 * temporarily in order to ensure that "x" exists in one place or the
217 * other.
219 * Meta-data statistics are updated by the flusher. The front-end will
220 * make estimates but meta-data must be fully synchronized only during a
221 * flush in order to ensure that it remains correct across a crash.
223 * Multiple flush synchronizations can theoretically be in-flight at the
224 * same time but the implementation is not coded to handle the case and
225 * currently serializes them.
227 * Snapshots:
229 * Snapshots currently require the subdirectory tree being snapshotted
230 * to be flushed. The snapshot then creates a new super-root inode which
231 * copies the flushed blockdata of the directory or file that was
232 * snapshotted.
234 * RBTREE NOTES:
236 * - Note that the radix tree runs in powers of 2 only so sub-trees
237 * cannot straddle edges.
239 RB_HEAD(hammer2_chain_tree, hammer2_chain);
240 TAILQ_HEAD(h2_flush_list, hammer2_chain);
241 TAILQ_HEAD(h2_core_list, hammer2_chain);
243 #define CHAIN_CORE_DELETE_BMAP_ENTRIES \
244 (HAMMER2_PBUFSIZE / sizeof(hammer2_blockref_t) / sizeof(uint32_t))
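/*
 * Worked example: assuming the usual HAMMER2_PBUFSIZE of 65536 bytes and
 * the 128-byte on-media hammer2_blockref_t from hammer2_disk.h, this
 * evaluates to 65536 / 128 / 4 = 128 uint32_t entries.
 */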
247 * Core topology for chain (embedded in chain). Protected by a spinlock.
249 struct hammer2_chain_core {
250 hammer2_spin_t spin;
251 struct hammer2_chain_tree rbtree; /* sub-chains */
252 int live_zero; /* blockref array opt */
253 u_int live_count; /* live (not deleted) chains in tree */
254 u_int chain_count; /* live + deleted chains under core */
255 int generation; /* generation number (inserts only) */
258 typedef struct hammer2_chain_core hammer2_chain_core_t;
260 RB_HEAD(hammer2_io_tree, hammer2_io);
263 * DIO - Management structure wrapping system buffer cache.
265 * HAMMER2 uses an I/O abstraction that allows it to cache and manipulate
 * fixed-sized filesystem buffers fronted by variable-sized hammer2_chain
267 * structures.
269 struct hammer2_io {
270 RB_ENTRY(hammer2_io) rbnode; /* indexed by device offset */
271 struct hammer2_dev *hmp;
272 struct buf *bp;
273 off_t pbase;
274 uint64_t refs;
275 int psize;
276 int act; /* activity */
277 int btype; /* approximate BREF_TYPE_* */
278 int ticks;
279 int error;
280 int unused01;
281 uint64_t dedup_valid; /* valid for dedup operation */
282 uint64_t dedup_alloc; /* allocated / de-dupable */
285 typedef struct hammer2_io hammer2_io_t;
287 #define HAMMER2_DIO_INPROG 0x8000000000000000LLU /* bio in progress */
288 #define HAMMER2_DIO_GOOD 0x4000000000000000LLU /* dio->bp is stable */
289 #define HAMMER2_DIO_WAITING 0x2000000000000000LLU /* wait on INPROG */
290 #define HAMMER2_DIO_DIRTY 0x1000000000000000LLU /* flush last drop */
292 #define HAMMER2_DIO_MASK 0x00FFFFFFFFFFFFFFLLU
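/*
 * Presumed layout (illustrative only): dio->refs packs the state flags
 * above into its top bits and keeps the actual reference count in the
 * low bits covered by HAMMER2_DIO_MASK, e.g.:
 */
#if 0
	nrefs = dio->refs & HAMMER2_DIO_MASK;	/* active reference count */
	if (dio->refs & HAMMER2_DIO_GOOD)	/* dio->bp usable */
		data = hammer2_io_data(dio, lbase);
#endif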
295 * Primary chain structure keeps track of the topology in-memory.
297 struct hammer2_chain {
298 hammer2_mtx_t lock;
299 hammer2_chain_core_t core;
300 RB_ENTRY(hammer2_chain) rbnode; /* live chain(s) */
301 hammer2_blockref_t bref;
302 struct hammer2_chain *parent;
303 struct hammer2_state *state; /* if active cache msg */
304 struct hammer2_dev *hmp;
305 struct hammer2_pfs *pmp; /* A PFS or super-root (spmp) */
307 hammer2_io_t *dio; /* physical data buffer */
308 u_int bytes; /* physical data size */
309 u_int flags;
310 u_int refs;
311 u_int lockcnt;
312 int error; /* on-lock data error state */
313 int cache_index; /* heur speeds up lookup */
315 hammer2_media_data_t *data; /* data pointer shortcut */
316 TAILQ_ENTRY(hammer2_chain) flush_node; /* flush list */
317 TAILQ_ENTRY(hammer2_chain) lru_node; /* 0-refs LRU */
320 typedef struct hammer2_chain hammer2_chain_t;
322 int hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2);
323 RB_PROTOTYPE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
326 * Special notes on flags:
328 * INITIAL - This flag allows a chain to be created and for storage to
329 * be allocated without having to immediately instantiate the
330 * related buffer. The data is assumed to be all-zeros. It
331 * is primarily used for indirect blocks.
333 * MODIFIED - The chain's media data has been modified. Prevents chain
334 * free on lastdrop if still in the topology.
336 * UPDATE - Chain might not be modified but parent blocktable needs
337 * an update. Prevents chain free on lastdrop if still in
338 * the topology.
340 * FICTITIOUS - Faked chain as a placeholder for an error condition. This
341 * chain is unsuitable for I/O.
343 * BMAPPED - Indicates that the chain is present in the parent blockmap.
345 * BMAPUPD - Indicates that the chain is present but needs to be updated
346 * in the parent blockmap.
348 #define HAMMER2_CHAIN_MODIFIED 0x00000001 /* dirty chain data */
349 #define HAMMER2_CHAIN_ALLOCATED 0x00000002 /* kmalloc'd chain */
350 #define HAMMER2_CHAIN_DESTROY 0x00000004
351 #define HAMMER2_CHAIN_DEDUPABLE 0x00000008 /* registered w/dedup */
352 #define HAMMER2_CHAIN_DELETED 0x00000010 /* deleted chain */
353 #define HAMMER2_CHAIN_INITIAL 0x00000020 /* initial create */
354 #define HAMMER2_CHAIN_UPDATE 0x00000040 /* need parent update */
355 #define HAMMER2_CHAIN_DEFERRED 0x00000080 /* flush depth defer */
356 #define HAMMER2_CHAIN_TESTEDGOOD 0x00000100 /* crc tested good */
357 #define HAMMER2_CHAIN_ONFLUSH 0x00000200 /* on a flush list */
358 #define HAMMER2_CHAIN_FICTITIOUS 0x00000400 /* unsuitable for I/O */
359 #define HAMMER2_CHAIN_VOLUMESYNC 0x00000800 /* needs volume sync */
360 #define HAMMER2_CHAIN_DELAYED 0x00001000 /* delayed flush */
361 #define HAMMER2_CHAIN_COUNTEDBREFS 0x00002000 /* block table stats */
362 #define HAMMER2_CHAIN_ONRBTREE 0x00004000 /* on parent RB tree */
363 #define HAMMER2_CHAIN_ONLRU 0x00008000 /* on LRU list */
364 #define HAMMER2_CHAIN_EMBEDDED 0x00010000 /* embedded data */
365 #define HAMMER2_CHAIN_RELEASE 0x00020000 /* don't keep around */
366 #define HAMMER2_CHAIN_BMAPPED 0x00040000 /* present in blkmap */
367 #define HAMMER2_CHAIN_BMAPUPD 0x00080000 /* +needs updating */
368 #define HAMMER2_CHAIN_IOINPROG 0x00100000 /* I/O interlock */
369 #define HAMMER2_CHAIN_IOSIGNAL 0x00200000 /* I/O interlock */
370 #define HAMMER2_CHAIN_PFSBOUNDARY 0x00400000 /* super->pfs inode */
371 #define HAMMER2_CHAIN_HINT_LEAF_COUNT 0x00800000 /* redo leaf count */
373 #define HAMMER2_CHAIN_FLUSH_MASK (HAMMER2_CHAIN_MODIFIED | \
374 HAMMER2_CHAIN_UPDATE | \
375 HAMMER2_CHAIN_ONFLUSH | \
376 HAMMER2_CHAIN_DESTROY)
379 * Hammer2 error codes, used by chain->error and cluster->error. The error
380 * code is typically set on-lock unless no I/O was requested, and set on
381 * I/O otherwise. If set for a cluster it generally means that the cluster
382 * code could not find a valid copy to present.
384 * All H2 error codes are flags and can be accumulated by ORing them
385 * together.
387 * IO - An I/O error occurred
388 * CHECK - I/O succeeded but did not match the check code
389 * INCOMPLETE - A cluster is not complete enough to use, or
390 * a chain cannot be loaded because its parent has an error.
392 * NOTE: API allows callers to check zero/non-zero to determine if an error
393 * condition exists.
395 * NOTE: Chain's data field is usually NULL on an IO error but not necessarily
396 * NULL on other errors. Check chain->error, not chain->data.
398 #define HAMMER2_ERROR_NONE 0 /* no error (must be 0) */
399 #define HAMMER2_ERROR_EIO 0x00000001 /* device I/O error */
400 #define HAMMER2_ERROR_CHECK 0x00000002 /* check code error */
401 #define HAMMER2_ERROR_INCOMPLETE 0x00000004 /* incomplete cluster */
402 #define HAMMER2_ERROR_DEPTH 0x00000008 /* tmp depth limit */
403 #define HAMMER2_ERROR_BADBREF 0x00000010 /* illegal bref */
404 #define HAMMER2_ERROR_ENOSPC 0x00000020 /* allocation failure */
405 #define HAMMER2_ERROR_ENOENT 0x00000040 /* entry not found */
406 #define HAMMER2_ERROR_ENOTEMPTY 0x00000080 /* dir not empty */
407 #define HAMMER2_ERROR_EAGAIN 0x00000100 /* retry */
408 #define HAMMER2_ERROR_ENOTDIR 0x00000200 /* not directory */
409 #define HAMMER2_ERROR_EISDIR 0x00000400 /* is directory */
410 #define HAMMER2_ERROR_EINPROGRESS 0x00000800 /* already running */
411 #define HAMMER2_ERROR_ABORTED 0x00001000 /* aborted operation */
412 #define HAMMER2_ERROR_EOF 0x00002000 /* end of scan */
413 #define HAMMER2_ERROR_EINVAL 0x00004000 /* catch-all */
414 #define HAMMER2_ERROR_EEXIST 0x00008000 /* entry exists */
415 #define HAMMER2_ERROR_EDEADLK 0x00010000
416 #define HAMMER2_ERROR_ESRCH 0x00020000
417 #define HAMMER2_ERROR_ETIMEDOUT 0x00040000
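/*
 * Illustrative sketch (hypothetical code): because these error codes are
 * independent flag bits they can simply be OR'd together as an operation
 * progresses, and the accumulated value tested for zero / non-zero before
 * being translated for the caller.
 */
#if 0
	error = HAMMER2_ERROR_NONE;
	error |= chain_error;			/* e.g. HAMMER2_ERROR_CHECK */
	error |= cluster_error;			/* e.g. HAMMER2_ERROR_EIO */
	if (error)
		return (hammer2_error_to_errno(error));
#endif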
420 * Flags passed to hammer2_chain_lookup() and hammer2_chain_next()
422 * NOTES:
423 * NOLOCK - Input and output chains are referenced only and not
424 * locked. Output chain might be temporarily locked
425 * internally.
427 * NODATA - Asks that the chain->data not be resolved in order
428 * to avoid I/O.
430 * NODIRECT - Prevents a lookup of offset 0 in an inode from returning
431 * the inode itself if the inode is in DIRECTDATA mode
432 * (i.e. file is <= 512 bytes). Used by the synchronization
433 * code to prevent confusion.
435 * SHARED - The input chain is expected to be locked shared,
436 * and the output chain is locked shared.
438 * MATCHIND - Allows an indirect block / freemap node to be returned
439 * when the passed key range matches the radix. Remember
440 * that key_end is inclusive (e.g. {0x000,0xFFF},
441 * not {0x000,0x1000}).
443 * (Cannot be used for remote or cluster ops).
445 * ALLNODES - Allows NULL focus.
447 * ALWAYS - Always resolve the data. If ALWAYS and NODATA are both
448 * missing, bulk file data is not resolved but inodes and
 * other meta-data will be.
451 * NOUNLOCK - Used by hammer2_chain_next() to leave the lock on
452 * the input chain intact. The chain is still dropped.
453 * This allows the caller to add a reference to the chain
454 * and retain it in a locked state (used by the
455 * XOP/feed/collect code).
457 #define HAMMER2_LOOKUP_NOLOCK 0x00000001 /* ref only */
458 #define HAMMER2_LOOKUP_NODATA 0x00000002 /* data left NULL */
459 #define HAMMER2_LOOKUP_NODIRECT 0x00000004 /* no offset=0 DD */
460 #define HAMMER2_LOOKUP_SHARED 0x00000100
461 #define HAMMER2_LOOKUP_MATCHIND 0x00000200 /* return all chains */
462 #define HAMMER2_LOOKUP_ALLNODES 0x00000400 /* allow NULL focus */
463 #define HAMMER2_LOOKUP_ALWAYS 0x00000800 /* resolve data */
464 #define HAMMER2_LOOKUP_NOUNLOCK 0x00001000 /* leave lock intact */
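/*
 * Illustrative sketch (hypothetical code) of the ranged-scan idiom built
 * on hammer2_chain_lookup()/hammer2_chain_next() with the flags above.
 * Setup of parent/key_beg/key_end is omitted; breaking out of the loop
 * early would leave a locked, referenced chain that the caller must
 * unlock and drop.
 */
#if 0
	chain = hammer2_chain_lookup(&parent, &key_next,
				     key_beg, key_end, &error,
				     HAMMER2_LOOKUP_SHARED |
				     HAMMER2_LOOKUP_NODATA);
	while (chain) {
		/* examine chain->bref ... */
		chain = hammer2_chain_next(&parent, chain, &key_next,
					   key_next, key_end, &error,
					   HAMMER2_LOOKUP_SHARED |
					   HAMMER2_LOOKUP_NODATA);
	}
#endif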
467 * Flags passed to hammer2_chain_modify() and hammer2_chain_resize()
469 * NOTE: OPTDATA allows us to avoid instantiating buffers for INDIRECT
470 * blocks in the INITIAL-create state.
472 #define HAMMER2_MODIFY_OPTDATA 0x00000002 /* data can be NULL */
473 #define HAMMER2_MODIFY_NO_MODIFY_TID 0x00000004
474 #define HAMMER2_MODIFY_UNUSED0008 0x00000008
477 * Flags passed to hammer2_chain_lock()
479 * NOTE: RDONLY is set to optimize cluster operations when *no* modifications
480 * will be made to either the cluster being locked or any underlying
481 * cluster. It allows the cluster to lock and access data for a subset
482 * of available nodes instead of all available nodes.
484 #define HAMMER2_RESOLVE_NEVER 1
485 #define HAMMER2_RESOLVE_MAYBE 2
486 #define HAMMER2_RESOLVE_ALWAYS 3
487 #define HAMMER2_RESOLVE_MASK 0x0F
489 #define HAMMER2_RESOLVE_SHARED 0x10 /* request shared lock */
490 #define HAMMER2_RESOLVE_LOCKAGAIN 0x20 /* another shared lock */
491 #define HAMMER2_RESOLVE_RDONLY 0x40 /* higher level op flag */
494 * Flags passed to hammer2_chain_delete()
496 #define HAMMER2_DELETE_PERMANENT 0x0001
499 * Flags passed to hammer2_chain_insert() or hammer2_chain_rename()
500 * or hammer2_chain_create().
502 #define HAMMER2_INSERT_PFSROOT 0x0004
503 #define HAMMER2_INSERT_SAMEPARENT 0x0008
506 * Flags passed to hammer2_chain_delete_duplicate()
508 #define HAMMER2_DELDUP_RECORE 0x0001
511 * Cluster different types of storage together for allocations
513 #define HAMMER2_FREECACHE_INODE 0
514 #define HAMMER2_FREECACHE_INDIR 1
515 #define HAMMER2_FREECACHE_DATA 2
516 #define HAMMER2_FREECACHE_UNUSED3 3
517 #define HAMMER2_FREECACHE_TYPES 4
520 * hammer2_freemap_alloc() block preference
522 #define HAMMER2_OFF_NOPREF ((hammer2_off_t)-1)
525 * BMAP read-ahead maximum parameters
527 #define HAMMER2_BMAP_COUNT 16 /* max bmap read-ahead */
528 #define HAMMER2_BMAP_BYTES (HAMMER2_PBUFSIZE * HAMMER2_BMAP_COUNT)
531 * hammer2_freemap_adjust()
533 #define HAMMER2_FREEMAP_DORECOVER 1
534 #define HAMMER2_FREEMAP_DOMAYFREE 2
535 #define HAMMER2_FREEMAP_DOREALFREE 3
538 * HAMMER2 cluster - A set of chains representing the same entity.
 * hammer2_cluster typically represents a temporary set of representative
 * chains. The one exception is that a hammer2_cluster is embedded in
 * hammer2_inode. This embedded cluster is ONLY used to track the
 * representative chains and cannot be directly locked.
545 * A cluster is usually temporary (and thus per-thread) for locking purposes,
546 * allowing us to embed the asynchronous storage required for cluster
547 * operations in the cluster itself and adjust the state and status without
548 * having to worry too much about SMP issues.
550 * The exception is the cluster embedded in the hammer2_inode structure.
551 * This is used to cache the cluster state on an inode-by-inode basis.
552 * Individual hammer2_chain structures not incorporated into clusters might
 * also stick around to cache miscellaneous elements.
555 * Because the cluster is a 'working copy' and is usually subject to cluster
556 * quorum rules, it is quite possible for us to end up with an insufficient
557 * number of live chains to execute an operation. If an insufficient number
558 * of chains remain in a working copy, the operation may have to be
 * downgraded, retried, stalled until the requisite number of chains is
 * available, or possibly even error out depending on the mount type.
562 * A cluster's focus is set when it is locked. The focus can only be set
563 * to a chain still part of the synchronized set.
565 #define HAMMER2_MAXCLUSTER 8
566 #define HAMMER2_XOPMASK_CLUSTER ((1U << HAMMER2_MAXCLUSTER) - 1)
567 #define HAMMER2_XOPFIFO 16
568 #define HAMMER2_XOPFIFO_MASK (HAMMER2_XOPFIFO - 1)
569 #define HAMMER2_XOPGROUPS 32
570 #define HAMMER2_XOPGROUPS_MASK (HAMMER2_XOPGROUPS - 1)
571 #define HAMMER2_XOPMASK_VOP 0x80000000U
572 #define HAMMER2_XOPMASK_FIFOW 0x40000000U
574 #define HAMMER2_XOPMASK_ALLDONE (HAMMER2_XOPMASK_VOP | HAMMER2_XOPMASK_CLUSTER)
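/*
 * Worked values: with HAMMER2_MAXCLUSTER = 8, HAMMER2_XOPMASK_CLUSTER is
 * 0x000000FFU (one bit per possible cluster node), so
 * HAMMER2_XOPMASK_ALLDONE is 0x800000FFU (all node bits plus the
 * front-end VOP bit).
 */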
576 #define HAMMER2_SPECTHREADS 1 /* sync */
578 struct hammer2_cluster_item {
579 hammer2_chain_t *chain;
580 int error;
581 uint32_t flags;
584 typedef struct hammer2_cluster_item hammer2_cluster_item_t;
587 * INVALID - Invalid for focus, i.e. not part of synchronized set.
588 * Once set, this bit is sticky across operations.
 * FEMOD - Indicates that front-end modifying operations can
 * mess with this entry and that a MODSYNC copy will
 * also affect it.
594 #define HAMMER2_CITEM_INVALID 0x00000001
595 #define HAMMER2_CITEM_FEMOD 0x00000002
596 #define HAMMER2_CITEM_NULL 0x00000004
598 struct hammer2_cluster {
599 int refs; /* track for deallocation */
600 int ddflag;
601 struct hammer2_pfs *pmp;
602 uint32_t flags;
603 int nchains;
604 int error; /* error code valid on lock */
605 int focus_index;
606 hammer2_chain_t *focus; /* current focus (or mod) */
607 hammer2_cluster_item_t array[HAMMER2_MAXCLUSTER];
610 typedef struct hammer2_cluster hammer2_cluster_t;
613 * WRHARD - Hard mounts can write fully synchronized
614 * RDHARD - Hard mounts can read fully synchronized
615 * UNHARD - Unsynchronized masters present
616 * NOHARD - No masters visible
617 * WRSOFT - Soft mounts can write to at least the SOFT_MASTER
618 * RDSOFT - Soft mounts can read from at least a SOFT_SLAVE
619 * UNSOFT - Unsynchronized slaves present
620 * NOSOFT - No slaves visible
621 * RDSLAVE - slaves are accessible (possibly unsynchronized or remote).
622 * MSYNCED - All masters are fully synchronized
623 * SSYNCED - All known local slaves are fully synchronized to masters
625 * All available masters are always incorporated. All PFSs belonging to a
626 * cluster (master, slave, copy, whatever) always try to synchronize the
627 * total number of known masters in the PFSs root inode.
629 * A cluster might have access to many slaves, copies, or caches, but we
630 * have a limited number of cluster slots. Any such elements which are
631 * directly mounted from block device(s) will always be incorporated. Note
632 * that SSYNCED only applies to such elements which are directly mounted,
633 * not to any remote slaves, copies, or caches that could be available. These
634 * bits are used to monitor and drive our synchronization threads.
636 * When asking the question 'is any data accessible at all', then a simple
637 * test against (RDHARD|RDSOFT|RDSLAVE) gives you the answer. If any of
638 * these bits are set the object can be read with certain caveats:
 * RDHARD - no caveats.  RDSOFT - authoritative but might not be synchronized.
 * RDSLAVE - not authoritative, has some data but it could be old or
 * incomplete.
643 * When both soft and hard mounts are available, data will be read and written
644 * via the soft mount only. But all might be in the cluster because
645 * background synchronization threads still need to do their work.
647 #define HAMMER2_CLUSTER_INODE 0x00000001 /* embedded in inode struct */
648 #define HAMMER2_CLUSTER_UNUSED2 0x00000002
649 #define HAMMER2_CLUSTER_LOCKED 0x00000004 /* cluster lks not recursive */
650 #define HAMMER2_CLUSTER_WRHARD 0x00000100 /* hard-mount can write */
651 #define HAMMER2_CLUSTER_RDHARD 0x00000200 /* hard-mount can read */
652 #define HAMMER2_CLUSTER_UNHARD 0x00000400 /* unsynchronized masters */
653 #define HAMMER2_CLUSTER_NOHARD 0x00000800 /* no masters visible */
654 #define HAMMER2_CLUSTER_WRSOFT 0x00001000 /* soft-mount can write */
655 #define HAMMER2_CLUSTER_RDSOFT 0x00002000 /* soft-mount can read */
656 #define HAMMER2_CLUSTER_UNSOFT 0x00004000 /* unsynchronized slaves */
657 #define HAMMER2_CLUSTER_NOSOFT 0x00008000 /* no slaves visible */
658 #define HAMMER2_CLUSTER_MSYNCED 0x00010000 /* all masters synchronized */
659 #define HAMMER2_CLUSTER_SSYNCED 0x00020000 /* known slaves synchronized */
661 #define HAMMER2_CLUSTER_ANYDATA ( HAMMER2_CLUSTER_RDHARD | \
662 HAMMER2_CLUSTER_RDSOFT | \
663 HAMMER2_CLUSTER_RDSLAVE)
665 #define HAMMER2_CLUSTER_RDOK ( HAMMER2_CLUSTER_RDHARD | \
666 HAMMER2_CLUSTER_RDSOFT)
668 #define HAMMER2_CLUSTER_WROK ( HAMMER2_CLUSTER_WRHARD | \
669 HAMMER2_CLUSTER_WRSOFT)
671 #define HAMMER2_CLUSTER_ZFLAGS ( HAMMER2_CLUSTER_WRHARD | \
672 HAMMER2_CLUSTER_RDHARD | \
673 HAMMER2_CLUSTER_WRSOFT | \
674 HAMMER2_CLUSTER_RDSOFT | \
675 HAMMER2_CLUSTER_MSYNCED | \
676 HAMMER2_CLUSTER_SSYNCED)
/*
 * Helper functions (cluster must be locked for flags to be valid).
 */
static __inline
int
hammer2_cluster_rdok(hammer2_cluster_t *cluster)
{
	return (cluster->flags & HAMMER2_CLUSTER_RDOK);
}

static __inline
int
hammer2_cluster_wrok(hammer2_cluster_t *cluster)
{
	return (cluster->flags & HAMMER2_CLUSTER_WROK);
}
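/*
 * Hypothetical companion sketch (not part of the hammer2 API): the
 * "is any data accessible at all" test described above would take the
 * same form, using the HAMMER2_CLUSTER_ANYDATA rollup mask.
 */
#if 0
static __inline int
hammer2_cluster_anydata(hammer2_cluster_t *cluster)
{
	return (cluster->flags & HAMMER2_CLUSTER_ANYDATA);
}
#endif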
695 RB_HEAD(hammer2_inode_tree, hammer2_inode);
698 * A hammer2 inode.
700 * NOTE: The inode-embedded cluster is never used directly for I/O (since
701 * it may be shared). Instead it will be replicated-in and synchronized
702 * back out if changed.
704 struct hammer2_inode {
705 RB_ENTRY(hammer2_inode) rbnode; /* inumber lookup (HL) */
706 hammer2_mtx_t lock; /* inode lock */
707 hammer2_mtx_t truncate_lock; /* prevent truncates */
708 struct hammer2_pfs *pmp; /* PFS mount */
709 struct vnode *vp;
710 struct spinlock cluster_spin; /* update cluster */
711 hammer2_cluster_t cluster;
712 struct lockf advlock;
713 u_int flags;
714 u_int refs; /* +vpref, +flushref */
715 uint8_t comp_heuristic;
716 hammer2_inode_meta_t meta; /* copy of meta-data */
717 hammer2_off_t osize;
720 typedef struct hammer2_inode hammer2_inode_t;
723 * MODIFIED - Inode is in a modified state, ip->meta may have changes.
724 * RESIZED - Inode truncated (any) or inode extended beyond
725 * EMBEDDED_BYTES.
727 #define HAMMER2_INODE_MODIFIED 0x0001
728 #define HAMMER2_INODE_SROOT 0x0002 /* kmalloc special case */
729 #define HAMMER2_INODE_RENAME_INPROG 0x0004
730 #define HAMMER2_INODE_ONRBTREE 0x0008
731 #define HAMMER2_INODE_RESIZED 0x0010 /* requires inode_fsync */
732 #define HAMMER2_INODE_ISDELETED 0x0020 /* deleted */
733 #define HAMMER2_INODE_ISUNLINKED 0x0040
734 #define HAMMER2_INODE_METAGOOD 0x0080 /* inode meta-data good */
735 #define HAMMER2_INODE_ONSIDEQ 0x0100 /* on side processing queue */
737 int hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2);
738 RB_PROTOTYPE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
739 hammer2_tid_t);
742 * inode-unlink side-structure
744 struct hammer2_inode_sideq {
745 TAILQ_ENTRY(hammer2_inode_sideq) entry;
746 hammer2_inode_t *ip;
748 TAILQ_HEAD(h2_sideq_list, hammer2_inode_sideq);
750 typedef struct hammer2_inode_sideq hammer2_inode_sideq_t;
753 * Transaction management sub-structure under hammer2_pfs
755 struct hammer2_trans {
756 uint32_t flags;
757 uint32_t sync_wait;
760 typedef struct hammer2_trans hammer2_trans_t;
762 #define HAMMER2_TRANS_ISFLUSH 0x80000000 /* flush code */
763 #define HAMMER2_TRANS_BUFCACHE 0x40000000 /* bio strategy */
764 #define HAMMER2_TRANS_UNUSED20 0x20000000
765 #define HAMMER2_TRANS_FPENDING 0x10000000 /* flush pending */
766 #define HAMMER2_TRANS_WAITING 0x08000000 /* someone waiting */
767 #define HAMMER2_TRANS_MASK 0x00FFFFFF /* count mask */
769 #define HAMMER2_FREEMAP_HEUR_NRADIX 4 /* pwr 2 PBUFRADIX-MINIORADIX */
770 #define HAMMER2_FREEMAP_HEUR_TYPES 8
771 #define HAMMER2_FREEMAP_HEUR_SIZE (HAMMER2_FREEMAP_HEUR_NRADIX * \
772 HAMMER2_FREEMAP_HEUR_TYPES)
774 #define HAMMER2_DEDUP_HEUR_SIZE (65536 * 4)
775 #define HAMMER2_DEDUP_HEUR_MASK (HAMMER2_DEDUP_HEUR_SIZE - 1)
777 #define HAMMER2_FLUSH_TOP 0x0001
778 #define HAMMER2_FLUSH_ALL 0x0002
782 * Hammer2 support thread element.
784 * Potentially many support threads can hang off of hammer2, primarily
785 * off the hammer2_pfs structure. Typically:
787 * td x Nodes A synchronization thread for each node.
788 * td x Nodes x workers Worker threads for frontend operations.
789 * td x 1 Bioq thread for logical buffer writes.
791 * In addition, the synchronization thread(s) associated with the
792 * super-root PFS (spmp) for a node is responsible for automatic bulkfree
793 * and dedup scans.
795 struct hammer2_thread {
796 struct hammer2_pfs *pmp;
797 struct hammer2_dev *hmp;
798 hammer2_xop_list_t xopq;
799 thread_t td;
800 uint32_t flags;
801 int depth;
802 int clindex; /* cluster element index */
803 int repidx;
804 char *scratch; /* MAXPHYS */
807 typedef struct hammer2_thread hammer2_thread_t;
809 #define HAMMER2_THREAD_UNMOUNTING 0x0001 /* unmount request */
810 #define HAMMER2_THREAD_DEV 0x0002 /* related to dev, not pfs */
811 #define HAMMER2_THREAD_WAITING 0x0004 /* thread in idle tsleep */
812 #define HAMMER2_THREAD_REMASTER 0x0008 /* remaster request */
813 #define HAMMER2_THREAD_STOP 0x0010 /* exit request */
814 #define HAMMER2_THREAD_FREEZE 0x0020 /* force idle */
815 #define HAMMER2_THREAD_FROZEN 0x0040 /* thread is frozen */
816 #define HAMMER2_THREAD_XOPQ 0x0080 /* work pending */
817 #define HAMMER2_THREAD_STOPPED 0x0100 /* thread has stopped */
818 #define HAMMER2_THREAD_UNFREEZE 0x0200
820 #define HAMMER2_THREAD_WAKEUP_MASK (HAMMER2_THREAD_UNMOUNTING | \
821 HAMMER2_THREAD_REMASTER | \
822 HAMMER2_THREAD_STOP | \
823 HAMMER2_THREAD_FREEZE | \
824 HAMMER2_THREAD_XOPQ)
827 * Support structure for dedup heuristic.
829 struct hammer2_dedup {
830 hammer2_off_t data_off;
831 uint64_t data_crc;
832 uint32_t ticks;
833 uint32_t unused03;
836 typedef struct hammer2_dedup hammer2_dedup_t;
839 * hammer2_xop - container for VOP/XOP operation (allocated, not on stack).
841 * This structure is used to distribute a VOP operation across multiple
842 * nodes. It provides a rendezvous for concurrent node execution and
843 * can be detached from the frontend operation to allow the frontend to
844 * return early.
846 * This structure also sequences operations on up to three inodes.
848 typedef void (*hammer2_xop_func_t)(hammer2_thread_t *thr,
849 union hammer2_xop *xop);
851 struct hammer2_xop_fifo {
852 TAILQ_ENTRY(hammer2_xop_head) entry;
853 hammer2_chain_t *array[HAMMER2_XOPFIFO];
854 int errors[HAMMER2_XOPFIFO];
855 int ri;
856 int wi;
857 int flags;
858 hammer2_thread_t *thr;
861 typedef struct hammer2_xop_fifo hammer2_xop_fifo_t;
863 #define HAMMER2_XOP_FIFO_RUN 0x0001
864 #define HAMMER2_XOP_FIFO_STALL 0x0002
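/*
 * Presumed ring-buffer idiom (illustrative sketch only; the real
 * producer/consumer code lives with the XOP support in hammer2_thread.c):
 * ri and wi are free-running indices masked with HAMMER2_XOPFIFO_MASK.
 */
#if 0
	/* producer: append a chain if there is room */
	if (fifo->wi - fifo->ri < HAMMER2_XOPFIFO) {
		fifo->array[fifo->wi & HAMMER2_XOPFIFO_MASK] = chain;
		fifo->errors[fifo->wi & HAMMER2_XOPFIFO_MASK] = error;
		++fifo->wi;
	}

	/* consumer: pop the next element */
	if (fifo->ri != fifo->wi) {
		chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
		error = fifo->errors[fifo->ri & HAMMER2_XOPFIFO_MASK];
		++fifo->ri;
	}
#endif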
866 struct hammer2_xop_head {
867 hammer2_xop_func_t func;
868 hammer2_tid_t mtid;
869 struct hammer2_inode *ip1;
870 struct hammer2_inode *ip2;
871 struct hammer2_inode *ip3;
872 uint32_t check_counter;
873 uint32_t run_mask;
874 uint32_t chk_mask;
875 int flags;
876 int state;
877 int error;
878 hammer2_key_t collect_key;
879 char *name1;
880 size_t name1_len;
881 char *name2;
882 size_t name2_len;
883 hammer2_xop_fifo_t collect[HAMMER2_MAXCLUSTER];
884 hammer2_cluster_t cluster; /* help collections */
887 typedef struct hammer2_xop_head hammer2_xop_head_t;
889 #define HAMMER2_XOP_CHKWAIT 0x00000001U
890 #define HAMMER2_XOP_CHKINC 0x00000002U
892 struct hammer2_xop_ipcluster {
893 hammer2_xop_head_t head;
896 struct hammer2_xop_strategy {
897 hammer2_xop_head_t head;
898 hammer2_key_t lbase;
899 int finished;
900 hammer2_mtx_t lock;
901 struct bio *bio;
904 struct hammer2_xop_readdir {
905 hammer2_xop_head_t head;
906 hammer2_key_t lkey;
909 struct hammer2_xop_nresolve {
910 hammer2_xop_head_t head;
	hammer2_key_t lhc;	/* if name is NULL, use lhc */
914 struct hammer2_xop_unlink {
915 hammer2_xop_head_t head;
916 int isdir;
917 int dopermanent;
920 struct hammer2_xop_nrename {
921 hammer2_xop_head_t head;
922 hammer2_tid_t lhc;
923 int ip_key;
926 struct hammer2_xop_scanlhc {
927 hammer2_xop_head_t head;
928 hammer2_key_t lhc;
931 struct hammer2_xop_scanall {
932 hammer2_xop_head_t head;
933 hammer2_key_t key_beg; /* inclusive */
934 hammer2_key_t key_end; /* inclusive */
935 int resolve_flags;
936 int lookup_flags;
939 struct hammer2_xop_lookup {
940 hammer2_xop_head_t head;
941 hammer2_key_t lhc;
944 struct hammer2_xop_mkdirent {
945 hammer2_xop_head_t head;
946 hammer2_dirent_head_t dirent;
947 hammer2_key_t lhc;
950 struct hammer2_xop_create {
951 hammer2_xop_head_t head;
952 hammer2_inode_meta_t meta; /* initial metadata */
953 hammer2_key_t lhc;
954 int flags;
957 struct hammer2_xop_destroy {
958 hammer2_xop_head_t head;
961 struct hammer2_xop_fsync {
962 hammer2_xop_head_t head;
963 hammer2_inode_meta_t meta;
964 hammer2_off_t osize;
965 u_int ipflags;
966 int clear_directdata;
969 struct hammer2_xop_unlinkall {
970 hammer2_xop_head_t head;
971 hammer2_key_t key_beg;
972 hammer2_key_t key_end;
975 struct hammer2_xop_connect {
976 hammer2_xop_head_t head;
977 hammer2_key_t lhc;
980 struct hammer2_xop_flush {
981 hammer2_xop_head_t head;
984 typedef struct hammer2_xop_readdir hammer2_xop_readdir_t;
985 typedef struct hammer2_xop_nresolve hammer2_xop_nresolve_t;
986 typedef struct hammer2_xop_unlink hammer2_xop_unlink_t;
987 typedef struct hammer2_xop_nrename hammer2_xop_nrename_t;
988 typedef struct hammer2_xop_ipcluster hammer2_xop_ipcluster_t;
989 typedef struct hammer2_xop_strategy hammer2_xop_strategy_t;
990 typedef struct hammer2_xop_mkdirent hammer2_xop_mkdirent_t;
991 typedef struct hammer2_xop_create hammer2_xop_create_t;
992 typedef struct hammer2_xop_destroy hammer2_xop_destroy_t;
993 typedef struct hammer2_xop_fsync hammer2_xop_fsync_t;
994 typedef struct hammer2_xop_unlinkall hammer2_xop_unlinkall_t;
995 typedef struct hammer2_xop_scanlhc hammer2_xop_scanlhc_t;
996 typedef struct hammer2_xop_scanall hammer2_xop_scanall_t;
997 typedef struct hammer2_xop_lookup hammer2_xop_lookup_t;
998 typedef struct hammer2_xop_connect hammer2_xop_connect_t;
999 typedef struct hammer2_xop_flush hammer2_xop_flush_t;
1001 union hammer2_xop {
1002 hammer2_xop_head_t head;
1003 hammer2_xop_ipcluster_t xop_ipcluster;
1004 hammer2_xop_readdir_t xop_readdir;
1005 hammer2_xop_nresolve_t xop_nresolve;
1006 hammer2_xop_unlink_t xop_unlink;
1007 hammer2_xop_nrename_t xop_nrename;
1008 hammer2_xop_strategy_t xop_strategy;
1009 hammer2_xop_mkdirent_t xop_mkdirent;
1010 hammer2_xop_create_t xop_create;
1011 hammer2_xop_destroy_t xop_destroy;
1012 hammer2_xop_fsync_t xop_fsync;
1013 hammer2_xop_unlinkall_t xop_unlinkall;
1014 hammer2_xop_scanlhc_t xop_scanlhc;
1015 hammer2_xop_scanall_t xop_scanall;
1016 hammer2_xop_lookup_t xop_lookup;
1017 hammer2_xop_flush_t xop_flush;
1018 hammer2_xop_connect_t xop_connect;
1021 typedef union hammer2_xop hammer2_xop_t;
1024 * hammer2_xop_group - Manage XOP support threads.
1026 struct hammer2_xop_group {
1027 hammer2_thread_t thrs[HAMMER2_MAXCLUSTER];
1030 typedef struct hammer2_xop_group hammer2_xop_group_t;
1033 * flags to hammer2_xop_collect()
1035 #define HAMMER2_XOP_COLLECT_NOWAIT 0x00000001
1036 #define HAMMER2_XOP_COLLECT_WAITALL 0x00000002
1039 * flags to hammer2_xop_alloc()
1041 * MODIFYING - This is a modifying transaction, allocate a mtid.
1043 #define HAMMER2_XOP_MODIFYING 0x00000001
1044 #define HAMMER2_XOP_STRATEGY 0x00000002
1047 * Global (per partition) management structure, represents a hard block
1048 * device. Typically referenced by hammer2_chain structures when applicable.
1049 * Typically not used for network-managed elements.
1051 * Note that a single hammer2_dev can be indirectly tied to multiple system
1052 * mount points. There is no direct relationship. System mounts are
1053 * per-cluster-id, not per-block-device, and a single hard mount might contain
1054 * many PFSs and those PFSs might combine together in various ways to form
1055 * the set of available clusters.
1057 struct hammer2_dev {
1058 struct vnode *devvp; /* device vnode */
1059 int ronly; /* read-only mount */
1060 int mount_count; /* number of actively mounted PFSs */
1061 TAILQ_ENTRY(hammer2_dev) mntentry; /* hammer2_mntlist */
1063 struct malloc_type *mchain;
1064 int nipstacks;
1065 int maxipstacks;
1066 kdmsg_iocom_t iocom; /* volume-level dmsg interface */
1067 struct spinlock io_spin; /* iotree, iolruq access */
1068 struct hammer2_io_tree iotree;
1069 int iofree_count;
1070 int freemap_relaxed;
1071 hammer2_chain_t vchain; /* anchor chain (topology) */
1072 hammer2_chain_t fchain; /* anchor chain (freemap) */
1073 struct spinlock list_spin;
1074 struct h2_flush_list flushq; /* flush seeds */
1075 struct hammer2_pfs *spmp; /* super-root pmp for transactions */
1076 struct lock vollk; /* lockmgr lock */
1077 struct lock bulklk; /* bulkfree operation lock */
1078 struct lock bflock; /* bulk-free manual function lock */
1079 hammer2_off_t heur_freemap[HAMMER2_FREEMAP_HEUR_SIZE];
1080 hammer2_dedup_t heur_dedup[HAMMER2_DEDUP_HEUR_SIZE];
1081 int volhdrno; /* last volhdrno written */
1082 uint32_t hflags; /* HMNT2 flags applicable to device */
1083 hammer2_off_t free_reserved; /* nominal free reserved */
1084 hammer2_thread_t bfthr; /* bulk-free thread */
1085 char devrepname[64]; /* for kprintf */
1086 hammer2_ioc_bulkfree_t bflast; /* stats for last bulkfree run */
1087 hammer2_volume_data_t voldata;
1088 hammer2_volume_data_t volsync; /* synchronized voldata */
1091 typedef struct hammer2_dev hammer2_dev_t;
/*
 * Helper functions (cluster must be locked for flags to be valid).
 */
static __inline
int
hammer2_chain_rdok(hammer2_chain_t *chain)
{
	return (chain->error == 0);
}

static __inline
int
hammer2_chain_wrok(hammer2_chain_t *chain)
{
	return (chain->error == 0 && chain->hmp->ronly == 0);
}
1111 * Per-cluster management structure. This structure will be tied to a
1112 * system mount point if the system is mounting the PFS, but is also used
1113 * to manage clusters encountered during the super-root scan or received
1114 * via LNK_SPANs that might not be mounted.
1116 * This structure is also used to represent the super-root that hangs off
1117 * of a hard mount point. The super-root is not really a cluster element.
1118 * In this case the spmp_hmp field will be non-NULL. It's just easier to do
1119 * this than to special case super-root manipulation in the hammer2_chain*
1120 * code as being only hammer2_dev-related.
 * pfs_mode and pfs_nmasters are rollup fields which critically describe
1123 * how elements of the cluster act on the cluster. pfs_mode is only applicable
1124 * when a PFS is mounted by the system. pfs_nmasters is our best guess as to
1125 * how many masters have been configured for a cluster and is always
 * applicable. pfs_types[] is an array with 1:1 correspondence to the
1127 * iroot cluster and describes the PFS types of the nodes making up the
1128 * cluster.
1130 * WARNING! Portions of this structure have deferred initialization. In
1131 * particular, if not mounted there will be no wthread.
 * Unmounted network PFSs will also be missing iroot, and numerous
 * other fields will not be initialized prior to mount.
1135 * Synchronization threads are chain-specific and only applicable
1136 * to local hard PFS entries. A hammer2_pfs structure may contain
1137 * more than one when multiple hard PFSs are present on the local
1138 * machine which require synchronization monitoring. Most PFSs
1139 * (such as snapshots) are 1xMASTER PFSs which do not need a
1140 * synchronization thread.
1142 * WARNING! The chains making up pfs->iroot's cluster are accounted for in
1143 * hammer2_dev->mount_count when the pfs is associated with a mount
1144 * point.
1146 struct hammer2_pfs {
1147 struct mount *mp;
1148 TAILQ_ENTRY(hammer2_pfs) mntentry; /* hammer2_pfslist */
1149 uuid_t pfs_clid;
1150 hammer2_dev_t *spmp_hmp; /* only if super-root pmp */
1151 hammer2_dev_t *force_local; /* only if 'local' mount */
1152 hammer2_inode_t *iroot; /* PFS root inode */
1153 uint8_t pfs_types[HAMMER2_MAXCLUSTER];
1154 char *pfs_names[HAMMER2_MAXCLUSTER];
1155 hammer2_dev_t *pfs_hmps[HAMMER2_MAXCLUSTER];
1156 hammer2_trans_t trans;
1157 struct lock lock; /* PFS lock for certain ops */
1158 struct lock lock_nlink; /* rename and nlink lock */
1159 struct netexport export; /* nfs export */
1160 int ronly; /* read-only mount */
1161 int hflags; /* pfs-specific mount flags */
1162 struct malloc_type *minode;
1163 struct malloc_type *mmsg;
1164 struct spinlock inum_spin; /* inumber lookup */
1165 struct hammer2_inode_tree inum_tree; /* (not applicable to spmp) */
1166 struct spinlock lru_spin; /* inumber lookup */
1167 struct hammer2_chain_list lru_list; /* chains on LRU */
1168 int lru_count; /* #of chains on LRU */
1169 hammer2_tid_t modify_tid; /* modify transaction id */
1170 hammer2_tid_t inode_tid; /* inode allocator */
1171 uint8_t pfs_nmasters; /* total masters */
1172 uint8_t pfs_mode; /* operating mode PFSMODE */
1173 uint8_t unused01;
1174 uint8_t unused02;
1175 int free_ticks; /* free_* calculations */
1176 long inmem_inodes;
1177 hammer2_off_t free_reserved;
1178 hammer2_off_t free_nominal;
1179 uint32_t inmem_dirty_chains;
1180 int count_lwinprog; /* logical write in prog */
1181 struct spinlock list_spin;
1182 struct h2_sideq_list sideq; /* last-close dirty/unlink */
1183 hammer2_thread_t sync_thrs[HAMMER2_MAXCLUSTER];
1184 uint32_t cluster_flags; /* cached cluster flags */
1185 int has_xop_threads;
1186 struct spinlock xop_spin; /* xop sequencer */
1187 hammer2_xop_group_t xop_groups[HAMMER2_XOPGROUPS];
1190 typedef struct hammer2_pfs hammer2_pfs_t;
1192 TAILQ_HEAD(hammer2_pfslist, hammer2_pfs);
1194 #define HAMMER2_LRU_LIMIT 1024 /* per pmp lru_list */
1196 #define HAMMER2_DIRTYCHAIN_WAITING 0x80000000
1197 #define HAMMER2_DIRTYCHAIN_MASK 0x7FFFFFFF
1199 #define HAMMER2_LWINPROG_WAITING 0x80000000
1200 #define HAMMER2_LWINPROG_WAITING0 0x40000000
1201 #define HAMMER2_LWINPROG_MASK 0x3FFFFFFF
1204 * hammer2_cluster_check
1206 #define HAMMER2_CHECK_NULL 0x00000001
1209 * Misc
1211 #if defined(_KERNEL)
1213 MALLOC_DECLARE(M_HAMMER2);
1215 #define VTOI(vp) ((hammer2_inode_t *)(vp)->v_data)
1216 #define ITOV(ip) ((ip)->vp)
1219 * Currently locked chains retain the locked buffer cache buffer for
1220 * indirect blocks, and indirect blocks can be one of two sizes. The
1221 * device buffer has to match the case to avoid deadlocking recursive
1222 * chains that might otherwise try to access different offsets within
1223 * the same device buffer.
static __inline
int
hammer2_devblkradix(int radix)
{
#if 0
	if (radix <= HAMMER2_LBUFRADIX) {
		return (HAMMER2_LBUFRADIX);
	} else {
		return (HAMMER2_PBUFRADIX);
	}
#endif
	return (HAMMER2_PBUFRADIX);
}

/*
 * XXX almost time to remove this.  DIO uses PBUFSIZE exclusively now.
 */
static __inline
size_t
hammer2_devblksize(size_t bytes)
{
#if 0
	if (bytes <= HAMMER2_LBUFSIZE) {
		return(HAMMER2_LBUFSIZE);
	} else {
		KKASSERT(bytes <= HAMMER2_PBUFSIZE &&
			 (bytes ^ (bytes - 1)) == ((bytes << 1) - 1));
		return (HAMMER2_PBUFSIZE);
	}
#endif
	return (HAMMER2_PBUFSIZE);
}
static __inline
hammer2_pfs_t *
MPTOPMP(struct mount *mp)
{
	return ((hammer2_pfs_t *)mp->mnt_data);
}
1266 #define HAMMER2_DEDUP_FRAG (HAMMER2_PBUFSIZE / 64)
1267 #define HAMMER2_DEDUP_FRAGRADIX (HAMMER2_PBUFRADIX - 6)
static __inline
uint64_t
hammer2_dedup_mask(hammer2_io_t *dio, hammer2_off_t data_off, u_int bytes)
{
	int bbeg;
	int bits;
	uint64_t mask;

	bbeg = (int)((data_off & ~HAMMER2_OFF_MASK_RADIX) - dio->pbase) >>
	       HAMMER2_DEDUP_FRAGRADIX;
	bits = (int)((bytes + (HAMMER2_DEDUP_FRAG - 1)) >>
		     HAMMER2_DEDUP_FRAGRADIX);
	mask = ((uint64_t)1 << bbeg) - 1;
	if (bbeg + bits == 64)
		mask = (uint64_t)-1;
	else
		mask = ((uint64_t)1 << (bbeg + bits)) - 1;
	mask &= ~(((uint64_t)1 << bbeg) - 1);

	return mask;
}
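/*
 * Worked example: with HAMMER2_PBUFSIZE = 65536 (HAMMER2_PBUFRADIX = 16)
 * the dedup fragment is 1024 bytes and HAMMER2_DEDUP_FRAGRADIX is 10, so
 * the 64-bit mask covers exactly one device buffer.  For a 4096-byte
 * block starting 16384 bytes into the dio:
 *
 *	bbeg = 16384 >> 10 = 16
 *	bits = (4096 + 1023) >> 10 = 4
 *	mask = (((uint64_t)1 << 20) - 1) & ~(((uint64_t)1 << 16) - 1)
 *	     = 0x00000000000F0000	(fragment bits 16-19)
 */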
static __inline
int
hammer2_error_to_errno(int error)
{
	if (error) {
		if (error & HAMMER2_ERROR_EIO)
			error = EIO;
		else if (error & HAMMER2_ERROR_CHECK)
			error = EDOM;
		else if (error & HAMMER2_ERROR_ABORTED)
			error = EINTR;
		else if (error & HAMMER2_ERROR_BADBREF)
			error = EIO;
		else if (error & HAMMER2_ERROR_ENOSPC)
			error = ENOSPC;
		else if (error & HAMMER2_ERROR_ENOENT)
			error = ENOENT;
		else if (error & HAMMER2_ERROR_ENOTEMPTY)
			error = ENOTEMPTY;
		else if (error & HAMMER2_ERROR_EAGAIN)
			error = EAGAIN;
		else if (error & HAMMER2_ERROR_ENOTDIR)
			error = ENOTDIR;
		else if (error & HAMMER2_ERROR_EISDIR)
			error = EISDIR;
		else if (error & HAMMER2_ERROR_EINPROGRESS)
			error = EINPROGRESS;
		else
			error = EDOM;
	}
	return error;
}
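/*
 * Example: the tests above are ordered, so a combined code such as
 * (HAMMER2_ERROR_EIO | HAMMER2_ERROR_CHECK) maps to EIO while a bare
 * HAMMER2_ERROR_CHECK maps to EDOM; unrecognized flag combinations also
 * fall through to EDOM.
 */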
static __inline
int
hammer2_errno_to_error(int error)
{
	switch(error) {
	case 0:
		return 0;
	case EIO:
		return HAMMER2_ERROR_EIO;
	case EINVAL:
	default:
		return HAMMER2_ERROR_EINVAL;
	}
}
1340 extern struct vop_ops hammer2_vnode_vops;
1341 extern struct vop_ops hammer2_spec_vops;
1342 extern struct vop_ops hammer2_fifo_vops;
1343 extern struct hammer2_pfslist hammer2_pfslist;
1344 extern struct lock hammer2_mntlk;
1347 extern int hammer2_debug;
1348 extern int hammer2_cluster_meta_read;
1349 extern int hammer2_cluster_data_read;
1350 extern int hammer2_cluster_write;
1351 extern int hammer2_dedup_enable;
1352 extern int hammer2_always_compress;
1353 extern int hammer2_inval_enable;
1354 extern int hammer2_flush_pipe;
1355 extern int hammer2_synchronous_flush;
1356 extern int hammer2_dio_count;
1357 extern int hammer2_limit_dio;
1358 extern int hammer2_bulkfree_tps;
1359 extern long hammer2_chain_allocs;
1360 extern long hammer2_chain_frees;
1361 extern long hammer2_limit_dirty_chains;
1362 extern long hammer2_count_modified_chains;
1363 extern long hammer2_iod_invals;
1364 extern long hammer2_iod_file_read;
1365 extern long hammer2_iod_meta_read;
1366 extern long hammer2_iod_indr_read;
1367 extern long hammer2_iod_fmap_read;
1368 extern long hammer2_iod_volu_read;
1369 extern long hammer2_iod_file_write;
1370 extern long hammer2_iod_file_wembed;
1371 extern long hammer2_iod_file_wzero;
1372 extern long hammer2_iod_file_wdedup;
1373 extern long hammer2_iod_meta_write;
1374 extern long hammer2_iod_indr_write;
1375 extern long hammer2_iod_fmap_write;
1376 extern long hammer2_iod_volu_write;
1378 extern long hammer2_check_xxhash64;
1379 extern long hammer2_check_icrc32;
1381 extern struct objcache *cache_buffer_read;
1382 extern struct objcache *cache_buffer_write;
1383 extern struct objcache *cache_xops;
1386 * hammer2_subr.c
1388 #define hammer2_icrc32(buf, size) iscsi_crc32((buf), (size))
1389 #define hammer2_icrc32c(buf, size, crc) iscsi_crc32_ext((buf), (size), (crc))
1391 int hammer2_signal_check(time_t *timep);
1392 const char *hammer2_error_str(int error);
1394 void hammer2_inode_lock(hammer2_inode_t *ip, int how);
1395 void hammer2_inode_unlock(hammer2_inode_t *ip);
1396 hammer2_chain_t *hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how);
1397 hammer2_chain_t *hammer2_inode_chain_and_parent(hammer2_inode_t *ip,
1398 int clindex, hammer2_chain_t **parentp, int how);
1399 hammer2_mtx_state_t hammer2_inode_lock_temp_release(hammer2_inode_t *ip);
1400 void hammer2_inode_lock_temp_restore(hammer2_inode_t *ip,
1401 hammer2_mtx_state_t ostate);
1402 int hammer2_inode_lock_upgrade(hammer2_inode_t *ip);
1403 void hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int);
1405 void hammer2_dev_exlock(hammer2_dev_t *hmp);
1406 void hammer2_dev_shlock(hammer2_dev_t *hmp);
1407 void hammer2_dev_unlock(hammer2_dev_t *hmp);
1409 int hammer2_get_dtype(uint8_t type);
1410 int hammer2_get_vtype(uint8_t type);
1411 uint8_t hammer2_get_obj_type(enum vtype vtype);
1412 void hammer2_time_to_timespec(uint64_t xtime, struct timespec *ts);
1413 uint64_t hammer2_timespec_to_time(const struct timespec *ts);
1414 uint32_t hammer2_to_unix_xid(const uuid_t *uuid);
1415 void hammer2_guid_to_uuid(uuid_t *uuid, uint32_t guid);
1416 void hammer2_trans_manage_init(hammer2_pfs_t *pmp);
1418 hammer2_key_t hammer2_dirhash(const unsigned char *name, size_t len);
1419 int hammer2_getradix(size_t bytes);
1421 int hammer2_calc_logical(hammer2_inode_t *ip, hammer2_off_t uoff,
1422 hammer2_key_t *lbasep, hammer2_key_t *leofp);
1423 int hammer2_calc_physical(hammer2_inode_t *ip, hammer2_key_t lbase);
1424 void hammer2_update_time(uint64_t *timep);
1425 void hammer2_adjreadcounter(hammer2_blockref_t *bref, size_t bytes);
1428 * hammer2_inode.c
1430 struct vnode *hammer2_igetv(hammer2_inode_t *ip, int *errorp);
1431 hammer2_inode_t *hammer2_inode_lookup(hammer2_pfs_t *pmp,
1432 hammer2_tid_t inum);
1433 hammer2_inode_t *hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_inode_t *dip,
1434 hammer2_cluster_t *cluster, int idx);
1435 void hammer2_inode_free(hammer2_inode_t *ip);
1436 void hammer2_inode_ref(hammer2_inode_t *ip);
1437 void hammer2_inode_drop(hammer2_inode_t *ip);
1438 void hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_inode_t *pip,
1439 hammer2_cluster_t *cluster);
1440 void hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
1441 int idx);
1442 void hammer2_inode_modify(hammer2_inode_t *ip);
1443 void hammer2_inode_run_sideq(hammer2_pfs_t *pmp);
1445 hammer2_inode_t *hammer2_inode_create(hammer2_inode_t *dip,
1446 hammer2_inode_t *pip,
1447 struct vattr *vap, struct ucred *cred,
1448 const uint8_t *name, size_t name_len, hammer2_key_t lhc,
1449 hammer2_key_t inum, uint8_t type, uint8_t target_type,
1450 int flags, int *errorp);
1451 void hammer2_inode_chain_sync(hammer2_inode_t *ip);
1452 int hammer2_inode_unlink_finisher(hammer2_inode_t *ip, int isopen);
1453 int hammer2_dirent_create(hammer2_inode_t *dip, const char *name,
1454 size_t name_len, hammer2_key_t inum, uint8_t type);
1457 * hammer2_chain.c
1459 void hammer2_voldata_lock(hammer2_dev_t *hmp);
1460 void hammer2_voldata_unlock(hammer2_dev_t *hmp);
1461 void hammer2_voldata_modify(hammer2_dev_t *hmp);
1462 hammer2_chain_t *hammer2_chain_alloc(hammer2_dev_t *hmp,
1463 hammer2_pfs_t *pmp,
1464 hammer2_blockref_t *bref);
1465 void hammer2_chain_core_init(hammer2_chain_t *chain);
1466 void hammer2_chain_ref(hammer2_chain_t *chain);
1467 void hammer2_chain_ref_hold(hammer2_chain_t *chain);
1468 void hammer2_chain_drop(hammer2_chain_t *chain);
1469 void hammer2_chain_drop_unhold(hammer2_chain_t *chain);
1470 void hammer2_chain_lock(hammer2_chain_t *chain, int how);
1471 void hammer2_chain_lock_unhold(hammer2_chain_t *chain, int how);
1472 #if 0
1473 void hammer2_chain_push_shared_lock(hammer2_chain_t *chain);
1474 void hammer2_chain_pull_shared_lock(hammer2_chain_t *chain);
1475 #endif
1476 void hammer2_chain_load_data(hammer2_chain_t *chain);
1477 const hammer2_media_data_t *hammer2_chain_rdata(hammer2_chain_t *chain);
1478 hammer2_media_data_t *hammer2_chain_wdata(hammer2_chain_t *chain);
1479 int hammer2_chain_snapshot(hammer2_chain_t *chain, hammer2_ioc_pfs_t *pmp,
1480 hammer2_tid_t mtid);
1482 int hammer2_chain_inode_find(hammer2_pfs_t *pmp, hammer2_key_t inum,
1483 int clindex, int flags,
1484 hammer2_chain_t **parentp,
1485 hammer2_chain_t **chainp);
1486 int hammer2_chain_modify(hammer2_chain_t *chain, hammer2_tid_t mtid,
1487 hammer2_off_t dedup_off, int flags);
1488 int hammer2_chain_modify_ip(hammer2_inode_t *ip, hammer2_chain_t *chain,
1489 hammer2_tid_t mtid, int flags);
1490 int hammer2_chain_resize(hammer2_chain_t *chain,
1491 hammer2_tid_t mtid, hammer2_off_t dedup_off,
1492 int nradix, int flags);
1493 void hammer2_chain_unlock(hammer2_chain_t *chain);
1494 void hammer2_chain_unlock_hold(hammer2_chain_t *chain);
1495 void hammer2_chain_wait(hammer2_chain_t *chain);
1496 hammer2_chain_t *hammer2_chain_get(hammer2_chain_t *parent, int generation,
1497 hammer2_blockref_t *bref);
1498 hammer2_chain_t *hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags);
1499 void hammer2_chain_lookup_done(hammer2_chain_t *parent);
1500 hammer2_chain_t *hammer2_chain_getparent(hammer2_chain_t *chain, int how);
1501 hammer2_chain_t *hammer2_chain_repparent(hammer2_chain_t **chainp, int how);
1502 hammer2_chain_t *hammer2_chain_lookup(hammer2_chain_t **parentp,
1503 hammer2_key_t *key_nextp,
1504 hammer2_key_t key_beg, hammer2_key_t key_end,
1505 int *errorp, int flags);
1506 hammer2_chain_t *hammer2_chain_next(hammer2_chain_t **parentp,
1507 hammer2_chain_t *chain,
1508 hammer2_key_t *key_nextp,
1509 hammer2_key_t key_beg, hammer2_key_t key_end,
1510 int *errorp, int flags);
1511 int hammer2_chain_scan(hammer2_chain_t *parent,
1512 hammer2_chain_t **chainp,
1513 hammer2_blockref_t *bref,
1514 int *firstp, int flags);
1516 int hammer2_chain_create(hammer2_chain_t **parentp, hammer2_chain_t **chainp,
1517 hammer2_pfs_t *pmp, int methods,
1518 hammer2_key_t key, int keybits,
1519 int type, size_t bytes, hammer2_tid_t mtid,
1520 hammer2_off_t dedup_off, int flags);
1521 void hammer2_chain_rename(hammer2_blockref_t *bref,
1522 hammer2_chain_t **parentp,
1523 hammer2_chain_t *chain,
1524 hammer2_tid_t mtid, int flags);
1525 int hammer2_chain_delete(hammer2_chain_t *parent, hammer2_chain_t *chain,
1526 hammer2_tid_t mtid, int flags);
1527 int hammer2_chain_indirect_maintenance(hammer2_chain_t *parent,
1528 hammer2_chain_t *chain);
1529 void hammer2_chain_setflush(hammer2_chain_t *chain);
1530 void hammer2_chain_countbrefs(hammer2_chain_t *chain,
1531 hammer2_blockref_t *base, int count);
1532 hammer2_chain_t *hammer2_chain_bulksnap(hammer2_dev_t *hmp);
1533 void hammer2_chain_bulkdrop(hammer2_chain_t *copy);
1535 void hammer2_chain_setcheck(hammer2_chain_t *chain, void *bdata);
1536 int hammer2_chain_testcheck(hammer2_chain_t *chain, void *bdata);
1537 int hammer2_chain_dirent_test(hammer2_chain_t *chain, const char *name,
1538 size_t name_len);
1540 void hammer2_pfs_memory_wait(hammer2_pfs_t *pmp);
1541 void hammer2_pfs_memory_inc(hammer2_pfs_t *pmp);
1542 void hammer2_pfs_memory_wakeup(hammer2_pfs_t *pmp);
1544 void hammer2_base_delete(hammer2_chain_t *chain,
1545 hammer2_blockref_t *base, int count,
1546 hammer2_chain_t *child);
1547 void hammer2_base_insert(hammer2_chain_t *chain,
1548 hammer2_blockref_t *base, int count,
1549 hammer2_chain_t *child,
1550 hammer2_blockref_t *elm);
1553 * hammer2_flush.c
1555 int hammer2_flush(hammer2_chain_t *chain, int istop);
1556 void hammer2_delayed_flush(hammer2_chain_t *chain);
1559 * hammer2_trans.c
1561 void hammer2_trans_init(hammer2_pfs_t *pmp, uint32_t flags);
1562 hammer2_tid_t hammer2_trans_sub(hammer2_pfs_t *pmp);
1563 void hammer2_trans_done(hammer2_pfs_t *pmp);
1564 hammer2_tid_t hammer2_trans_newinum(hammer2_pfs_t *pmp);
1565 void hammer2_trans_assert_strategy(hammer2_pfs_t *pmp);
1566 void hammer2_dedup_record(hammer2_chain_t *chain, hammer2_io_t *dio,
1567 char *data);
1570 * hammer2_ioctl.c
1572 int hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data,
1573 int fflag, struct ucred *cred);

/*
 * hammer2_io.c
 */
void hammer2_io_putblk(hammer2_io_t **diop);
void hammer2_io_inval(hammer2_io_t *dio, hammer2_off_t data_off, u_int bytes);
void hammer2_io_cleanup(hammer2_dev_t *hmp, struct hammer2_io_tree *tree);
char *hammer2_io_data(hammer2_io_t *dio, off_t lbase);
hammer2_io_t *hammer2_io_getblk(hammer2_dev_t *hmp, int btype, off_t lbase,
                int lsize, int op);
void hammer2_io_dedup_set(hammer2_dev_t *hmp, hammer2_blockref_t *bref);
void hammer2_io_dedup_delete(hammer2_dev_t *hmp, uint8_t btype,
                hammer2_off_t data_off, u_int bytes);
void hammer2_io_dedup_assert(hammer2_dev_t *hmp, hammer2_off_t data_off,
                u_int bytes);
void hammer2_io_callback(struct bio *bio);
int hammer2_io_new(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
                hammer2_io_t **diop);
int hammer2_io_newnz(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
                hammer2_io_t **diop);
int hammer2_io_bread(hammer2_dev_t *hmp, int btype, off_t lbase, int lsize,
                hammer2_io_t **diop);
hammer2_io_t *hammer2_io_getquick(hammer2_dev_t *hmp, off_t lbase, int lsize);
void hammer2_io_bawrite(hammer2_io_t **diop);
void hammer2_io_bdwrite(hammer2_io_t **diop);
int hammer2_io_bwrite(hammer2_io_t **diop);
void hammer2_io_setdirty(hammer2_io_t *dio);
void hammer2_io_brelse(hammer2_io_t **diop);
void hammer2_io_bqrelse(hammer2_io_t **diop);
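
/*
 * Illustrative sketch, assuming conventional usage: device buffers are
 * accessed through hammer2_io_t references.  A simple read (and optional
 * modify) cycle might look roughly like this; names are hypothetical and
 * error handling is elided.
 *
 *      hammer2_io_t *dio = NULL;
 *      error = hammer2_io_bread(hmp, btype, lbase, lsize, &dio);
 *      data = hammer2_io_data(dio, lbase);
 *      ... read or modify data, calling hammer2_io_setdirty(dio)
 *          if the buffer content was changed ...
 *      hammer2_io_bqrelse(&dio);
 */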

/*
 * hammer2_thread.c
 */
void hammer2_thr_signal(hammer2_thread_t *thr, uint32_t flags);
void hammer2_thr_signal2(hammer2_thread_t *thr,
                uint32_t pflags, uint32_t nflags);
void hammer2_thr_wait(hammer2_thread_t *thr, uint32_t flags);
void hammer2_thr_wait_neg(hammer2_thread_t *thr, uint32_t flags);
int hammer2_thr_wait_any(hammer2_thread_t *thr, uint32_t flags, int timo);
void hammer2_thr_create(hammer2_thread_t *thr,
                hammer2_pfs_t *pmp, hammer2_dev_t *hmp,
                const char *id, int clindex, int repidx,
                void (*func)(void *arg));
void hammer2_thr_delete(hammer2_thread_t *thr);
void hammer2_thr_remaster(hammer2_thread_t *thr);
void hammer2_thr_freeze_async(hammer2_thread_t *thr);
void hammer2_thr_freeze(hammer2_thread_t *thr);
void hammer2_thr_unfreeze(hammer2_thread_t *thr);
int hammer2_thr_break(hammer2_thread_t *thr);
void hammer2_primary_xops_thread(void *arg);
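
/*
 * Illustrative sketch only: a support thread is created against a PFS (or
 * a device) with a backend function, and can later be frozen and deleted.
 * The "xopthr" variable, NULL hmp, thread name, and indices below are
 * hypothetical.
 *
 *      hammer2_thr_create(&xopthr, pmp, NULL, "h2xop", clindex, repidx,
 *                         hammer2_primary_xops_thread);
 *      ...
 *      hammer2_thr_freeze(&xopthr);
 *      hammer2_thr_delete(&xopthr);
 */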

/*
 * hammer2_thread.c (XOP API)
 */
void hammer2_xop_group_init(hammer2_pfs_t *pmp, hammer2_xop_group_t *xgrp);
void *hammer2_xop_alloc(hammer2_inode_t *ip, int flags);
void hammer2_xop_setname(hammer2_xop_head_t *xop,
                const char *name, size_t name_len);
void hammer2_xop_setname2(hammer2_xop_head_t *xop,
                const char *name, size_t name_len);
size_t hammer2_xop_setname_inum(hammer2_xop_head_t *xop, hammer2_key_t inum);
void hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2);
void hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3);
void hammer2_xop_reinit(hammer2_xop_head_t *xop);
void hammer2_xop_helper_create(hammer2_pfs_t *pmp);
void hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp);
void hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_func_t func);
void hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_func_t func,
                int notidx);
int hammer2_xop_collect(hammer2_xop_head_t *xop, int flags);
void hammer2_xop_retire(hammer2_xop_head_t *xop, uint32_t mask);
int hammer2_xop_active(hammer2_xop_head_t *xop);
int hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
                int clindex, int error);
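
/*
 * Illustrative sketch of the front-end XOP pattern (indicative only; the
 * xop type, flags, and backend shown are examples, not a fixed recipe):
 * allocate an XOP against an inode, fill in its parameters, start it on
 * the cluster backends, collect the results, then retire it.
 *
 *      hammer2_xop_nresolve_t *xop;
 *
 *      xop = hammer2_xop_alloc(dip, 0);
 *      hammer2_xop_setname(&xop->head, name, name_len);
 *      hammer2_xop_start(&xop->head, hammer2_xop_nresolve);
 *      error = hammer2_xop_collect(&xop->head, 0);
 *      ... on success, examine the collected cluster in xop->head ...
 *      hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */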

/*
 * hammer2_synchro.c
 */
void hammer2_primary_sync_thread(void *arg);

/*
 * XOP backends in hammer2_xops.c, primarily for VNOPS. Other XOP backends
 * may be integrated into other source files.
 */
void hammer2_xop_ipcluster(hammer2_thread_t *thr, hammer2_xop_t *xop);
void hammer2_xop_readdir(hammer2_thread_t *thr, hammer2_xop_t *xop);
void hammer2_xop_nresolve(hammer2_thread_t *thr, hammer2_xop_t *xop);
void hammer2_xop_unlink(hammer2_thread_t *thr, hammer2_xop_t *xop);
void hammer2_xop_nrename(hammer2_thread_t *thr, hammer2_xop_t *xop);
void hammer2_xop_scanlhc(hammer2_thread_t *thr, hammer2_xop_t *xop);
void hammer2_xop_scanall(hammer2_thread_t *thr, hammer2_xop_t *xop);
void hammer2_xop_lookup(hammer2_thread_t *thr, hammer2_xop_t *xop);
void hammer2_inode_xop_mkdirent(hammer2_thread_t *thr, hammer2_xop_t *xop);
void hammer2_inode_xop_create(hammer2_thread_t *thr, hammer2_xop_t *xop);
void hammer2_inode_xop_destroy(hammer2_thread_t *thr, hammer2_xop_t *xop);
void hammer2_inode_xop_chain_sync(hammer2_thread_t *thr, hammer2_xop_t *xop);
void hammer2_inode_xop_unlinkall(hammer2_thread_t *thr, hammer2_xop_t *xop);
void hammer2_inode_xop_connect(hammer2_thread_t *thr, hammer2_xop_t *xop);
void hammer2_inode_xop_flush(hammer2_thread_t *thr, hammer2_xop_t *xop);

/*
 * hammer2_msgops.c
 */
int hammer2_msg_dbg_rcvmsg(kdmsg_msg_t *msg);
int hammer2_msg_adhoc_input(kdmsg_msg_t *msg);

/*
 * hammer2_vfsops.c
 */
void hammer2_volconf_update(hammer2_dev_t *hmp, int index);
void hammer2_dump_chain(hammer2_chain_t *chain, int tab, int *countp, char pfx);
int hammer2_vfs_sync(struct mount *mp, int waitflags);
int hammer2_vfs_enospace(hammer2_inode_t *ip, off_t bytes, struct ucred *cred);

hammer2_pfs_t *hammer2_pfsalloc(hammer2_chain_t *chain,
                const hammer2_inode_data_t *ripdata,
                hammer2_tid_t modify_tid,
                hammer2_dev_t *force_local);
void hammer2_pfsdealloc(hammer2_pfs_t *pmp, int clindex, int destroying);
int hammer2_vfs_vget(struct mount *mp, struct vnode *dvp,
                ino_t ino, struct vnode **vpp);

void hammer2_lwinprog_ref(hammer2_pfs_t *pmp);
void hammer2_lwinprog_drop(hammer2_pfs_t *pmp);
void hammer2_lwinprog_wait(hammer2_pfs_t *pmp, int pipe);

/*
 * hammer2_freemap.c
 */
int hammer2_freemap_alloc(hammer2_chain_t *chain, size_t bytes);
void hammer2_freemap_adjust(hammer2_dev_t *hmp,
                hammer2_blockref_t *bref, int how);

/*
 * hammer2_cluster.c
 */
uint8_t hammer2_cluster_type(hammer2_cluster_t *cluster);
const hammer2_media_data_t *hammer2_cluster_rdata(hammer2_cluster_t *cluster);
hammer2_media_data_t *hammer2_cluster_wdata(hammer2_cluster_t *cluster);
hammer2_cluster_t *hammer2_cluster_from_chain(hammer2_chain_t *chain);
void hammer2_cluster_bref(hammer2_cluster_t *cluster, hammer2_blockref_t *bref);
hammer2_cluster_t *hammer2_cluster_alloc(hammer2_pfs_t *pmp,
                hammer2_blockref_t *bref);
void hammer2_cluster_ref(hammer2_cluster_t *cluster);
void hammer2_cluster_drop(hammer2_cluster_t *cluster);
void hammer2_cluster_lock(hammer2_cluster_t *cluster, int how);
int hammer2_cluster_check(hammer2_cluster_t *cluster, hammer2_key_t lokey,
                int flags);
void hammer2_cluster_resolve(hammer2_cluster_t *cluster);
void hammer2_cluster_forcegood(hammer2_cluster_t *cluster);
void hammer2_cluster_unlock(hammer2_cluster_t *cluster);
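
/*
 * Illustrative sketch, assuming the conventional ref/lock pairing: a
 * cluster is referenced, locked, examined through its read-only data,
 * then unlocked and dropped.  HAMMER2_RESOLVE_ALWAYS is one possible
 * "how" argument.
 *
 *      hammer2_cluster_ref(cluster);
 *      hammer2_cluster_lock(cluster, HAMMER2_RESOLVE_ALWAYS);
 *      rdata = hammer2_cluster_rdata(cluster);
 *      ... inspect rdata ...
 *      hammer2_cluster_unlock(cluster);
 *      hammer2_cluster_drop(cluster);
 */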

void hammer2_bulkfree_init(hammer2_dev_t *hmp);
void hammer2_bulkfree_uninit(hammer2_dev_t *hmp);
int hammer2_bulkfree_pass(hammer2_dev_t *hmp, hammer2_chain_t *vchain,
                struct hammer2_ioc_bulkfree *bfi);
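
/*
 * Illustrative sketch, a guess at typical sequencing rather than a
 * documented contract: a bulkfree pass is normally driven from the
 * BULKFREE ioctl, scanning a snapshot of the volume root topology
 * against the request described by bfi (struct hammer2_ioc_bulkfree).
 *
 *      vchain = hammer2_chain_bulksnap(hmp);
 *      error = hammer2_bulkfree_pass(hmp, vchain, bfi);
 *      hammer2_chain_bulkdrop(vchain);
 */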

/*
 * hammer2_iocom.c
 */
void hammer2_iocom_init(hammer2_dev_t *hmp);
void hammer2_iocom_uninit(hammer2_dev_t *hmp);
void hammer2_cluster_reconnect(hammer2_dev_t *hmp, struct file *fp);

/*
 * hammer2_strategy.c
 */
int hammer2_vop_strategy(struct vop_strategy_args *ap);
int hammer2_vop_bmap(struct vop_bmap_args *ap);
void hammer2_write_thread(void *arg);
void hammer2_bioq_sync(hammer2_pfs_t *pmp);
void hammer2_dedup_clear(hammer2_dev_t *hmp);

#endif /* !_KERNEL */
#endif /* !_VFS_HAMMER2_HAMMER2_H_ */