/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer.h,v 1.81 2008/06/11 22:33:21 dillon Exp $
 */
/*
 * This header file contains structures used internally by the HAMMERFS
 * implementation.  See hammer_disk.h for on-disk structures.
 */
#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/tree.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mountctl.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/globaldata.h>
#include <sys/lockf.h>
#include <sys/buf.h>
#include <sys/queue.h>

#include <sys/buf2.h>
#include <sys/signal2.h>
#include "hammer_disk.h"
#include "hammer_mount.h"
#include "hammer_ioctl.h"
#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

MALLOC_DECLARE(M_HAMMER);

struct hammer_mount;
/*
 * Key structure used for custom RB tree inode lookups.  This prototypes
 * the function hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).
 */
typedef struct hammer_inode_info {
	int64_t		obj_id;		/* (key) object identifier */
	hammer_tid_t	obj_asof;	/* (key) snapshot transid or 0 */
} *hammer_inode_info_t;
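
/*
 * Editorial sketch (not part of the original header): an as-of lookup
 * key is simply the object id paired with the snapshot transaction id,
 * with obj_asof = 0 meaning "the current version".
 */
static __inline struct hammer_inode_info
hammer_make_inode_info_example(int64_t obj_id, hammer_tid_t obj_asof)
{
	struct hammer_inode_info info;

	info.obj_id = obj_id;		/* which object */
	info.obj_asof = obj_asof;	/* which point in history, 0 = now */
	return(info);
}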
typedef enum hammer_transaction_type {
	HAMMER_TRANS_RO,
	HAMMER_TRANS_STD,
	HAMMER_TRANS_FLS
} hammer_transaction_type_t;
/*
 * HAMMER Transaction tracking
 */
struct hammer_transaction {
	hammer_transaction_type_t type;
	struct hammer_mount *hmp;
	hammer_tid_t	tid;
	hammer_tid_t	time;
	int		sync_lock_refs;
	struct hammer_volume *rootvol;
};

typedef struct hammer_transaction *hammer_transaction_t;
/*
 * HAMMER locks
 */
struct hammer_lock {
	int	refs;		/* active references delay writes */
	int	lockcount;	/* lock count for exclusive/shared access */
	int	wanted;
	struct thread *locktd;
};
static __inline int
hammer_islocked(struct hammer_lock *lock)
{
	return(lock->lockcount != 0);
}

static __inline int
hammer_isactive(struct hammer_lock *lock)
{
	return(lock->refs != 0);
}

static __inline int
hammer_islastref(struct hammer_lock *lock)
{
	return(lock->refs == 1);
}

/*
 * Return if we specifically own the lock exclusively.
 */
static __inline int
hammer_lock_excl_owned(struct hammer_lock *lock, thread_t td)
{
	if (lock->lockcount > 0 && lock->locktd == td)
		return(1);
	return(0);
}
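
/*
 * Editorial sketch (not part of the original header): the predicates
 * above are meant for assertions.  A hypothetical helper verifying that
 * a given thread holds the lock exclusively might look like this.
 */
static __inline void
hammer_assert_lock_owned_example(struct hammer_lock *lock, thread_t td)
{
	KKASSERT(hammer_islocked(lock));
	KKASSERT(hammer_lock_excl_owned(lock, td));
}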
/*
 * Flush state, used by various structures
 */
typedef enum hammer_inode_state {
	HAMMER_FST_IDLE,
	HAMMER_FST_SETUP,
	HAMMER_FST_FLUSH
} hammer_inode_state_t;

TAILQ_HEAD(hammer_record_list, hammer_record);
/*
 * Cache object ids.  A fixed number of objid cache structures are
 * created to reserve object id's for newly created files in multiples
 * of 100,000, localized to a particular directory, and recycled as
 * needed.  This allows parallel create operations in different
 * directories to retain fairly localized object ids which in turn
 * improves reblocking performance and layout.
 */
#define OBJID_CACHE_SIZE	128
#define OBJID_CACHE_BULK	100000

typedef struct hammer_objid_cache {
	TAILQ_ENTRY(hammer_objid_cache) entry;
	struct hammer_inode	*dip;
	hammer_tid_t		next_tid;
	int			count;
} *hammer_objid_cache_t;
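
/*
 * Worked example (editorial note): with OBJID_CACHE_BULK = 100000, one
 * cache entry bound to directory A might hand out ids 500000-599999
 * while a second entry bound to directory B hands out 600000-699999.
 * Parallel creates in A and B thus draw from disjoint, contiguous id
 * bands, keeping each directory's B-Tree elements clustered.
 */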
/*
 * Structure used to represent an inode in-memory.
 *
 * The record and data associated with an inode may be out of sync with
 * the disk (xDIRTY flags), or not even on the disk at all (ONDISK flag
 * clear).
 *
 * An inode may also hold a cache of unsynchronized records, used for
 * database and directories only.  Unsynchronized regular file data is
 * stored in the buffer cache.
 *
 * NOTE: A file which is created and destroyed within the initial
 * synchronization period can wind up not doing any disk I/O at all.
 *
 * Finally, an inode may cache numerous disk-referencing B-Tree cursors.
 */
struct hammer_ino_rb_tree;
struct hammer_inode;
RB_HEAD(hammer_ino_rb_tree, hammer_inode);
RB_PROTOTYPEX(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
	      hammer_ino_rb_compare, hammer_inode_info_t);

struct hammer_rec_rb_tree;
struct hammer_record;
RB_HEAD(hammer_rec_rb_tree, hammer_record);
RB_PROTOTYPEX(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
	      hammer_rec_rb_compare, hammer_btree_leaf_elm_t);

TAILQ_HEAD(hammer_node_list, hammer_node);
struct hammer_inode {
	RB_ENTRY(hammer_inode)	rb_node;
	hammer_inode_state_t	flush_state;
	int			flush_group;
	TAILQ_ENTRY(hammer_inode) flush_entry;
	struct hammer_record_list target_list;	/* target of dependent recs */
	u_int64_t		obj_id;		/* (key) object identifier */
	hammer_tid_t		obj_asof;	/* (key) snapshot or 0 */
	struct hammer_mount	*hmp;
	hammer_objid_cache_t	objid_cache;
	int			flags;
	int			error;		/* flush error */
	int			cursor_ip_refs;	/* sanity */
	int			rsv_databufs;
	int			rsv_recs;
	int			idle_wakeup;
	struct vnode		*vp;
	struct lockf		advlock;
	struct hammer_lock	lock;		/* sync copy interlock */
	off_t			trunc_off;
	struct hammer_btree_leaf_elm ino_leaf;	/* in-memory cache */
	struct hammer_inode_data ino_data;	/* in-memory cache */
	struct hammer_rec_rb_tree rec_tree;	/* in-memory cache */
	struct hammer_node	*cache[2];	/* search initiate cache */

	/*
	 * When a demark is created to synchronize an inode to
	 * disk, certain fields are copied so the front-end VOPs
	 * can continue to run in parallel with the synchronization
	 * occurring in the background.
	 */
	int		sync_flags;		/* to-sync flags cache */
	off_t		sync_trunc_off;		/* to-sync truncation */
	struct hammer_btree_leaf_elm sync_ino_leaf; /* to-sync cache */
	struct hammer_inode_data sync_ino_data;	/* to-sync cache */
};

typedef struct hammer_inode *hammer_inode_t;
#define VTOI(vp)	((struct hammer_inode *)(vp)->v_data)

#define HAMMER_INODE_DDIRTY	0x0001	/* in-memory ino_data is dirty */
#define HAMMER_INODE_RSV_INODES	0x0002	/* hmp->rsv_inodes bumped */
#define HAMMER_INODE_ITIMES	0x0004	/* in-memory mtime/atime modified */
#define HAMMER_INODE_XDIRTY	0x0008	/* in-memory records */
#define HAMMER_INODE_ONDISK	0x0010	/* inode is on-disk (else not yet) */
#define HAMMER_INODE_FLUSH	0x0020	/* flush on last ref */
#define HAMMER_INODE_DELETED	0x0080	/* inode delete (backend) */
#define HAMMER_INODE_DELONDISK	0x0100	/* delete synchronized to disk */
#define HAMMER_INODE_RO		0x0200	/* read-only (because of as-of) */
#define HAMMER_INODE_VHELD	0x0400	/* vnode held on sync */
#define HAMMER_INODE_DONDISK	0x0800	/* data records may be on disk */
#define HAMMER_INODE_BUFS	0x1000	/* dirty high level bps present */
#define HAMMER_INODE_REFLUSH	0x2000	/* pipelined flush during flush */
#define HAMMER_INODE_RECLAIM	0x4000	/* trying to reclaim */
#define HAMMER_INODE_FLUSHW	0x8000	/* someone waiting for flush */

#define HAMMER_INODE_TRUNCATED	0x00010000
#define HAMMER_INODE_DELETING	0x00020000 /* inode delete request (frontend)*/
#define HAMMER_INODE_RESIGNAL	0x00040000 /* re-signal on re-flush */
#define HAMMER_INODE_PARTIALW	0x00080000 /* wait partial record flush */

#define HAMMER_INODE_MODMASK	(HAMMER_INODE_DDIRTY|			\
				 HAMMER_INODE_XDIRTY|HAMMER_INODE_BUFS|	\
				 HAMMER_INODE_ITIMES|HAMMER_INODE_TRUNCATED|\
				 HAMMER_INODE_DELETING)

#define HAMMER_INODE_MODMASK_NOXDIRTY \
				(HAMMER_INODE_MODMASK & ~HAMMER_INODE_XDIRTY)
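
/*
 * Editorial sketch (not part of the original header): MODMASK collects
 * every flag that marks frontend-visible modifications, so "does this
 * inode have anything to flush" reduces to a single mask test.
 */
static __inline int
hammer_inode_modified_example(hammer_inode_t ip)
{
	return((ip->flags & HAMMER_INODE_MODMASK) != 0);
}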
#define HAMMER_MAX_INODE_CURSORS	4

#define HAMMER_FLUSH_SIGNAL	0x0001
#define HAMMER_FLUSH_RECURSION	0x0002

#define HAMMER_RECLAIM_MIN	1000	/* absolute value */
#define HAMMER_RECLAIM_MID	2000	/* absolute value */
#define HAMMER_RECLAIM_MAX	3000	/* absolute value */
/*
 * Structure used to represent an unsynchronized record in-memory.  These
 * records typically represent directory entries.  Only non-historical
 * records are kept in-memory.
 *
 * Records are organized as a per-inode RB-Tree.  If the inode is not
 * on disk then neither are any records and the in-memory record tree
 * represents the entire contents of the inode.  If the inode is on disk
 * then the on-disk B-Tree is scanned in parallel with the in-memory
 * RB-Tree to synthesize the current state of the file.
 *
 * Records are also used to enforce the ordering of directory create/delete
 * operations.  A new inode will not be flushed to disk unless its related
 * directory entry is also being flushed at the same time.  A directory entry
 * will not be removed unless its related inode is also being removed at the
 * same time.
 */
typedef enum hammer_record_type {
	HAMMER_MEM_RECORD_GENERAL,	/* misc record */
	HAMMER_MEM_RECORD_INODE,	/* inode record */
	HAMMER_MEM_RECORD_ADD,		/* positive memory cache record */
	HAMMER_MEM_RECORD_DEL,		/* negative delete-on-disk record */
	HAMMER_MEM_RECORD_DATA		/* bulk-data record w/on-disk ref */
} hammer_record_type_t;
struct hammer_record {
	RB_ENTRY(hammer_record)		rb_node;
	TAILQ_ENTRY(hammer_record)	target_entry;
	hammer_inode_state_t		flush_state;
	int				flush_group;
	hammer_record_type_t		type;
	struct hammer_lock		lock;
	struct hammer_reserve		*resv;
	struct hammer_inode		*ip;
	struct hammer_inode		*target_ip;
	struct hammer_btree_leaf_elm	leaf;
	union hammer_data_ondisk	*data;
	int				flags;
};

typedef struct hammer_record *hammer_record_t;
/*
 * Record flags.  Note that FE can only be set by the frontend if the
 * record has not been interlocked by the backend w/ BE.
 */
#define HAMMER_RECF_ALLOCDATA		0x0001
#define HAMMER_RECF_ONRBTREE		0x0002
#define HAMMER_RECF_DELETED_FE		0x0004	/* deleted (frontend) */
#define HAMMER_RECF_DELETED_BE		0x0008	/* deleted (backend) */
#define HAMMER_RECF_UNUSED0010		0x0010
#define HAMMER_RECF_INTERLOCK_BE	0x0020	/* backend interlock */
#define HAMMER_RECF_WANTED		0x0040	/* wanted by the frontend */
#define HAMMER_RECF_WANTIDLE		0x0080	/* wanted when idle */
#define HAMMER_RECF_CONVERT_DELETE	0x0100	/* special case */
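
/*
 * Editorial sketch (not part of the original header): a record is dead
 * once either side has marked it deleted; testing both flags with one
 * mask is the obvious idiom.
 */
static __inline int
hammer_record_deleted_example(hammer_record_t record)
{
	return((record->flags &
		(HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE)) != 0);
}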
/*
 * In-memory structures representing on-disk structures.
 */
struct hammer_volume;
struct hammer_buffer;
struct hammer_node;
struct hammer_undo;
struct hammer_reserve;

RB_HEAD(hammer_vol_rb_tree, hammer_volume);
RB_HEAD(hammer_buf_rb_tree, hammer_buffer);
RB_HEAD(hammer_nod_rb_tree, hammer_node);
RB_HEAD(hammer_und_rb_tree, hammer_undo);
RB_HEAD(hammer_res_rb_tree, hammer_reserve);

RB_PROTOTYPE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	      hammer_vol_rb_compare, int32_t);
RB_PROTOTYPE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	      hammer_buf_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_nod_rb_tree, hammer_node, rb_node,
	      hammer_nod_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_und_rb_tree, hammer_undo, rb_node,
	      hammer_und_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_res_rb_tree, hammer_reserve, rb_node,
	      hammer_res_rb_compare, hammer_off_t);
/*
 * IO management - embedded at the head of various in-memory structures
 *
 * VOLUME	- hammer_volume containing meta-data
 * META_BUFFER	- hammer_buffer containing meta-data
 * DATA_BUFFER	- hammer_buffer containing pure-data
 *
 * Dirty volume headers and dirty meta-data buffers are locked until the
 * flusher can sequence them out.  Dirty pure-data buffers can be written.
 * Clean buffers can be passively released.
 */
typedef enum hammer_io_type {
	HAMMER_STRUCTURE_VOLUME,
	HAMMER_STRUCTURE_META_BUFFER,
	HAMMER_STRUCTURE_UNDO_BUFFER,
	HAMMER_STRUCTURE_DATA_BUFFER
} hammer_io_type_t;
union hammer_io_structure;
struct hammer_io;

struct worklist {
	LIST_ENTRY(worklist) node;
};

TAILQ_HEAD(hammer_io_list, hammer_io);
typedef struct hammer_io_list *hammer_io_list_t;

struct hammer_io {
	struct worklist		worklist;
	struct hammer_lock	lock;
	enum hammer_io_type	type;
	struct hammer_mount	*hmp;
	TAILQ_ENTRY(hammer_io)	mod_entry; /* list entry if modified */
	hammer_io_list_t	mod_list;
	struct buf		*bp;
	int64_t			offset;
	int			loading;   /* loading/unloading interlock */
	int			modify_refs;

	u_int		modified : 1;	/* bp's data was modified */
	u_int		released : 1;	/* bp released (w/ B_LOCKED set) */
	u_int		running : 1;	/* bp write IO in progress */
	u_int		waiting : 1;	/* someone is waiting on us */
	u_int		validated : 1;	/* ondisk has been validated */
	u_int		waitdep : 1;	/* flush waits for dependencies */
	u_int		recovered : 1;	/* has recovery ref */
	u_int		waitmod : 1;	/* waiting for modify_refs */
	u_int		reclaim : 1;	/* reclaim requested */
};

typedef struct hammer_io *hammer_io_t;
#define HAMMER_CLUSTER_SIZE	(64 * 1024)
#if HAMMER_CLUSTER_SIZE > MAXBSIZE
#undef  HAMMER_CLUSTER_SIZE
#define HAMMER_CLUSTER_SIZE	MAXBSIZE
#endif
#define HAMMER_CLUSTER_BUFS	(HAMMER_CLUSTER_SIZE / HAMMER_BUFSIZE)
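
/*
 * Worked example (editorial note): assuming the usual 16KB HAMMER_BUFSIZE
 * from hammer_disk.h, HAMMER_CLUSTER_BUFS = 65536 / 16384 = 4, i.e. a
 * clustered read pulls in four filesystem buffers at a time (fewer if
 * MAXBSIZE caps HAMMER_CLUSTER_SIZE).
 */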
/*
 * In-memory volume representing on-disk buffer
 */
struct hammer_volume {
	struct hammer_io io;
	RB_ENTRY(hammer_volume) rb_node;
	struct hammer_volume_ondisk *ondisk;
	int32_t	vol_no;
	int64_t nblocks;	/* note: special calculation for statfs */
	int64_t buffer_base;	/* base offset of buffer 0 */
	hammer_off_t maxbuf_off; /* Maximum buffer offset (zone-2) */
	hammer_off_t maxraw_off; /* Maximum raw offset for device */
	char	*vol_name;
	struct vnode *devvp;
	int	vol_flags;
};

typedef struct hammer_volume *hammer_volume_t;
/*
 * In-memory buffer (other than volume, super-cluster, or cluster),
 * representing an on-disk buffer.
 */
struct hammer_buffer {
	struct hammer_io io;
	RB_ENTRY(hammer_buffer) rb_node;
	void *ondisk;
	struct hammer_volume *volume;
	hammer_off_t zoneX_offset;
	hammer_off_t zone2_offset;
	struct hammer_reserve *resv;
	struct hammer_node_list clist;
};

typedef struct hammer_buffer *hammer_buffer_t;
/*
 * In-memory B-Tree node, representing an on-disk B-Tree node.
 *
 * This is a hang-on structure which is backed by a hammer_buffer,
 * indexed by a hammer_cluster, and used for fine-grained locking of
 * B-Tree nodes in order to properly control lock ordering.  A hammer_buffer
 * can contain multiple nodes representing wildly disassociated portions
 * of the B-Tree so locking cannot be done on a buffer-by-buffer basis.
 *
 * This structure uses a cluster-relative index to reduce the number
 * of layers required to access it, and also because all on-disk B-Tree
 * references are cluster-relative offsets.
 */
struct hammer_node {
	struct hammer_lock	lock;		/* node-by-node lock */
	TAILQ_ENTRY(hammer_node) entry;		/* per-buffer linkage */
	RB_ENTRY(hammer_node)	rb_node;	/* per-cluster linkage */
	hammer_off_t		node_offset;	/* full offset spec */
	struct hammer_mount	*hmp;
	struct hammer_buffer	*buffer;	/* backing buffer */
	hammer_node_ondisk_t	ondisk;		/* ptr to on-disk structure */
	struct hammer_node	**cache1;	/* passive cache(s) */
	struct hammer_node	**cache2;
	int			flags;
	int			loading;	/* load interlock */
};

#define HAMMER_NODE_DELETED	0x0001
#define HAMMER_NODE_FLUSH	0x0002

typedef struct hammer_node	*hammer_node_t;
/*
 * List of locked nodes.
 */
struct hammer_node_locklist {
	struct hammer_node_locklist *next;
	hammer_node_t	node;
};

typedef struct hammer_node_locklist *hammer_node_locklist_t;
/*
 * Common I/O management structure - embedded in in-memory structures
 * which are backed by filesystem buffers.
 */
union hammer_io_structure {
	struct hammer_io	io;
	struct hammer_volume	volume;
	struct hammer_buffer	buffer;
};

typedef union hammer_io_structure *hammer_io_structure_t;
/*
 * Allocation holes are recorded when an allocation does not fit within a
 * buffer.  Later allocations which might fit may then be satisfied from
 * a recorded hole.  The resv reference prevents the big block from being
 * allocated out of via the normal blockmap mechanism.
 *
 * This is strictly a heuristic.
 */
#define HAMMER_MAX_HOLES	8

struct hammer_hole;

struct hammer_holes {
	TAILQ_HEAD(, hammer_hole) list;
	int	count;
};

typedef struct hammer_holes *hammer_holes_t;

struct hammer_hole {
	TAILQ_ENTRY(hammer_hole) entry;
	struct hammer_reserve *resv;
	hammer_off_t	zone_offset;
	int		bytes;
};

typedef struct hammer_hole *hammer_hole_t;
/*
 * The reserve structure prevents the blockmap from allocating
 * out of a reserved bigblock.  Such reservations are used by
 * the direct-write mechanism.
 *
 * The structure is also used to hold off on reallocations of
 * big blocks from the freemap until flush dependencies have
 * been dealt with.
 */
struct hammer_reserve {
	RB_ENTRY(hammer_reserve) rb_node;
	TAILQ_ENTRY(hammer_reserve) delay_entry;
	int		flush_group;
	int		refs;
	hammer_off_t	zone_offset;
};

typedef struct hammer_reserve *hammer_reserve_t;
#include "hammer_cursor.h"
/*
 * The undo structure tracks recent undos to avoid laying down duplicate
 * undos within a flush group, saving us a significant amount of overhead.
 *
 * This is strictly a heuristic.
 */
#define HAMMER_MAX_UNDOS	1024
#define HAMMER_MAX_FLUSHERS	4

struct hammer_undo {
	RB_ENTRY(hammer_undo)	rb_node;
	TAILQ_ENTRY(hammer_undo) lru_entry;
	hammer_off_t	offset;
	int		bytes;
};

typedef struct hammer_undo *hammer_undo_t;
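
/*
 * Worked example (editorial note): if two modifications within one flush
 * group cover the same (offset, bytes) range, the first call to
 * hammer_enter_undo_history() (declared below) records the range; the
 * second call finds it and the caller can skip laying down a duplicate
 * undo, at the cost of a small fixed table (HAMMER_MAX_UNDOS entries).
 */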
struct hammer_flusher_info;

struct hammer_flusher {
	int		signal;		/* flusher thread sequencer */
	int		act;		/* currently active flush group */
	int		done;		/* set to act when complete */
	int		next;		/* next flush group */
	int		group_lock;	/* lock sequencing of the next flush */
	int		exiting;	/* request master exit */
	int		count;		/* number of slave flushers */
	int		running;	/* number of slave flushers running */
	thread_t	td;		/* master flusher thread */
	hammer_tid_t	tid;		/* last flushed transaction id */
	int		finalize_want;		/* serialize finalization */
	struct hammer_lock finalize_lock;	/* serialize finalization */
	struct hammer_transaction trans;	/* shared transaction */
	struct hammer_flusher_info *info[HAMMER_MAX_FLUSHERS];
};
/*
 * Internal hammer mount data structure
 */
struct hammer_mount {
	struct mount *mp;
	/*struct vnode *rootvp;*/
	struct hammer_ino_rb_tree rb_inos_root;
	struct hammer_vol_rb_tree rb_vols_root;
	struct hammer_nod_rb_tree rb_nods_root;
	struct hammer_und_rb_tree rb_undo_root;
	struct hammer_res_rb_tree rb_resv_root;
	struct hammer_buf_rb_tree rb_bufs_root;
	struct hammer_volume *rootvol;
	struct hammer_base_elm root_btree_beg;
	struct hammer_base_elm root_btree_end;
	char	*zbuf;	/* HAMMER_BUFSIZE bytes worth of all-zeros */
	int	flags;
	int	hflags;
	int	ronly;
	int	nvolumes;
	int	volume_iterator;
	int	rsv_inodes;	/* reserved space due to dirty inodes */
	int	rsv_databufs;	/* reserved space due to dirty buffers */
	int	rsv_databytes;	/* reserved space due to record data */
	int	rsv_recs;	/* reserved space due to dirty records */

	int	inode_reclaims;	/* inodes pending reclaim by flusher */
	int	count_inodes;	/* total number of inodes */
	int	count_iqueued;	/* inodes queued to flusher */

	struct hammer_flusher flusher;

	u_int	check_interrupt;
	uuid_t	fsid;
	udev_t	fsid_udev;
	struct hammer_io_list volu_list;	/* dirty volume buffers */
	struct hammer_io_list undo_list;	/* dirty undo buffers */
	struct hammer_io_list data_list;	/* dirty data buffers */
	struct hammer_io_list alt_data_list;	/* dirty data buffers */
	struct hammer_io_list meta_list;	/* dirty meta bufs    */
	struct hammer_io_list lose_list;	/* loose buffers      */
	int	locked_dirty_count;		/* meta/volu count    */
	int	io_running_count;
	int	objid_cache_count;
	hammer_tid_t asof;
	hammer_off_t next_tid;
	int64_t copy_stat_freebigblocks;	/* number of free bigblocks */

	u_int32_t namekey_iterator;
	hammer_off_t zone_limits[HAMMER_MAX_ZONES];
	struct netexport export;
	struct hammer_lock sync_lock;
	struct hammer_lock free_lock;
	struct lock blockmap_lock;
	struct hammer_blockmap  blockmap[HAMMER_MAX_ZONES];
	struct hammer_holes	holes[HAMMER_MAX_ZONES];
	struct hammer_undo	undos[HAMMER_MAX_UNDOS];
	int			undo_alloc;
	TAILQ_HEAD(, hammer_undo)  undo_lru_list;
	TAILQ_HEAD(, hammer_inode) flush_list;
	TAILQ_HEAD(, hammer_reserve) delay_list;
	TAILQ_HEAD(, hammer_objid_cache) objid_cache_list;
};

typedef struct hammer_mount	*hammer_mount_t;
#define HAMMER_MOUNT_WAITIMAX	0x0001

struct hammer_sync_info {
	int error;
	int waitfor;
};

#endif
#if defined(_KERNEL)

extern struct vop_ops hammer_vnode_vops;
extern struct vop_ops hammer_spec_vops;
extern struct vop_ops hammer_fifo_vops;
extern struct bio_ops hammer_bioops;

extern int hammer_debug_io;
extern int hammer_debug_general;
extern int hammer_debug_debug;
extern int hammer_debug_inode;
extern int hammer_debug_locks;
extern int hammer_debug_btree;
extern int hammer_debug_tid;
extern int hammer_debug_recover;
extern int hammer_debug_recover_faults;
extern int hammer_debug_write_release;
extern int hammer_debug_cluster_enable;
extern int hammer_count_inodes;
extern int hammer_count_iqueued;
extern int hammer_count_reclaiming;
extern int hammer_count_records;
extern int hammer_count_record_datas;
extern int hammer_count_volumes;
extern int hammer_count_buffers;
extern int hammer_count_nodes;
extern int hammer_count_dirtybufs;
extern int hammer_count_refedbufs;
extern int hammer_count_reservations;
extern int hammer_count_io_running_read;
extern int hammer_count_io_running_write;
extern int hammer_count_io_locked;
extern int hammer_limit_dirtybufs;
extern int hammer_limit_iqueued;
extern int hammer_limit_irecs;
extern int hammer_limit_recs;
extern int hammer_bio_count;
extern int hammer_stats_btree_iterations;
extern int hammer_stats_record_iterations;
extern int64_t hammer_contention_count;
int	hammer_vop_inactive(struct vop_inactive_args *);
int	hammer_vop_reclaim(struct vop_reclaim_args *);
int	hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp);
struct hammer_inode *hammer_get_inode(hammer_transaction_t trans,
			struct hammer_node **cache,
			u_int64_t obj_id, hammer_tid_t asof, int flags,
			int *errorp);
void	hammer_put_inode(struct hammer_inode *ip);
void	hammer_put_inode_ref(struct hammer_inode *ip);

int	hammer_unload_volume(hammer_volume_t volume, void *data __unused);
int	hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused);

int	hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused);
int	hammer_install_volume(hammer_mount_t hmp, const char *volname);

int	hammer_ip_lookup(hammer_cursor_t cursor);
int	hammer_ip_first(hammer_cursor_t cursor);
int	hammer_ip_next(hammer_cursor_t cursor);
int	hammer_ip_resolve_data(hammer_cursor_t cursor);
int	hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
			hammer_tid_t tid);
int	hammer_delete_at_cursor(hammer_cursor_t cursor, int64_t *stat_bytes);
int	hammer_ip_check_directory_empty(hammer_transaction_t trans,
			hammer_inode_t ip);
int	hammer_sync_hmp(hammer_mount_t hmp, int waitfor);
int	hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor);
hammer_record_t
	hammer_alloc_mem_record(hammer_inode_t ip, int data_len);
void	hammer_flush_record_done(hammer_record_t record, int error);
void	hammer_wait_mem_record_ident(hammer_record_t record, const char *ident);
void	hammer_rel_mem_record(hammer_record_t record);

int	hammer_cursor_up(hammer_cursor_t cursor);
int	hammer_cursor_up_locked(hammer_cursor_t cursor);
int	hammer_cursor_down(hammer_cursor_t cursor);
int	hammer_cursor_upgrade(hammer_cursor_t cursor);
void	hammer_cursor_downgrade(hammer_cursor_t cursor);
int	hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node,
			int index);
void	hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident);
int	hammer_lock_ex_try(struct hammer_lock *lock);
void	hammer_lock_sh(struct hammer_lock *lock);
int	hammer_lock_sh_try(struct hammer_lock *lock);
int	hammer_lock_upgrade(struct hammer_lock *lock);
void	hammer_lock_downgrade(struct hammer_lock *lock);
void	hammer_unlock(struct hammer_lock *lock);
void	hammer_ref(struct hammer_lock *lock);
void	hammer_unref(struct hammer_lock *lock);

void	hammer_sync_lock_ex(hammer_transaction_t trans);
void	hammer_sync_lock_sh(hammer_transaction_t trans);
int	hammer_sync_lock_sh_try(hammer_transaction_t trans);
void	hammer_sync_unlock(hammer_transaction_t trans);
u_int32_t hammer_to_unix_xid(uuid_t *uuid);
void	hammer_guid_to_uuid(uuid_t *uuid, u_int32_t guid);
void	hammer_to_timespec(hammer_tid_t tid, struct timespec *ts);
hammer_tid_t hammer_timespec_to_transid(struct timespec *ts);
hammer_tid_t hammer_now_tid(void);
hammer_tid_t hammer_str_to_tid(const char *str);
hammer_tid_t hammer_alloc_objid(hammer_transaction_t trans, hammer_inode_t dip);
void	hammer_clear_objid(hammer_inode_t dip);
void	hammer_destroy_objid_cache(hammer_mount_t hmp);

int	hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset,
			int bytes);
void	hammer_clear_undo_history(hammer_mount_t hmp);
enum vtype hammer_get_vnode_type(u_int8_t obj_type);
int	hammer_get_dtype(u_int8_t obj_type);
u_int8_t hammer_get_obj_type(enum vtype vtype);
int64_t	hammer_directory_namekey(void *name, int len);
int	hammer_nohistory(hammer_inode_t ip);

int	hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor,
			struct hammer_node **cache, hammer_inode_t ip);
int	hammer_reinit_cursor(hammer_cursor_t cursor);
void	hammer_normalize_cursor(hammer_cursor_t cursor);
void	hammer_done_cursor(hammer_cursor_t cursor);
void	hammer_mem_done(hammer_cursor_t cursor);
int	hammer_btree_lookup(hammer_cursor_t cursor);
int	hammer_btree_first(hammer_cursor_t cursor);
int	hammer_btree_last(hammer_cursor_t cursor);
int	hammer_btree_extract(hammer_cursor_t cursor, int flags);
int	hammer_btree_iterate(hammer_cursor_t cursor);
int	hammer_btree_iterate_reverse(hammer_cursor_t cursor);
int	hammer_btree_insert(hammer_cursor_t cursor,
			hammer_btree_leaf_elm_t elm);
int	hammer_btree_delete(hammer_cursor_t cursor);
int	hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2);
int	hammer_btree_chkts(hammer_tid_t ts, hammer_base_elm_t key);
int	hammer_btree_correct_rhb(hammer_cursor_t cursor, hammer_tid_t tid);
int	hammer_btree_correct_lhb(hammer_cursor_t cursor, hammer_tid_t tid);

int	btree_set_parent(hammer_transaction_t trans, hammer_node_t node,
			hammer_btree_elm_t elm);
int	hammer_btree_lock_children(hammer_cursor_t cursor,
			struct hammer_node_locklist **locklistp);
void	hammer_btree_unlock_children(struct hammer_node_locklist **locklistp);

void	hammer_print_btree_node(hammer_node_ondisk_t ondisk);
void	hammer_print_btree_elm(hammer_btree_elm_t elm, u_int8_t type, int i);
void	*hammer_bread(struct hammer_mount *hmp, hammer_off_t off,
			int *errorp, struct hammer_buffer **bufferp);
void	*hammer_bnew(struct hammer_mount *hmp, hammer_off_t off,
			int *errorp, struct hammer_buffer **bufferp);

hammer_volume_t	hammer_get_root_volume(hammer_mount_t hmp, int *errorp);

hammer_volume_t	hammer_get_volume(hammer_mount_t hmp,
			int32_t vol_no, int *errorp);
hammer_buffer_t	hammer_get_buffer(hammer_mount_t hmp,
			hammer_off_t buf_offset, int isnew, int *errorp);
void		hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
			hammer_off_t zone2_offset, int bytes);

int		hammer_ref_volume(hammer_volume_t volume);
int		hammer_ref_buffer(hammer_buffer_t buffer);
void		hammer_flush_buffer_nodes(hammer_buffer_t buffer);

void		hammer_rel_volume(hammer_volume_t volume, int flush);
void		hammer_rel_buffer(hammer_buffer_t buffer, int flush);

int		hammer_vfs_export(struct mount *mp, int op,
			const struct export_args *export);
hammer_node_t	hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset,
			int isnew, int *errorp);
void		hammer_ref_node(hammer_node_t node);
hammer_node_t	hammer_ref_node_safe(struct hammer_mount *hmp,
			struct hammer_node **cache, int *errorp);
void		hammer_rel_node(hammer_node_t node);
void		hammer_delete_node(hammer_transaction_t trans,
			hammer_node_t node);
void		hammer_cache_node(hammer_node_t node,
			struct hammer_node **cache);
void		hammer_uncache_node(struct hammer_node **cache);
void		hammer_flush_node(hammer_node_t node);
void hammer_dup_buffer(struct hammer_buffer **bufferp,
			struct hammer_buffer *buffer);
hammer_node_t hammer_alloc_btree(hammer_transaction_t trans, int *errorp);
void *hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
			hammer_off_t *data_offsetp,
			struct hammer_buffer **data_bufferp, int *errorp);

int hammer_generate_undo(hammer_transaction_t trans, hammer_io_t io,
			hammer_off_t zone1_offset, void *base, int len);

void hammer_put_volume(struct hammer_volume *volume, int flush);
void hammer_put_buffer(struct hammer_buffer *buffer, int flush);

hammer_off_t hammer_freemap_alloc(hammer_transaction_t trans,
			hammer_off_t owner, int *errorp);
void hammer_freemap_free(hammer_transaction_t trans, hammer_off_t phys_offset,
			hammer_off_t owner, int *errorp);
int hammer_checkspace(hammer_mount_t hmp);
hammer_off_t hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
			int bytes, int *errorp);
hammer_reserve_t hammer_blockmap_reserve(hammer_mount_t hmp, int zone,
			int bytes, hammer_off_t *zone_offp, int *errorp);
void hammer_blockmap_reserve_complete(hammer_mount_t hmp,
			hammer_reserve_t resv);
void hammer_blockmap_free(hammer_transaction_t trans,
			hammer_off_t bmap_off, int bytes);
int hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t bmap_off,
			int *curp, int *errorp);
hammer_off_t hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t bmap_off,
			int *errorp);
hammer_off_t hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t bmap_off,
			int *errorp);
int64_t hammer_undo_used(hammer_mount_t hmp);
int64_t hammer_undo_space(hammer_mount_t hmp);
int64_t hammer_undo_max(hammer_mount_t hmp);

void hammer_start_transaction(struct hammer_transaction *trans,
			      struct hammer_mount *hmp);
void hammer_simple_transaction(struct hammer_transaction *trans,
			       struct hammer_mount *hmp);
void hammer_start_transaction_fls(struct hammer_transaction *trans,
				  struct hammer_mount *hmp);
void hammer_done_transaction(struct hammer_transaction *trans);
void hammer_modify_inode(hammer_inode_t ip, int flags);
void hammer_flush_inode(hammer_inode_t ip, int flags);
void hammer_flush_inode_done(hammer_inode_t ip);
void hammer_wait_inode(hammer_inode_t ip);
void hammer_wait_inode_recs(hammer_inode_t ip);

int  hammer_create_inode(struct hammer_transaction *trans, struct vattr *vap,
			struct ucred *cred, struct hammer_inode *dip,
			struct hammer_inode **ipp);
void hammer_rel_inode(hammer_inode_t ip, int flush);
int hammer_reload_inode(hammer_inode_t ip, void *arg __unused);
int hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2);

int hammer_sync_inode(hammer_inode_t ip);
void hammer_test_inode(hammer_inode_t ip);
void hammer_inode_unloadable_check(hammer_inode_t ip, int getvp);
void hammer_inode_waitreclaims(hammer_mount_t hmp);

int  hammer_ip_add_directory(struct hammer_transaction *trans,
			hammer_inode_t dip, struct namecache *ncp,
			hammer_inode_t nip);
int  hammer_ip_del_directory(struct hammer_transaction *trans,
			hammer_cursor_t cursor, hammer_inode_t dip,
			hammer_inode_t ip);
hammer_record_t hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset,
			void *data, int bytes, int *errorp);
int  hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size);
int  hammer_ip_add_record(struct hammer_transaction *trans,
			hammer_record_t record);
int  hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
			int64_t ran_beg, int64_t ran_end, int truncating);
int  hammer_ip_delete_range_all(hammer_cursor_t cursor, hammer_inode_t ip,
			int *countp);
int  hammer_ip_sync_data(hammer_cursor_t cursor, hammer_inode_t ip,
			int64_t offset, void *data, int bytes);
int  hammer_ip_sync_record(hammer_transaction_t trans, hammer_record_t rec);
int  hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t rec);

int hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
			struct ucred *cred);
void hammer_io_init(hammer_io_t io, hammer_mount_t hmp,
			enum hammer_io_type type);
int hammer_io_read(struct vnode *devvp, struct hammer_io *io,
			hammer_off_t limit);
int hammer_io_new(struct vnode *devvp, struct hammer_io *io);
void hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset);
void hammer_io_release(struct hammer_io *io, int flush);
void hammer_io_flush(struct hammer_io *io);
void hammer_io_waitdep(struct hammer_io *io);
void hammer_io_wait_all(hammer_mount_t hmp, const char *ident);
int hammer_io_direct_read(hammer_mount_t hmp, hammer_off_t data_offset,
			struct bio *bio);
int hammer_io_direct_write(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf,
			struct bio *bio);
void hammer_io_write_interlock(hammer_io_t io);
void hammer_io_done_interlock(hammer_io_t io);
void hammer_io_clear_modify(struct hammer_io *io);
void hammer_io_clear_modlist(struct hammer_io *io);
void hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
			void *base, int len);
void hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
			void *base, int len);
void hammer_modify_volume_done(hammer_volume_t volume);
void hammer_modify_buffer_done(hammer_buffer_t buffer);

int hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_reblock *reblock);
int hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_prune *prune);

void hammer_init_holes(hammer_mount_t hmp, hammer_holes_t holes);
void hammer_free_holes(hammer_mount_t hmp, hammer_holes_t holes);
int hammer_signal_check(hammer_mount_t hmp);

void hammer_flusher_create(hammer_mount_t hmp);
void hammer_flusher_destroy(hammer_mount_t hmp);
void hammer_flusher_sync(hammer_mount_t hmp);
void hammer_flusher_async(hammer_mount_t hmp);

int hammer_recover(hammer_mount_t hmp, hammer_volume_t rootvol);
void hammer_recover_flush_buffers(hammer_mount_t hmp,
			hammer_volume_t root_volume);

void hammer_crc_set_blockmap(hammer_blockmap_t blockmap);
void hammer_crc_set_volume(hammer_volume_ondisk_t ondisk);

int hammer_crc_test_blockmap(hammer_blockmap_t blockmap);
int hammer_crc_test_volume(hammer_volume_ondisk_t ondisk);
int hammer_crc_test_btree(hammer_node_ondisk_t ondisk);
void hkprintf(const char *ctl, ...);

#endif
static __inline void
hammer_wait_mem_record(hammer_record_t record)
{
	hammer_wait_mem_record_ident(record, "hmmwai");
}

static __inline void
hammer_lock_ex(struct hammer_lock *lock)
{
	hammer_lock_ex_ident(lock, "hmrlck");
}
/*
 * Indicate that a B-Tree node (or a sub-range of one) is being modified.
 * hammer_modify_buffer() takes a modify ref on the backing buffer and
 * generates the undo; the matching *_done() call releases it.
 */
static __inline void
hammer_modify_node_noundo(hammer_transaction_t trans, hammer_node_t node)
{
	hammer_modify_buffer(trans, node->buffer, NULL, 0);
}

static __inline void
hammer_modify_node_all(hammer_transaction_t trans, struct hammer_node *node)
{
	hammer_modify_buffer(trans, node->buffer,
			     node->ondisk, sizeof(*node->ondisk));
}

static __inline void
hammer_modify_node(hammer_transaction_t trans, hammer_node_t node,
		   void *base, int len)
{
	hammer_crc_t *crcptr;

	KKASSERT((char *)base >= (char *)node->ondisk &&
		 (char *)base + len <=
		    (char *)node->ondisk + sizeof(*node->ondisk));

	/*
	 * Undo-log both the modified range and the node's CRC field.  Each
	 * hammer_modify_buffer() call takes a modify ref, so drop one here;
	 * a single hammer_modify_node_done() then balances things out.
	 */
	hammer_modify_buffer(trans, node->buffer, base, len);
	crcptr = &node->ondisk->crc;
	hammer_modify_buffer(trans, node->buffer,
			     crcptr, sizeof(hammer_crc_t));
	--node->buffer->io.modify_refs;	/* only want one ref */
}
static __inline void
hammer_modify_node_done(hammer_node_t node)
{
	node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
	hammer_modify_buffer_done(node->buffer);
}
#define hammer_modify_volume_field(trans, vol, field)		\
	hammer_modify_volume(trans, vol, &(vol)->ondisk->field,	\
			     sizeof((vol)->ondisk->field))

#define hammer_modify_node_field(trans, node, field)		\
	hammer_modify_node(trans, node, &(node)->ondisk->field,	\
			   sizeof((node)->ondisk->field))
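
/*
 * Editorial sketch (not part of the original header), assuming the
 * vol0_stat_inodes field from hammer_disk.h: the field-modify macros
 * undo-log just the bytes of a single ondisk field, after which the
 * caller edits the field and closes the modify with the matching
 * *_done() call.
 */
static __inline void
hammer_bump_inode_stat_example(hammer_transaction_t trans,
			       hammer_volume_t root_volume)
{
	hammer_modify_volume_field(trans, root_volume, vol0_stat_inodes);
	++root_volume->ondisk->vol0_stat_inodes;	/* the actual edit */
	hammer_modify_volume_done(root_volume);
}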