/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer.h,v 1.86 2008/06/18 01:13:30 dillon Exp $
 */
/*
 * This header file contains structures used internally by the HAMMERFS
 * implementation.  See hammer_disk.h for on-disk structures.
 */
#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mountctl.h>
#include <sys/vnode.h>
#include <sys/globaldata.h>
#include <sys/lockf.h>
#include <sys/queue.h>
#include <sys/signal2.h>
#include "hammer_disk.h"
#include "hammer_mount.h"
#include "hammer_ioctl.h"
#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

MALLOC_DECLARE(M_HAMMER);

#if !defined(KTR_HAMMER)
#define KTR_HAMMER	KTR_ALL
#endif
KTR_INFO_MASTER_EXTERN(hammer);
/*
 * Key structure used for custom RB tree inode lookups.  This prototypes
 * the function hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).
 */
typedef struct hammer_inode_info {
	int64_t		obj_id;		/* (key) object identifier */
	hammer_tid_t	obj_asof;	/* (key) snapshot transid or 0 */
} *hammer_inode_info_t;
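
/*
 * Illustrative sketch (not part of the original header): an as-of
 * lookup fills in both key fields before calling the generated RB
 * lookup function named in the comment above.  obj_asof is 0 for a
 * current-version lookup, else the snapshot transaction id:
 *
 *	struct hammer_inode_info info;
 *
 *	info.obj_id = obj_id;
 *	info.obj_asof = asof;
 *	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &info);
 */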
typedef enum hammer_transaction_type {
	HAMMER_TRANS_RO,
	HAMMER_TRANS_STD,
	HAMMER_TRANS_FLS
} hammer_transaction_type_t;
/*
 * HAMMER Transaction tracking
 */
struct hammer_transaction {
	hammer_transaction_type_t type;
	struct hammer_mount *hmp;
	struct hammer_volume *rootvol;
};

typedef struct hammer_transaction *hammer_transaction_t;
struct hammer_lock {
	int	refs;		/* active references delay writes */
	int	lockcount;	/* lock count for exclusive/shared access */
	struct thread *locktd;
};
static __inline int
hammer_islocked(struct hammer_lock *lock)
{
	return(lock->lockcount != 0);
}

static __inline int
hammer_isactive(struct hammer_lock *lock)
{
	return(lock->refs != 0);
}

static __inline int
hammer_islastref(struct hammer_lock *lock)
{
	return(lock->refs == 1);
}

/*
 * Return if we specifically own the lock exclusively.
 */
static __inline int
hammer_lock_excl_owned(struct hammer_lock *lock, thread_t td)
{
	if (lock->lockcount > 0 && lock->locktd == td)
		return(1);
	return(0);
}
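
/*
 * Illustrative usage sketch (not part of the original header): these
 * predicates are typically used in assertions to document and verify
 * lock state at function entry, e.g.
 *
 *	KKASSERT(hammer_islocked(&node->lock));
 *	KKASSERT(hammer_lock_excl_owned(&ip->lock, curthread));
 */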
/*
 * Flush state, used by various structures
 */
typedef enum hammer_inode_state {
	HAMMER_FST_IDLE,
	HAMMER_FST_SETUP,
	HAMMER_FST_FLUSH
} hammer_inode_state_t;
TAILQ_HEAD(hammer_record_list, hammer_record);
/*
 * Cache object ids.  A fixed number of objid cache structures are
 * created to reserve object id's for newly created files in multiples
 * of 100,000, localized to a particular directory, and recycled as
 * needed.  This allows parallel create operations in different
 * directories to retain fairly localized object ids which in turn
 * improves reblocking performance and layout.
 */
#define OBJID_CACHE_SIZE	1024
#define OBJID_CACHE_BULK	100000

typedef struct hammer_objid_cache {
	TAILQ_ENTRY(hammer_objid_cache) entry;
	struct hammer_inode	*dip;
	hammer_tid_t		next_tid;
} *hammer_objid_cache_t;
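
/*
 * Illustrative sketch (assumed allocator shape, not the original code):
 * a cache entry hands out ids sequentially from its reserved
 * OBJID_CACHE_BULK block, so creates within one directory stay
 * localized and the entry is recycled once the block is consumed:
 *
 *	hammer_tid_t
 *	example_alloc_objid(hammer_objid_cache_t cache)
 *	{
 *		return(cache->next_tid++);
 *	}
 */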
/*
 * Associate an inode with a B-Tree node to cache search start positions
 */
typedef struct hammer_node_cache {
	TAILQ_ENTRY(hammer_node_cache) entry;
	struct hammer_node	*node;
	struct hammer_inode	*ip;
} *hammer_node_cache_t;

TAILQ_HEAD(hammer_node_cache_list, hammer_node_cache);
/*
 * Structure used to represent an inode in-memory.
 *
 * The record and data associated with an inode may be out of sync with
 * the disk (xDIRTY flags), or not even on the disk at all (ONDISK flag
 * not set).
 *
 * An inode may also hold a cache of unsynchronized records, used for
 * database and directories only.  Unsynchronized regular file data is
 * stored in the buffer cache.
 *
 * NOTE: A file which is created and destroyed within the initial
 * synchronization period can wind up not doing any disk I/O at all.
 *
 * Finally, an inode may cache numerous disk-referencing B-Tree cursors.
 */
struct hammer_ino_rb_tree;
struct hammer_inode;
RB_HEAD(hammer_ino_rb_tree, hammer_inode);
RB_PROTOTYPEX(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
	      hammer_ino_rb_compare, hammer_inode_info_t);

struct hammer_rec_rb_tree;
struct hammer_record;
RB_HEAD(hammer_rec_rb_tree, hammer_record);
RB_PROTOTYPEX(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
	      hammer_rec_rb_compare, hammer_btree_leaf_elm_t);

TAILQ_HEAD(hammer_node_list, hammer_node);
struct hammer_inode {
	RB_ENTRY(hammer_inode)	rb_node;
	hammer_inode_state_t	flush_state;
	int			flags;
	TAILQ_ENTRY(hammer_inode) flush_entry;
	struct hammer_record_list target_list;	/* target of dependant recs */
	u_int64_t		obj_id;		/* (key) object identifier */
	hammer_tid_t		obj_asof;	/* (key) snapshot or 0 */
	struct hammer_mount	*hmp;
	hammer_objid_cache_t	objid_cache;
	int			error;		/* flush error */
	int			cursor_ip_refs;	/* sanity */
	struct lockf		advlock;
	struct hammer_lock	lock;		/* sync copy interlock */
	struct hammer_btree_leaf_elm ino_leaf;	/* in-memory cache */
	struct hammer_inode_data ino_data;	/* in-memory cache */
	struct hammer_rec_rb_tree rec_tree;	/* in-memory cache */
	struct hammer_node_cache cache[2];	/* search initiate cache */

	/*
	 * When a demark is created to synchronize an inode to
	 * disk, certain fields are copied so the front-end VOPs
	 * can continue to run in parallel with the synchronization
	 * occurring in the background.
	 */
	int		sync_flags;		/* to-sync flags cache */
	off_t		sync_trunc_off;		/* to-sync truncation */
	struct hammer_btree_leaf_elm sync_ino_leaf; /* to-sync cache */
	struct hammer_inode_data sync_ino_data; /* to-sync cache */
};

typedef struct hammer_inode *hammer_inode_t;
#define VTOI(vp)	((struct hammer_inode *)(vp)->v_data)

#define HAMMER_INODE_DDIRTY	0x0001	/* in-memory ino_data is dirty */
#define HAMMER_INODE_RSV_INODES	0x0002	/* hmp->rsv_inodes bumped */
#define HAMMER_INODE_ITIMES	0x0004	/* in-memory mtime/atime modified */
#define HAMMER_INODE_XDIRTY	0x0008	/* in-memory records */
#define HAMMER_INODE_ONDISK	0x0010	/* inode is on-disk (else not yet) */
#define HAMMER_INODE_FLUSH	0x0020	/* flush on last ref */
#define HAMMER_INODE_DELETED	0x0080	/* inode delete (backend) */
#define HAMMER_INODE_DELONDISK	0x0100	/* delete synchronized to disk */
#define HAMMER_INODE_RO		0x0200	/* read-only (because of as-of) */
#define HAMMER_INODE_VHELD	0x0400	/* vnode held on sync */
#define HAMMER_INODE_DONDISK	0x0800	/* data records may be on disk */
#define HAMMER_INODE_BUFS	0x1000	/* dirty high level bps present */
#define HAMMER_INODE_REFLUSH	0x2000	/* pipelined flush during flush */
#define HAMMER_INODE_RECLAIM	0x4000	/* trying to reclaim */
#define HAMMER_INODE_FLUSHW	0x8000	/* Someone waiting for flush */

#define HAMMER_INODE_TRUNCATED	0x00010000
#define HAMMER_INODE_DELETING	0x00020000 /* inode delete request (frontend)*/
#define HAMMER_INODE_RESIGNAL	0x00040000 /* re-signal on re-flush */
#define HAMMER_INODE_PARTIALW	0x00080000 /* wait partial record flush */
#define HAMMER_INODE_MODMASK	(HAMMER_INODE_DDIRTY|			    \
				 HAMMER_INODE_XDIRTY|HAMMER_INODE_BUFS|	    \
				 HAMMER_INODE_ITIMES|HAMMER_INODE_TRUNCATED|\
				 HAMMER_INODE_DELETING)

#define HAMMER_INODE_MODEASY	(HAMMER_INODE_DDIRTY|HAMMER_INODE_ITIMES)

#define HAMMER_INODE_MODMASK_NOXDIRTY \
				(HAMMER_INODE_MODMASK & ~HAMMER_INODE_XDIRTY)
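
/*
 * Illustrative usage (not part of the original header): MODMASK is the
 * canonical "does this inode have anything to sync?" test, e.g.
 *
 *	if (ip->flags & HAMMER_INODE_MODMASK)
 *		hammer_flush_inode(ip, 0);
 *
 * while MODMASK_NOXDIRTY asks the same question while ignoring
 * in-memory records.
 */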
#define HAMMER_FLUSH_GROUP_SIZE	64

#define HAMMER_FLUSH_SIGNAL	0x0001
#define HAMMER_FLUSH_RECURSION	0x0002
/*
 * Used by the inode reclaim code to pipeline reclaims and avoid
 * blowing out kernel memory or letting the flusher get too far
 * behind.
 */
struct hammer_reclaim {
	TAILQ_ENTRY(hammer_reclaim) entry;
};

#define HAMMER_RECLAIM_PIPESIZE	1000
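
/*
 * Illustrative sketch (behavioral summary, not the original code): the
 * frontend enqueues a hammer_reclaim entry on hmp->reclaim_list and,
 * when more than HAMMER_RECLAIM_PIPESIZE reclaims are in flight,
 * sleeps until the flusher works the backlog back down.
 */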
/*
 * Structure used to represent an unsynchronized record in-memory.  These
 * records typically represent directory entries.  Only non-historical
 * records are kept in-memory.
 *
 * Records are organized as a per-inode RB-Tree.  If the inode is not
 * on disk then neither are any records and the in-memory record tree
 * represents the entire contents of the inode.  If the inode is on disk
 * then the on-disk B-Tree is scanned in parallel with the in-memory
 * RB-Tree to synthesize the current state of the file.
 *
 * Records are also used to enforce the ordering of directory create/delete
 * operations.  A new inode will not be flushed to disk unless its related
 * directory entry is also being flushed at the same time.  A directory entry
 * will not be removed unless its related inode is also being removed at the
 * same time.
 */
typedef enum hammer_record_type {
	HAMMER_MEM_RECORD_GENERAL,	/* misc record */
	HAMMER_MEM_RECORD_INODE,	/* inode record */
	HAMMER_MEM_RECORD_ADD,		/* positive memory cache record */
	HAMMER_MEM_RECORD_DEL,		/* negative delete-on-disk record */
	HAMMER_MEM_RECORD_DATA		/* bulk-data record w/on-disk ref */
} hammer_record_type_t;
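
/*
 * Illustrative note (conceptual, not the original code): because an
 * on-disk inode's records live in the B-Tree while unsynchronized
 * records live in the per-inode RB-Tree, a full scan runs both
 * cursors in parallel and merges them, with in-memory records
 * overriding (or, for HAMMER_MEM_RECORD_DEL, deleting) their
 * on-disk counterparts.
 */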
struct hammer_record {
	RB_ENTRY(hammer_record)		rb_node;
	TAILQ_ENTRY(hammer_record)	target_entry;
	hammer_inode_state_t		flush_state;
	hammer_record_type_t		type;
	struct hammer_lock		lock;
	struct hammer_reserve		*resv;
	struct hammer_inode		*ip;
	struct hammer_inode		*target_ip;
	struct hammer_btree_leaf_elm	leaf;
	union hammer_data_ondisk	*data;
	int				flags;
};

typedef struct hammer_record *hammer_record_t;
/*
 * Record flags.  Note that FE can only be set by the frontend if the
 * record has not been interlocked by the backend w/ BE.
 */
#define HAMMER_RECF_ALLOCDATA		0x0001
#define HAMMER_RECF_ONRBTREE		0x0002
#define HAMMER_RECF_DELETED_FE		0x0004	/* deleted (frontend) */
#define HAMMER_RECF_DELETED_BE		0x0008	/* deleted (backend) */
#define HAMMER_RECF_UNUSED0010		0x0010
#define HAMMER_RECF_INTERLOCK_BE	0x0020	/* backend interlock */
#define HAMMER_RECF_WANTED		0x0040	/* wanted by the frontend */
#define HAMMER_RECF_CONVERT_DELETE	0x0100	/* special case */
/*
 * In-memory structures representing on-disk structures.
 */
struct hammer_volume;
struct hammer_buffer;
struct hammer_node;
struct hammer_undo;
struct hammer_reserve;

RB_HEAD(hammer_vol_rb_tree, hammer_volume);
RB_HEAD(hammer_buf_rb_tree, hammer_buffer);
RB_HEAD(hammer_nod_rb_tree, hammer_node);
RB_HEAD(hammer_und_rb_tree, hammer_undo);
RB_HEAD(hammer_res_rb_tree, hammer_reserve);

RB_PROTOTYPE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	      hammer_vol_rb_compare, int32_t);
RB_PROTOTYPE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	      hammer_buf_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_nod_rb_tree, hammer_node, rb_node,
	      hammer_nod_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_und_rb_tree, hammer_undo, rb_node,
	      hammer_und_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_res_rb_tree, hammer_reserve, rb_node,
	      hammer_res_rb_compare, hammer_off_t);
/*
 * IO management - embedded at the head of various in-memory structures
 *
 * VOLUME	- hammer_volume containing meta-data
 * META_BUFFER	- hammer_buffer containing meta-data
 * DATA_BUFFER	- hammer_buffer containing pure-data
 *
 * Dirty volume headers and dirty meta-data buffers are locked until the
 * flusher can sequence them out.  Dirty pure-data buffers can be written.
 * Clean buffers can be passively released.
 */
typedef enum hammer_io_type {
	HAMMER_STRUCTURE_VOLUME,
	HAMMER_STRUCTURE_META_BUFFER,
	HAMMER_STRUCTURE_UNDO_BUFFER,
	HAMMER_STRUCTURE_DATA_BUFFER
} hammer_io_type_t;
union hammer_io_structure;
struct hammer_io;

struct worklist {
	LIST_ENTRY(worklist) node;
};

TAILQ_HEAD(hammer_io_list, hammer_io);
typedef struct hammer_io_list *hammer_io_list_t;
struct hammer_io {
	struct worklist	worklist;
	struct hammer_lock lock;
	enum hammer_io_type type;
	struct hammer_mount *hmp;
	TAILQ_ENTRY(hammer_io) mod_entry; /* list entry if modified */
	hammer_io_list_t mod_list;
	struct buf	*bp;
	int		loading;	/* loading/unloading interlock */
	int		modify_refs;
	u_int		modified : 1;	/* bp's data was modified */
	u_int		released : 1;	/* bp released (w/ B_LOCKED set) */
	u_int		running : 1;	/* bp write IO in progress */
	u_int		waiting : 1;	/* someone is waiting on us */
	u_int		validated : 1;	/* ondisk has been validated */
	u_int		waitdep : 1;	/* flush waits for dependencies */
	u_int		recovered : 1;	/* has recovery ref */
	u_int		waitmod : 1;	/* waiting for modify_refs */
	u_int		reclaim : 1;	/* reclaim requested */
	u_int		gencrc : 1;	/* crc needs to be generated */
};

typedef struct hammer_io *hammer_io_t;
#define HAMMER_CLUSTER_SIZE	(64 * 1024)
#if HAMMER_CLUSTER_SIZE > MAXBSIZE
#undef  HAMMER_CLUSTER_SIZE
#define HAMMER_CLUSTER_SIZE	MAXBSIZE
#endif
#define HAMMER_CLUSTER_BUFS	(HAMMER_CLUSTER_SIZE / HAMMER_BUFSIZE)
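
/*
 * Example arithmetic (assuming the 16K HAMMER_BUFSIZE defined in
 * hammer_disk.h): a 64K HAMMER_CLUSTER_SIZE yields HAMMER_CLUSTER_BUFS
 * of 4, i.e. clustered I/O operates on 4 buffers at a time.
 */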
/*
 * In-memory volume representing on-disk buffer
 */
struct hammer_volume {
	struct hammer_io io;
	RB_ENTRY(hammer_volume) rb_node;
	struct hammer_volume_ondisk *ondisk;
	int64_t nblocks;	/* note: special calculation for statfs */
	int64_t buffer_base;	/* base offset of buffer 0 */
	hammer_off_t maxbuf_off; /* Maximum buffer offset (zone-2) */
	hammer_off_t maxraw_off; /* Maximum raw offset for device */
};

typedef struct hammer_volume *hammer_volume_t;
/*
 * In-memory buffer (other than volume, super-cluster, or cluster),
 * representing an on-disk buffer.
 */
struct hammer_buffer {
	struct hammer_io io;
	RB_ENTRY(hammer_buffer) rb_node;
	struct hammer_volume *volume;
	hammer_off_t zoneX_offset;
	hammer_off_t zone2_offset;
	struct hammer_reserve *resv;
	struct hammer_node_list clist;
};

typedef struct hammer_buffer *hammer_buffer_t;
/*
 * In-memory B-Tree node, representing an on-disk B-Tree node.
 *
 * This is a hang-on structure which is backed by a hammer_buffer,
 * indexed by a hammer_cluster, and used for fine-grained locking of
 * B-Tree nodes in order to properly control lock ordering.  A hammer_buffer
 * can contain multiple nodes representing wildly disassociated portions
 * of the B-Tree so locking cannot be done on a buffer-by-buffer basis.
 *
 * This structure uses a cluster-relative index to reduce the number
 * of layers required to access it, and also because all on-disk B-Tree
 * references are cluster-relative offsets.
 */
struct hammer_node {
	struct hammer_lock	lock;		/* node-by-node lock */
	TAILQ_ENTRY(hammer_node) entry;		/* per-buffer linkage */
	RB_ENTRY(hammer_node)	rb_node;	/* per-cluster linkage */
	hammer_off_t		node_offset;	/* full offset spec */
	struct hammer_mount	*hmp;
	struct hammer_buffer	*buffer;	/* backing buffer */
	hammer_node_ondisk_t	ondisk;		/* ptr to on-disk structure */
	struct hammer_node_cache_list cache_list; /* passive caches */
	int			flags;
	int			loading;	/* load interlock */
};

#define HAMMER_NODE_DELETED	0x0001
#define HAMMER_NODE_FLUSH	0x0002
#define HAMMER_NODE_CRCGOOD	0x0004
#define HAMMER_NODE_NEEDSCRC	0x0008

typedef struct hammer_node *hammer_node_t;
/*
 * List of locked nodes.
 */
struct hammer_node_locklist {
	struct hammer_node_locklist *next;
	hammer_node_t	node;
};

typedef struct hammer_node_locklist *hammer_node_locklist_t;
/*
 * Common I/O management structure - embedded in in-memory structures
 * which are backed by filesystem buffers.
 */
union hammer_io_structure {
	struct hammer_io	io;
	struct hammer_volume	volume;
	struct hammer_buffer	buffer;
};

typedef union hammer_io_structure *hammer_io_structure_t;
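
/*
 * Illustrative sketch (not part of the original header): because the
 * hammer_io is embedded at the head of both containing structures, a
 * hammer_io_t can be recovered into its container through the union:
 *
 *	hammer_io_structure_t iou = (void *)io;
 *
 *	if (io->type == HAMMER_STRUCTURE_VOLUME)
 *		volume = &iou->volume;
 *	else
 *		buffer = &iou->buffer;
 */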
/*
 * The reserve structure prevents the blockmap from allocating
 * out of a reserved bigblock.  Such reservations are used by
 * the direct-write mechanism.
 *
 * The structure is also used to hold off on reallocations of
 * big blocks from the freemap until flush dependencies have
 * been resolved.
 */
struct hammer_reserve {
	RB_ENTRY(hammer_reserve) rb_node;
	TAILQ_ENTRY(hammer_reserve) delay_entry;
	hammer_off_t	zone_offset;
};

typedef struct hammer_reserve *hammer_reserve_t;

#define HAMMER_RESF_ONDELAY	0x0001
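
/*
 * Illustrative flow (behavioral summary, not the original code): a
 * direct write calls hammer_blockmap_reserve() to stake out space,
 * writes the data outside the normal buffer path, then finishes via
 * hammer_blockmap_reserve_complete(); hammer_reserve_setdelay() holds
 * freed big-blocks back until flush dependencies clear.
 */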
#include "hammer_cursor.h"
/*
 * The undo structure tracks recent undos to avoid laying down duplicate
 * undos within a flush group, saving us a significant amount of overhead.
 *
 * This is strictly a heuristic.
 */
#define HAMMER_MAX_UNDOS	1024
#define HAMMER_MAX_FLUSHERS	4

struct hammer_undo {
	RB_ENTRY(hammer_undo)	rb_node;
	TAILQ_ENTRY(hammer_undo) lru_entry;
};

typedef struct hammer_undo *hammer_undo_t;
struct hammer_flusher_info;

struct hammer_flusher {
	int		signal;		/* flusher thread sequencer */
	int		act;		/* currently active flush group */
	int		done;		/* set to act when complete */
	int		next;		/* next flush group */
	int		group_lock;	/* lock sequencing of the next flush */
	int		exiting;	/* request master exit */
	int		count;		/* number of slave flushers */
	int		running;	/* number of slave flushers running */
	thread_t	td;		/* master flusher thread */
	hammer_tid_t	tid;		/* last flushed transaction id */
	int		finalize_want;		/* serialize finalization */
	struct hammer_lock finalize_lock;	/* serialize finalization */
	struct hammer_transaction trans;	/* shared transaction */
	struct hammer_flusher_info *info[HAMMER_MAX_FLUSHERS];
};
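
/*
 * Illustrative note (behavioral summary, not the original code): flush
 * groups are sequenced through the counters above; the frontend bumps
 * next and signals the master thread, which advances act, runs the
 * slave flushers, and sets done to act when the group completes so
 * waiters comparing their group number against done can wake up.
 */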
/*
 * Internal hammer mount data structure
 */
struct hammer_mount {
	/*struct vnode *rootvp;*/
	struct hammer_ino_rb_tree rb_inos_root;
	struct hammer_vol_rb_tree rb_vols_root;
	struct hammer_nod_rb_tree rb_nods_root;
	struct hammer_und_rb_tree rb_undo_root;
	struct hammer_res_rb_tree rb_resv_root;
	struct hammer_buf_rb_tree rb_bufs_root;
	struct hammer_volume *rootvol;
	struct hammer_base_elm root_btree_beg;
	struct hammer_base_elm root_btree_end;
	int	rsv_inodes;	/* reserved space due to dirty inodes */
	int	rsv_databufs;	/* reserved space due to dirty buffers */
	int	rsv_databytes;	/* reserved space due to record data */
	int	rsv_recs;	/* reserved space due to dirty records */
	int	inode_reclaims;	/* inodes pending reclaim by flusher */
	int	count_inodes;	/* total number of inodes */
	int	count_iqueued;	/* inodes queued to flusher */
	struct hammer_flusher flusher;
	u_int	check_interrupt;
	struct hammer_io_list volu_list;	/* dirty volume buffers */
	struct hammer_io_list undo_list;	/* dirty undo buffers */
	struct hammer_io_list data_list;	/* dirty data buffers */
	struct hammer_io_list alt_data_list;	/* dirty data buffers */
	struct hammer_io_list meta_list;	/* dirty meta bufs */
	struct hammer_io_list lose_list;	/* loose buffers */
	int	locked_dirty_count;	/* meta/volu count */
	int	io_running_count;
	int	objid_cache_count;
	hammer_off_t next_tid;
	int64_t copy_stat_freebigblocks; /* number of free bigblocks */
	u_int32_t namekey_iterator;
	struct netexport export;
	struct hammer_lock sync_lock;
	struct hammer_lock free_lock;
	struct hammer_lock undo_lock;
	struct hammer_lock blkmap_lock;
	struct hammer_blockmap blockmap[HAMMER_MAX_ZONES];
	struct hammer_undo undos[HAMMER_MAX_UNDOS];

	TAILQ_HEAD(, hammer_undo)  undo_lru_list;
	TAILQ_HEAD(, hammer_inode) flush_list;
	TAILQ_HEAD(, hammer_reserve) delay_list;
	TAILQ_HEAD(, hammer_objid_cache) objid_cache_list;
	TAILQ_HEAD(, hammer_reclaim) reclaim_list;
};

typedef struct hammer_mount *hammer_mount_t;
#define HAMMER_MOUNT_UNUSED0001	0x0001

struct hammer_sync_info {
	int error;
	int waitfor;
};
#endif	/* _KERNEL || _KERNEL_STRUCTURES */

#if defined(_KERNEL)

extern struct vop_ops hammer_vnode_vops;
extern struct vop_ops hammer_spec_vops;
extern struct vop_ops hammer_fifo_vops;
extern struct bio_ops hammer_bioops;

extern int hammer_debug_io;
extern int hammer_debug_general;
extern int hammer_debug_debug;
extern int hammer_debug_inode;
extern int hammer_debug_locks;
extern int hammer_debug_btree;
extern int hammer_debug_tid;
extern int hammer_debug_recover;
extern int hammer_debug_recover_faults;
extern int hammer_debug_cluster_enable;
extern int hammer_count_inodes;
extern int hammer_count_iqueued;
extern int hammer_count_reclaiming;
extern int hammer_count_records;
extern int hammer_count_record_datas;
extern int hammer_count_volumes;
extern int hammer_count_buffers;
extern int hammer_count_nodes;
extern int64_t hammer_stats_btree_lookups;
extern int64_t hammer_stats_btree_searches;
extern int64_t hammer_stats_btree_inserts;
extern int64_t hammer_stats_btree_deletes;
extern int64_t hammer_stats_btree_elements;
extern int64_t hammer_stats_btree_splits;
extern int64_t hammer_stats_btree_iterations;
extern int64_t hammer_stats_record_iterations;
extern int hammer_count_dirtybufs;
extern int hammer_count_refedbufs;
extern int hammer_count_reservations;
extern int hammer_count_io_running_read;
extern int hammer_count_io_running_write;
extern int hammer_count_io_locked;
extern int hammer_limit_dirtybufs;
extern int hammer_limit_iqueued;
extern int hammer_limit_irecs;
extern int hammer_limit_recs;
extern int hammer_bio_count;
extern int hammer_verify_zone;
extern int hammer_write_mode;
extern int64_t hammer_contention_count;
int	hammer_vop_inactive(struct vop_inactive_args *);
int	hammer_vop_reclaim(struct vop_reclaim_args *);
int	hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp);
struct hammer_inode *hammer_get_inode(hammer_transaction_t trans,
			hammer_inode_t dip, u_int64_t obj_id,
			hammer_tid_t asof, int flags, int *errorp);
void	hammer_put_inode(struct hammer_inode *ip);
void	hammer_put_inode_ref(struct hammer_inode *ip);

int	hammer_unload_volume(hammer_volume_t volume, void *data __unused);
int	hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused);
int	hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused);
int	hammer_install_volume(hammer_mount_t hmp, const char *volname);

int	hammer_ip_lookup(hammer_cursor_t cursor);
int	hammer_ip_first(hammer_cursor_t cursor);
int	hammer_ip_next(hammer_cursor_t cursor);
int	hammer_ip_resolve_data(hammer_cursor_t cursor);
int	hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
			hammer_tid_t tid);
int	hammer_delete_at_cursor(hammer_cursor_t cursor, int64_t *stat_bytes);
int	hammer_ip_check_directory_empty(hammer_transaction_t trans,
			hammer_inode_t ip);
int	hammer_sync_hmp(hammer_mount_t hmp, int waitfor);
int	hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor);
hammer_record_t
	hammer_alloc_mem_record(hammer_inode_t ip, int data_len);
void	hammer_flush_record_done(hammer_record_t record, int error);
void	hammer_wait_mem_record_ident(hammer_record_t record, const char *ident);
void	hammer_rel_mem_record(hammer_record_t record);
int	hammer_cursor_up(hammer_cursor_t cursor);
int	hammer_cursor_up_locked(hammer_cursor_t cursor);
int	hammer_cursor_down(hammer_cursor_t cursor);
int	hammer_cursor_upgrade(hammer_cursor_t cursor);
int	hammer_cursor_upgrade_node(hammer_cursor_t cursor);
void	hammer_cursor_downgrade(hammer_cursor_t cursor);
int	hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node,
			int index);
void	hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident);
int	hammer_lock_ex_try(struct hammer_lock *lock);
void	hammer_lock_sh(struct hammer_lock *lock);
int	hammer_lock_sh_try(struct hammer_lock *lock);
int	hammer_lock_upgrade(struct hammer_lock *lock);
void	hammer_lock_downgrade(struct hammer_lock *lock);
void	hammer_unlock(struct hammer_lock *lock);
void	hammer_ref(struct hammer_lock *lock);
void	hammer_unref(struct hammer_lock *lock);

void	hammer_sync_lock_ex(hammer_transaction_t trans);
void	hammer_sync_lock_sh(hammer_transaction_t trans);
int	hammer_sync_lock_sh_try(hammer_transaction_t trans);
void	hammer_sync_unlock(hammer_transaction_t trans);
u_int32_t hammer_to_unix_xid(uuid_t *uuid);
void	hammer_guid_to_uuid(uuid_t *uuid, u_int32_t guid);
void	hammer_to_timespec(hammer_tid_t tid, struct timespec *ts);
hammer_tid_t hammer_timespec_to_transid(struct timespec *ts);
hammer_tid_t hammer_now_tid(void);
hammer_tid_t hammer_str_to_tid(const char *str);
hammer_tid_t hammer_alloc_objid(hammer_transaction_t trans, hammer_inode_t dip);
void	hammer_clear_objid(hammer_inode_t dip);
void	hammer_destroy_objid_cache(hammer_mount_t hmp);

int	hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset,
			int bytes);
void	hammer_clear_undo_history(hammer_mount_t hmp);
enum vtype hammer_get_vnode_type(u_int8_t obj_type);
int	hammer_get_dtype(u_int8_t obj_type);
u_int8_t hammer_get_obj_type(enum vtype vtype);
int64_t	hammer_directory_namekey(void *name, int len);
int	hammer_nohistory(hammer_inode_t ip);
int	hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor,
			hammer_node_cache_t cache, hammer_inode_t ip);
int	hammer_reinit_cursor(hammer_cursor_t cursor);
void	hammer_normalize_cursor(hammer_cursor_t cursor);
void	hammer_done_cursor(hammer_cursor_t cursor);
void	hammer_mem_done(hammer_cursor_t cursor);
int	hammer_btree_lookup(hammer_cursor_t cursor);
int	hammer_btree_first(hammer_cursor_t cursor);
int	hammer_btree_last(hammer_cursor_t cursor);
int	hammer_btree_extract(hammer_cursor_t cursor, int flags);
int	hammer_btree_iterate(hammer_cursor_t cursor);
int	hammer_btree_iterate_reverse(hammer_cursor_t cursor);
int	hammer_btree_insert(hammer_cursor_t cursor,
			hammer_btree_leaf_elm_t elm);
int	hammer_btree_delete(hammer_cursor_t cursor);
int	hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2);
int	hammer_btree_chkts(hammer_tid_t ts, hammer_base_elm_t key);
int	hammer_btree_correct_rhb(hammer_cursor_t cursor, hammer_tid_t tid);
int	hammer_btree_correct_lhb(hammer_cursor_t cursor, hammer_tid_t tid);

int	btree_set_parent(hammer_transaction_t trans, hammer_node_t node,
			hammer_btree_elm_t elm);
int	hammer_btree_lock_children(hammer_cursor_t cursor,
			struct hammer_node_locklist **locklistp);
void	hammer_btree_unlock_children(struct hammer_node_locklist **locklistp);
int	hammer_btree_search_node(hammer_base_elm_t elm, hammer_node_ondisk_t node);

void	hammer_print_btree_node(hammer_node_ondisk_t ondisk);
void	hammer_print_btree_elm(hammer_btree_elm_t elm, u_int8_t type, int i);
void	*hammer_bread(struct hammer_mount *hmp, hammer_off_t off,
			int *errorp, struct hammer_buffer **bufferp);
void	*hammer_bnew(struct hammer_mount *hmp, hammer_off_t off,
			int *errorp, struct hammer_buffer **bufferp);

hammer_volume_t	hammer_get_root_volume(hammer_mount_t hmp, int *errorp);

hammer_volume_t	hammer_get_volume(hammer_mount_t hmp,
			int32_t vol_no, int *errorp);
hammer_buffer_t	hammer_get_buffer(hammer_mount_t hmp,
			hammer_off_t buf_offset, int isnew, int *errorp);
void	hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
			hammer_off_t zone2_offset, int bytes);

int	hammer_ref_volume(hammer_volume_t volume);
int	hammer_ref_buffer(hammer_buffer_t buffer);
void	hammer_flush_buffer_nodes(hammer_buffer_t buffer);

void	hammer_rel_volume(hammer_volume_t volume, int flush);
void	hammer_rel_buffer(hammer_buffer_t buffer, int flush);

int	hammer_vfs_export(struct mount *mp, int op,
			const struct export_args *export);
hammer_node_t	hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset,
			int isnew, int *errorp);
void		hammer_ref_node(hammer_node_t node);
hammer_node_t	hammer_ref_node_safe(struct hammer_mount *hmp,
			hammer_node_cache_t cache, int *errorp);
void		hammer_rel_node(hammer_node_t node);
void		hammer_delete_node(hammer_transaction_t trans,
			hammer_node_t node);
void		hammer_cache_node(hammer_node_cache_t cache,
			hammer_node_t node);
void		hammer_uncache_node(hammer_node_cache_t cache);
void		hammer_flush_node(hammer_node_t node);
void	hammer_dup_buffer(struct hammer_buffer **bufferp,
			struct hammer_buffer *buffer);
hammer_node_t hammer_alloc_btree(hammer_transaction_t trans, int *errorp);
void	*hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
			u_int16_t rec_type, hammer_off_t *data_offsetp,
			struct hammer_buffer **data_bufferp, int *errorp);

int	hammer_generate_undo(hammer_transaction_t trans, hammer_io_t io,
			hammer_off_t zone1_offset, void *base, int len);

void	hammer_put_volume(struct hammer_volume *volume, int flush);
void	hammer_put_buffer(struct hammer_buffer *buffer, int flush);

hammer_off_t hammer_freemap_alloc(hammer_transaction_t trans,
			hammer_off_t owner, int *errorp);
void	hammer_freemap_free(hammer_transaction_t trans, hammer_off_t phys_offset,
			hammer_off_t owner, int *errorp);
int	hammer_checkspace(hammer_mount_t hmp);
hammer_off_t hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
			int bytes, int *errorp);
hammer_reserve_t hammer_blockmap_reserve(hammer_mount_t hmp, int zone,
			int bytes, hammer_off_t *zone_offp, int *errorp);
void	hammer_blockmap_reserve_complete(hammer_mount_t hmp,
			hammer_reserve_t resv);
void	hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv,
			hammer_off_t zone2_offset);
void	hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv);
void	hammer_blockmap_free(hammer_transaction_t trans,
			hammer_off_t bmap_off, int bytes);
int	hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t bmap_off,
			int *curp, int *errorp);
hammer_off_t hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t bmap_off,
			int *errorp);
hammer_off_t hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t bmap_off,
			int *errorp);
int64_t hammer_undo_used(hammer_mount_t hmp);
int64_t hammer_undo_space(hammer_mount_t hmp);
int64_t hammer_undo_max(hammer_mount_t hmp);
void	hammer_start_transaction(struct hammer_transaction *trans,
			struct hammer_mount *hmp);
void	hammer_simple_transaction(struct hammer_transaction *trans,
			struct hammer_mount *hmp);
void	hammer_start_transaction_fls(struct hammer_transaction *trans,
			struct hammer_mount *hmp);
void	hammer_done_transaction(struct hammer_transaction *trans);

void	hammer_modify_inode(hammer_inode_t ip, int flags);
void	hammer_flush_inode(hammer_inode_t ip, int flags);
void	hammer_flush_inode_done(hammer_inode_t ip);
void	hammer_wait_inode(hammer_inode_t ip);
void	hammer_wait_inode_recs(hammer_inode_t ip);
int	hammer_create_inode(struct hammer_transaction *trans, struct vattr *vap,
			struct ucred *cred, struct hammer_inode *dip,
			struct hammer_inode **ipp);
void	hammer_rel_inode(hammer_inode_t ip, int flush);
int	hammer_reload_inode(hammer_inode_t ip, void *arg __unused);
int	hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2);

int	hammer_sync_inode(hammer_inode_t ip);
void	hammer_test_inode(hammer_inode_t ip);
void	hammer_inode_unloadable_check(hammer_inode_t ip, int getvp);
int	hammer_ip_add_directory(struct hammer_transaction *trans,
			hammer_inode_t dip, struct namecache *ncp,
			hammer_inode_t nip);
int	hammer_ip_del_directory(struct hammer_transaction *trans,
			hammer_cursor_t cursor, hammer_inode_t dip,
			hammer_inode_t ip);
hammer_record_t
	hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset,
			void *data, int bytes, int *errorp);
int	hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size);
int	hammer_ip_add_record(struct hammer_transaction *trans,
			hammer_record_t record);
int	hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
			int64_t ran_beg, int64_t ran_end, int truncating);
int	hammer_ip_delete_range_all(hammer_cursor_t cursor, hammer_inode_t ip,
			int *countp);
int	hammer_ip_sync_data(hammer_cursor_t cursor, hammer_inode_t ip,
			int64_t offset, void *data, int bytes);
int	hammer_ip_sync_record(hammer_transaction_t trans, hammer_record_t rec);
int	hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t rec);

int	hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
			struct ucred *cred);
void	hammer_io_init(hammer_io_t io, hammer_mount_t hmp,
			enum hammer_io_type type);
int	hammer_io_read(struct vnode *devvp, struct hammer_io *io,
			hammer_off_t limit);
int	hammer_io_new(struct vnode *devvp, struct hammer_io *io);
void	hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset);
void	hammer_io_release(struct hammer_io *io, int flush);
void	hammer_io_flush(struct hammer_io *io);
void	hammer_io_waitdep(struct hammer_io *io);
void	hammer_io_wait_all(hammer_mount_t hmp, const char *ident);
int	hammer_io_direct_read(hammer_mount_t hmp, hammer_off_t data_offset,
			struct bio *bio);
int	hammer_io_direct_write(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf,
			struct bio *bio);
void	hammer_io_write_interlock(hammer_io_t io);
void	hammer_io_done_interlock(hammer_io_t io);
void	hammer_io_clear_modify(struct hammer_io *io);
void	hammer_io_clear_modlist(struct hammer_io *io);
void	hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
			void *base, int len);
void	hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
			void *base, int len);
void	hammer_modify_volume_done(hammer_volume_t volume);
void	hammer_modify_buffer_done(hammer_buffer_t buffer);
int	hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_reblock *reblock);
int	hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_prune *prune);

int	hammer_signal_check(hammer_mount_t hmp);

void	hammer_flusher_create(hammer_mount_t hmp);
void	hammer_flusher_destroy(hammer_mount_t hmp);
void	hammer_flusher_sync(hammer_mount_t hmp);
void	hammer_flusher_async(hammer_mount_t hmp);

int	hammer_recover(hammer_mount_t hmp, hammer_volume_t rootvol);
void	hammer_recover_flush_buffers(hammer_mount_t hmp,
			hammer_volume_t root_volume);

void	hammer_crc_set_blockmap(hammer_blockmap_t blockmap);
void	hammer_crc_set_volume(hammer_volume_ondisk_t ondisk);

int	hammer_crc_test_blockmap(hammer_blockmap_t blockmap);
int	hammer_crc_test_volume(hammer_volume_ondisk_t ondisk);
int	hammer_crc_test_btree(hammer_node_ondisk_t ondisk);

void	hkprintf(const char *ctl, ...);
static __inline void
hammer_wait_mem_record(hammer_record_t record)
{
	hammer_wait_mem_record_ident(record, "hmmwai");
}
static __inline void
hammer_lock_ex(struct hammer_lock *lock)
{
	hammer_lock_ex_ident(lock, "hmrlck");
}
/*
 * Indicate that a B-Tree node is being modified.
 */
static __inline void
hammer_modify_node_noundo(hammer_transaction_t trans, hammer_node_t node)
{
	hammer_modify_buffer(trans, node->buffer, NULL, 0);
}
static __inline void
hammer_modify_node_all(hammer_transaction_t trans, struct hammer_node *node)
{
	hammer_modify_buffer(trans, node->buffer,
			     node->ondisk, sizeof(*node->ondisk));
}
static __inline void
hammer_modify_node(hammer_transaction_t trans, hammer_node_t node,
		   void *base, int len)
{
	hammer_crc_t *crcptr;

	KKASSERT((char *)base >= (char *)node->ondisk &&
		 (char *)base + len <=
		    (char *)node->ondisk + sizeof(*node->ondisk));
	hammer_modify_buffer(trans, node->buffer, base, len);
	crcptr = &node->ondisk->crc;
	hammer_modify_buffer(trans, node->buffer, crcptr, sizeof(hammer_crc_t));
	--node->buffer->io.modify_refs;	/* only want one ref */
}
/*
 * Indicate that the specified modifications have been completed.
 *
 * Do not try to generate the crc here, it's very expensive to do and a
 * sequence of insertions or deletions can result in many calls to this
 * function on the same node.
 */
static __inline void
hammer_modify_node_done(hammer_node_t node)
{
	node->flags |= HAMMER_NODE_CRCGOOD;
	if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0) {
		node->flags |= HAMMER_NODE_NEEDSCRC;
		node->buffer->io.gencrc = 1;
		hammer_ref_node(node);
	}
	hammer_modify_buffer_done(node->buffer);
}
#define hammer_modify_volume_field(trans, vol, field)		\
	hammer_modify_volume(trans, vol, &(vol)->ondisk->field,	\
			     sizeof((vol)->ondisk->field))

#define hammer_modify_node_field(trans, node, field)		\
	hammer_modify_node(trans, node, &(node)->ondisk->field,	\
			   sizeof((node)->ondisk->field))
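
/*
 * Illustrative usage (assumed pattern, not part of the original header):
 *
 *	hammer_modify_node_field(trans, node, count);
 *	node->ondisk->count = new_count;
 *	hammer_modify_node_done(node);
 *
 * The modify call generates undo covering just that field; the done
 * call flags the node so its crc is regenerated at flush time.
 */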