/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer.h,v 1.88 2008/06/20 21:24:53 dillon Exp $
 */
/*
 * This header file contains structures used internally by the HAMMERFS
 * implementation.  See hammer_disk.h for on-disk structures.
 */
#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/tree.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mountctl.h>
#include <sys/vnode.h>
#include <sys/globaldata.h>
#include <sys/lockf.h>
#include <sys/buf.h>
#include <sys/queue.h>
#include <sys/ktr.h>

#include <sys/buf2.h>
#include <sys/signal2.h>
#include "hammer_disk.h"
#include "hammer_mount.h"
#include "hammer_ioctl.h"

#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

MALLOC_DECLARE(M_HAMMER);

#if !defined(KTR_HAMMER)
#define KTR_HAMMER	KTR_ALL
#endif
KTR_INFO_MASTER_EXTERN(hammer);

/*
 * Key structure used for custom RB tree inode lookups.  This prototypes
 * the function hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).
 */
typedef struct hammer_inode_info {
	int64_t		obj_id;		/* (key) object identifier */
	hammer_tid_t	obj_asof;	/* (key) snapshot transid or 0 */
	u_int32_t	obj_localization; /* (key) pseudo-fs */
} *hammer_inode_info_t;
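
/*
 * Usage sketch (illustrative, not part of the original declarations): an
 * as-of aware inode lookup fills in all three keys and uses the generated
 * RB lookup function.  The rb_inos_root field referenced here belongs to
 * struct hammer_mount, declared later in this file.
 *
 *	struct hammer_inode_info info;
 *	struct hammer_inode *ip;
 *
 *	info.obj_id = obj_id;
 *	info.obj_asof = asof;
 *	info.obj_localization = localization;
 *	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &info);
 */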

typedef enum hammer_transaction_type {
	HAMMER_TRANS_RO,
	HAMMER_TRANS_STD,
	HAMMER_TRANS_FLS
} hammer_transaction_type_t;

/*
 * HAMMER Transaction tracking
 */
struct hammer_transaction {
	hammer_transaction_type_t type;
	struct hammer_mount *hmp;
	struct hammer_volume *rootvol;
};

typedef struct hammer_transaction *hammer_transaction_t;

struct hammer_lock {
	int	refs;		/* active references delay writes */
	int	lockcount;	/* lock count for exclusive/shared access */
	int	exwanted;	/* number of threads waiting for ex lock */
	struct thread *locktd;
};

static __inline int
hammer_islocked(struct hammer_lock *lock)
{
	return(lock->lockcount != 0);
}

static __inline int
hammer_isactive(struct hammer_lock *lock)
{
	return(lock->refs != 0);
}

static __inline int
hammer_islastref(struct hammer_lock *lock)
{
	return(lock->refs == 1);
}

/*
 * Return if we specifically own the lock exclusively.
 */
static __inline int
hammer_lock_excl_owned(struct hammer_lock *lock, thread_t td)
{
	if (lock->lockcount > 0 && lock->locktd == td)
		return(1);
	return(0);
}
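
/*
 * Typical usage sketch: backend code which requires the caller to hold a
 * lock exclusively can assert ownership, e.g.
 *
 *	KKASSERT(hammer_lock_excl_owned(&node->lock, curthread));
 */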

/*
 * Flush state, used by various structures
 */
typedef enum hammer_inode_state {
	HAMMER_FST_IDLE,
	HAMMER_FST_SETUP,
	HAMMER_FST_FLUSH
} hammer_inode_state_t;

TAILQ_HEAD(hammer_record_list, hammer_record);

/*
 * Cache object ids.  A fixed number of objid cache structures are
 * created to reserve object id's for newly created files in multiples
 * of 100,000, localized to a particular directory, and recycled as
 * needed.  This allows parallel create operations in different
 * directories to retain fairly localized object ids which in turn
 * improves reblocking performance and layout.
 */
#define OBJID_CACHE_SIZE	1024
#define OBJID_CACHE_BULK	100000

typedef struct hammer_objid_cache {
	TAILQ_ENTRY(hammer_objid_cache) entry;
	struct hammer_inode	*dip;
	hammer_tid_t		next_tid;
} *hammer_objid_cache_t;
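
/*
 * Allocation sketch (illustrative only): each cache structure covers a
 * contiguous block of OBJID_CACHE_BULK object ids beginning at next_tid,
 * so handing out an id localized to the directory is a simple
 * post-increment until the block is exhausted and the structure is
 * recycled:
 *
 *	hammer_tid_t obj_id = ocp->next_tid++;
 */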

/*
 * Associate an inode with a B-Tree node to cache search start positions
 */
typedef struct hammer_node_cache {
	TAILQ_ENTRY(hammer_node_cache) entry;
	struct hammer_node	*node;
	struct hammer_inode	*ip;
} *hammer_node_cache_t;

TAILQ_HEAD(hammer_node_cache_list, hammer_node_cache);

/*
 * Structure used to represent an inode in-memory.
 *
 * The record and data associated with an inode may be out of sync with
 * the disk (xDIRTY flags), or not even on the disk at all (ONDISK flag
 * not set).
 *
 * An inode may also hold a cache of unsynchronized records, used for
 * database and directories only.  Unsynchronized regular file data is
 * stored in the buffer cache.
 *
 * NOTE: A file which is created and destroyed within the initial
 * synchronization period can wind up not doing any disk I/O at all.
 *
 * Finally, an inode may cache numerous disk-referencing B-Tree cursors.
 */
struct hammer_ino_rb_tree;
struct hammer_inode;
RB_HEAD(hammer_ino_rb_tree, hammer_inode);
RB_PROTOTYPEX(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
	      hammer_ino_rb_compare, hammer_inode_info_t);

struct hammer_rec_rb_tree;
struct hammer_record;
RB_HEAD(hammer_rec_rb_tree, hammer_record);
RB_PROTOTYPEX(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
	      hammer_rec_rb_compare, hammer_btree_leaf_elm_t);

TAILQ_HEAD(hammer_node_list, hammer_node);

struct hammer_inode {
	RB_ENTRY(hammer_inode)	rb_node;
	hammer_inode_state_t	flush_state;
	TAILQ_ENTRY(hammer_inode) flush_entry;
	struct hammer_record_list target_list;	/* target of dependent recs */
	u_int64_t		obj_id;		/* (key) object identifier */
	hammer_tid_t		obj_asof;	/* (key) snapshot or 0 */
	u_int32_t		obj_localization; /* (key) pseudo-fs */
	struct hammer_mount	*hmp;
	hammer_objid_cache_t	objid_cache;
	int			flags;
	int			error;		/* flush error */
	int			cursor_ip_refs;	/* sanity */
	struct lockf		advlock;
	struct hammer_lock	lock;		/* sync copy interlock */
	struct hammer_btree_leaf_elm ino_leaf;	/* in-memory cache */
	struct hammer_inode_data ino_data;	/* in-memory cache */
	struct hammer_rec_rb_tree rec_tree;	/* in-memory cache */
	struct hammer_node_cache cache[2];	/* search initiate cache */

	/*
	 * When a demark is created to synchronize an inode to
	 * disk, certain fields are copied so the front-end VOPs
	 * can continue to run in parallel with the synchronization
	 * occurring in the background.
	 */
	int		sync_flags;		/* to-sync flags cache */
	off_t		sync_trunc_off;		/* to-sync truncation */
	struct hammer_btree_leaf_elm sync_ino_leaf; /* to-sync cache */
	struct hammer_inode_data sync_ino_data;	/* to-sync cache */
};

typedef struct hammer_inode *hammer_inode_t;

#define VTOI(vp)	((struct hammer_inode *)(vp)->v_data)

#define HAMMER_INODE_DDIRTY	0x0001	/* in-memory ino_data is dirty */
					/* (not including atime/mtime) */
#define HAMMER_INODE_RSV_INODES	0x0002	/* hmp->rsv_inodes bumped */
#define HAMMER_INODE_UNUSED0004	0x0004
#define HAMMER_INODE_XDIRTY	0x0008	/* in-memory records */
#define HAMMER_INODE_ONDISK	0x0010	/* inode is on-disk (else not yet) */
#define HAMMER_INODE_FLUSH	0x0020	/* flush on last ref */
#define HAMMER_INODE_DELETED	0x0080	/* inode delete (backend) */
#define HAMMER_INODE_DELONDISK	0x0100	/* delete synchronized to disk */
#define HAMMER_INODE_RO		0x0200	/* read-only (because of as-of) */
#define HAMMER_INODE_VHELD	0x0400	/* vnode held on sync */
#define HAMMER_INODE_DONDISK	0x0800	/* data records may be on disk */
#define HAMMER_INODE_BUFS	0x1000	/* dirty high level bps present */
#define HAMMER_INODE_REFLUSH	0x2000	/* pipelined flush during flush */
#define HAMMER_INODE_RECLAIM	0x4000	/* trying to reclaim */
#define HAMMER_INODE_FLUSHW	0x8000	/* Someone waiting for flush */

#define HAMMER_INODE_TRUNCATED	0x00010000
#define HAMMER_INODE_DELETING	0x00020000 /* inode delete request (frontend)*/
#define HAMMER_INODE_RESIGNAL	0x00040000 /* re-signal on re-flush */
#define HAMMER_INODE_ATIME	0x00100000 /* in-memory atime modified */
#define HAMMER_INODE_MTIME	0x00200000 /* in-memory mtime modified */

#define HAMMER_INODE_MODMASK	(HAMMER_INODE_DDIRTY|			\
				 HAMMER_INODE_XDIRTY|HAMMER_INODE_BUFS|	\
				 HAMMER_INODE_ATIME|HAMMER_INODE_MTIME|	\
				 HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING)

#define HAMMER_INODE_MODMASK_NOXDIRTY \
				(HAMMER_INODE_MODMASK & ~HAMMER_INODE_XDIRTY)
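
/*
 * Usage sketch: the frontend tests MODMASK to decide whether an inode
 * has any state requiring synchronization, e.g.
 *
 *	if (ip->flags & HAMMER_INODE_MODMASK)
 *		hammer_flush_inode(ip, 0);
 *
 * The NOXDIRTY variant makes the same test while ignoring in-memory
 * records.
 */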

#define HAMMER_FLUSH_GROUP_SIZE	64

#define HAMMER_FLUSH_SIGNAL	0x0001
#define HAMMER_FLUSH_RECURSION	0x0002

/*
 * Used by the inode reclaim code to pipeline reclaims and avoid
 * blowing out kernel memory or letting the flusher get too far
 * behind.
 */
struct hammer_reclaim {
	TAILQ_ENTRY(hammer_reclaim) entry;
};

#define HAMMER_RECLAIM_FLUSH	2000
#define HAMMER_RECLAIM_WAIT	4000

/*
 * Structure used to represent an unsynchronized record in-memory.  These
 * records typically represent directory entries.  Only non-historical
 * records are kept in-memory.
 *
 * Records are organized as a per-inode RB-Tree.  If the inode is not
 * on disk then neither are any records and the in-memory record tree
 * represents the entire contents of the inode.  If the inode is on disk
 * then the on-disk B-Tree is scanned in parallel with the in-memory
 * RB-Tree to synthesize the current state of the file.
 *
 * Records are also used to enforce the ordering of directory create/delete
 * operations.  A new inode will not be flushed to disk unless its related
 * directory entry is also being flushed at the same time.  A directory entry
 * will not be removed unless its related inode is also being removed at the
 * same time.
 */
typedef enum hammer_record_type {
	HAMMER_MEM_RECORD_GENERAL,	/* misc record */
	HAMMER_MEM_RECORD_INODE,	/* inode record */
	HAMMER_MEM_RECORD_ADD,		/* positive memory cache record */
	HAMMER_MEM_RECORD_DEL,		/* negative delete-on-disk record */
	HAMMER_MEM_RECORD_DATA		/* bulk-data record w/on-disk ref */
} hammer_record_type_t;

struct hammer_record {
	RB_ENTRY(hammer_record)		rb_node;
	TAILQ_ENTRY(hammer_record)	target_entry;
	hammer_inode_state_t		flush_state;
	hammer_record_type_t		type;
	struct hammer_lock		lock;
	struct hammer_reserve		*resv;
	struct hammer_inode		*ip;
	struct hammer_inode		*target_ip;
	struct hammer_btree_leaf_elm	leaf;
	union hammer_data_ondisk	*data;
	int				flags;
};

typedef struct hammer_record *hammer_record_t;
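
/*
 * Iteration sketch (see hammer_ip_first()/hammer_ip_next() below): the
 * in-memory RB-Tree and the on-disk B-Tree are merged behind a cursor,
 * so callers consume a single ordered stream of records:
 *
 *	error = hammer_ip_first(cursor);
 *	while (error == 0) {
 *		(examine the cursor's current record)
 *		error = hammer_ip_next(cursor);
 *	}
 */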

/*
 * Record flags.  Note that FE can only be set by the frontend if the
 * record has not been interlocked by the backend w/ BE.
 */
#define HAMMER_RECF_ALLOCDATA		0x0001
#define HAMMER_RECF_ONRBTREE		0x0002
#define HAMMER_RECF_DELETED_FE		0x0004	/* deleted (frontend) */
#define HAMMER_RECF_DELETED_BE		0x0008	/* deleted (backend) */
#define HAMMER_RECF_UNUSED0010		0x0010
#define HAMMER_RECF_INTERLOCK_BE	0x0020	/* backend interlock */
#define HAMMER_RECF_WANTED		0x0040	/* wanted by the frontend */
#define HAMMER_RECF_CONVERT_DELETE	0x0100	/* special case */

/*
 * In-memory structures representing on-disk structures.
 */
struct hammer_volume;
struct hammer_buffer;
struct hammer_node;
struct hammer_undo;
struct hammer_reserve;

RB_HEAD(hammer_vol_rb_tree, hammer_volume);
RB_HEAD(hammer_buf_rb_tree, hammer_buffer);
RB_HEAD(hammer_nod_rb_tree, hammer_node);
RB_HEAD(hammer_und_rb_tree, hammer_undo);
RB_HEAD(hammer_res_rb_tree, hammer_reserve);

RB_PROTOTYPE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	      hammer_vol_rb_compare, int32_t);
RB_PROTOTYPE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	      hammer_buf_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_nod_rb_tree, hammer_node, rb_node,
	      hammer_nod_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_und_rb_tree, hammer_undo, rb_node,
	      hammer_und_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_res_rb_tree, hammer_reserve, rb_node,
	      hammer_res_rb_compare, hammer_off_t);

/*
 * IO management - embedded at the head of various in-memory structures
 *
 * VOLUME	- hammer_volume containing meta-data
 * META_BUFFER	- hammer_buffer containing meta-data
 * DATA_BUFFER	- hammer_buffer containing pure-data
 *
 * Dirty volume headers and dirty meta-data buffers are locked until the
 * flusher can sequence them out.  Dirty pure-data buffers can be written.
 * Clean buffers can be passively released.
 */
typedef enum hammer_io_type {
	HAMMER_STRUCTURE_VOLUME,
	HAMMER_STRUCTURE_META_BUFFER,
	HAMMER_STRUCTURE_UNDO_BUFFER,
	HAMMER_STRUCTURE_DATA_BUFFER
} hammer_io_type_t;

union hammer_io_structure;
struct hammer_io;

struct worklist {
	LIST_ENTRY(worklist) node;
};

TAILQ_HEAD(hammer_io_list, hammer_io);
typedef struct hammer_io_list *hammer_io_list_t;

struct hammer_io {
	struct worklist		worklist;
	struct hammer_lock	lock;
	enum hammer_io_type	type;
	struct hammer_mount	*hmp;
	TAILQ_ENTRY(hammer_io)	mod_entry; /* list entry if modified */
	hammer_io_list_t	mod_list;
	struct buf		*bp;
	int64_t			offset;	   /* zone-2 offset */
	int			bytes;	   /* buffer cache buffer size */
	int			loading;   /* loading/unloading interlock */
	int			modify_refs;

	u_int		modified : 1;	/* bp's data was modified */
	u_int		released : 1;	/* bp released (w/ B_LOCKED set) */
	u_int		running : 1;	/* bp write IO in progress */
	u_int		waiting : 1;	/* someone is waiting on us */
	u_int		validated : 1;	/* ondisk has been validated */
	u_int		waitdep : 1;	/* flush waits for dependencies */
	u_int		recovered : 1;	/* has recovery ref */
	u_int		waitmod : 1;	/* waiting for modify_refs */
	u_int		reclaim : 1;	/* reclaim requested */
	u_int		gencrc : 1;	/* crc needs to be generated */
};

typedef struct hammer_io *hammer_io_t;

#define HAMMER_CLUSTER_SIZE	(64 * 1024)
#if HAMMER_CLUSTER_SIZE > MAXBSIZE
#undef  HAMMER_CLUSTER_SIZE
#define HAMMER_CLUSTER_SIZE	MAXBSIZE
#endif
#define HAMMER_CLUSTER_BUFS	(HAMMER_CLUSTER_SIZE / HAMMER_BUFSIZE)
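
/*
 * For example, with the 16K HAMMER_BUFSIZE from hammer_disk.h and the
 * default 64K cluster size, HAMMER_CLUSTER_BUFS evaluates to 4 buffers
 * per clustered operation.
 */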

/*
 * In-memory volume representing on-disk buffer
 */
struct hammer_volume {
	struct hammer_io io;
	RB_ENTRY(hammer_volume) rb_node;
	struct hammer_volume_ondisk *ondisk;
	int32_t	vol_no;
	int64_t nblocks;	/* note: special calculation for statfs */
	int64_t buffer_base;	/* base offset of buffer 0 */
	hammer_off_t maxbuf_off; /* Maximum buffer offset (zone-2) */
	hammer_off_t maxraw_off; /* Maximum raw offset for device */
};

typedef struct hammer_volume *hammer_volume_t;

/*
 * In-memory buffer (other than volume, super-cluster, or cluster),
 * representing an on-disk buffer.
 */
struct hammer_buffer {
	struct hammer_io io;
	RB_ENTRY(hammer_buffer) rb_node;
	struct hammer_volume *volume;
	hammer_off_t zoneX_offset;
	hammer_off_t zone2_offset;
	struct hammer_reserve *resv;
	struct hammer_node_list clist;
};

typedef struct hammer_buffer *hammer_buffer_t;

/*
 * In-memory B-Tree node, representing an on-disk B-Tree node.
 *
 * This is a hang-on structure which is backed by a hammer_buffer,
 * indexed by a hammer_cluster, and used for fine-grained locking of
 * B-Tree nodes in order to properly control lock ordering.  A hammer_buffer
 * can contain multiple nodes representing wildly disassociated portions
 * of the B-Tree so locking cannot be done on a buffer-by-buffer basis.
 *
 * This structure uses a cluster-relative index to reduce the number
 * of layers required to access it, and also because all on-disk B-Tree
 * references are cluster-relative offsets.
 */
struct hammer_node {
	struct hammer_lock	lock;		/* node-by-node lock */
	TAILQ_ENTRY(hammer_node) entry;		/* per-buffer linkage */
	RB_ENTRY(hammer_node)	rb_node;	/* per-cluster linkage */
	hammer_off_t		node_offset;	/* full offset spec */
	struct hammer_mount	*hmp;
	struct hammer_buffer	*buffer;	/* backing buffer */
	hammer_node_ondisk_t	ondisk;		/* ptr to on-disk structure */
	struct hammer_node_cache_list cache_list; /* passive caches */
	int			flags;
	int			loading;	/* load interlock */
};

#define HAMMER_NODE_DELETED	0x0001
#define HAMMER_NODE_FLUSH	0x0002
#define HAMMER_NODE_CRCGOOD	0x0004
#define HAMMER_NODE_NEEDSCRC	0x0008

typedef struct hammer_node	*hammer_node_t;

/*
 * List of locked nodes.
 */
struct hammer_node_locklist {
	struct hammer_node_locklist *next;
	hammer_node_t	node;
};

typedef struct hammer_node_locklist *hammer_node_locklist_t;

/*
 * Common I/O management structure - embedded in in-memory structures
 * which are backed by filesystem buffers.
 */
union hammer_io_structure {
	struct hammer_io	io;
	struct hammer_volume	volume;
	struct hammer_buffer	buffer;
};

typedef union hammer_io_structure *hammer_io_structure_t;
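
/*
 * Recovery sketch (illustrative): because the hammer_io (and its
 * worklist) sits at offset 0 of each union member, buffer-cache
 * callbacks can recover the owning structure from a struct buf's
 * dependency list with a cast and then switch on io.type, e.g.
 *
 *	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);
 *
 *	if (iou->io.type == HAMMER_STRUCTURE_VOLUME)
 *		(treat iou as &iou->volume)
 */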

/*
 * The reserve structure prevents the blockmap from allocating
 * out of a reserved bigblock.  Such reservations are used by
 * the direct-write mechanism.
 *
 * The structure is also used to hold off on reallocations of
 * big blocks from the freemap until flush dependencies have
 * been dealt with.
 */
struct hammer_reserve {
	RB_ENTRY(hammer_reserve) rb_node;
	TAILQ_ENTRY(hammer_reserve) delay_entry;
	int		flags;
	hammer_off_t	zone_offset;
};

typedef struct hammer_reserve *hammer_reserve_t;

#define HAMMER_RESF_ONDELAY	0x0001

#include "hammer_cursor.h"

/*
 * The undo structure tracks recent undos to avoid laying down duplicate
 * undos within a flush group, saving us a significant amount of overhead.
 *
 * This is strictly a heuristic.
 */
#define HAMMER_MAX_UNDOS	1024
#define HAMMER_MAX_FLUSHERS	4

struct hammer_undo {
	RB_ENTRY(hammer_undo)	rb_node;
	TAILQ_ENTRY(hammer_undo) lru_entry;
	hammer_off_t		offset;
	int			bytes;
};

typedef struct hammer_undo *hammer_undo_t;
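
/*
 * Heuristic sketch (illustrative): hammer_enter_undo_history(), declared
 * below, hashes the offset into the fixed undos[] array and maintains an
 * LRU.  A hit within the same flush group means the undo was already
 * generated and the caller can skip laying down a duplicate:
 *
 *	if (hammer_enter_undo_history(hmp, zone_off, len) == EALREADY)
 *		return(0);
 */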

struct hammer_flusher_info;

struct hammer_flusher {
	int		signal;		/* flusher thread sequencer */
	int		act;		/* currently active flush group */
	int		done;		/* set to act when complete */
	int		next;		/* next flush group */
	int		group_lock;	/* lock sequencing of the next flush */
	int		exiting;	/* request master exit */
	int		count;		/* number of slave flushers */
	int		running;	/* number of slave flushers running */
	thread_t	td;		/* master flusher thread */
	hammer_tid_t	tid;		/* last flushed transaction id */
	int		finalize_want;	/* serialize finalization */
	struct hammer_lock finalize_lock; /* serialize finalization */
	struct hammer_transaction trans; /* shared transaction */
	struct hammer_flusher_info *info[HAMMER_MAX_FLUSHERS];
};

/*
 * Internal hammer mount data structure
 */
struct hammer_mount {
	struct mount *mp;
	/*struct vnode *rootvp;*/
	struct hammer_ino_rb_tree rb_inos_root;
	struct hammer_vol_rb_tree rb_vols_root;
	struct hammer_nod_rb_tree rb_nods_root;
	struct hammer_und_rb_tree rb_undo_root;
	struct hammer_res_rb_tree rb_resv_root;
	struct hammer_buf_rb_tree rb_bufs_root;
	struct hammer_volume *rootvol;
	struct hammer_base_elm root_btree_beg;
	struct hammer_base_elm root_btree_end;
	int	rsv_inodes;	/* reserved space due to dirty inodes */
	int	rsv_databufs;	/* reserved space due to dirty buffers */
	int	rsv_databytes;	/* reserved space due to record data */
	int	rsv_recs;	/* reserved space due to dirty records */
	int	count_newrecords;
	int	inode_reclaims;	/* inodes pending reclaim by flusher */
	int	count_inodes;	/* total number of inodes */
	int	count_iqueued;	/* inodes queued to flusher */
	struct hammer_flusher flusher;
	u_int	check_interrupt;
	struct hammer_io_list volu_list;	/* dirty volume buffers */
	struct hammer_io_list undo_list;	/* dirty undo buffers */
	struct hammer_io_list data_list;	/* dirty data buffers */
	struct hammer_io_list alt_data_list;	/* dirty data buffers */
	struct hammer_io_list meta_list;	/* dirty meta bufs    */
	struct hammer_io_list lose_list;	/* loose buffers      */
	int	locked_dirty_count;		/* meta/volu count    */
	int	io_running_count;
	int	objid_cache_count;
	hammer_tid_t asof;			/* snapshot mount */
	hammer_off_t next_tid;
	int64_t copy_stat_freebigblocks;	/* number of free bigblocks */
	u_int32_t namekey_iterator;
	struct netexport export;
	struct hammer_lock sync_lock;
	struct hammer_lock free_lock;
	struct hammer_lock undo_lock;
	struct hammer_lock blkmap_lock;
	struct hammer_blockmap  blockmap[HAMMER_MAX_ZONES];
	struct hammer_undo	undos[HAMMER_MAX_UNDOS];
	TAILQ_HEAD(, hammer_undo)  undo_lru_list;
	TAILQ_HEAD(, hammer_inode) flush_list;
	TAILQ_HEAD(, hammer_reserve) delay_list;
	TAILQ_HEAD(, hammer_objid_cache) objid_cache_list;
	TAILQ_HEAD(, hammer_reclaim) reclaim_list;
};

typedef struct hammer_mount *hammer_mount_t;

#define HAMMER_MOUNT_UNUSED0001	0x0001

struct hammer_sync_info {
	int error;
	int waitfor;
};

#endif

#if defined(_KERNEL)

extern struct vop_ops hammer_vnode_vops;
extern struct vop_ops hammer_spec_vops;
extern struct vop_ops hammer_fifo_vops;
extern struct bio_ops hammer_bioops;

extern int hammer_debug_io;
extern int hammer_debug_general;
extern int hammer_debug_debug;
extern int hammer_debug_inode;
extern int hammer_debug_locks;
extern int hammer_debug_btree;
extern int hammer_debug_tid;
extern int hammer_debug_recover;
extern int hammer_debug_recover_faults;
extern int hammer_debug_cluster_enable;
extern int hammer_count_inodes;
extern int hammer_count_iqueued;
extern int hammer_count_reclaiming;
extern int hammer_count_records;
extern int hammer_count_record_datas;
extern int hammer_count_volumes;
extern int hammer_count_buffers;
extern int hammer_count_nodes;
extern int64_t hammer_stats_btree_lookups;
extern int64_t hammer_stats_btree_searches;
extern int64_t hammer_stats_btree_inserts;
extern int64_t hammer_stats_btree_deletes;
extern int64_t hammer_stats_btree_elements;
extern int64_t hammer_stats_btree_splits;
extern int64_t hammer_stats_btree_iterations;
extern int64_t hammer_stats_record_iterations;
extern int hammer_count_dirtybufs;
extern int hammer_count_refedbufs;
extern int hammer_count_reservations;
extern int hammer_count_io_running_read;
extern int hammer_count_io_running_write;
extern int hammer_count_io_locked;
extern int hammer_limit_dirtybufs;
extern int hammer_limit_iqueued;
extern int hammer_limit_recs;
extern int hammer_bio_count;
extern int hammer_verify_zone;
extern int hammer_write_mode;
extern int64_t hammer_contention_count;

int	hammer_vop_inactive(struct vop_inactive_args *);
int	hammer_vop_reclaim(struct vop_reclaim_args *);
int	hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp);
struct hammer_inode *hammer_get_inode(hammer_transaction_t trans,
			hammer_inode_t dip, u_int64_t obj_id,
			hammer_tid_t asof, u_int32_t localization,
			int flags, int *errorp);
void	hammer_put_inode(struct hammer_inode *ip);
void	hammer_put_inode_ref(struct hammer_inode *ip);
void	hammer_inode_waitreclaims(hammer_mount_t hmp);

int	hammer_unload_volume(hammer_volume_t volume, void *data __unused);
int	hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused);

int	hammer_unload_buffer(hammer_buffer_t buffer, void *data __unused);
int	hammer_install_volume(hammer_mount_t hmp, const char *volname);

int	hammer_ip_lookup(hammer_cursor_t cursor);
int	hammer_ip_first(hammer_cursor_t cursor);
int	hammer_ip_next(hammer_cursor_t cursor);
int	hammer_ip_resolve_data(hammer_cursor_t cursor);
int	hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
			hammer_tid_t tid);
int	hammer_delete_at_cursor(hammer_cursor_t cursor, int64_t *stat_bytes);
int	hammer_ip_check_directory_empty(hammer_transaction_t trans,
			hammer_inode_t ip);
int	hammer_sync_hmp(hammer_mount_t hmp, int waitfor);
int	hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor);

hammer_record_t
	hammer_alloc_mem_record(hammer_inode_t ip, int data_len);
void	hammer_flush_record_done(hammer_record_t record, int error);
void	hammer_wait_mem_record_ident(hammer_record_t record, const char *ident);
void	hammer_rel_mem_record(hammer_record_t record);

int	hammer_cursor_up(hammer_cursor_t cursor);
int	hammer_cursor_up_locked(hammer_cursor_t cursor);
int	hammer_cursor_down(hammer_cursor_t cursor);
int	hammer_cursor_upgrade(hammer_cursor_t cursor);
int	hammer_cursor_upgrade_node(hammer_cursor_t cursor);
void	hammer_cursor_downgrade(hammer_cursor_t cursor);
int	hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node,
			int index);

void	hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident);
int	hammer_lock_ex_try(struct hammer_lock *lock);
void	hammer_lock_sh(struct hammer_lock *lock);
void	hammer_lock_sh_lowpri(struct hammer_lock *lock);
int	hammer_lock_sh_try(struct hammer_lock *lock);
int	hammer_lock_upgrade(struct hammer_lock *lock);
void	hammer_lock_downgrade(struct hammer_lock *lock);
void	hammer_unlock(struct hammer_lock *lock);
void	hammer_ref(struct hammer_lock *lock);
void	hammer_unref(struct hammer_lock *lock);

void	hammer_sync_lock_ex(hammer_transaction_t trans);
void	hammer_sync_lock_sh(hammer_transaction_t trans);
int	hammer_sync_lock_sh_try(hammer_transaction_t trans);
void	hammer_sync_unlock(hammer_transaction_t trans);

u_int32_t hammer_to_unix_xid(uuid_t *uuid);
void hammer_guid_to_uuid(uuid_t *uuid, u_int32_t guid);
void	hammer_time_to_timespec(u_int64_t xtime, struct timespec *ts);
u_int64_t hammer_timespec_to_time(struct timespec *ts);
hammer_tid_t hammer_now_tid(void);
hammer_tid_t hammer_str_to_tid(const char *str);
hammer_tid_t hammer_alloc_objid(hammer_transaction_t trans, hammer_inode_t dip);
void hammer_clear_objid(hammer_inode_t dip);
void hammer_destroy_objid_cache(hammer_mount_t hmp);

int	hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset,
			int bytes);
void	hammer_clear_undo_history(hammer_mount_t hmp);
enum vtype hammer_get_vnode_type(u_int8_t obj_type);
int	hammer_get_dtype(u_int8_t obj_type);
u_int8_t hammer_get_obj_type(enum vtype vtype);
int64_t hammer_directory_namekey(void *name, int len);
int	hammer_nohistory(hammer_inode_t ip);

int	hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor,
			hammer_node_cache_t cache, hammer_inode_t ip);
int	hammer_reinit_cursor(hammer_cursor_t cursor);
void	hammer_normalize_cursor(hammer_cursor_t cursor);
void	hammer_done_cursor(hammer_cursor_t cursor);
void	hammer_mem_done(hammer_cursor_t cursor);

int	hammer_btree_lookup(hammer_cursor_t cursor);
int	hammer_btree_first(hammer_cursor_t cursor);
int	hammer_btree_last(hammer_cursor_t cursor);
int	hammer_btree_extract(hammer_cursor_t cursor, int flags);
int	hammer_btree_iterate(hammer_cursor_t cursor);
int	hammer_btree_iterate_reverse(hammer_cursor_t cursor);
int	hammer_btree_insert(hammer_cursor_t cursor,
			hammer_btree_leaf_elm_t elm);
int	hammer_btree_delete(hammer_cursor_t cursor);
int	hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2);
int	hammer_btree_chkts(hammer_tid_t ts, hammer_base_elm_t key);
int	hammer_btree_correct_rhb(hammer_cursor_t cursor, hammer_tid_t tid);
int	hammer_btree_correct_lhb(hammer_cursor_t cursor, hammer_tid_t tid);

int	btree_set_parent(hammer_transaction_t trans, hammer_node_t node,
			hammer_btree_elm_t elm);
int	hammer_btree_lock_children(hammer_cursor_t cursor,
			struct hammer_node_locklist **locklistp);
void	hammer_btree_unlock_children(struct hammer_node_locklist **locklistp);
int	hammer_btree_search_node(hammer_base_elm_t elm, hammer_node_ondisk_t node);

void	hammer_print_btree_node(hammer_node_ondisk_t ondisk);
void	hammer_print_btree_elm(hammer_btree_elm_t elm, u_int8_t type, int i);

void	*hammer_bread(struct hammer_mount *hmp, hammer_off_t off,
			int *errorp, struct hammer_buffer **bufferp);
void	*hammer_bnew(struct hammer_mount *hmp, hammer_off_t off,
			int *errorp, struct hammer_buffer **bufferp);
void	*hammer_bread_ext(struct hammer_mount *hmp, hammer_off_t off, int bytes,
			int *errorp, struct hammer_buffer **bufferp);
void	*hammer_bnew_ext(struct hammer_mount *hmp, hammer_off_t off, int bytes,
			int *errorp, struct hammer_buffer **bufferp);

hammer_volume_t hammer_get_root_volume(hammer_mount_t hmp, int *errorp);

hammer_volume_t	hammer_get_volume(hammer_mount_t hmp,
			int32_t vol_no, int *errorp);
hammer_buffer_t	hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
			int bytes, int isnew, int *errorp);
void		hammer_del_buffers(hammer_mount_t hmp, hammer_off_t base_offset,
			hammer_off_t zone2_offset, int bytes);

int		hammer_ref_volume(hammer_volume_t volume);
int		hammer_ref_buffer(hammer_buffer_t buffer);
void		hammer_flush_buffer_nodes(hammer_buffer_t buffer);

void		hammer_rel_volume(hammer_volume_t volume, int flush);
void		hammer_rel_buffer(hammer_buffer_t buffer, int flush);

int		hammer_vfs_export(struct mount *mp, int op,
			const struct export_args *export);
hammer_node_t	hammer_get_node(hammer_mount_t hmp, hammer_off_t node_offset,
			int isnew, int *errorp);
void		hammer_ref_node(hammer_node_t node);
hammer_node_t	hammer_ref_node_safe(struct hammer_mount *hmp,
			hammer_node_cache_t cache, int *errorp);
void		hammer_rel_node(hammer_node_t node);
void		hammer_delete_node(hammer_transaction_t trans,
			hammer_node_t node);
void		hammer_cache_node(hammer_node_cache_t cache,
			hammer_node_t node);
void		hammer_uncache_node(hammer_node_cache_t cache);
void		hammer_flush_node(hammer_node_t node);

void hammer_dup_buffer(struct hammer_buffer **bufferp,
			struct hammer_buffer *buffer);
hammer_node_t hammer_alloc_btree(hammer_transaction_t trans, int *errorp);
void *hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
			u_int16_t rec_type, hammer_off_t *data_offsetp,
			struct hammer_buffer **data_bufferp, int *errorp);

int hammer_generate_undo(hammer_transaction_t trans, hammer_io_t io,
			hammer_off_t zone1_offset, void *base, int len);

void hammer_put_volume(struct hammer_volume *volume, int flush);
void hammer_put_buffer(struct hammer_buffer *buffer, int flush);

hammer_off_t hammer_freemap_alloc(hammer_transaction_t trans,
			hammer_off_t owner, int *errorp);
void hammer_freemap_free(hammer_transaction_t trans, hammer_off_t phys_offset,
			hammer_off_t owner, int *errorp);
int hammer_checkspace(hammer_mount_t hmp);
hammer_off_t hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
			int bytes, int *errorp);
hammer_reserve_t hammer_blockmap_reserve(hammer_mount_t hmp, int zone,
			int bytes, hammer_off_t *zone_offp, int *errorp);
void hammer_blockmap_reserve_complete(hammer_mount_t hmp,
			hammer_reserve_t resv);
void hammer_reserve_setdelay(hammer_mount_t hmp, hammer_reserve_t resv,
			hammer_off_t zone2_offset);
void hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv);
void hammer_blockmap_free(hammer_transaction_t trans,
			hammer_off_t bmap_off, int bytes);
void hammer_blockmap_finalize(hammer_transaction_t trans,
			hammer_off_t bmap_off, int bytes);
int hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t bmap_off,
			int *curp, int *errorp);
hammer_off_t hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t bmap_off,
			int *errorp);
hammer_off_t hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t bmap_off,
			int *errorp);
int64_t hammer_undo_used(hammer_mount_t hmp);
int64_t hammer_undo_space(hammer_mount_t hmp);
int64_t hammer_undo_max(hammer_mount_t hmp);

void hammer_start_transaction(struct hammer_transaction *trans,
			      struct hammer_mount *hmp);
void hammer_simple_transaction(struct hammer_transaction *trans,
			      struct hammer_mount *hmp);
void hammer_start_transaction_fls(struct hammer_transaction *trans,
			          struct hammer_mount *hmp);
void hammer_done_transaction(struct hammer_transaction *trans);

void hammer_modify_inode(hammer_inode_t ip, int flags);
void hammer_flush_inode(hammer_inode_t ip, int flags);
void hammer_flush_inode_done(hammer_inode_t ip);
void hammer_wait_inode(hammer_inode_t ip);

int  hammer_create_inode(struct hammer_transaction *trans, struct vattr *vap,
			struct ucred *cred, struct hammer_inode *dip,
			struct hammer_inode **ipp);
void hammer_rel_inode(hammer_inode_t ip, int flush);
int hammer_reload_inode(hammer_inode_t ip, void *arg __unused);
int hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2);

int hammer_sync_inode(hammer_inode_t ip);
void hammer_test_inode(hammer_inode_t ip);
void hammer_inode_unloadable_check(hammer_inode_t ip, int getvp);

int  hammer_ip_add_directory(struct hammer_transaction *trans,
			hammer_inode_t dip, struct namecache *ncp,
			hammer_inode_t nip);
int  hammer_ip_del_directory(struct hammer_transaction *trans,
			hammer_cursor_t cursor, hammer_inode_t dip,
			hammer_inode_t ip);
hammer_record_t hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset,
			void *data, int bytes, int *errorp);
int  hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size);
int  hammer_ip_add_record(struct hammer_transaction *trans,
			hammer_record_t record);
int  hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
			int64_t ran_beg, int64_t ran_end, int truncating);
int  hammer_ip_delete_range_all(hammer_cursor_t cursor, hammer_inode_t ip,
			int *countp);
int  hammer_ip_sync_data(hammer_cursor_t cursor, hammer_inode_t ip,
			int64_t offset, void *data, int bytes);
int  hammer_ip_sync_record(hammer_transaction_t trans, hammer_record_t rec);
int  hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t rec);

int hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
			struct ucred *cred);

void hammer_io_init(hammer_io_t io, hammer_mount_t hmp,
			enum hammer_io_type type);
int hammer_io_read(struct vnode *devvp, struct hammer_io *io,
			hammer_off_t limit);
int hammer_io_new(struct vnode *devvp, struct hammer_io *io);
void hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset);
void hammer_io_release(struct hammer_io *io, int flush);
void hammer_io_flush(struct hammer_io *io);
void hammer_io_waitdep(struct hammer_io *io);
void hammer_io_wait_all(hammer_mount_t hmp, const char *ident);
int hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio);
int hammer_io_direct_write(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf,
			struct bio *bio);
void hammer_io_write_interlock(hammer_io_t io);
void hammer_io_done_interlock(hammer_io_t io);
void hammer_io_clear_modify(struct hammer_io *io, int inval);
void hammer_io_clear_modlist(struct hammer_io *io);
void hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
			void *base, int len);
void hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
			void *base, int len);
void hammer_modify_volume_done(hammer_volume_t volume);
void hammer_modify_buffer_done(hammer_buffer_t buffer);

int hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_reblock *reblock);
int hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_prune *prune);

int hammer_signal_check(hammer_mount_t hmp);

void hammer_flusher_create(hammer_mount_t hmp);
void hammer_flusher_destroy(hammer_mount_t hmp);
void hammer_flusher_sync(hammer_mount_t hmp);
void hammer_flusher_async(hammer_mount_t hmp);

int hammer_recover(hammer_mount_t hmp, hammer_volume_t rootvol);
void hammer_recover_flush_buffers(hammer_mount_t hmp,
			hammer_volume_t root_volume);

void hammer_crc_set_blockmap(hammer_blockmap_t blockmap);
void hammer_crc_set_volume(hammer_volume_ondisk_t ondisk);
void hammer_crc_set_leaf(void *data, hammer_btree_leaf_elm_t leaf);

int hammer_crc_test_blockmap(hammer_blockmap_t blockmap);
int hammer_crc_test_volume(hammer_volume_ondisk_t ondisk);
int hammer_crc_test_btree(hammer_node_ondisk_t ondisk);
int hammer_crc_test_leaf(void *data, hammer_btree_leaf_elm_t leaf);
void hkprintf(const char *ctl, ...);

int hammer_blocksize(int64_t file_offset);
int64_t hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2);

static __inline void
hammer_wait_mem_record(hammer_record_t record)
{
	hammer_wait_mem_record_ident(record, "hmmwai");
}

static __inline void
hammer_lock_ex(struct hammer_lock *lock)
{
	hammer_lock_ex_ident(lock, "hmrlck");
}

/*
 * Indicate that a B-Tree node is being modified.
 */
static __inline void
hammer_modify_node_noundo(hammer_transaction_t trans, hammer_node_t node)
{
	hammer_modify_buffer(trans, node->buffer, NULL, 0);
}

static __inline void
hammer_modify_node_all(hammer_transaction_t trans, struct hammer_node *node)
{
	hammer_modify_buffer(trans, node->buffer,
			     node->ondisk, sizeof(*node->ondisk));
}

static __inline void
hammer_modify_node(hammer_transaction_t trans, hammer_node_t node,
		   void *base, int len)
{
	hammer_crc_t *crcptr;

	KKASSERT((char *)base >= (char *)node->ondisk &&
		 (char *)base + len <=
		    (char *)node->ondisk + sizeof(*node->ondisk));
	hammer_modify_buffer(trans, node->buffer, base, len);
	crcptr = &node->ondisk->crc;
	hammer_modify_buffer(trans, node->buffer, crcptr, sizeof(hammer_crc_t));
	--node->buffer->io.modify_refs;	/* only want one ref */
}

/*
 * Indicate that the specified modifications have been completed.
 *
 * Do not try to generate the crc here, it's very expensive to do and a
 * sequence of insertions or deletions can result in many calls to this
 * function on the same node.
 */
static __inline void
hammer_modify_node_done(hammer_node_t node)
{
	node->flags |= HAMMER_NODE_CRCGOOD;
	if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0) {
		node->flags |= HAMMER_NODE_NEEDSCRC;
		node->buffer->io.gencrc = 1;
		hammer_ref_node(node);
	}
	hammer_modify_buffer_done(node->buffer);
}

#define hammer_modify_volume_field(trans, vol, field)		\
	hammer_modify_volume(trans, vol, &(vol)->ondisk->field,	\
			     sizeof((vol)->ondisk->field))

#define hammer_modify_node_field(trans, node, field)		\
	hammer_modify_node(trans, node, &(node)->ondisk->field,	\
			   sizeof((node)->ondisk->field))
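
/*
 * Usage sketch for the field macros: modify a single on-disk field with
 * proper UNDO coverage by bracketing the change, e.g. for a volume field
 * from hammer_disk.h:
 *
 *	hammer_modify_volume_field(trans, volume, vol0_stat_bigblocks);
 *	--volume->ondisk->vol0_stat_bigblocks;
 *	hammer_modify_volume_done(volume);
 */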