/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer.h,v 1.130 2008/11/13 02:18:43 dillon Exp $
 */
/*
 * This header file contains structures used internally by the HAMMERFS
 * implementation.  See hammer_disk.h for on-disk structures.
 */
#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/tree.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mountctl.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/stat.h>
#include <sys/globaldata.h>
#include <sys/lockf.h>
#include <sys/buf.h>
#include <sys/queue.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <vm/vm_extern.h>

#include <sys/buf2.h>
#include <sys/signal2.h>
#include <sys/mplock2.h>

#include "hammer_disk.h"
#include "hammer_mount.h"
#include "hammer_ioctl.h"
#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

MALLOC_DECLARE(M_HAMMER);

/*
 * Kernel trace
 */
#if !defined(KTR_HAMMER)
#define KTR_HAMMER	KTR_ALL
#endif
KTR_INFO_MASTER_EXTERN(hammer);
/*
 * Key structure used for custom RB tree inode lookups.  This prototypes
 * the function hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).
 */
typedef struct hammer_inode_info {
	int64_t		obj_id;		/* (key) object identifier */
	hammer_tid_t	obj_asof;	/* (key) snapshot transid or 0 */
	u_int32_t	obj_localization; /* (key) pseudo-fs */
	union {
		struct hammer_btree_leaf_elm *leaf;
	} u;
} *hammer_inode_info_t;
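/*
 * Illustrative sketch (not part of the original interface; hmp, obj_id,
 * asof, localization and ip are hypothetical locals): a lookup fills in
 * the three key fields and calls the generated lookup function named
 * above:
 *
 *	struct hammer_inode_info iinfo;
 *
 *	iinfo.obj_id = obj_id;
 *	iinfo.obj_asof = asof;		(snapshot transid, or 0)
 *	iinfo.obj_localization = localization;
 *	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
 */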
typedef enum hammer_transaction_type {
	HAMMER_TRANS_RO,
	HAMMER_TRANS_STD,
	HAMMER_TRANS_FLS
} hammer_transaction_type_t;
/*
 * HAMMER Transaction tracking
 */
struct hammer_transaction {
	hammer_transaction_type_t type;
	struct hammer_mount *hmp;
	hammer_tid_t	tid;
	u_int64_t	time;
	u_int32_t	time32;
	int		sync_lock_refs;
	int		flags;
	struct hammer_volume *rootvol;
};

typedef struct hammer_transaction *hammer_transaction_t;

#define HAMMER_TRANSF_NEWINODE	0x0001
#define HAMMER_TRANSF_DIDIO	0x0002
#define HAMMER_TRANSF_CRCDOM	0x0004	/* EDOM on CRC error, less critical */
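/*
 * Illustrative sketch of the transaction life cycle (hmp is a
 * hypothetical hammer_mount_t; the functions used here are declared
 * later in this header):
 *
 *	struct hammer_transaction trans;
 *
 *	hammer_start_transaction(&trans, hmp);
 *	... perform modifications under the transaction ...
 *	hammer_done_transaction(&trans);
 */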
struct hammer_lock {
	int		refs;		/* active references delay writes */
	volatile u_int	lockval;	/* lock count and control bits */
	struct thread	*owner;		/* owner if exclusively held */
};

#define HAMMER_LOCKF_EXCLUSIVE	0x40000000
#define HAMMER_LOCKF_WANTED	0x80000000
static __inline int
hammer_notlocked(struct hammer_lock *lock)
{
	return(lock->lockval == 0);
}

static __inline int
hammer_islocked(struct hammer_lock *lock)
{
	return(lock->lockval != 0);
}

static __inline int
hammer_isactive(struct hammer_lock *lock)
{
	return(lock->refs != 0);
}

static __inline int
hammer_islastref(struct hammer_lock *lock)
{
	return(lock->refs == 1);
}

/*
 * Return if we specifically own the lock exclusively.
 */
static __inline int
hammer_lock_excl_owned(struct hammer_lock *lock, thread_t td)
{
	if ((lock->lockval & HAMMER_LOCKF_EXCLUSIVE) &&
	    lock->owner == td) {
		return(1);
	}
	return(0);
}
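/*
 * Illustrative sketch: the predicates above are typically used in
 * assertions rather than flow control (node is a hypothetical
 * hammer_node_t):
 *
 *	KKASSERT(hammer_islocked(&node->lock));
 *	KKASSERT(hammer_lock_excl_owned(&node->lock, curthread));
 */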
/*
 * Flush state, used by various structures
 */
typedef enum hammer_inode_state {
	HAMMER_FST_IDLE,
	HAMMER_FST_SETUP,
	HAMMER_FST_FLUSH
} hammer_inode_state_t;
TAILQ_HEAD(hammer_record_list, hammer_record);

/*
 * Pseudo-filesystem extended data tracking
 */
struct hammer_pfs_rb_tree;
struct hammer_pseudofs_inmem;
RB_HEAD(hammer_pfs_rb_tree, hammer_pseudofs_inmem);
RB_PROTOTYPE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
	      hammer_pfs_rb_compare, u_int32_t);
struct hammer_pseudofs_inmem {
	RB_ENTRY(hammer_pseudofs_inmem)	rb_node;
	struct hammer_lock	lock;
	u_int32_t		localization;
	hammer_tid_t		create_tid;
	int			flags;
	udev_t			fsid_udev;
	struct hammer_pseudofs_data pfsd;
};

typedef struct hammer_pseudofs_inmem *hammer_pseudofs_inmem_t;

#define HAMMER_PFSM_DELETED	0x0001
/*
 * Cache object ids.  A fixed number of objid cache structures are
 * created to reserve object id's for newly created files in multiples
 * of 100,000, localized to a particular directory, and recycled as
 * needed.  This allows parallel create operations in different
 * directories to retain fairly localized object ids which in turn
 * improves reblocking performance and layout.
 */
#define OBJID_CACHE_SIZE	1024
#define OBJID_CACHE_BULK_BITS	10		/* 10 bits (1024) */
#define OBJID_CACHE_BULK	(32 * 32)	/* two level (1024) */
#define OBJID_CACHE_BULK_MASK	(OBJID_CACHE_BULK - 1)
#define OBJID_CACHE_BULK_MASK64	((u_int64_t)(OBJID_CACHE_BULK - 1))

typedef struct hammer_objid_cache {
	TAILQ_ENTRY(hammer_objid_cache) entry;
	struct hammer_inode		*dip;
	hammer_tid_t			base_tid;
	int				count;
	u_int32_t			bm0;
	u_int32_t			bm1[32];
} *hammer_objid_cache_t;
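/*
 * Illustrative arithmetic for the chunking macros above (obj_id is a
 * hypothetical object id): with OBJID_CACHE_BULK = 1024, an id
 * decomposes as
 *
 *	chunk_base = obj_id & ~OBJID_CACHE_BULK_MASK64;	(chunk start)
 *	slot	   = obj_id &  OBJID_CACHE_BULK_MASK;	(0..1023 in chunk)
 */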
/*
 * Associate an inode with a B-Tree node to cache search start positions
 */
typedef struct hammer_node_cache {
	TAILQ_ENTRY(hammer_node_cache)	entry;
	struct hammer_node		*node;
	struct hammer_inode		*ip;
} *hammer_node_cache_t;

TAILQ_HEAD(hammer_node_cache_list, hammer_node_cache);
/*
 * Structure used to organize flush groups.  Flush groups must be
 * organized into chunks in order to avoid blowing out the UNDO FIFO.
 * Without this a 'sync' could end up flushing 50,000 inodes in a single
 * transaction.
 */
struct hammer_fls_rb_tree;
RB_HEAD(hammer_fls_rb_tree, hammer_inode);
RB_PROTOTYPE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
	      hammer_ino_rb_compare);

struct hammer_flush_group {
	TAILQ_ENTRY(hammer_flush_group)	flush_entry;
	struct hammer_fls_rb_tree	flush_tree;
	int				seqno;		/* our seq no */
	int				total_count;	/* record load */
	int				running;	/* group is running */
	int				closed;
	int				refs;
};

typedef struct hammer_flush_group	*hammer_flush_group_t;

TAILQ_HEAD(hammer_flush_group_list, hammer_flush_group);
/*
 * Structure used to represent an inode in-memory.
 *
 * The record and data associated with an inode may be out of sync with
 * the disk (xDIRTY flags), or not even on the disk at all (ONDISK flag
 * not set).
 *
 * An inode may also hold a cache of unsynchronized records, used for
 * database and directories only.  Unsynchronized regular file data is
 * stored in the buffer cache.
 *
 * NOTE: A file which is created and destroyed within the initial
 * synchronization period can wind up not doing any disk I/O at all.
 *
 * Finally, an inode may cache numerous disk-referencing B-Tree cursors.
 */
struct hammer_ino_rb_tree;
struct hammer_inode;
RB_HEAD(hammer_ino_rb_tree, hammer_inode);
RB_PROTOTYPEX(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
	      hammer_ino_rb_compare, hammer_inode_info_t);

struct hammer_redo_rb_tree;
RB_HEAD(hammer_redo_rb_tree, hammer_inode);
RB_PROTOTYPE2(hammer_redo_rb_tree, hammer_inode, rb_redonode,
	      hammer_redo_rb_compare, hammer_off_t);

struct hammer_rec_rb_tree;
struct hammer_record;
RB_HEAD(hammer_rec_rb_tree, hammer_record);
RB_PROTOTYPEX(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
	      hammer_rec_rb_compare, hammer_btree_leaf_elm_t);

TAILQ_HEAD(hammer_node_list, hammer_node);
struct hammer_inode {
	RB_ENTRY(hammer_inode)	rb_node;
	hammer_inode_state_t	flush_state;
	hammer_flush_group_t	flush_group;
	RB_ENTRY(hammer_inode)	rb_flsnode;	/* when on flush list */
	RB_ENTRY(hammer_inode)	rb_redonode;	/* when INODE_RDIRTY is set */
	struct hammer_record_list target_list;	/* target of dependent recs */
	int64_t			obj_id;		/* (key) object identifier */
	hammer_tid_t		obj_asof;	/* (key) snapshot or 0 */
	u_int32_t		obj_localization; /* (key) pseudo-fs */
	struct hammer_mount	*hmp;
	hammer_objid_cache_t	objid_cache;
	int			flags;
	int			error;		/* flush error */
	int			cursor_ip_refs;	/* sanity */
	int			rsv_recs;
	struct vnode		*vp;
	hammer_pseudofs_inmem_t	pfsm;
	struct lockf		advlock;
	struct hammer_lock	lock;		/* sync copy interlock */
	off_t			trunc_off;
	struct hammer_btree_leaf_elm ino_leaf;	/* in-memory cache */
	struct hammer_inode_data ino_data;	/* in-memory cache */
	struct hammer_rec_rb_tree rec_tree;	/* in-memory cache */
	int			rec_generation;
	struct hammer_node_cache cache[4];	/* search initiate cache */

	/*
	 * When a demark is created to synchronize an inode to
	 * disk, certain fields are copied so the front-end VOPs
	 * can continue to run in parallel with the synchronization
	 * occurring in the background.
	 */
	int		sync_flags;		/* to-sync flags cache */
	off_t		sync_trunc_off;		/* to-sync truncation */
	off_t		save_trunc_off;		/* write optimization */
	struct hammer_btree_leaf_elm sync_ino_leaf; /* to-sync cache */
	struct hammer_inode_data sync_ino_data;	/* to-sync cache */
	size_t		redo_count;

	/*
	 * Track the earliest offset in the UNDO/REDO FIFO containing
	 * REDO records.  This is staged to the backend during flush
	 * sequences.  While the inode is staged redo_fifo_next is used
	 * to track the earliest offset for rotation into redo_fifo_start
	 * on completion of the flush.
	 */
	hammer_off_t	redo_fifo_start;
	hammer_off_t	redo_fifo_next;
};

typedef struct hammer_inode *hammer_inode_t;
#define VTOI(vp)	((struct hammer_inode *)(vp)->v_data)
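/*
 * Illustrative sketch: a VOP handler typically recovers the HAMMER
 * inode from its vnode via VTOI (ap is a hypothetical VOP argument
 * structure):
 *
 *	hammer_inode_t ip = VTOI(ap->a_vp);
 */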
/*
 * NOTE: DDIRTY does not include atime or mtime and does not include
 *	 write-append size changes.  SDIRTY handles write-append size
 *	 changes.
 *
 *	 REDO indicates that REDO logging is active, creating a definitive
 *	 stream of REDO records in the UNDO/REDO log for writes and
 *	 truncations, including boundary records when/if REDO is turned off.
 *	 REDO is typically enabled by fsync() and turned off if excessive
 *	 writes without an fsync() occur.
 *
 *	 RDIRTY indicates that REDO records were laid down in the UNDO/REDO
 *	 FIFO (even if REDO is turned off some might still be active) and
 *	 are still being tracked for this inode.  See hammer_redo.c.
 */
					/* (not including atime/mtime) */
#define HAMMER_INODE_DDIRTY	0x0001	/* in-memory ino_data is dirty */
#define HAMMER_INODE_RSV_INODES	0x0002	/* hmp->rsv_inodes bumped */
#define HAMMER_INODE_CONN_DOWN	0x0004	/* include in downward recursion */
#define HAMMER_INODE_XDIRTY	0x0008	/* in-memory records */
#define HAMMER_INODE_ONDISK	0x0010	/* inode is on-disk (else not yet) */
#define HAMMER_INODE_FLUSH	0x0020	/* flush on last ref */
#define HAMMER_INODE_DELETED	0x0080	/* inode delete (backend) */
#define HAMMER_INODE_DELONDISK	0x0100	/* delete synchronized to disk */
#define HAMMER_INODE_RO		0x0200	/* read-only (because of as-of) */
#define HAMMER_INODE_UNUSED0400	0x0400
#define HAMMER_INODE_DONDISK	0x0800	/* data records may be on disk */
#define HAMMER_INODE_BUFS	0x1000	/* dirty high level bps present */
#define HAMMER_INODE_REFLUSH	0x2000	/* flush on dependency / reflush */
#define HAMMER_INODE_RECLAIM	0x4000	/* trying to reclaim */
#define HAMMER_INODE_FLUSHW	0x8000	/* someone waiting for flush */

#define HAMMER_INODE_TRUNCATED	0x00010000
#define HAMMER_INODE_DELETING	0x00020000 /* inode delete request (frontend)*/
#define HAMMER_INODE_RESIGNAL	0x00040000 /* re-signal on re-flush */
#define HAMMER_INODE_ATIME	0x00100000 /* in-memory atime modified */
#define HAMMER_INODE_MTIME	0x00200000 /* in-memory mtime modified */
#define HAMMER_INODE_WOULDBLOCK	0x00400000 /* re-issue to new flush group */
#define HAMMER_INODE_DUMMY	0x00800000 /* dummy inode covering bad file */
#define HAMMER_INODE_SDIRTY	0x01000000 /* in-memory ino_data.size is dirty*/
#define HAMMER_INODE_REDO	0x02000000 /* REDO logging active */
#define HAMMER_INODE_RDIRTY	0x04000000 /* REDO records active in fifo */
#define HAMMER_INODE_MODMASK	(HAMMER_INODE_DDIRTY|HAMMER_INODE_SDIRTY|   \
				 HAMMER_INODE_XDIRTY|HAMMER_INODE_BUFS|	    \
				 HAMMER_INODE_ATIME|HAMMER_INODE_MTIME|	    \
				 HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING)

#define HAMMER_INODE_MODMASK_NOXDIRTY	\
				(HAMMER_INODE_MODMASK & ~HAMMER_INODE_XDIRTY)

#define HAMMER_INODE_MODMASK_NOREDO	\
				(HAMMER_INODE_DDIRTY|			    \
				 HAMMER_INODE_XDIRTY|			    \
				 HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING)
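/*
 * Illustrative sketch: MODMASK is the canonical "does this inode have
 * anything that needs flushing" test (ip is a hypothetical
 * hammer_inode_t; hammer_flush_inode() is declared later in this
 * header):
 *
 *	if (ip->flags & HAMMER_INODE_MODMASK)
 *		hammer_flush_inode(ip, 0);
 */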
#define HAMMER_FLUSH_SIGNAL	0x0001
#define HAMMER_FLUSH_RECURSION	0x0002
/*
 * Used by the inode reclaim code to pipeline reclaims and avoid
 * blowing out kernel memory or letting the flusher get too far
 * behind.  The reclaim wakes up when count reaches 0 or the
 * timer expires.
 */
struct hammer_reclaim {
	TAILQ_ENTRY(hammer_reclaim) entry;
	int	count;
};

#define HAMMER_RECLAIM_WAIT	4000	/* default vfs.hammer.limit_reclaim */
/*
 * Track who is creating the greatest burden on the
 * inode cache.
 */
struct hammer_inostats {
	pid_t		pid;	/* track user process */
	int		ltick;	/* last tick */
	int		count;	/* count (degenerates) */
};

#define HAMMER_INOSTATS_HSIZE	32
#define HAMMER_INOSTATS_HMASK	(HAMMER_INOSTATS_HSIZE - 1)
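/*
 * Illustrative sketch: HSIZE is a power of 2, so HMASK can hash a pid
 * directly into the per-mount inostats[] array declared in struct
 * hammer_mount below (hmp and pid are hypothetical locals):
 *
 *	struct hammer_inostats *stats;
 *
 *	stats = &hmp->inostats[pid & HAMMER_INOSTATS_HMASK];
 */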
/*
 * Structure used to represent an unsynchronized record in-memory.  These
 * records typically represent directory entries.  Only non-historical
 * records are kept in-memory.
 *
 * Records are organized as a per-inode RB-Tree.  If the inode is not
 * on disk then neither are any records and the in-memory record tree
 * represents the entire contents of the inode.  If the inode is on disk
 * then the on-disk B-Tree is scanned in parallel with the in-memory
 * RB-Tree to synthesize the current state of the file.
 *
 * Records are also used to enforce the ordering of directory create/delete
 * operations.  A new inode will not be flushed to disk unless its related
 * directory entry is also being flushed at the same time.  A directory entry
 * will not be removed unless its related inode is also being removed at the
 * same time.
 */
typedef enum hammer_record_type {
	HAMMER_MEM_RECORD_GENERAL,	/* misc record */
	HAMMER_MEM_RECORD_INODE,	/* inode record */
	HAMMER_MEM_RECORD_ADD,		/* positive memory cache record */
	HAMMER_MEM_RECORD_DEL,		/* negative delete-on-disk record */
	HAMMER_MEM_RECORD_DATA		/* bulk-data record w/on-disk ref */
} hammer_record_type_t;
struct hammer_record {
	RB_ENTRY(hammer_record)		rb_node;
	TAILQ_ENTRY(hammer_record)	target_entry;
	hammer_inode_state_t		flush_state;
	hammer_flush_group_t		flush_group;
	hammer_record_type_t		type;
	struct hammer_lock		lock;
	struct hammer_reserve		*resv;
	struct hammer_inode		*ip;
	struct hammer_inode		*target_ip;
	struct hammer_btree_leaf_elm	leaf;
	union hammer_data_ondisk	*data;
	int				flags;
	hammer_off_t			zone2_offset;	/* direct-write only */
};

typedef struct hammer_record *hammer_record_t;
/*
 * Record flags.  Note that FE can only be set by the frontend if the
 * record has not been interlocked by the backend w/ BE.
 */
#define HAMMER_RECF_ALLOCDATA		0x0001
#define HAMMER_RECF_ONRBTREE		0x0002
#define HAMMER_RECF_DELETED_FE		0x0004	/* deleted (frontend) */
#define HAMMER_RECF_DELETED_BE		0x0008	/* deleted (backend) */
#define HAMMER_RECF_COMMITTED		0x0010	/* committed to the B-Tree */
#define HAMMER_RECF_INTERLOCK_BE	0x0020	/* backend interlock */
#define HAMMER_RECF_WANTED		0x0040	/* wanted by the frontend */
#define HAMMER_RECF_CONVERT_DELETE	0x0100	/* special case */
#define HAMMER_RECF_DIRECT_IO		0x0200	/* related direct I/O running*/
#define HAMMER_RECF_DIRECT_WAIT		0x0400	/* related direct I/O running*/
#define HAMMER_RECF_DIRECT_INVAL	0x0800	/* buffer alias invalidation */
#define HAMMER_RECF_REDO		0x1000	/* REDO was laid down */
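/*
 * Illustrative sketch of the FE/BE interlock described above (a
 * simplified rendition of the frontend deletion logic, not the
 * verbatim implementation; record is a hypothetical hammer_record_t):
 *
 *	if (record->flags & HAMMER_RECF_INTERLOCK_BE)
 *		record->flags |= HAMMER_RECF_WANTED;	(then sleep/retry)
 *	else
 *		record->flags |= HAMMER_RECF_DELETED_FE;
 */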
/*
 * hammer_create_at_cursor() and hammer_delete_at_cursor() flags.
 */
#define HAMMER_CREATE_MODE_UMIRROR	0x0001
#define HAMMER_CREATE_MODE_SYS		0x0002

#define HAMMER_DELETE_ADJUST		0x0001
#define HAMMER_DELETE_DESTROY		0x0002
/*
 * In-memory structures representing on-disk structures.
 */
struct hammer_volume;
struct hammer_buffer;
struct hammer_node;
struct hammer_undo;
struct hammer_reserve;

RB_HEAD(hammer_vol_rb_tree, hammer_volume);
RB_HEAD(hammer_buf_rb_tree, hammer_buffer);
RB_HEAD(hammer_nod_rb_tree, hammer_node);
RB_HEAD(hammer_und_rb_tree, hammer_undo);
RB_HEAD(hammer_res_rb_tree, hammer_reserve);
RB_PROTOTYPE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	      hammer_vol_rb_compare, int32_t);
RB_PROTOTYPE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	      hammer_buf_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_nod_rb_tree, hammer_node, rb_node,
	      hammer_nod_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_und_rb_tree, hammer_undo, rb_node,
	      hammer_und_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_res_rb_tree, hammer_reserve, rb_node,
	      hammer_res_rb_compare, hammer_off_t);
/*
 * IO management - embedded at the head of various in-memory structures
 *
 * VOLUME	- hammer_volume containing meta-data
 * META_BUFFER	- hammer_buffer containing meta-data
 * DATA_BUFFER	- hammer_buffer containing pure-data
 *
 * Dirty volume headers and dirty meta-data buffers are locked until the
 * flusher can sequence them out.  Dirty pure-data buffers can be written.
 * Clean buffers can be passively released.
 */
typedef enum hammer_io_type {
	HAMMER_STRUCTURE_VOLUME,
	HAMMER_STRUCTURE_META_BUFFER,
	HAMMER_STRUCTURE_UNDO_BUFFER,
	HAMMER_STRUCTURE_DATA_BUFFER,
	HAMMER_STRUCTURE_DUMMY
} hammer_io_type_t;
union hammer_io_structure;
struct hammer_io;

struct worklist {
	LIST_ENTRY(worklist) node;
};

TAILQ_HEAD(hammer_io_list, hammer_io);
typedef struct hammer_io_list *hammer_io_list_t;
struct hammer_io {
	struct worklist		worklist;
	struct hammer_lock	lock;
	enum hammer_io_type	type;
	struct hammer_mount	*hmp;
	struct hammer_volume	*volume;
	TAILQ_ENTRY(hammer_io)	mod_entry;	/* list entry if modified */
	TAILQ_ENTRY(hammer_io)	iorun_entry;	/* iorun_list */
	hammer_io_list_t	mod_list;
	struct buf		*bp;
	int64_t			offset;		/* zone-2 offset */
	int			bytes;		/* buffer cache buffer size */
	int			loading;	/* loading/unloading interlock */
	int			modify_refs;

	u_int		modified : 1;	/* bp's data was modified */
	u_int		released : 1;	/* bp released (w/ B_LOCKED set) */
	u_int		running : 1;	/* bp write IO in progress */
	u_int		waiting : 1;	/* someone is waiting on us */
	u_int		validated : 1;	/* ondisk has been validated */
	u_int		waitdep : 1;	/* flush waits for dependencies */
	u_int		recovered : 1;	/* has recovery ref */
	u_int		waitmod : 1;	/* waiting for modify_refs */
	u_int		reclaim : 1;	/* reclaim requested */
	u_int		gencrc : 1;	/* crc needs to be generated */
	u_int		ioerror : 1;	/* abort on io-error */
};

typedef struct hammer_io *hammer_io_t;
#define HAMMER_CLUSTER_SIZE	(64 * 1024)
#if HAMMER_CLUSTER_SIZE > MAXBSIZE
#undef  HAMMER_CLUSTER_SIZE
#define HAMMER_CLUSTER_SIZE	MAXBSIZE
#endif
#define HAMMER_CLUSTER_BUFS	(HAMMER_CLUSTER_SIZE / HAMMER_BUFSIZE)
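/*
 * Illustrative arithmetic: assuming the 16KB HAMMER_BUFSIZE from
 * hammer_disk.h and a MAXBSIZE of at least 64KB, HAMMER_CLUSTER_BUFS
 * evaluates to 65536 / 16384 = 4 buffers per cluster operation.
 */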
/*
 * In-memory volume representing on-disk buffer
 */
struct hammer_volume {
	struct hammer_io io;
	RB_ENTRY(hammer_volume) rb_node;
	struct hammer_volume_ondisk *ondisk;
	int32_t	vol_no;
	int64_t nblocks;	/* note: special calculation for statfs */
	int64_t buffer_base;	/* base offset of buffer 0 */
	hammer_off_t maxbuf_off; /* Maximum buffer offset (zone-2) */
	hammer_off_t maxraw_off; /* Maximum raw offset for device */
	char	*vol_name;
	struct vnode *devvp;
	int	vol_flags;
};

typedef struct hammer_volume *hammer_volume_t;
/*
 * In-memory buffer (other than volume, super-cluster, or cluster),
 * representing an on-disk buffer.
 */
struct hammer_buffer {
	struct hammer_io io;
	RB_ENTRY(hammer_buffer) rb_node;
	void *ondisk;
	hammer_off_t zoneX_offset;
	hammer_off_t zone2_offset;
	struct hammer_reserve *resv;
	struct hammer_node_list clist;
};

typedef struct hammer_buffer *hammer_buffer_t;
/*
 * In-memory B-Tree node, representing an on-disk B-Tree node.
 *
 * This is a hang-on structure which is backed by a hammer_buffer,
 * indexed by a hammer_cluster, and used for fine-grained locking of
 * B-Tree nodes in order to properly control lock ordering.  A hammer_buffer
 * can contain multiple nodes representing wildly disassociated portions
 * of the B-Tree so locking cannot be done on a buffer-by-buffer basis.
 *
 * This structure uses a cluster-relative index to reduce the number
 * of layers required to access it, and also because all on-disk B-Tree
 * references are cluster-relative offsets.
 */
struct hammer_node {
	struct hammer_lock	lock;		/* node-by-node lock */
	TAILQ_ENTRY(hammer_node) entry;		/* per-buffer linkage */
	RB_ENTRY(hammer_node)	rb_node;	/* per-cluster linkage */
	hammer_off_t		node_offset;	/* full offset spec */
	struct hammer_mount	*hmp;
	struct hammer_buffer	*buffer;	/* backing buffer */
	hammer_node_ondisk_t	ondisk;		/* ptr to on-disk structure */
	TAILQ_HEAD(, hammer_cursor) cursor_list;  /* deadlock recovery */
	struct hammer_node_cache_list cache_list; /* passive caches */
	int			flags;
	int			loading;	/* load interlock */
};

#define HAMMER_NODE_DELETED	0x0001
#define HAMMER_NODE_FLUSH	0x0002
#define HAMMER_NODE_CRCGOOD	0x0004
#define HAMMER_NODE_NEEDSCRC	0x0008
#define HAMMER_NODE_NEEDSMIRROR	0x0010
#define HAMMER_NODE_CRCBAD	0x0020
#define HAMMER_NODE_NONLINEAR	0x0040		/* linear heuristic */

#define HAMMER_NODE_CRCANY	(HAMMER_NODE_CRCGOOD | HAMMER_NODE_CRCBAD)

typedef struct hammer_node	*hammer_node_t;
/*
 * List of locked nodes.  This structure is used to lock potentially large
 * numbers of nodes as an aid for complex B-Tree operations.
 */
struct hammer_node_lock;
TAILQ_HEAD(hammer_node_lock_list, hammer_node_lock);

struct hammer_node_lock {
	TAILQ_ENTRY(hammer_node_lock) entry;
	struct hammer_node_lock_list  list;
	struct hammer_node_lock	*parent;
	hammer_node_t	node;
	hammer_node_ondisk_t copy;	/* copy of on-disk data */
	int		index;		/* index of this node in parent */
	int		count;		/* count children */
	int		flags;
};

typedef struct hammer_node_lock *hammer_node_lock_t;

#define HAMMER_NODE_LOCK_UPDATED	0x0001
/*
 * Common I/O management structure - embedded in in-memory structures
 * which are backed by filesystem buffers.
 */
union hammer_io_structure {
	struct hammer_io	io;
	struct hammer_volume	volume;
	struct hammer_buffer	buffer;
};

typedef union hammer_io_structure *hammer_io_structure_t;
/*
 * The reserve structure prevents the blockmap from allocating
 * out of a reserved bigblock.  Such reservations are used by
 * the direct-write mechanism.
 *
 * The structure is also used to hold off on reallocations of
 * big blocks from the freemap until flush dependencies have
 * been dealt with.
 */
struct hammer_reserve {
	RB_ENTRY(hammer_reserve) rb_node;
	TAILQ_ENTRY(hammer_reserve) delay_entry;
	int		flush_group;
	int		flags;
	int		refs;
	int		zone;
	int		append_off;
	hammer_off_t	zone_offset;
};

typedef struct hammer_reserve *hammer_reserve_t;

#define HAMMER_RESF_ONDELAY	0x0001
#define HAMMER_RESF_LAYER2FREE	0x0002
#include "hammer_cursor.h"
/*
 * The undo structure tracks recent undos to avoid laying down duplicate
 * undos within a flush group, saving us a significant amount of overhead.
 *
 * This is strictly a heuristic.
 */
#define HAMMER_MAX_UNDOS		1024
#define HAMMER_MAX_FLUSHERS		4

struct hammer_undo {
	RB_ENTRY(hammer_undo)	rb_node;
	TAILQ_ENTRY(hammer_undo) lru_entry;
	hammer_off_t	offset;
	int		bytes;
};

typedef struct hammer_undo *hammer_undo_t;
struct hammer_flusher_info;
TAILQ_HEAD(hammer_flusher_info_list, hammer_flusher_info);

struct hammer_flusher {
	int		signal;		/* flusher thread sequencer */
	int		act;		/* currently active flush group */
	int		done;		/* set to act when complete */
	int		next;		/* next flush group */
	int		group_lock;	/* lock sequencing of the next flush */
	int		exiting;	/* request master exit */
	thread_t	td;		/* master flusher thread */
	hammer_tid_t	tid;		/* last flushed transaction id */
	int		finalize_want;		/* serialize finalization */
	struct hammer_lock finalize_lock;	/* serialize finalization */
	struct hammer_transaction trans;	/* shared transaction */
	struct hammer_flusher_info_list run_list;
	struct hammer_flusher_info_list ready_list;
};

#define HAMMER_FLUSH_UNDOS_RELAXED	0
#define HAMMER_FLUSH_UNDOS_FORCED	1
#define HAMMER_FLUSH_UNDOS_AUTO		2
/*
 * Internal hammer mount data structure
 */
struct hammer_mount {
	struct mount *mp;
	/*struct vnode *rootvp;*/
	struct hammer_ino_rb_tree rb_inos_root;
	struct hammer_redo_rb_tree rb_redo_root;
	struct hammer_vol_rb_tree rb_vols_root;
	struct hammer_nod_rb_tree rb_nods_root;
	struct hammer_und_rb_tree rb_undo_root;
	struct hammer_res_rb_tree rb_resv_root;
	struct hammer_buf_rb_tree rb_bufs_root;
	struct hammer_pfs_rb_tree rb_pfsm_root;
	struct hammer_volume *rootvol;
	struct hammer_base_elm root_btree_beg;
	struct hammer_base_elm root_btree_end;

	struct malloc_type	*m_misc;
	struct malloc_type	*m_inodes;

	int	flags;		/* HAMMER_MOUNT_xxx flags */
	int	hflags;
	int	ronly;
	int	nvolumes;
	int	volume_iterator;
	int	master_id;	/* -1 or 0-15 - clustering and mirroring */
	int	version;	/* hammer filesystem version to use */
	int	rsv_inodes;	/* reserved space due to dirty inodes */
	int64_t	rsv_databytes;	/* reserved space due to record data */
	int	rsv_recs;	/* reserved space due to dirty records */
	int	rsv_fromdelay;	/* bigblocks reserved due to flush delay */
	int	undo_rec_limit;	/* based on size of undo area */
	int	last_newrecords;
	int	count_newrecords;

	int	volume_to_remove; /* volume that is currently being removed */

	int	inode_reclaims; /* inodes pending reclaim by flusher */
	int	count_inodes;	/* total number of inodes */
	int	count_iqueued;	/* inodes queued to flusher */

	struct hammer_flusher flusher;

	u_int	check_interrupt;
	u_int	check_yield;
	uuid_t	fsid;
	struct hammer_io_list volu_list;	/* dirty volume buffers */
	struct hammer_io_list undo_list;	/* dirty undo buffers */
	struct hammer_io_list data_list;	/* dirty data buffers */
	struct hammer_io_list alt_data_list;	/* dirty data buffers */
	struct hammer_io_list meta_list;	/* dirty meta bufs    */
	struct hammer_io_list lose_list;	/* loose buffers      */
	int	locked_dirty_space;		/* meta/volu count    */
	int	io_running_space;
	int	objid_cache_count;
	int	error;				/* critical I/O error */
	struct krate	krate;			/* rate limited kprintf */
	hammer_tid_t	asof;			/* snapshot mount */
	hammer_tid_t	next_tid;
	hammer_tid_t	flush_tid1;		/* flusher tid sequencing */
	hammer_tid_t	flush_tid2;		/* flusher tid sequencing */
	int64_t copy_stat_freebigblocks;	/* number of free bigblocks */
	u_int32_t	undo_seqno;		/* UNDO/REDO FIFO seqno */

	struct netexport export;
	struct hammer_lock sync_lock;
	struct hammer_lock free_lock;
	struct hammer_lock undo_lock;
	struct hammer_lock blkmap_lock;
	struct hammer_lock snapshot_lock;
	struct hammer_lock volume_lock;
	struct hammer_blockmap  blockmap[HAMMER_MAX_ZONES];
	struct hammer_undo	undos[HAMMER_MAX_UNDOS];
	int	undo_alloc;
	TAILQ_HEAD(, hammer_undo)  undo_lru_list;
	TAILQ_HEAD(, hammer_reserve) delay_list;
	struct hammer_flush_group_list	flush_group_list;
	hammer_flush_group_t	next_flush_group;
	TAILQ_HEAD(, hammer_objid_cache) objid_cache_list;
	TAILQ_HEAD(, hammer_reclaim) reclaim_list;
	TAILQ_HEAD(, hammer_io) iorun_list;

	struct hammer_inostats	inostats[HAMMER_INOSTATS_HSIZE];
};

typedef struct hammer_mount *hammer_mount_t;
#define HAMMER_MOUNT_CRITICAL_ERROR	0x0001
#define HAMMER_MOUNT_FLUSH_RECOVERY	0x0002
#define HAMMER_MOUNT_REDO_SYNC		0x0004
struct hammer_sync_info {
	int error;
	int waitfor;
};
/*
 * Minimum buffer cache bufs required to rebalance the B-Tree.
 * This is because we must hold the children and the children's children
 * locked.  Even this might not be enough if things are horribly out
 * of whack.
 */
#define HAMMER_REBALANCE_MIN_BUFS	\
	(HAMMER_BTREE_LEAF_ELMS * HAMMER_BTREE_LEAF_ELMS)
#endif

/*
 * checkspace slop (8MB chunks), higher numbers are more conservative.
 */
#define HAMMER_CHKSPC_REBLOCK	25
#define HAMMER_CHKSPC_MIRROR	20
#define HAMMER_CHKSPC_WRITE	20
#define HAMMER_CHKSPC_CREATE	20
#define HAMMER_CHKSPC_REMOVE	10
#define HAMMER_CHKSPC_EMERGENCY	0
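/*
 * Illustrative arithmetic: the slop is expressed in 8MB chunks, so
 * HAMMER_CHKSPC_REBLOCK (25) requires roughly 25 * 8MB = 200MB of free
 * space before reblocking may proceed, while HAMMER_CHKSPC_REMOVE (10)
 * still permits removals down to roughly 80MB free.
 */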
#if defined(_KERNEL)

extern struct vop_ops hammer_vnode_vops;
extern struct vop_ops hammer_spec_vops;
extern struct vop_ops hammer_fifo_vops;
extern struct bio_ops hammer_bioops;
extern int hammer_debug_io;
extern int hammer_debug_general;
extern int hammer_debug_debug;
extern int hammer_debug_inode;
extern int hammer_debug_locks;
extern int hammer_debug_btree;
extern int hammer_debug_tid;
extern int hammer_debug_recover;
extern int hammer_debug_recover_faults;
extern int hammer_debug_critical;
extern int hammer_cluster_enable;
extern int hammer_count_fsyncs;
extern int hammer_count_inodes;
extern int hammer_count_iqueued;
extern int hammer_count_reclaiming;
extern int hammer_count_records;
extern int hammer_count_record_datas;
extern int hammer_count_volumes;
extern int hammer_count_buffers;
extern int hammer_count_nodes;
extern int64_t hammer_count_extra_space_used;
extern int64_t hammer_stats_btree_lookups;
extern int64_t hammer_stats_btree_searches;
extern int64_t hammer_stats_btree_inserts;
extern int64_t hammer_stats_btree_deletes;
extern int64_t hammer_stats_btree_elements;
extern int64_t hammer_stats_btree_splits;
extern int64_t hammer_stats_btree_iterations;
extern int64_t hammer_stats_btree_root_iterations;
extern int64_t hammer_stats_record_iterations;
extern int64_t hammer_stats_file_read;
extern int64_t hammer_stats_file_write;
extern int64_t hammer_stats_file_iopsr;
extern int64_t hammer_stats_file_iopsw;
extern int64_t hammer_stats_disk_read;
extern int64_t hammer_stats_disk_write;
extern int64_t hammer_stats_inode_flushes;
extern int64_t hammer_stats_commits;
extern int64_t hammer_stats_undo;
extern int64_t hammer_stats_redo;
extern int hammer_count_dirtybufspace;
extern int hammer_count_refedbufs;
extern int hammer_count_reservations;
extern int hammer_count_io_running_read;
extern int hammer_count_io_running_write;
extern int hammer_count_io_locked;
extern int hammer_limit_dirtybufspace;
extern int hammer_limit_recs;
extern int hammer_limit_inode_recs;
extern int hammer_limit_reclaim;
extern int hammer_limit_redo;
extern int hammer_bio_count;
extern int hammer_verify_zone;
extern int hammer_verify_data;
extern int hammer_write_mode;
extern int hammer_yield_check;
extern int hammer_fsync_mode;
extern int hammer_autoflush;
extern int64_t hammer_contention_count;
void	hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
			int error, const char *msg);
int	hammer_vop_inactive(struct vop_inactive_args *);
int	hammer_vop_reclaim(struct vop_reclaim_args *);
int	hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp);
struct hammer_inode *hammer_get_inode(hammer_transaction_t trans,
			hammer_inode_t dip, int64_t obj_id,
			hammer_tid_t asof, u_int32_t localization,
			int flags, int *errorp);
struct hammer_inode *hammer_get_dummy_inode(hammer_transaction_t trans,
			hammer_inode_t dip, int64_t obj_id,
			hammer_tid_t asof, u_int32_t localization,
			int flags, int *errorp);
struct hammer_inode *hammer_find_inode(hammer_transaction_t trans,
			int64_t obj_id, hammer_tid_t asof,
			u_int32_t localization);
void	hammer_scan_inode_snapshots(hammer_mount_t hmp,
			hammer_inode_info_t iinfo,
			int (*callback)(hammer_inode_t ip, void *data),
			void *data);
void	hammer_put_inode(struct hammer_inode *ip);
void	hammer_put_inode_ref(struct hammer_inode *ip);
void	hammer_inode_waitreclaims(hammer_transaction_t trans);
int	hammer_unload_volume(hammer_volume_t volume, void *data __unused);
int	hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused);

int	hammer_unload_buffer(hammer_buffer_t buffer, void *data);
int	hammer_install_volume(hammer_mount_t hmp, const char *volname,
			struct vnode *devvp);
int	hammer_mountcheck_volumes(hammer_mount_t hmp);
int	hammer_mem_add(hammer_record_t record);
int	hammer_ip_lookup(hammer_cursor_t cursor);
int	hammer_ip_first(hammer_cursor_t cursor);
int	hammer_ip_next(hammer_cursor_t cursor);
int	hammer_ip_resolve_data(hammer_cursor_t cursor);
int	hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
			hammer_tid_t tid);
int	hammer_create_at_cursor(hammer_cursor_t cursor,
			hammer_btree_leaf_elm_t leaf, void *udata, int mode);
int	hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags,
			hammer_tid_t delete_tid, u_int32_t delete_ts,
			int track, int64_t *stat_bytes);
int	hammer_ip_check_directory_empty(hammer_transaction_t trans,
			hammer_inode_t ip);
int	hammer_sync_hmp(hammer_mount_t hmp, int waitfor);
int	hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor);
hammer_record_t
	hammer_alloc_mem_record(hammer_inode_t ip, int data_len);
void	hammer_flush_record_done(hammer_record_t record, int error);
void	hammer_wait_mem_record_ident(hammer_record_t record, const char *ident);
void	hammer_rel_mem_record(hammer_record_t record);
int	hammer_cursor_up(hammer_cursor_t cursor);
int	hammer_cursor_up_locked(hammer_cursor_t cursor);
int	hammer_cursor_down(hammer_cursor_t cursor);
int	hammer_cursor_upgrade(hammer_cursor_t cursor);
int	hammer_cursor_upgrade_node(hammer_cursor_t cursor);
void	hammer_cursor_downgrade(hammer_cursor_t cursor);
int	hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node,
			int index);
void	hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident);
int	hammer_lock_ex_try(struct hammer_lock *lock);
void	hammer_lock_sh(struct hammer_lock *lock);
int	hammer_lock_sh_try(struct hammer_lock *lock);
int	hammer_lock_upgrade(struct hammer_lock *lock);
void	hammer_lock_downgrade(struct hammer_lock *lock);
int	hammer_lock_status(struct hammer_lock *lock);
void	hammer_unlock(struct hammer_lock *lock);
void	hammer_ref(struct hammer_lock *lock);
void	hammer_unref(struct hammer_lock *lock);

void	hammer_sync_lock_ex(hammer_transaction_t trans);
void	hammer_sync_lock_sh(hammer_transaction_t trans);
int	hammer_sync_lock_sh_try(hammer_transaction_t trans);
void	hammer_sync_unlock(hammer_transaction_t trans);
u_int32_t hammer_to_unix_xid(uuid_t *uuid);
void	hammer_guid_to_uuid(uuid_t *uuid, u_int32_t guid);
void	hammer_time_to_timespec(u_int64_t xtime, struct timespec *ts);
u_int64_t hammer_timespec_to_time(struct timespec *ts);
int	hammer_str_to_tid(const char *str, int *ispfsp,
			hammer_tid_t *tidp, u_int32_t *localizationp);
int	hammer_is_atatext(const char *name, int len);
hammer_tid_t hammer_alloc_objid(hammer_mount_t hmp, hammer_inode_t dip,
			int64_t namekey);
void	hammer_clear_objid(hammer_inode_t dip);
void	hammer_destroy_objid_cache(hammer_mount_t hmp);

int	hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset,
			int bytes);
void	hammer_clear_undo_history(hammer_mount_t hmp);
enum vtype hammer_get_vnode_type(u_int8_t obj_type);
int	hammer_get_dtype(u_int8_t obj_type);
u_int8_t hammer_get_obj_type(enum vtype vtype);
int64_t hammer_directory_namekey(hammer_inode_t dip, const void *name, int len,
			u_int32_t *max_iterationsp);
int	hammer_nohistory(hammer_inode_t ip);

int	hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor,
			hammer_node_cache_t cache, hammer_inode_t ip);
void	hammer_normalize_cursor(hammer_cursor_t cursor);
void	hammer_done_cursor(hammer_cursor_t cursor);
int	hammer_recover_cursor(hammer_cursor_t cursor);
void	hammer_unlock_cursor(hammer_cursor_t cursor);
int	hammer_lock_cursor(hammer_cursor_t cursor);
hammer_cursor_t	hammer_push_cursor(hammer_cursor_t ocursor);
void	hammer_pop_cursor(hammer_cursor_t ocursor, hammer_cursor_t ncursor);

void	hammer_cursor_replaced_node(hammer_node_t onode, hammer_node_t nnode);
void	hammer_cursor_removed_node(hammer_node_t onode, hammer_node_t parent,
			int index);
void	hammer_cursor_split_node(hammer_node_t onode, hammer_node_t nnode,
			int index);
void	hammer_cursor_moved_element(hammer_node_t oparent, int pindex,
			hammer_node_t onode, int oindex,
			hammer_node_t nnode, int nindex);
void	hammer_cursor_parent_changed(hammer_node_t node, hammer_node_t oparent,
			hammer_node_t nparent, int nindex);
void	hammer_cursor_inserted_element(hammer_node_t node, int index);
void	hammer_cursor_deleted_element(hammer_node_t node, int index);
int	hammer_btree_lookup(hammer_cursor_t cursor);
int	hammer_btree_first(hammer_cursor_t cursor);
int	hammer_btree_last(hammer_cursor_t cursor);
int	hammer_btree_extract(hammer_cursor_t cursor, int flags);
int	hammer_btree_iterate(hammer_cursor_t cursor);
int	hammer_btree_iterate_reverse(hammer_cursor_t cursor);
int	hammer_btree_insert(hammer_cursor_t cursor,
			hammer_btree_leaf_elm_t elm, int *doprop);
int	hammer_btree_delete(hammer_cursor_t cursor);
void	hammer_btree_do_propagation(hammer_cursor_t cursor,
			hammer_pseudofs_inmem_t pfsm,
			hammer_btree_leaf_elm_t leaf);
int	hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2);
int	hammer_btree_chkts(hammer_tid_t ts, hammer_base_elm_t key);
int	hammer_btree_correct_rhb(hammer_cursor_t cursor, hammer_tid_t tid);
int	hammer_btree_correct_lhb(hammer_cursor_t cursor, hammer_tid_t tid);

int	btree_set_parent(hammer_transaction_t trans, hammer_node_t node,
			hammer_btree_elm_t elm);
void	hammer_node_lock_init(hammer_node_lock_t parent, hammer_node_t node);
int	hammer_btree_lock_children(hammer_cursor_t cursor, int depth,
			hammer_node_lock_t parent);
void	hammer_btree_lock_copy(hammer_cursor_t cursor,
			hammer_node_lock_t parent);
int	hammer_btree_sync_copy(hammer_cursor_t cursor,
			hammer_node_lock_t parent);
void	hammer_btree_unlock_children(hammer_cursor_t cursor,
			hammer_node_lock_t parent);
int	hammer_btree_search_node(hammer_base_elm_t elm, hammer_node_ondisk_t node);
hammer_node_t hammer_btree_get_parent(hammer_transaction_t trans,
			hammer_node_t node, int *parent_indexp,
			int *errorp, int try_exclusive);

void	hammer_print_btree_node(hammer_node_ondisk_t ondisk);
void	hammer_print_btree_elm(hammer_btree_elm_t elm, u_int8_t type, int i);
void	*hammer_bread(struct hammer_mount *hmp, hammer_off_t off,
			int *errorp, struct hammer_buffer **bufferp);
void	*hammer_bnew(struct hammer_mount *hmp, hammer_off_t off,
			int *errorp, struct hammer_buffer **bufferp);
void	*hammer_bread_ext(struct hammer_mount *hmp, hammer_off_t off, int bytes,
			int *errorp, struct hammer_buffer **bufferp);
void	*hammer_bnew_ext(struct hammer_mount *hmp, hammer_off_t off, int bytes,
			int *errorp, struct hammer_buffer **bufferp);
hammer_volume_t	hammer_get_root_volume(hammer_mount_t hmp, int *errorp);

hammer_volume_t	hammer_get_volume(hammer_mount_t hmp,
			int32_t vol_no, int *errorp);
hammer_buffer_t	hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
			int bytes, int isnew, int *errorp);
void		hammer_sync_buffers(hammer_mount_t hmp,
			hammer_off_t base_offset, int bytes);
int		hammer_del_buffers(hammer_mount_t hmp,
			hammer_off_t base_offset,
			hammer_off_t zone2_offset, int bytes,
			int report_conflicts);

int		hammer_ref_volume(hammer_volume_t volume);
int		hammer_ref_buffer(hammer_buffer_t buffer);
void		hammer_flush_buffer_nodes(hammer_buffer_t buffer);

void		hammer_rel_volume(hammer_volume_t volume, int flush);
void		hammer_rel_buffer(hammer_buffer_t buffer, int flush);
int		hammer_vfs_export(struct mount *mp, int op,
			const struct export_args *export);
hammer_node_t	hammer_get_node(hammer_transaction_t trans,
			hammer_off_t node_offset, int isnew, int *errorp);
void		hammer_ref_node(hammer_node_t node);
hammer_node_t	hammer_ref_node_safe(hammer_transaction_t trans,
			hammer_node_cache_t cache, int *errorp);
void		hammer_rel_node(hammer_node_t node);
void		hammer_delete_node(hammer_transaction_t trans,
			hammer_node_t node);
void		hammer_cache_node(hammer_node_cache_t cache,
			hammer_node_t node);
void		hammer_uncache_node(hammer_node_cache_t cache);
void		hammer_flush_node(hammer_node_t node);
void hammer_dup_buffer(struct hammer_buffer **bufferp,
			struct hammer_buffer *buffer);
hammer_node_t hammer_alloc_btree(hammer_transaction_t trans,
			hammer_off_t hint, int *errorp);
void *hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
			u_int16_t rec_type, hammer_off_t *data_offsetp,
			struct hammer_buffer **data_bufferp,
			hammer_off_t hint, int *errorp);
int hammer_generate_undo(hammer_transaction_t trans,
			hammer_off_t zone1_offset, void *base, int len);
int hammer_generate_redo(hammer_transaction_t trans, hammer_inode_t ip,
			hammer_off_t file_offset, u_int32_t flags,
			void *base, int len);
void hammer_generate_redo_sync(hammer_transaction_t trans);
void hammer_redo_fifo_start_flush(hammer_inode_t ip);
void hammer_redo_fifo_end_flush(hammer_inode_t ip);

void hammer_format_undo(void *base, u_int32_t seqno);
int hammer_upgrade_undo_4(hammer_transaction_t trans);
void hammer_put_volume(struct hammer_volume *volume, int flush);
void hammer_put_buffer(struct hammer_buffer *buffer, int flush);

hammer_off_t hammer_freemap_alloc(hammer_transaction_t trans,
			hammer_off_t owner, int *errorp);
void hammer_freemap_free(hammer_transaction_t trans, hammer_off_t phys_offset,
			hammer_off_t owner, int *errorp);
int _hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp);
hammer_off_t hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
			int bytes, hammer_off_t hint, int *errorp);
hammer_reserve_t hammer_blockmap_reserve(hammer_mount_t hmp, int zone,
			int bytes, hammer_off_t *zone_offp, int *errorp);
void hammer_blockmap_reserve_complete(hammer_mount_t hmp,
			hammer_reserve_t resv);
void hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv);
void hammer_blockmap_free(hammer_transaction_t trans,
			hammer_off_t bmap_off, int bytes);
int hammer_blockmap_finalize(hammer_transaction_t trans,
			hammer_reserve_t resv,
			hammer_off_t bmap_off, int bytes);
int hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t bmap_off,
			int *curp, int *errorp);
hammer_off_t hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t bmap_off,
			int *errorp);
hammer_off_t hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t bmap_off,
			int *errorp);
int64_t hammer_undo_used(hammer_transaction_t trans);
int64_t hammer_undo_space(hammer_transaction_t trans);
int64_t hammer_undo_max(hammer_mount_t hmp);
int hammer_undo_reclaim(hammer_io_t io);
void hammer_start_transaction(struct hammer_transaction *trans,
			      struct hammer_mount *hmp);
void hammer_simple_transaction(struct hammer_transaction *trans,
			      struct hammer_mount *hmp);
void hammer_start_transaction_fls(struct hammer_transaction *trans,
			      struct hammer_mount *hmp);
void hammer_done_transaction(struct hammer_transaction *trans);
hammer_tid_t hammer_alloc_tid(hammer_mount_t hmp, int count);
void hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags);
void hammer_flush_inode(hammer_inode_t ip, int flags);
void hammer_flush_inode_done(hammer_inode_t ip, int error);
void hammer_wait_inode(hammer_inode_t ip);

int  hammer_create_inode(struct hammer_transaction *trans, struct vattr *vap,
			struct ucred *cred, struct hammer_inode *dip,
			const char *name, int namelen,
			hammer_pseudofs_inmem_t pfsm,
			struct hammer_inode **ipp);
void hammer_rel_inode(hammer_inode_t ip, int flush);
int hammer_reload_inode(hammer_inode_t ip, void *arg __unused);
int hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2);
int hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2);
int hammer_destroy_inode_callback(hammer_inode_t ip, void *data __unused);

int hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip);
void hammer_test_inode(hammer_inode_t dip);
void hammer_inode_unloadable_check(hammer_inode_t ip, int getvp);
int  hammer_ip_add_directory(struct hammer_transaction *trans,
			hammer_inode_t dip, const char *name, int bytes,
			hammer_inode_t nip);
int  hammer_ip_del_directory(struct hammer_transaction *trans,
			hammer_cursor_t cursor, hammer_inode_t dip,
			hammer_inode_t ip);
void hammer_ip_replace_bulk(hammer_mount_t hmp, hammer_record_t record);
hammer_record_t hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset,
			void *data, int bytes, int *errorp);
int  hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size);
int  hammer_ip_add_record(struct hammer_transaction *trans,
			hammer_record_t record);
int  hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
			int64_t ran_beg, int64_t ran_end, int truncating);
int  hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip,
			int *countp);
int  hammer_ip_sync_data(hammer_cursor_t cursor, hammer_inode_t ip,
			int64_t offset, void *data, int bytes);
int  hammer_ip_sync_record(hammer_transaction_t trans, hammer_record_t rec);
int  hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t rec);
hammer_pseudofs_inmem_t hammer_load_pseudofs(hammer_transaction_t trans,
			u_int32_t localization, int *errorp);
int  hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
			hammer_pseudofs_inmem_t pfsm);
int  hammer_save_pseudofs(hammer_transaction_t trans,
			hammer_pseudofs_inmem_t pfsm);
int  hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization);
void hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm);
int  hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
			struct ucred *cred);
void hammer_io_init(hammer_io_t io, hammer_volume_t volume,
			enum hammer_io_type type);
int hammer_io_read(struct vnode *devvp, struct hammer_io *io,
			hammer_off_t limit);
void hammer_io_advance(struct hammer_io *io);
int hammer_io_new(struct vnode *devvp, struct hammer_io *io);
int hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset);
struct buf *hammer_io_release(struct hammer_io *io, int flush);
void hammer_io_flush(struct hammer_io *io, int reclaim);
void hammer_io_wait(struct hammer_io *io);
void hammer_io_waitdep(struct hammer_io *io);
void hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush);
int hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
			hammer_btree_leaf_elm_t leaf);
int hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
			hammer_record_t record);
void hammer_io_direct_wait(hammer_record_t record);
void hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf);
void hammer_io_write_interlock(hammer_io_t io);
void hammer_io_done_interlock(hammer_io_t io);
void hammer_io_clear_modify(struct hammer_io *io, int inval);
void hammer_io_clear_modlist(struct hammer_io *io);
void hammer_io_flush_sync(hammer_mount_t hmp);
void hammer_io_clear_error(struct hammer_io *io);
void hammer_io_notmeta(hammer_buffer_t buffer);
void hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
			void *base, int len);
void hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
			void *base, int len);
void hammer_modify_volume_done(hammer_volume_t volume);
void hammer_modify_buffer_done(hammer_buffer_t buffer);
int hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_reblock *reblock);
int hammer_ioc_rebalance(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_rebalance *rebal);
int hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_prune *prune);
int hammer_ioc_mirror_read(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_mirror_rw *mirror);
int hammer_ioc_mirror_write(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_mirror_rw *mirror);
int hammer_ioc_set_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct ucred *cred, struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_get_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_destroy_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_downgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_upgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_wait_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_volume *ioc);
int hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_volume *ioc);

int hammer_signal_check(hammer_mount_t hmp);
void hammer_flusher_create(hammer_mount_t hmp);
void hammer_flusher_destroy(hammer_mount_t hmp);
void hammer_flusher_sync(hammer_mount_t hmp);
int  hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t flg);
int  hammer_flusher_async_one(hammer_mount_t hmp);
void hammer_flusher_wait(hammer_mount_t hmp, int seq);
void hammer_flusher_wait_next(hammer_mount_t hmp);
int  hammer_flusher_meta_limit(hammer_mount_t hmp);
int  hammer_flusher_meta_halflimit(hammer_mount_t hmp);
int  hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter);
void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
void hammer_flusher_finalize(hammer_transaction_t trans, int final);
int  hammer_flusher_haswork(hammer_mount_t hmp);
void hammer_flusher_flush_undos(hammer_mount_t hmp, int already_flushed);
int hammer_recover_stage1(hammer_mount_t hmp, hammer_volume_t rootvol);
int hammer_recover_stage2(hammer_mount_t hmp, hammer_volume_t rootvol);
void hammer_recover_flush_buffers(hammer_mount_t hmp,
			hammer_volume_t root_volume, int final);
void hammer_crc_set_blockmap(hammer_blockmap_t blockmap);
void hammer_crc_set_volume(hammer_volume_ondisk_t ondisk);
void hammer_crc_set_leaf(void *data, hammer_btree_leaf_elm_t leaf);

int hammer_crc_test_blockmap(hammer_blockmap_t blockmap);
int hammer_crc_test_volume(hammer_volume_ondisk_t ondisk);
int hammer_crc_test_btree(hammer_node_ondisk_t ondisk);
int hammer_crc_test_leaf(void *data, hammer_btree_leaf_elm_t leaf);
void hkprintf(const char *ctl, ...);
udev_t hammer_fsid_to_udev(uuid_t *uuid);
int hammer_blocksize(int64_t file_offset);
int hammer_blockoff(int64_t file_offset);
int64_t hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2);
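/*
 * Illustrative sketch: these helpers encapsulate HAMMER's mixed block
 * size scheme for regular files (16KB buffers below the 1MB boundary,
 * 64KB beyond it), e.g. in a read path (uio is a hypothetical struct
 * uio pointer):
 *
 *	int bsize = hammer_blocksize(uio->uio_offset);
 *	int boff  = hammer_blockoff(uio->uio_offset);
 */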
/*
 * Shortcut for _hammer_checkspace(), used all over the code.
 */
static __inline int
hammer_checkspace(hammer_mount_t hmp, int slop)
{
	return(_hammer_checkspace(hmp, slop, NULL));
}
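/*
 * Illustrative sketch of a typical caller, pairing the inline above
 * with one of the CHKSPC slop constants defined earlier (error is a
 * hypothetical local):
 *
 *	if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
 *		return (error);
 */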
static __inline void
hammer_wait_mem_record(hammer_record_t record)
{
	hammer_wait_mem_record_ident(record, "hmmwai");
}

static __inline void
hammer_lock_ex(struct hammer_lock *lock)
{
	hammer_lock_ex_ident(lock, "hmrlck");
}
/*
 * Indicate that a B-Tree node is being modified.
 */
static __inline void
hammer_modify_node_noundo(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
	hammer_modify_buffer(trans, node->buffer, NULL, 0);
}

static __inline void
hammer_modify_node_all(hammer_transaction_t trans, struct hammer_node *node)
{
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
	hammer_modify_buffer(trans, node->buffer,
			     node->ondisk, sizeof(*node->ondisk));
}
static __inline void
hammer_modify_node(hammer_transaction_t trans, hammer_node_t node,
		   void *base, int len)
{
	hammer_crc_t *crcptr;

	KKASSERT((char *)base >= (char *)node->ondisk &&
		 (char *)base + len <=
		    (char *)node->ondisk + sizeof(*node->ondisk));
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
	hammer_modify_buffer(trans, node->buffer, base, len);
	crcptr = &node->ondisk->crc;
	hammer_modify_buffer(trans, node->buffer, crcptr, sizeof(hammer_crc_t));
	--node->buffer->io.modify_refs;	/* only want one ref */
}
/*
 * Indicate that the specified modifications have been completed.
 *
 * Do not try to generate the crc here, it's very expensive to do and a
 * sequence of insertions or deletions can result in many calls to this
 * function on the same node.
 */
static __inline void
hammer_modify_node_done(hammer_node_t node)
{
	node->flags |= HAMMER_NODE_CRCGOOD;
	if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0) {
		node->flags |= HAMMER_NODE_NEEDSCRC;
		node->buffer->io.gencrc = 1;
		hammer_ref_node(node);
	}
	hammer_modify_buffer_done(node->buffer);
}
#define hammer_modify_volume_field(trans, vol, field)		\
	hammer_modify_volume(trans, vol, &(vol)->ondisk->field,	\
			     sizeof((vol)->ondisk->field))

#define hammer_modify_node_field(trans, node, field)		\
	hammer_modify_node(trans, node, &(node)->ondisk->field,	\
			   sizeof((node)->ondisk->field))
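/*
 * Illustrative sketch of the modify/done pairing using the macro above
 * (trans, node and new_count are hypothetical locals):
 *
 *	hammer_modify_node_field(trans, node, count);
 *	node->ondisk->count = new_count;
 *	hammer_modify_node_done(node);
 */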
/*
 * The HAMMER_INODE_CAP_DIR_LOCAL_INO capability is set on newly
 * created directories for HAMMER version 2 or greater and causes
 * directory entries to be placed in the inode localization zone in
 * the B-Tree instead of the misc zone.
 *
 * This greatly improves localization between directory entries and
 * inodes.
 */
static __inline u_int32_t
hammer_dir_localization(hammer_inode_t dip)
{
	if (dip->ino_data.cap_flags & HAMMER_INODE_CAP_DIR_LOCAL_INO)
		return(HAMMER_LOCALIZE_INODE);
	else
		return(HAMMER_LOCALIZE_MISC);
}

#endif	/* _KERNEL */