/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef VFS_HAMMER_HAMMER_H_
#define VFS_HAMMER_HAMMER_H_

/*
 * This header file contains structures used internally by the HAMMERFS
 * implementation.  See hammer_disk.h for on-disk structures.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/event.h>
#include <sys/queue.h>
#include <sys/limits.h>
#include <sys/sysctl.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>

#include "hammer_disk.h"
#include "hammer_mount.h"
#include "hammer_ioctl.h"
#include "hammer_crc.h"
#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

MALLOC_DECLARE(M_HAMMER);

#if !defined(KTR_HAMMER)
#define KTR_HAMMER	KTR_ALL
#endif
/* KTR_INFO_MASTER_EXTERN(hammer); */

struct hammer_reserve;
/*
 * Key structure used for custom RB tree inode lookups.  This prototypes
 * the function hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info).
 */
typedef struct hammer_inode_info {
	int64_t		obj_id;		/* (key) object identifier */
	hammer_tid_t	obj_asof;	/* (key) snapshot transid or 0 */
	uint32_t	obj_localization; /* (key) pseudo-fs id for upper 16 bits */
	hammer_btree_leaf_elm_t leaf;
} *hammer_inode_info_t;
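
/*
 * Illustrative sketch only (not part of this header's API): a caller doing
 * a custom RB lookup fills in the key fields and hands the structure to the
 * generated lookup routine named in the comment above.  The variable names
 * here are hypothetical.
 *
 *	struct hammer_inode_info iinfo;
 *
 *	iinfo.obj_id = obj_id;
 *	iinfo.obj_asof = asof;
 *	iinfo.obj_localization = localization;
 *	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
 */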
typedef enum hammer_transaction_type {
	HAMMER_TRANS_RO,
	HAMMER_TRANS_STD,
	HAMMER_TRANS_FLS
} hammer_transaction_type_t;

/*
 * HAMMER Transaction tracking
 */
typedef struct hammer_transaction {
	hammer_transaction_type_t type;
	struct hammer_mount *hmp;
	struct hammer_volume *rootvol;
} *hammer_transaction_t;
#define HAMMER_TRANSF_NEWINODE	0x0001
#define HAMMER_TRANSF_CRCDOM	0x0004	/* EDOM on CRC error, less critical */

/*
 * HAMMER locks
 */
struct hammer_lock {
	volatile u_int	refs;		/* active references */
	volatile u_int	lockval;	/* lock count and control bits */
	struct thread	*lowner;	/* owner if exclusively held */
	struct thread	*rowner;	/* owner if exclusively held */
};

#define HAMMER_REFS_LOCKED	0x40000000	/* transition check */
#define HAMMER_REFS_WANTED	0x20000000	/* transition check */
#define HAMMER_REFS_CHECK	0x10000000	/* transition check */

#define HAMMER_REFS_FLAGS	(HAMMER_REFS_LOCKED | \
				 HAMMER_REFS_WANTED | \
				 HAMMER_REFS_CHECK)

#define HAMMER_LOCKF_EXCLUSIVE	0x40000000
#define HAMMER_LOCKF_WANTED	0x20000000

#define HAMMER_LIMIT_RECLAIMS	16384	/* maximum reclaims in-prog */
static __inline int
hammer_notlocked(struct hammer_lock *lock)
{
	return(lock->lockval == 0);
}

static __inline int
hammer_islocked(struct hammer_lock *lock)
{
	return(lock->lockval != 0);
}

/*
 * Returns the number of refs on the object.
 */
static __inline int
hammer_isactive(struct hammer_lock *lock)
{
	return(lock->refs & ~HAMMER_REFS_FLAGS);
}

static __inline int
hammer_oneref(struct hammer_lock *lock)
{
	return((lock->refs & ~HAMMER_REFS_FLAGS) == 1);
}

static __inline int
hammer_norefs(struct hammer_lock *lock)
{
	return((lock->refs & ~HAMMER_REFS_FLAGS) == 0);
}

static __inline int
hammer_norefsorlock(struct hammer_lock *lock)
{
	return(lock->refs == 0);
}

static __inline int
hammer_refsorlock(struct hammer_lock *lock)
{
	return(lock->refs != 0);
}

/*
 * Return if we specifically own the lock exclusively.
 */
static __inline int
hammer_lock_excl_owned(struct hammer_lock *lock, thread_t td)
{
	if ((lock->lockval & HAMMER_LOCKF_EXCLUSIVE) &&
	    lock->lowner == td) {
		return(1);
	}
	return(0);
}
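
/*
 * Hedged reading of the helpers above, as an illustration only: ->refs
 * carries the reference count in its low bits and the HAMMER_REFS_*
 * transition bits in its high bits, so the count is masked before being
 * compared.  Typical (hypothetical) checks:
 *
 *	if (hammer_oneref(&ip->lock))		we hold the last reference
 *	if (hammer_norefsorlock(&ip->lock))	fully idle, no flag bits set
 */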
/*
 * Flush state, used by various structures
 */
typedef enum hammer_inode_state {
	HAMMER_FST_IDLE,
	HAMMER_FST_SETUP,
	HAMMER_FST_FLUSH
} hammer_inode_state_t;
/*
 * Pseudo-filesystem extended data tracking
 */
struct hammer_pseudofs_inmem;
RB_HEAD(hammer_pfs_rb_tree, hammer_pseudofs_inmem);
RB_PROTOTYPE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
	      hammer_pfs_rb_compare, uint32_t);

typedef struct hammer_pseudofs_inmem {
	RB_ENTRY(hammer_pseudofs_inmem) rb_node;
	struct hammer_lock	lock;
	uint32_t		localization;
	hammer_tid_t		create_tid;
	struct hammer_pseudofs_data pfsd;
} *hammer_pseudofs_inmem_t;
/*
 * Cache object ids.  A fixed number of objid cache structures are
 * created to reserve object id's for newly created files in multiples
 * of 100,000, localized to a particular directory, and recycled as
 * needed.  This allows parallel create operations in different
 * directories to retain fairly localized object ids which in turn
 * improves reblocking performance and layout.
 */
#define OBJID_CACHE_SIZE	2048
#define OBJID_CACHE_BULK_BITS	10		/* 10 bits (1024) */
#define OBJID_CACHE_BULK	(32 * 32)	/* two level (1024) */
#define OBJID_CACHE_BULK_MASK	(OBJID_CACHE_BULK - 1)
#define OBJID_CACHE_BULK_MASK64	((uint64_t)(OBJID_CACHE_BULK - 1))

typedef struct hammer_objid_cache {
	TAILQ_ENTRY(hammer_objid_cache) entry;
	struct hammer_inode	*dip;
	hammer_tid_t		base_tid;
} *hammer_objid_cache_t;
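
/*
 * Illustrative arithmetic only: with OBJID_CACHE_BULK_BITS of 10 a cache
 * entry spans OBJID_CACHE_BULK (1024) consecutive object ids, so an id can
 * be split into a bulk base and an offset within that bulk:
 *
 *	base   = obj_id & ~OBJID_CACHE_BULK_MASK64;
 *	offset = obj_id &  OBJID_CACHE_BULK_MASK64;	(0..1023)
 *
 * The actual allocation policy lives in hammer_alloc_objid(), declared
 * later in this header, not in these macros.
 */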
/*
 * Associate an inode with a B-Tree node to cache search start positions
 */
typedef struct hammer_node_cache {
	TAILQ_ENTRY(hammer_node_cache) entry;
	struct hammer_node	*node;
	struct hammer_inode	*ip;
} *hammer_node_cache_t;

TAILQ_HEAD(hammer_node_cache_list, hammer_node_cache);
struct hammer_dedup_cache;
RB_HEAD(hammer_dedup_crc_rb_tree, hammer_dedup_cache);
RB_PROTOTYPE2(hammer_dedup_crc_rb_tree, hammer_dedup_cache, crc_entry,
	      hammer_dedup_crc_rb_compare, hammer_crc_t);

RB_HEAD(hammer_dedup_off_rb_tree, hammer_dedup_cache);
RB_PROTOTYPE2(hammer_dedup_off_rb_tree, hammer_dedup_cache, off_entry,
	      hammer_dedup_off_rb_compare, hammer_off_t);

typedef struct hammer_dedup_cache {
	RB_ENTRY(hammer_dedup_cache) crc_entry;
	RB_ENTRY(hammer_dedup_cache) off_entry;
	TAILQ_ENTRY(hammer_dedup_cache) lru_entry;
	struct hammer_mount	*hmp;
	uint32_t		localization;
	hammer_off_t		data_offset;
} *hammer_dedup_cache_t;
/*
 * Structure used to organize flush groups.  Flush groups must be
 * organized into chunks in order to avoid blowing out the UNDO FIFO.
 * Without this a 'sync' could end up flushing 50,000 inodes in a single
 * transaction.
 */
RB_HEAD(hammer_fls_rb_tree, hammer_inode);
RB_PROTOTYPE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
	     hammer_ino_rb_compare);

typedef struct hammer_flush_group {
	TAILQ_ENTRY(hammer_flush_group) flush_entry;
	struct hammer_fls_rb_tree flush_tree;
	int	seq;		/* our seq no */
	int	total_count;	/* record load */
	int	running;	/* group is running */
} *hammer_flush_group_t;

TAILQ_HEAD(hammer_flush_group_list, hammer_flush_group);
/*
 * Structure used to represent an inode in-memory.
 *
 * The record and data associated with an inode may be out of sync with
 * the disk (xDIRTY flags), or not even on the disk at all (ONDISK flag
 * not set).
 *
 * An inode may also hold a cache of unsynchronized records, used for
 * database and directories only.  Unsynchronized regular file data is
 * stored in the buffer cache.
 *
 * NOTE: A file which is created and destroyed within the initial
 * synchronization period can wind up not doing any disk I/O at all.
 *
 * Finally, an inode may cache numerous disk-referencing B-Tree cursors.
 */
RB_HEAD(hammer_ino_rb_tree, hammer_inode);
RB_PROTOTYPEX(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
	      hammer_ino_rb_compare, hammer_inode_info_t);

RB_HEAD(hammer_redo_rb_tree, hammer_inode);
RB_PROTOTYPE2(hammer_redo_rb_tree, hammer_inode, rb_redonode,
	      hammer_redo_rb_compare, hammer_off_t);

struct hammer_record;
RB_HEAD(hammer_rec_rb_tree, hammer_record);
RB_PROTOTYPEX(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
	      hammer_rec_rb_compare, hammer_btree_leaf_elm_t);

TAILQ_HEAD(hammer_record_list, hammer_record);
TAILQ_HEAD(hammer_node_list, hammer_node);
typedef struct hammer_inode {
	RB_ENTRY(hammer_inode)	rb_node;
	hammer_inode_state_t	flush_state;
	hammer_flush_group_t	flush_group;
	RB_ENTRY(hammer_inode)	rb_flsnode;	/* when on flush list */
	RB_ENTRY(hammer_inode)	rb_redonode;	/* when INODE_RDIRTY is set */
	struct hammer_record_list target_list;	/* target of dependent recs */
	int64_t			obj_id;		/* (key) object identifier */
	hammer_tid_t		obj_asof;	/* (key) snapshot or 0 */
	uint32_t		obj_localization; /* (key) pseudo-fs id for upper 16 bits */
	struct hammer_mount	*hmp;
	hammer_objid_cache_t	objid_cache;
	int			error;		/* flush error */
	int			cursor_ip_refs;	/* sanity */
	int			cursor_exclreq_count;
	hammer_pseudofs_inmem_t	pfsm;
	struct lockf		advlock;
	struct hammer_lock	lock;		/* sync copy interlock */
	struct hammer_btree_leaf_elm ino_leaf;	/* in-memory cache */
	struct hammer_inode_data ino_data;	/* in-memory cache */
	struct hammer_rec_rb_tree rec_tree;	/* in-memory cache */

	/*
	 * search initiate cache
	 * cache[0] - this inode
	 * cache[1] - related data, the content depends on situations
	 * cache[2] - for dip to cache ip to shortcut B-Tree search
	 * cache[3] - related data copied from dip to a new ip's cache[1]
	 */
	struct hammer_node_cache cache[4];

	/*
	 * When a demark is created to synchronize an inode to
	 * disk, certain fields are copied so the front-end VOPs
	 * can continue to run in parallel with the synchronization
	 * occurring in the background.
	 */
	int			sync_flags;	/* to-sync flags cache */
	off_t			sync_trunc_off;	/* to-sync truncation */
	off_t			save_trunc_off;	/* write optimization */
	struct hammer_btree_leaf_elm sync_ino_leaf; /* to-sync cache */
	struct hammer_inode_data sync_ino_data;	/* to-sync cache */

	/*
	 * Track the earliest offset in the UNDO/REDO FIFO containing
	 * REDO records.  This is staged to the backend during flush
	 * sequences.  While the inode is staged redo_fifo_next is used
	 * to track the earliest offset for rotation into redo_fifo_start
	 * on completion of the flush.
	 */
	hammer_off_t		redo_fifo_start;
	hammer_off_t		redo_fifo_next;
} *hammer_inode_t;

#define VTOI(vp)	((hammer_inode_t)(vp)->v_data)
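
/*
 * Typical (hypothetical) use from a VOP handler; VTOI() simply recovers the
 * in-memory inode hung off the vnode's v_data:
 *
 *	hammer_inode_t ip = VTOI(ap->a_vp);
 */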
/*
 * NOTE: DDIRTY does not include atime or mtime and does not include
 *	 write-append size changes.  SDIRTY handles write-append size
 *	 changes.
 *
 * REDO indicates that REDO logging is active, creating a definitive
 * stream of REDO records in the UNDO/REDO log for writes and
 * truncations, including boundary records when/if REDO is turned off.
 * REDO is typically enabled by fsync() and turned off if excessive
 * writes without an fsync() occur.
 *
 * RDIRTY indicates that REDO records were laid down in the UNDO/REDO
 * FIFO (even if REDO is turned off some might still be active) and
 * still being tracked for this inode.  See hammer_redo.c
 */
#define HAMMER_INODE_DDIRTY	0x0001	/* in-memory ino_data is dirty */
					/* (not including atime/mtime) */
#define HAMMER_INODE_RSV_INODES	0x0002	/* hmp->rsv_inodes bumped */
#define HAMMER_INODE_CONN_DOWN	0x0004	/* include in downward recursion */
#define HAMMER_INODE_XDIRTY	0x0008	/* in-memory records */
#define HAMMER_INODE_ONDISK	0x0010	/* inode is on-disk (else not yet) */
#define HAMMER_INODE_FLUSH	0x0020	/* flush on last ref */
#define HAMMER_INODE_DELETED	0x0080	/* inode delete (backend) */
#define HAMMER_INODE_DELONDISK	0x0100	/* delete synchronized to disk */
#define HAMMER_INODE_RO		0x0200	/* read-only (because of as-of) */
#define HAMMER_INODE_RECSW	0x0400	/* waiting on data record flush */
#define HAMMER_INODE_DONDISK	0x0800	/* data records may be on disk */
#define HAMMER_INODE_BUFS	0x1000	/* dirty high level bps present */
#define HAMMER_INODE_REFLUSH	0x2000	/* flush on dependency / reflush */
#define HAMMER_INODE_RECLAIM	0x4000	/* trying to reclaim */
#define HAMMER_INODE_FLUSHW	0x8000	/* someone waiting for flush */

#define HAMMER_INODE_TRUNCATED	0x00010000
#define HAMMER_INODE_DELETING	0x00020000 /* inode delete request (frontend)*/
#define HAMMER_INODE_RESIGNAL	0x00040000 /* re-signal on re-flush */
#define HAMMER_INODE_ATIME	0x00100000 /* in-memory atime modified */
#define HAMMER_INODE_MTIME	0x00200000 /* in-memory mtime modified */
#define HAMMER_INODE_WOULDBLOCK	0x00400000 /* re-issue to new flush group */
#define HAMMER_INODE_DUMMY	0x00800000 /* dummy inode covering bad file */
#define HAMMER_INODE_SDIRTY	0x01000000 /* in-memory ino_data.size is dirty*/
#define HAMMER_INODE_REDO	0x02000000 /* REDO logging active */
#define HAMMER_INODE_RDIRTY	0x04000000 /* REDO records active in fifo */
#define HAMMER_INODE_SLAVEFLUSH	0x08000000 /* being flushed by slave */

#define HAMMER_INODE_MODMASK	(HAMMER_INODE_DDIRTY|HAMMER_INODE_SDIRTY|   \
				 HAMMER_INODE_XDIRTY|HAMMER_INODE_BUFS|	    \
				 HAMMER_INODE_ATIME|HAMMER_INODE_MTIME|	    \
				 HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING)

#define HAMMER_INODE_MODMASK_NOXDIRTY \
				(HAMMER_INODE_MODMASK & ~HAMMER_INODE_XDIRTY)

#define HAMMER_INODE_MODMASK_NOREDO \
				(HAMMER_INODE_DDIRTY|			    \
				 HAMMER_INODE_XDIRTY|			    \
				 HAMMER_INODE_TRUNCATED|HAMMER_INODE_DELETING)

#define HAMMER_FLUSH_SIGNAL	0x0001
#define HAMMER_FLUSH_RECURSION	0x0002
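
/*
 * Hedged usage sketch: HAMMER_INODE_MODMASK collects every "something is
 * dirty in-memory" bit, so a caller that only needs a yes/no answer can test
 * the whole mask at once rather than the individual flags.  Whether a real
 * caller passes HAMMER_FLUSH_SIGNAL depends on context; hammer_flush_inode()
 * is declared later in this header.
 *
 *	if (ip->flags & HAMMER_INODE_MODMASK)
 *		hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
 */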
/*
 * Used by the inode reclaim code to pipeline reclaims and avoid
 * blowing out kernel memory or letting the flusher get too far
 * behind.  The reclaim wakes up when count reaches 0 or the
 * timer expires.
 */
struct hammer_reclaim {
	TAILQ_ENTRY(hammer_reclaim) entry;
	int	count;
};
/*
 * Track who is creating the greatest burden on the
 * inode cache.
 */
struct hammer_inostats {
	pid_t		pid;	/* track user process */
	int		ltick;	/* last tick */
	int		count;	/* count (degenerates) */
};

#define HAMMER_INOSTATS_HSIZE	32
#define HAMMER_INOSTATS_HMASK	(HAMMER_INOSTATS_HSIZE - 1)
/*
 * Structure used to represent an unsynchronized record in-memory.  These
 * records typically represent directory entries.  Only non-historical
 * records are kept in-memory.
 *
 * Records are organized as a per-inode RB-Tree.  If the inode is not
 * on disk then neither are any records and the in-memory record tree
 * represents the entire contents of the inode.  If the inode is on disk
 * then the on-disk B-Tree is scanned in parallel with the in-memory
 * RB-Tree to synthesize the current state of the file.
 *
 * Records are also used to enforce the ordering of directory create/delete
 * operations.  A new inode will not be flushed to disk unless its related
 * directory entry is also being flushed at the same time.  A directory entry
 * will not be removed unless its related inode is also being removed at the
 * same time.
 */
typedef enum hammer_record_type {
	HAMMER_MEM_RECORD_GENERAL,	/* misc record */
	HAMMER_MEM_RECORD_INODE,	/* inode record */
	HAMMER_MEM_RECORD_ADD,		/* positive memory cache record */
	HAMMER_MEM_RECORD_DEL,		/* negative delete-on-disk record */
	HAMMER_MEM_RECORD_DATA		/* bulk-data record w/on-disk ref */
} hammer_record_type_t;
typedef struct hammer_record {
	RB_ENTRY(hammer_record)		rb_node;
	TAILQ_ENTRY(hammer_record)	target_entry;
	hammer_inode_state_t		flush_state;
	hammer_flush_group_t		flush_group;
	hammer_record_type_t		type;
	struct hammer_lock		lock;
	struct hammer_reserve		*resv;
	hammer_inode_t			target_ip;
	struct hammer_btree_leaf_elm	leaf;
	hammer_data_ondisk_t		data;
	hammer_off_t			zone2_offset;	/* direct-write only */
} *hammer_record_t;
/*
 * Record flags.  Note that FE can only be set by the frontend if the
 * record has not been interlocked by the backend w/ BE.
 */
#define HAMMER_RECF_ALLOCDATA		0x0001
#define HAMMER_RECF_ONRBTREE		0x0002
#define HAMMER_RECF_DELETED_FE		0x0004	/* deleted (frontend) */
#define HAMMER_RECF_DELETED_BE		0x0008	/* deleted (backend) */
#define HAMMER_RECF_COMMITTED		0x0010	/* committed to the B-Tree */
#define HAMMER_RECF_INTERLOCK_BE	0x0020	/* backend interlock */
#define HAMMER_RECF_WANTED		0x0040	/* wanted by the frontend */
#define HAMMER_RECF_DEDUPED		0x0080	/* will be live-dedup'ed */
#define HAMMER_RECF_CONVERT_DELETE	0x0100	/* special case */
#define HAMMER_RECF_REDO		0x1000	/* REDO was laid down */

/*
 * These flags must be separate to deal with SMP races
 */
#define HAMMER_RECG_DIRECT_IO		0x0001	/* related direct I/O running*/
#define HAMMER_RECG_DIRECT_WAIT		0x0002	/* related direct I/O running*/
#define HAMMER_RECG_DIRECT_INVAL	0x0004	/* buffer alias invalidation */

/*
 * hammer_create_at_cursor() and hammer_delete_at_cursor() flags.
 */
#define HAMMER_CREATE_MODE_UMIRROR	0x0001
#define HAMMER_CREATE_MODE_SYS		0x0002

#define HAMMER_DELETE_ADJUST		0x0001
#define HAMMER_DELETE_DESTROY		0x0002
/*
 * In-memory structures representing on-disk structures.
 */
RB_HEAD(hammer_vol_rb_tree, hammer_volume);
RB_HEAD(hammer_buf_rb_tree, hammer_buffer);
RB_HEAD(hammer_nod_rb_tree, hammer_node);
RB_HEAD(hammer_und_rb_tree, hammer_undo);
RB_HEAD(hammer_res_rb_tree, hammer_reserve);
RB_HEAD(hammer_mod_rb_tree, hammer_io);

RB_PROTOTYPE2(hammer_vol_rb_tree, hammer_volume, rb_node,
	      hammer_vol_rb_compare, int32_t);
RB_PROTOTYPE2(hammer_buf_rb_tree, hammer_buffer, rb_node,
	      hammer_buf_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_nod_rb_tree, hammer_node, rb_node,
	      hammer_nod_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_und_rb_tree, hammer_undo, rb_node,
	      hammer_und_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_res_rb_tree, hammer_reserve, rb_node,
	      hammer_res_rb_compare, hammer_off_t);
RB_PROTOTYPE2(hammer_mod_rb_tree, hammer_io, rb_node,
	      hammer_mod_rb_compare, hammer_off_t);
/*
 * IO management - embedded at the head of various in-memory structures
 *
 * VOLUME	- hammer_volume containing meta-data
 * META_BUFFER	- hammer_buffer containing meta-data
 * UNDO_BUFFER	- hammer_buffer containing undo-data
 * DATA_BUFFER	- hammer_buffer containing pure-data
 * DUMMY	- hammer_buffer not containing valid data
 *
 * Dirty volume headers and dirty meta-data buffers are locked until the
 * flusher can sequence them out.  Dirty pure-data buffers can be written.
 * Clean buffers can be passively released.
 */
typedef enum hammer_io_type {
	HAMMER_IOTYPE_VOLUME,
	HAMMER_IOTYPE_META_BUFFER,
	HAMMER_IOTYPE_UNDO_BUFFER,
	HAMMER_IOTYPE_DATA_BUFFER,
	HAMMER_IOTYPE_DUMMY
} hammer_io_type_t;
typedef struct hammer_io {
	struct hammer_lock	lock;
	hammer_io_type_t	type;
	struct hammer_mount	*hmp;
	struct hammer_volume	*volume;
	RB_ENTRY(hammer_io)	rb_node;	/* if modified */
	TAILQ_ENTRY(hammer_io)	iorun_entry;	/* iorun_list */
	struct hammer_mod_rb_tree *mod_root;
	int64_t			offset;		/* volume offset */
	int			bytes;		/* buffer cache buffer size */

	/*
	 * These can be modified at any time by the backend while holding
	 * io_token, due to bio_done and hammer_io_complete() callbacks.
	 */
	u_int		running : 1;	/* bp write IO in progress */
	u_int		waiting : 1;	/* someone is waiting on us */
	u_int		ioerror : 1;	/* abort on io-error */

	/*
	 * These can only be modified by the frontend while holding
	 * fs_token, or by the backend while holding the io interlocked
	 * with no references (which will block the frontend when it
	 * tries to reference it).
	 *
	 * WARNING! SMP RACES will create havoc if the callbacks ever tried
	 *	    to modify any of these outside the above restrictions.
	 */
	u_int		modified : 1;	/* bp's data was modified */
	u_int		released : 1;	/* bp released (w/ B_LOCKED set) */
	u_int		waitdep : 1;	/* flush waits for dependencies */
	u_int		recovered : 1;	/* has recovery ref */
	u_int		waitmod : 1;	/* waiting for modify_refs */
	u_int		reclaim : 1;	/* reclaim requested */
	u_int		gencrc : 1;	/* crc needs to be generated */
} *hammer_io_t;
#define HAMMER_CLUSTER_SIZE	(64 * 1024)
#if HAMMER_CLUSTER_SIZE > MAXBSIZE
#undef  HAMMER_CLUSTER_SIZE
#define HAMMER_CLUSTER_SIZE	MAXBSIZE
#endif
/*
 * In-memory volume representing on-disk buffer
 */
typedef struct hammer_volume {
	struct hammer_io io;		/* must be at offset 0 */
	RB_ENTRY(hammer_volume) rb_node;
	hammer_volume_ondisk_t ondisk;
	hammer_off_t	maxbuf_off;	/* Maximum buffer offset (zone-2) */
} *hammer_volume_t;

#define HAMMER_ITOV(iop) ((hammer_volume_t)(iop))
/*
 * In-memory buffer representing an on-disk buffer.
 */
typedef struct hammer_buffer {
	struct hammer_io io;		/* must be at offset 0 */
	RB_ENTRY(hammer_buffer) rb_node;
	hammer_off_t	zoneX_offset;
	hammer_off_t	zone2_offset;
	struct hammer_reserve *resv;
	struct hammer_node_list node_list;
} *hammer_buffer_t;

#define HAMMER_ITOB(iop) ((hammer_buffer_t)(iop))
/*
 * In-memory B-Tree node, representing an on-disk B-Tree node.
 *
 * This is a hang-on structure which is backed by a hammer_buffer,
 * and used for fine-grained locking of B-Tree nodes in order to
 * properly control lock ordering.
 */
typedef struct hammer_node {
	struct hammer_lock	lock;		/* node-by-node lock */
	TAILQ_ENTRY(hammer_node) entry;		/* per-buffer linkage */
	RB_ENTRY(hammer_node)	rb_node;	/* per-mount linkage */
	hammer_off_t		node_offset;	/* full offset spec */
	struct hammer_mount	*hmp;
	hammer_buffer_t		buffer;		/* backing buffer */
	hammer_node_ondisk_t	ondisk;		/* ptr to on-disk structure */
	TAILQ_HEAD(, hammer_cursor) cursor_list; /* deadlock recovery */
	struct hammer_node_cache_list cache_list; /* passive caches */
	int			flags;
	int			cursor_exclreq_count;
} *hammer_node_t;

#define HAMMER_NODE_DELETED	0x0001
#define HAMMER_NODE_FLUSH	0x0002
#define HAMMER_NODE_CRCGOOD	0x0004
#define HAMMER_NODE_NEEDSCRC	0x0008
#define HAMMER_NODE_NEEDSMIRROR	0x0010
#define HAMMER_NODE_CRCBAD	0x0020
#define HAMMER_NODE_NONLINEAR	0x0040		/* linear heuristic */

#define HAMMER_NODE_CRCANY	(HAMMER_NODE_CRCGOOD | HAMMER_NODE_CRCBAD)
/*
 * List of locked nodes.  This structure is used to lock potentially large
 * numbers of nodes as an aid for complex B-Tree operations.
 */
struct hammer_node_lock;
TAILQ_HEAD(hammer_node_lock_list, hammer_node_lock);

typedef struct hammer_node_lock {
	TAILQ_ENTRY(hammer_node_lock) entry;
	struct hammer_node_lock_list list;
	struct hammer_node_lock	*parent;
	hammer_node_ondisk_t	copy;	/* copy of on-disk data */
	int			index;	/* index of this node in parent */
	int			count;	/* count children */
} *hammer_node_lock_t;

#define HAMMER_NODE_LOCK_UPDATED	0x0001
#define HAMMER_NODE_LOCK_LCACHE		0x0002
/*
 * The reserve structure prevents the blockmap from allocating
 * out of a reserved big-block.  Such reservations are used by
 * the direct-write mechanism.
 *
 * The structure is also used to hold off on reallocations of
 * big-blocks from the freemap until flush dependencies have
 * been dealt with.
 */
typedef struct hammer_reserve {
	RB_ENTRY(hammer_reserve) rb_node;
	TAILQ_ENTRY(hammer_reserve) delay_entry;
	hammer_off_t	zone_offset;
} *hammer_reserve_t;

#define HAMMER_RESF_ONDELAY	0x0001
#define HAMMER_RESF_LAYER2FREE	0x0002

#include "hammer_cursor.h"
/*
 * The undo structure tracks recent undos to avoid laying down duplicate
 * undos within a flush group, saving us a significant amount of overhead.
 *
 * This is strictly a heuristic.
 */
#define HAMMER_MAX_UNDOS	1024
#define HAMMER_MAX_FLUSHERS	4

typedef struct hammer_undo {
	RB_ENTRY(hammer_undo)	rb_node;
	TAILQ_ENTRY(hammer_undo) lru_entry;
} *hammer_undo_t;

struct hammer_flusher_info;
TAILQ_HEAD(hammer_flusher_info_list, hammer_flusher_info);
struct hammer_flusher {
	int		signal;		/* flusher thread sequencer */
	int		done;		/* last completed flush group */
	int		next;		/* next unallocated flg seqno */
	int		group_lock;	/* lock sequencing of the next flush */
	int		exiting;	/* request master exit */
	thread_t	td;		/* master flusher thread */
	hammer_tid_t	tid;		/* last flushed transaction id */
	int		finalize_want;	/* serialize finalization */
	struct hammer_lock finalize_lock; /* serialize finalization */
	struct hammer_transaction trans; /* shared transaction */
	struct hammer_flusher_info_list run_list;
	struct hammer_flusher_info_list ready_list;
};

#define HAMMER_FLUSH_UNDOS_RELAXED	0
#define HAMMER_FLUSH_UNDOS_FORCED	1
#define HAMMER_FLUSH_UNDOS_AUTO		2
/*
 * Internal hammer mount data structure
 */
typedef struct hammer_mount {
	struct mount *mp;
	struct hammer_ino_rb_tree rb_inos_root;
	struct hammer_redo_rb_tree rb_redo_root;
	struct hammer_vol_rb_tree rb_vols_root;
	struct hammer_nod_rb_tree rb_nods_root;
	struct hammer_und_rb_tree rb_undo_root;
	struct hammer_res_rb_tree rb_resv_root;
	struct hammer_buf_rb_tree rb_bufs_root;
	struct hammer_pfs_rb_tree rb_pfsm_root;

	struct hammer_dedup_crc_rb_tree rb_dedup_crc_root;
	struct hammer_dedup_off_rb_tree rb_dedup_off_root;

	hammer_volume_t rootvol;
	struct hammer_base_elm root_btree_beg;
	struct hammer_base_elm root_btree_end;

	struct malloc_type	*m_misc;
	struct malloc_type	*m_inodes;

	int	flags;		/* HAMMER_MOUNT_xxx flags */
	int	master_id;	/* default 0, no-mirror -1, otherwise 1-15 */
	int	version;	/* hammer filesystem version to use */
	int	rsv_inodes;	/* reserved space due to dirty inodes */
	int64_t	rsv_databytes;	/* reserved space due to record data */
	int	rsv_recs;	/* reserved space due to dirty records */
	int	rsv_fromdelay;	/* big-blocks reserved due to flush delay */
	int	undo_rec_limit;	/* based on size of undo area */

	int	volume_to_remove; /* volume that is currently being removed */

	int	count_inodes;	/* total number of inodes */
	int	count_iqueued;	/* inodes queued to flusher */
	int	count_reclaims;	/* inodes pending reclaim by flusher */

	struct hammer_flusher flusher;

	u_int	check_interrupt;

	struct hammer_mod_rb_tree volu_root;	/* dirty volume headers */
	struct hammer_mod_rb_tree undo_root;	/* dirty undo buffers */
	struct hammer_mod_rb_tree data_root;	/* dirty data buffers */
	struct hammer_mod_rb_tree meta_root;	/* dirty meta bufs */
	struct hammer_mod_rb_tree lose_root;	/* loose buffers */
	long	locked_dirty_space;		/* meta/volu count */
	long	io_running_space;		/* io_token */
	int	objid_cache_count;
	int	dedup_cache_count;
	int	error;				/* critical I/O error */
	struct krate	krate;			/* rate limited kprintf */
	struct krate	kdiag;			/* rate limited kprintf */
	hammer_tid_t	asof;			/* snapshot mount */
	hammer_tid_t	next_tid;
	hammer_tid_t	flush_tid1;		/* flusher tid sequencing */
	hammer_tid_t	flush_tid2;		/* flusher tid sequencing */
	int64_t	copy_stat_freebigblocks;	/* number of free big-blocks */
	uint32_t	undo_seqno;		/* UNDO/REDO FIFO seqno */
	uint32_t	recover_stage2_seqno;	/* REDO recovery seqno */
	hammer_off_t	recover_stage2_offset;	/* REDO recovery offset */

	struct netexport export;
	struct hammer_lock sync_lock;
	struct hammer_lock undo_lock;
	struct hammer_lock blkmap_lock;
	struct hammer_lock snapshot_lock;
	struct hammer_lock volume_lock;
	struct hammer_blockmap blockmap[HAMMER_MAX_ZONES];
	struct hammer_undo undos[HAMMER_MAX_UNDOS];
	TAILQ_HEAD(, hammer_undo) undo_lru_list;
	TAILQ_HEAD(, hammer_reserve) delay_list;
	struct hammer_flush_group_list flush_group_list;
	hammer_flush_group_t	fill_flush_group;
	hammer_flush_group_t	next_flush_group;
	TAILQ_HEAD(, hammer_objid_cache) objid_cache_list;
	TAILQ_HEAD(, hammer_dedup_cache) dedup_lru_list;
	hammer_dedup_cache_t	dedup_free_cache;
	TAILQ_HEAD(, hammer_reclaim) reclaim_list;
	TAILQ_HEAD(, hammer_io) iorun_list;

	struct lwkt_token	fs_token;	/* high level */
	struct lwkt_token	io_token;	/* low level (IO callback) */

	struct hammer_inostats	inostats[HAMMER_INOSTATS_HSIZE];
	uint64_t volume_map[4];			/* 256 bits bitfield */
} *hammer_mount_t;

#define HAMMER_MOUNT_CRITICAL_ERROR	0x0001
#define HAMMER_MOUNT_FLUSH_RECOVERY	0x0002
#define HAMMER_MOUNT_REDO_SYNC		0x0004
#define HAMMER_MOUNT_REDO_RECOVERY_REQ	0x0008
#define HAMMER_MOUNT_REDO_RECOVERY_RUN	0x0010
#define HAMMER_VOLUME_NUMBER_FOREACH(hmp, n)		\
	for (n = 0; n < HAMMER_MAX_VOLUMES; n++)	\
		if (hammer_volume_number_test(hmp, n))
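
/*
 * Hedged usage sketch for the iterator above: `n' only visits volume
 * numbers whose bit is set in hmp->volume_map.  hammer_get_volume() and
 * hammer_rel_volume() are declared later in this header; the error
 * handling shown is illustrative.
 *
 *	int n, error;
 *	hammer_volume_t volume;
 *
 *	HAMMER_VOLUME_NUMBER_FOREACH(hmp, n) {
 *		volume = hammer_get_volume(hmp, n, &error);
 *		if (volume) {
 *			...
 *			hammer_rel_volume(volume, 0);
 *		}
 *	}
 */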
/*
 * Minimum buffer cache bufs required to rebalance the B-Tree.
 * This is because we must hold the children and the children's children
 * locked.  Even this might not be enough if things are horribly out
 * of whack.
 */
#define HAMMER_REBALANCE_MIN_BUFS	\
	(HAMMER_BTREE_LEAF_ELMS * HAMMER_BTREE_LEAF_ELMS)

#endif	/* _KERNEL || _KERNEL_STRUCTURES */
/*
 * checkspace slop (8MB chunks), higher numbers are more conservative.
 */
#define HAMMER_CHKSPC_REBLOCK	25
#define HAMMER_CHKSPC_MIRROR	20
#define HAMMER_CHKSPC_WRITE	20
#define HAMMER_CHKSPC_CREATE	20
#define HAMMER_CHKSPC_REMOVE	10
#define HAMMER_CHKSPC_EMERGENCY	0
#if defined(_KERNEL)

extern struct vop_ops hammer_vnode_vops;
extern struct vop_ops hammer_spec_vops;
extern struct vop_ops hammer_fifo_vops;

extern int hammer_debug_io;
extern int hammer_debug_general;
extern int hammer_debug_inode;
extern int hammer_debug_locks;
extern int hammer_debug_btree;
extern int hammer_debug_tid;
extern int hammer_debug_recover;
extern int hammer_debug_critical;
extern int hammer_cluster_enable;
extern int hammer_live_dedup;
extern int hammer_tdmux_ticks;
extern int hammer_count_fsyncs;
extern int hammer_count_inodes;
extern int hammer_count_iqueued;
extern int hammer_count_reclaims;
extern int hammer_count_records;
extern int hammer_count_record_datas;
extern int hammer_count_volumes;
extern int hammer_count_buffers;
extern int hammer_count_nodes;
extern int64_t hammer_stats_btree_lookups;
extern int64_t hammer_stats_btree_searches;
extern int64_t hammer_stats_btree_inserts;
extern int64_t hammer_stats_btree_deletes;
extern int64_t hammer_stats_btree_elements;
extern int64_t hammer_stats_btree_splits;
extern int64_t hammer_stats_btree_iterations;
extern int64_t hammer_stats_btree_root_iterations;
extern int64_t hammer_stats_record_iterations;
extern int64_t hammer_stats_file_read;
extern int64_t hammer_stats_file_write;
extern int64_t hammer_stats_disk_read;
extern int64_t hammer_stats_disk_write;
extern int64_t hammer_stats_inode_flushes;
extern int64_t hammer_stats_commits;
extern int64_t hammer_stats_undo;
extern int64_t hammer_stats_redo;
extern long hammer_count_dirtybufspace;
extern int hammer_count_refedbufs;
extern int hammer_count_reservations;
extern long hammer_count_io_running_read;
extern long hammer_count_io_running_write;
extern int hammer_count_io_locked;
extern long hammer_limit_dirtybufspace;
extern int hammer_limit_recs;
extern int hammer_limit_inode_recs;
extern int hammer_limit_reclaims;
extern int hammer_live_dedup_cache_size;
extern int hammer_limit_redo;
extern int hammer_verify_zone;
extern int hammer_verify_data;
extern int hammer_double_buffer;
extern int hammer_btree_full_undo;
extern int hammer_yield_check;
extern int hammer_fsync_mode;
extern int hammer_autoflush;
extern int64_t hammer_contention_count;

extern int64_t hammer_live_dedup_vnode_bcmps;
extern int64_t hammer_live_dedup_device_bcmps;
extern int64_t hammer_live_dedup_findblk_failures;
extern int64_t hammer_live_dedup_bmap_saves;
void hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
			int error, const char *msg);
int hammer_vop_inactive(struct vop_inactive_args *);
int hammer_vop_reclaim(struct vop_reclaim_args *);
int hammer_get_vnode(hammer_inode_t ip, struct vnode **vpp);
hammer_inode_t hammer_get_inode(hammer_transaction_t trans,
			hammer_inode_t dip, int64_t obj_id,
			hammer_tid_t asof, uint32_t localization,
			int flags, int *errorp);
hammer_inode_t hammer_get_dummy_inode(hammer_transaction_t trans,
			hammer_inode_t dip, int64_t obj_id,
			hammer_tid_t asof, uint32_t localization,
			int flags, int *errorp);
hammer_inode_t hammer_find_inode(hammer_transaction_t trans,
			int64_t obj_id, hammer_tid_t asof,
			uint32_t localization);
void hammer_scan_inode_snapshots(hammer_mount_t hmp,
			hammer_inode_info_t iinfo,
			int (*callback)(hammer_inode_t ip, void *data),
			void *data);
void hammer_put_inode(hammer_inode_t ip);
void hammer_put_inode_ref(hammer_inode_t ip);
void hammer_inode_waitreclaims(hammer_transaction_t trans);
void hammer_inode_dirty(hammer_inode_t ip);
int hammer_unload_volume(hammer_volume_t volume, void *data);
int hammer_adjust_volume_mode(hammer_volume_t volume, void *data __unused);

int hammer_unload_buffer(hammer_buffer_t buffer, void *data);
int hammer_install_volume(hammer_mount_t hmp, const char *volname,
			struct vnode *devvp, void *data);
int hammer_mountcheck_volumes(hammer_mount_t hmp);
int hammer_get_installed_volumes(hammer_mount_t hmp);

int hammer_mem_add(hammer_record_t record);
int hammer_ip_lookup(hammer_cursor_t cursor);
int hammer_ip_first(hammer_cursor_t cursor);
int hammer_ip_next(hammer_cursor_t cursor);
int hammer_ip_resolve_data(hammer_cursor_t cursor);
int hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
			hammer_tid_t tid);
int hammer_create_at_cursor(hammer_cursor_t cursor,
			hammer_btree_leaf_elm_t leaf, void *udata, int mode);
int hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags,
			hammer_tid_t delete_tid, uint32_t delete_ts,
			int track, int64_t *stat_bytes);
int hammer_ip_check_directory_empty(hammer_transaction_t trans,
			hammer_inode_t ip);
int hammer_sync_hmp(hammer_mount_t hmp, int waitfor);
int hammer_queue_inodes_flusher(hammer_mount_t hmp, int waitfor);
hammer_record_t hammer_alloc_mem_record(hammer_inode_t ip, int data_len);
void hammer_flush_record_done(hammer_record_t record, int error);
void hammer_wait_mem_record_ident(hammer_record_t record, const char *ident);
void hammer_rel_mem_record(hammer_record_t record);

int hammer_cursor_up(hammer_cursor_t cursor);
int hammer_cursor_up_locked(hammer_cursor_t cursor);
int hammer_cursor_down(hammer_cursor_t cursor);
int hammer_cursor_upgrade(hammer_cursor_t cursor);
int hammer_cursor_upgrade_node(hammer_cursor_t cursor);
void hammer_cursor_downgrade(hammer_cursor_t cursor);
int hammer_cursor_upgrade2(hammer_cursor_t c1, hammer_cursor_t c2);
void hammer_cursor_downgrade2(hammer_cursor_t c1, hammer_cursor_t c2);
int hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node,
			int index);
void hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident);
int hammer_lock_ex_try(struct hammer_lock *lock);
void hammer_lock_sh(struct hammer_lock *lock);
int hammer_lock_sh_try(struct hammer_lock *lock);
int hammer_lock_upgrade(struct hammer_lock *lock, int shcount);
void hammer_lock_downgrade(struct hammer_lock *lock, int shcount);
int hammer_lock_status(struct hammer_lock *lock);
void hammer_unlock(struct hammer_lock *lock);
void hammer_ref(struct hammer_lock *lock);
int hammer_ref_interlock(struct hammer_lock *lock);
int hammer_ref_interlock_true(struct hammer_lock *lock);
void hammer_ref_interlock_done(struct hammer_lock *lock);
void hammer_rel(struct hammer_lock *lock);
int hammer_rel_interlock(struct hammer_lock *lock, int locked);
void hammer_rel_interlock_done(struct hammer_lock *lock, int orig_locked);
int hammer_get_interlock(struct hammer_lock *lock);
int hammer_try_interlock_norefs(struct hammer_lock *lock);
void hammer_put_interlock(struct hammer_lock *lock, int error);

void hammer_sync_lock_ex(hammer_transaction_t trans);
void hammer_sync_lock_sh(hammer_transaction_t trans);
int hammer_sync_lock_sh_try(hammer_transaction_t trans);
void hammer_sync_unlock(hammer_transaction_t trans);
uint32_t hammer_to_unix_xid(uuid_t *uuid);
void hammer_guid_to_uuid(uuid_t *uuid, uint32_t guid);
void hammer_time_to_timespec(uint64_t xtime, struct timespec *ts);
uint64_t hammer_timespec_to_time(struct timespec *ts);
int hammer_str_to_tid(const char *str, int *ispfsp,
			hammer_tid_t *tidp, uint32_t *localizationp);
hammer_tid_t hammer_alloc_objid(hammer_mount_t hmp, hammer_inode_t dip,
			int64_t namekey);
void hammer_clear_objid(hammer_inode_t dip);
void hammer_destroy_objid_cache(hammer_mount_t hmp);
int hammer_dedup_crc_rb_compare(hammer_dedup_cache_t dc1,
			hammer_dedup_cache_t dc2);
int hammer_dedup_off_rb_compare(hammer_dedup_cache_t dc1,
			hammer_dedup_cache_t dc2);
hammer_dedup_cache_t hammer_dedup_cache_add(hammer_inode_t ip,
			hammer_btree_leaf_elm_t leaf);
hammer_dedup_cache_t hammer_dedup_cache_lookup(hammer_mount_t hmp,
			hammer_btree_leaf_elm_t leaf);
void hammer_dedup_cache_inval(hammer_mount_t hmp, hammer_off_t base_offset);
void hammer_destroy_dedup_cache(hammer_mount_t hmp);
void hammer_dump_dedup_cache(hammer_mount_t hmp);
int hammer_dedup_validate(hammer_dedup_cache_t dcp, int zone, int bytes,
			void *data);
int hammer_enter_undo_history(hammer_mount_t hmp, hammer_off_t offset,
			int bytes);
void hammer_clear_undo_history(hammer_mount_t hmp);
enum vtype hammer_get_vnode_type(uint8_t obj_type);
int hammer_get_dtype(uint8_t obj_type);
uint8_t hammer_get_obj_type(enum vtype vtype);
int64_t hammer_direntry_namekey(hammer_inode_t dip, const void *name, int len,
			uint32_t *max_iterationsp);
int hammer_nohistory(hammer_inode_t ip);
int hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor,
			hammer_node_cache_t cache, hammer_inode_t ip);
void hammer_normalize_cursor(hammer_cursor_t cursor);
void hammer_done_cursor(hammer_cursor_t cursor);
int hammer_recover_cursor(hammer_cursor_t cursor);
void hammer_unlock_cursor(hammer_cursor_t cursor);
int hammer_lock_cursor(hammer_cursor_t cursor);
hammer_cursor_t hammer_push_cursor(hammer_cursor_t ocursor);
void hammer_pop_cursor(hammer_cursor_t ocursor, hammer_cursor_t ncursor);

void hammer_cursor_replaced_node(hammer_node_t onode, hammer_node_t nnode);
void hammer_cursor_removed_node(hammer_node_t onode, hammer_node_t parent,
			int index);
void hammer_cursor_split_node(hammer_node_t onode, hammer_node_t nnode,
			int index);
void hammer_cursor_moved_element(hammer_node_t oparent, int pindex,
			hammer_node_t onode, int oindex,
			hammer_node_t nnode, int nindex);
void hammer_cursor_parent_changed(hammer_node_t node, hammer_node_t oparent,
			hammer_node_t nparent, int nindex);
void hammer_cursor_inserted_element(hammer_node_t node, int index);
void hammer_cursor_deleted_element(hammer_node_t node, int index);
void hammer_cursor_invalidate_cache(hammer_cursor_t cursor);
int hammer_btree_lookup(hammer_cursor_t cursor);
int hammer_btree_first(hammer_cursor_t cursor);
int hammer_btree_last(hammer_cursor_t cursor);
int hammer_btree_extract(hammer_cursor_t cursor, int flags);
int hammer_btree_iterate(hammer_cursor_t cursor);
int hammer_btree_iterate_reverse(hammer_cursor_t cursor);
int hammer_btree_insert(hammer_cursor_t cursor,
			hammer_btree_leaf_elm_t elm, int *doprop);
int hammer_btree_delete(hammer_cursor_t cursor, int *ndelete);
void hammer_btree_do_propagation(hammer_cursor_t cursor,
			hammer_btree_leaf_elm_t leaf);
int hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2);
int hammer_btree_chkts(hammer_tid_t ts, hammer_base_elm_t key);
int hammer_btree_correct_rhb(hammer_cursor_t cursor, hammer_tid_t tid);
int hammer_btree_correct_lhb(hammer_cursor_t cursor, hammer_tid_t tid);
int btree_set_parent_of_child(hammer_transaction_t trans,
			hammer_node_t node,
			hammer_btree_elm_t elm);
void hammer_node_lock_init(hammer_node_lock_t parent, hammer_node_t node);
void hammer_btree_lcache_init(hammer_mount_t hmp, hammer_node_lock_t lcache,
			int depth);
void hammer_btree_lcache_free(hammer_mount_t hmp, hammer_node_lock_t lcache);
int hammer_btree_lock_children(hammer_cursor_t cursor, int depth,
			hammer_node_lock_t parent,
			hammer_node_lock_t lcache);
void hammer_btree_lock_copy(hammer_cursor_t cursor,
			hammer_node_lock_t parent);
int hammer_btree_sync_copy(hammer_cursor_t cursor,
			hammer_node_lock_t parent);
void hammer_btree_unlock_children(hammer_mount_t hmp,
			hammer_node_lock_t parent,
			hammer_node_lock_t lcache);
int hammer_btree_search_node(hammer_base_elm_t elm, hammer_node_ondisk_t node);
hammer_node_t hammer_btree_get_parent(hammer_transaction_t trans,
			hammer_node_t node, int *parent_indexp,
			int *errorp, int try_exclusive);

void hammer_print_btree_node(hammer_node_ondisk_t ondisk);
void hammer_print_btree_elm(hammer_btree_elm_t elm);
void *hammer_bread(hammer_mount_t hmp, hammer_off_t off,
			int *errorp, hammer_buffer_t *bufferp);
void *hammer_bnew(hammer_mount_t hmp, hammer_off_t off,
			int *errorp, hammer_buffer_t *bufferp);
void *hammer_bread_ext(hammer_mount_t hmp, hammer_off_t off, int bytes,
			int *errorp, hammer_buffer_t *bufferp);
void *hammer_bnew_ext(hammer_mount_t hmp, hammer_off_t off, int bytes,
			int *errorp, hammer_buffer_t *bufferp);

hammer_volume_t hammer_get_root_volume(hammer_mount_t hmp, int *errorp);

hammer_volume_t hammer_get_volume(hammer_mount_t hmp,
			int32_t vol_no, int *errorp);
hammer_buffer_t hammer_get_buffer(hammer_mount_t hmp, hammer_off_t buf_offset,
			int bytes, int isnew, int *errorp);
void hammer_sync_buffers(hammer_mount_t hmp,
			hammer_off_t base_offset, int bytes);
int hammer_del_buffers(hammer_mount_t hmp,
			hammer_off_t base_offset,
			hammer_off_t zone2_offset, int bytes,
			int report_conflicts);

int hammer_ref_volume(hammer_volume_t volume);
int hammer_ref_buffer(hammer_buffer_t buffer);
void hammer_flush_buffer_nodes(hammer_buffer_t buffer);

void hammer_rel_volume(hammer_volume_t volume, int locked);
void hammer_rel_buffer(hammer_buffer_t buffer, int locked);
int hammer_vfs_export(struct mount *mp, int op,
			const struct export_args *export);
hammer_node_t hammer_get_node(hammer_transaction_t trans,
			hammer_off_t node_offset, int isnew, int *errorp);
void hammer_ref_node(hammer_node_t node);
hammer_node_t hammer_ref_node_safe(hammer_transaction_t trans,
			hammer_node_cache_t cache, int *errorp);
void hammer_rel_node(hammer_node_t node);
void hammer_delete_node(hammer_transaction_t trans,
			hammer_node_t node);
void hammer_cache_node(hammer_node_cache_t cache,
			hammer_node_t node);
void hammer_uncache_node(hammer_node_cache_t cache);
void hammer_flush_node(hammer_node_t node, int locked);

hammer_node_t hammer_alloc_btree(hammer_transaction_t trans,
			hammer_off_t hint, int *errorp);
void *hammer_alloc_data(hammer_transaction_t trans, int32_t data_len,
			uint16_t rec_type, hammer_off_t *data_offsetp,
			hammer_buffer_t *data_bufferp,
			hammer_off_t hint, int *errorp);
int hammer_generate_undo(hammer_transaction_t trans,
			hammer_off_t zone_offset, void *base, int len);
int hammer_generate_redo(hammer_transaction_t trans, hammer_inode_t ip,
			hammer_off_t file_offset, uint32_t flags,
			void *base, int len);
void hammer_generate_redo_sync(hammer_transaction_t trans);
void hammer_redo_fifo_start_flush(hammer_inode_t ip);
void hammer_redo_fifo_end_flush(hammer_inode_t ip);

void hammer_format_undo(void *base, uint32_t seqno);
int hammer_upgrade_undo_4(hammer_transaction_t trans);

hammer_off_t hammer_freemap_alloc(hammer_transaction_t trans,
			hammer_off_t owner, int *errorp);
void hammer_freemap_free(hammer_transaction_t trans, hammer_off_t phys_offset,
			hammer_off_t owner, int *errorp);
int _hammer_checkspace(hammer_mount_t hmp, int slop, int64_t *resp);
hammer_off_t hammer_blockmap_alloc(hammer_transaction_t trans, int zone,
			int bytes, hammer_off_t hint, int *errorp);
hammer_reserve_t hammer_blockmap_reserve(hammer_mount_t hmp, int zone,
			int bytes, hammer_off_t *zone_offp, int *errorp);
hammer_reserve_t hammer_blockmap_reserve_dedup(hammer_mount_t hmp, int zone,
			int bytes, hammer_off_t zone_offset, int *errorp);
void hammer_blockmap_reserve_complete(hammer_mount_t hmp,
			hammer_reserve_t resv);
void hammer_reserve_clrdelay(hammer_mount_t hmp, hammer_reserve_t resv);
void hammer_blockmap_free(hammer_transaction_t trans,
			hammer_off_t zone_offset, int bytes);
int hammer_blockmap_dedup(hammer_transaction_t trans,
			hammer_off_t zone_offset, int bytes);
int hammer_blockmap_finalize(hammer_transaction_t trans,
			hammer_reserve_t resv,
			hammer_off_t zone_offset, int bytes);
int hammer_blockmap_getfree(hammer_mount_t hmp, hammer_off_t zone_offset,
			int *curp, int *errorp);
hammer_off_t hammer_blockmap_lookup_verify(hammer_mount_t hmp,
			hammer_off_t zone_offset, int *errorp);

hammer_off_t hammer_undo_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
			int *errorp);
int64_t hammer_undo_used(hammer_transaction_t trans);
int64_t hammer_undo_space(hammer_transaction_t trans);
int64_t hammer_undo_max(hammer_mount_t hmp);
int hammer_undo_reclaim(hammer_io_t io);
void hammer_start_transaction(hammer_transaction_t trans,
			hammer_mount_t hmp);
void hammer_simple_transaction(hammer_transaction_t trans,
			hammer_mount_t hmp);
void hammer_start_transaction_fls(hammer_transaction_t trans,
			hammer_mount_t hmp);
void hammer_done_transaction(hammer_transaction_t trans);
hammer_tid_t hammer_alloc_tid(hammer_mount_t hmp, int count);

void hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags);
void hammer_flush_inode(hammer_inode_t ip, int flags);
void hammer_flush_inode_done(hammer_inode_t ip, int error);
void hammer_wait_inode(hammer_inode_t ip);

int hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
			struct ucred *cred, hammer_inode_t dip,
			const char *name, int namelen,
			hammer_pseudofs_inmem_t pfsm,
			hammer_inode_t *ipp);
void hammer_rel_inode(hammer_inode_t ip, int flush);
int hammer_reload_inode(hammer_inode_t ip, void *arg __unused);
int hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2);
int hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2);
int hammer_destroy_inode_callback(hammer_inode_t ip, void *data __unused);

int hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip);
void hammer_test_inode(hammer_inode_t dip);
void hammer_inode_unloadable_check(hammer_inode_t ip, int getvp);
int hammer_update_atime_quick(hammer_inode_t ip);
int hammer_ip_add_direntry(hammer_transaction_t trans,
			hammer_inode_t dip, const char *name, int bytes,
			hammer_inode_t nip);
int hammer_ip_del_direntry(hammer_transaction_t trans,
			hammer_cursor_t cursor, hammer_inode_t dip,
			hammer_inode_t ip);
void hammer_ip_replace_bulk(hammer_mount_t hmp, hammer_record_t record);
hammer_record_t hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset,
			void *data, int bytes, int *errorp);
int hammer_ip_frontend_trunc(hammer_inode_t ip, off_t file_size);
int hammer_ip_add_record(hammer_transaction_t trans,
			hammer_record_t record);
int hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
			int64_t ran_beg, int64_t ran_end, int truncating);
int hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip,
			int *countp);
int hammer_ip_sync_data(hammer_cursor_t cursor, hammer_inode_t ip,
			int64_t offset, void *data, int bytes);
int hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t rec);
hammer_pseudofs_inmem_t hammer_load_pseudofs(hammer_transaction_t trans,
			uint32_t localization, int *errorp);
int hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
			hammer_pseudofs_inmem_t pfsm, hammer_inode_t dip);
int hammer_save_pseudofs(hammer_transaction_t trans,
			hammer_pseudofs_inmem_t pfsm);
int hammer_unload_pseudofs(hammer_transaction_t trans, uint32_t localization);
void hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm);
int hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
			struct ucred *cred);
void hammer_io_init(hammer_io_t io, hammer_volume_t volume,
			hammer_io_type_t type);
hammer_io_type_t hammer_zone_to_iotype(int zone);
int hammer_io_read(struct vnode *devvp, hammer_io_t io, int limit);
void hammer_io_advance(hammer_io_t io);
int hammer_io_new(struct vnode *devvp, hammer_io_t io);
int hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset);
struct buf *hammer_io_release(hammer_io_t io, int flush);
void hammer_io_flush(hammer_io_t io, int reclaim);
void hammer_io_wait(hammer_io_t io);
void hammer_io_waitdep(hammer_io_t io);
void hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush);
int hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
			hammer_btree_leaf_elm_t leaf);
int hammer_io_indirect_read(hammer_mount_t hmp, struct bio *bio,
			hammer_btree_leaf_elm_t leaf);
int hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
			hammer_record_t record);
void hammer_io_direct_wait(hammer_record_t record);
void hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf);
void hammer_io_write_interlock(hammer_io_t io);
void hammer_io_done_interlock(hammer_io_t io);
void hammer_io_clear_modify(hammer_io_t io, int inval);
void hammer_io_clear_modlist(hammer_io_t io);
void hammer_io_flush_sync(hammer_mount_t hmp);
void hammer_io_clear_error(hammer_io_t io);
void hammer_io_clear_error_noassert(hammer_io_t io);
void hammer_io_notmeta(hammer_buffer_t buffer);
void hammer_io_limit_backlog(hammer_mount_t hmp);

void hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
			void *base, int len);
void hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
			void *base, int len);
void hammer_modify_volume_done(hammer_volume_t volume);
void hammer_modify_buffer_done(hammer_buffer_t buffer);
int hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_reblock *reblock);
int hammer_ioc_rebalance(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_rebalance *rebal);
int hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_prune *prune);
int hammer_ioc_mirror_read(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_mirror_rw *mirror);
int hammer_ioc_mirror_write(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_mirror_rw *mirror);
int hammer_ioc_set_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct ucred *cred, struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_get_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_destroy_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_downgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_upgrade_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_wait_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_scan_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_pseudofs_rw *pfs);
int hammer_ioc_volume_add(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_volume *ioc);
int hammer_ioc_volume_del(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_volume *ioc);
int hammer_ioc_volume_list(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_volume_list *ioc);
int hammer_ioc_dedup(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_dedup *dedup);
int hammer_signal_check(hammer_mount_t hmp);

void hammer_flusher_create(hammer_mount_t hmp);
void hammer_flusher_destroy(hammer_mount_t hmp);
void hammer_flusher_sync(hammer_mount_t hmp);
int hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t flg);
int hammer_flusher_async_one(hammer_mount_t hmp);
int hammer_flusher_running(hammer_mount_t hmp);
void hammer_flusher_wait(hammer_mount_t hmp, int seq);
void hammer_flusher_wait_next(hammer_mount_t hmp);
int hammer_flusher_meta_limit(hammer_mount_t hmp);
int hammer_flusher_meta_halflimit(hammer_mount_t hmp);
int hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter);
void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
void hammer_flusher_finalize(hammer_transaction_t trans, int final);
int hammer_flusher_haswork(hammer_mount_t hmp);
int hammer_flush_dirty(hammer_mount_t hmp, int max_count);
void hammer_flusher_flush_undos(hammer_mount_t hmp, int already_flushed);

int hammer_recover_stage1(hammer_mount_t hmp, hammer_volume_t rootvol);
int hammer_recover_stage2(hammer_mount_t hmp, hammer_volume_t rootvol);
void hammer_recover_flush_buffers(hammer_mount_t hmp,
			hammer_volume_t root_volume, int final);

udev_t hammer_fsid_to_udev(uuid_t *uuid);

int hammer_blocksize(int64_t file_offset);
int hammer_blockoff(int64_t file_offset);
int64_t hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2);
/*
 * Shortcut for _hammer_checkspace(), used all over the code.
 */
static __inline int
hammer_checkspace(hammer_mount_t hmp, int slop)
{
	return(_hammer_checkspace(hmp, slop, NULL));
}
static __inline void
hammer_wait_mem_record(hammer_record_t record)
{
	hammer_wait_mem_record_ident(record, "hmmwai");
}

static __inline void
hammer_lock_ex(struct hammer_lock *lock)
{
	hammer_lock_ex_ident(lock, "hmrlck");
}
static __inline void
hammer_modify_volume_noundo(hammer_transaction_t trans, hammer_volume_t volume)
{
	hammer_modify_volume(trans, volume, NULL, 0);
}

static __inline void
hammer_modify_buffer_noundo(hammer_transaction_t trans, hammer_buffer_t buffer)
{
	hammer_modify_buffer(trans, buffer, NULL, 0);
}
/*
 * Indicate that a B-Tree node is being modified.
 */
static __inline void
hammer_modify_node_noundo(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
	hammer_modify_buffer(trans, node->buffer, NULL, 0);
}

static __inline void
hammer_modify_node_all(hammer_transaction_t trans, hammer_node_t node)
{
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);
	hammer_modify_buffer(trans, node->buffer,
			     node->ondisk, sizeof(*node->ondisk));
}
static __inline void
hammer_modify_node(hammer_transaction_t trans, hammer_node_t node,
		   void *base, int len)
{
	hammer_crc_t *crcptr;

	KKASSERT((char *)base >= (char *)node->ondisk &&
		 (char *)base + len <=
		    (char *)node->ondisk + sizeof(*node->ondisk));
	KKASSERT((node->flags & HAMMER_NODE_CRCBAD) == 0);

	if (hammer_btree_full_undo) {
		hammer_modify_node_all(trans, node);
	} else {
		hammer_modify_buffer(trans, node->buffer, base, len);
		crcptr = &node->ondisk->crc;
		hammer_modify_buffer(trans, node->buffer,
				     crcptr, sizeof(hammer_crc_t));
		--node->buffer->io.modify_refs;	/* only want one ref */
	}
}
/*
 * Indicate that the specified modifications have been completed.
 *
 * Do not try to generate the crc here, it's very expensive to do and a
 * sequence of insertions or deletions can result in many calls to this
 * function on the same node.
 */
static __inline void
hammer_modify_node_done(hammer_node_t node)
{
	node->flags |= HAMMER_NODE_CRCGOOD;
	if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0) {
		node->flags |= HAMMER_NODE_NEEDSCRC;
		node->buffer->io.gencrc = 1;
		hammer_ref_node(node);
	}
	hammer_modify_buffer_done(node->buffer);
}
static __inline int
hammer_btree_extract_leaf(hammer_cursor_t cursor)
{
	return(hammer_btree_extract(cursor, 0));
}

static __inline int
hammer_btree_extract_data(hammer_cursor_t cursor)
{
	return(hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA));
}
/*
 * Lookup a blockmap offset.
 */
static __inline hammer_off_t
hammer_blockmap_lookup(hammer_mount_t hmp, hammer_off_t zone_offset,
			int *errorp)
{
#if defined INVARIANTS
	KKASSERT(hammer_is_zone_record(zone_offset));
#endif

	/*
	 * We can actually skip blockmap verify by default,
	 * as normal blockmaps are now direct-mapped onto the freemap
	 * and so represent zone-2 addresses.
	 */
	if (hammer_verify_zone == 0) {
		*errorp = 0;
		return hammer_xlate_to_zone2(zone_offset);
	}

	return hammer_blockmap_lookup_verify(hmp, zone_offset, errorp);
}
#define hammer_modify_volume_field(trans, vol, field)		\
	hammer_modify_volume(trans, vol, &(vol)->ondisk->field,	\
			     sizeof((vol)->ondisk->field))

#define hammer_modify_node_field(trans, node, field)		\
	hammer_modify_node(trans, node, &(node)->ondisk->field,	\
			   sizeof((node)->ondisk->field))
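
/*
 * Hedged example of the field helpers above: modifying a single on-disk
 * field generates UNDO only for that field's bytes.  The field name used
 * here (vol0_stat_freebigblocks) is assumed to come from hammer_disk.h and
 * the sequence is an illustration, not a prescribed call pattern:
 *
 *	hammer_modify_volume_field(trans, trans->rootvol,
 *				   vol0_stat_freebigblocks);
 *	--trans->rootvol->ondisk->vol0_stat_freebigblocks;
 *	hammer_modify_volume_done(trans->rootvol);
 */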
/*
 * The HAMMER_INODE_CAP_DIR_LOCAL_INO capability is set on newly
 * created directories for HAMMER version 2 or greater and causes
 * directory entries to be placed in the inode localization zone in
 * the B-Tree instead of the misc zone.
 *
 * This greatly improves localization between directory entries and
 * inodes.
 */
static __inline uint32_t
hammer_dir_localization(hammer_inode_t dip)
{
	return(HAMMER_DIR_INODE_LOCALIZATION(&dip->ino_data));
}
static __inline hammer_io_t
hammer_buf_peek_io(struct buf *bp)
{
	return((hammer_io_t)bp->b_priv);
}

static __inline void
hammer_buf_attach_io(struct buf *bp, hammer_io_t io)
{
	/* struct buf and struct hammer_io are 1:1 */
	KKASSERT(hammer_buf_peek_io(bp) == NULL);
	bp->b_priv = io;
}
static __inline int
__hammer_vol_index(int vol_no)
{
	return(vol_no >> 6);
}

static __inline uint64_t
__hammer_vol_low(int vol_no)
{
	return((uint64_t)1 << (vol_no & ((1 << 6) - 1)));
}

static __inline void
hammer_volume_number_add(hammer_mount_t hmp, hammer_volume_t vol)
{
	int i = __hammer_vol_index(vol->vol_no);
	hmp->volume_map[i] |= __hammer_vol_low(vol->vol_no);
}

static __inline void
hammer_volume_number_del(hammer_mount_t hmp, hammer_volume_t vol)
{
	int i = __hammer_vol_index(vol->vol_no);
	hmp->volume_map[i] &= ~__hammer_vol_low(vol->vol_no);
}

static __inline int
hammer_volume_number_test(hammer_mount_t hmp, int n)
{
	int i = __hammer_vol_index(n);
	return((hmp->volume_map[i] & __hammer_vol_low(n)) != 0);
}
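
/*
 * Worked example of the bitmap math above (illustration only): volume
 * number 70 lands in volume_map[70 >> 6] = volume_map[1] at bit
 * (70 & 63) = 6, i.e. mask 0x0000000000000040.  The four 64-bit words
 * cover 256 possible volume numbers, matching the "256 bits bitfield"
 * comment on volume_map[].
 */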
#define hkprintf(format, args...)			\
	kprintf("HAMMER: "format,## args)
#define hvkprintf(vol, format, args...)			\
	kprintf("HAMMER(%s) "format, vol->ondisk->vol_label,## args)
#define hmkprintf(hmp, format, args...)			\
	kprintf("HAMMER(%s) "format, hmp->mp->mnt_stat.f_mntfromname,## args)
#define hdkprintf(format, args...)			\
	kprintf("%s: "format, __func__,## args)

#define hkrateprintf(rate, format, args...)		\
	krateprintf(rate, "HAMMER: "format,## args)
#define hvkrateprintf(rate, vol, format, args...)	\
	krateprintf(rate, "HAMMER(%s) "format, vol->ondisk->vol_label,## args)
#define hmkrateprintf(rate, hmp, format, args...)	\
	krateprintf(rate, "HAMMER(%s) "format, hmp->mp->mnt_stat.f_mntfromname,## args)
#define hdkrateprintf(rate, format, args...)		\
	krateprintf(rate, "%s: "format, __func__,## args)

#define hpanic(format, args...)				\
	panic("%s: "format, __func__,## args)
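
/*
 * Hedged usage sketch for the wrappers above; each simply prefixes the
 * message so log lines can be attributed to HAMMER, a volume, or a mount.
 * The variables (volume, hmp, offset, error) are hypothetical:
 *
 *	hkprintf("recovery complete\n");
 *	hvkprintf(volume, "UNDO record at %016jx\n", (intmax_t)offset);
 *	hmkprintf(hmp, "mounted read-only\n");
 *	hdkprintf("unexpected error %d\n", error);
 */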
#endif	/* _KERNEL */

#endif	/* !VFS_HAMMER_HAMMER_H_ */