/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.114 2008/09/24 00:53:51 dillon Exp $
 */
#include "hammer.h"

#include <vm/vm_extern.h>
static int	hammer_unload_inode(struct hammer_inode *ip);
static void	hammer_free_inode(hammer_inode_t ip);
static void	hammer_flush_inode_core(hammer_inode_t ip,
					hammer_flush_group_t flg, int flags);
static int	hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int	hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int	hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
					hammer_flush_group_t flg);
static int	hammer_setup_parent_inodes_helper(hammer_record_t record,
					int depth, hammer_flush_group_t flg);
static void	hammer_inode_wakereclaims(hammer_inode_t ip);
static struct hammer_inostats *hammer_inode_inostats(hammer_mount_t hmp,
					pid_t pid);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif
/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_localization < ip2->obj_localization)
                return(-1);
        if (ip1->obj_localization > ip2->obj_localization)
                return(1);
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}
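/*
 * Note that obj_asof sorts last: all as-of snapshots of the same object
 * are adjacent in the tree, which is what lets the all-history comparator
 * below match on localization and obj_id alone.
 */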
int
hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->redo_fifo_start < ip2->redo_fifo_start)
                return(-1);
        if (ip1->redo_fifo_start > ip2->redo_fifo_start)
                return(1);
        return(0);
}
/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_localization < ip->obj_localization)
                return(-1);
        if (info->obj_localization > ip->obj_localization)
                return(1);
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}
/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
        hammer_inode_info_t info = data;

        if (ip->obj_localization > info->obj_localization)
                return(1);
        if (ip->obj_localization < info->obj_localization)
                return(-1);
        if (ip->obj_id > info->obj_id)
                return(1);
        if (ip->obj_id < info->obj_id)
                return(-1);
        return(0);
}
/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
        u_int32_t localization = *(u_int32_t *)data;
        if (ip->obj_localization > localization)
                return(1);
        if (ip->obj_localization < localization)
                return(-1);
        return(0);
}
/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
        if (p1->localization < p2->localization)
                return(-1);
        if (p1->localization > p2->localization)
                return(1);
        return(0);
}
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
             hammer_pfs_rb_compare, u_int32_t, localization);
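/*
 * The macros above generate the tree operations used throughout this
 * file.  An illustrative lookup, mirroring the pattern used in
 * hammer_get_inode() below:
 *
 *	struct hammer_inode_info iinfo;
 *
 *	iinfo.obj_id = obj_id;
 *	iinfo.obj_asof = asof;
 *	iinfo.obj_localization = localization;
 *	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
 */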
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(ap->a_vp);
                return(0);
        }

        /*
         * If the inode no longer has visibility in the filesystem try to
         * recycle it immediately, even if the inode is dirty.  Recycling
         * it quickly allows the system to reclaim buffer cache and VM
         * resources which can matter a lot in a heavily loaded system.
         *
         * This can deadlock in vfsync() if we aren't careful.
         *
         * Do not queue the inode to the flusher if we still have visibility,
         * otherwise namespace calls such as chmod will unnecessarily generate
         * multiple inode updates.
         */
        if (ip->ino_data.nlinks == 0) {
                hammer_inode_unloadable_check(ip, 0);
                if (ip->flags & HAMMER_INODE_MODMASK)
                        hammer_flush_inode(ip, 0);
                vrecycle(ap->a_vp);
        }
        return(0);
}
/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 *
 * We must interlock ip->vp so hammer_get_vnode() can avoid races.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct vnode *vp;

        vp = ap->a_vp;

        if ((ip = vp->v_data) != NULL) {
                hmp = ip->hmp;
                hammer_lock_ex(&ip->lock);
                vp->v_data = NULL;
                ip->vp = NULL;

                if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
                        ++hammer_count_reclaiming;
                        ++hmp->inode_reclaims;
                        ip->flags |= HAMMER_INODE_RECLAIM;
                }
                hammer_unlock(&ip->lock);
                hammer_rel_inode(ip, 1);
        }
        return(0);
}
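/*
 * Note the final hammer_rel_inode(ip, 1) above: the second argument
 * requests a flush, since once the vnode association is severed nothing
 * else will push the inode's dirty state to the media (see the function
 * comment for hammer_rel_inode() below).
 */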
/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
        hammer_mount_t hmp;
        struct vnode *vp;
        int error = 0;
        u_int8_t obj_type;

        hmp = ip->hmp;

        for (;;) {
                if ((vp = ip->vp) == NULL) {
                        error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
                        if (error)
                                break;
                        hammer_lock_ex(&ip->lock);
                        if (ip->vp != NULL) {
                                hammer_unlock(&ip->lock);
                                vp = *vpp;
                                vp->v_type = VBAD;
                                vx_put(vp);
                                continue;
                        }
                        hammer_ref(&ip->lock);
                        vp = *vpp;
                        ip->vp = vp;

                        obj_type = ip->ino_data.obj_type;
                        vp->v_type = hammer_get_vnode_type(obj_type);

                        hammer_inode_wakereclaims(ip);

                        switch(ip->ino_data.obj_type) {
                        case HAMMER_OBJTYPE_CDEV:
                        case HAMMER_OBJTYPE_BDEV:
                                vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
                                addaliasu(vp, ip->ino_data.rmajor,
                                          ip->ino_data.rminor);
                                break;
                        case HAMMER_OBJTYPE_FIFO:
                                vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
                                break;
                        case HAMMER_OBJTYPE_REGFILE:
                                break;
                        default:
                                break;
                        }

                        /*
                         * Only mark as the root vnode if the ip is not
                         * historical, otherwise the VFS cache will get
                         * confused.  The other half of the special handling
                         * is in hammer_vop_nlookupdotdot().
                         *
                         * Pseudo-filesystem roots can be accessed via
                         * non-root filesystem paths and setting VROOT may
                         * confuse the namecache.  Set VPFSROOT instead.
                         */
                        if (ip->obj_id == HAMMER_OBJID_ROOT &&
                            ip->obj_asof == hmp->asof) {
                                if (ip->obj_localization == 0)
                                        vsetflags(vp, VROOT);
                                else
                                        vsetflags(vp, VPFSROOT);
                        }

                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
                        /* make related vnode dirty if inode dirty? */
                        hammer_unlock(&ip->lock);
                        if (vp->v_type == VREG) {
                                vinitvmio(vp, ip->ino_data.size,
                                          hammer_blocksize(ip->ino_data.size),
                                          hammer_blockoff(ip->ino_data.size));
                        }
                        break;
                }

                /*
                 * Interlock vnode clearing.  This does not prevent the
                 * vnode from going into a reclaimed state but it does
                 * prevent it from being destroyed or reused so the vget()
                 * will properly fail.
                 */
                hammer_lock_ex(&ip->lock);
                if ((vp = ip->vp) == NULL) {
                        hammer_unlock(&ip->lock);
                        continue;
                }
                vhold_interlocked(vp);
                hammer_unlock(&ip->lock);

                /*
                 * Loop if the vget fails (aka races), or if the vp
                 * no longer matches ip->vp.
                 */
                if (vget(vp, LK_EXCLUSIVE) == 0) {
                        if (vp == ip->vp) {
                                vdrop(vp);
                                break;
                        }
                        vput(vp);
                }
                vdrop(vp);
        }
        *vpp = vp;
        return(error);
}
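/*
 * Usage sketch (illustrative): callers arrive with a referenced,
 * unlocked ip, e.g.
 *
 *	ip = hammer_get_inode(trans, dip, obj_id, asof, localization,
 *			      flags, &error);
 *	if (ip)
 *		error = hammer_get_vnode(ip, &vp);
 *
 * On success *vpp is a locked vnode holding its own inode reference;
 * the caller's original reference remains and is dropped separately
 * with hammer_rel_inode().
 */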
/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
                            int (*callback)(hammer_inode_t ip, void *data),
                            void *data)
{
        hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
                                   hammer_inode_info_cmp_all_history,
                                   callback, iinfo);
}
/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_node_cache *cachep;
        struct hammer_inode_info iinfo;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a dummy inode we return a failure so dounlink
         * (which does another lookup) doesn't try to mess with the
         * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
         * to ref dummy inodes.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                *errorp = 0;
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags & HAMMER_INODE_RO;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        if (hmp->ronly)
                ip->flags |= HAMMER_INODE_RO;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Locate the on-disk inode.  If this is a PFS root we always
         * access the current version of the root inode and (if it is not
         * a master) always access information under it with a snapshot
         * TID.
         *
         * We cache recent inode lookups in this directory in dip->cache[2].
         * If we can't find it we assume the inode we are looking for is
         * close to the directory inode.
         */
retry:
        cachep = NULL;
        if (dip) {
                if (dip->cache[2].node)
                        cachep = &dip->cache[2];
                else
                        cachep = &dip->cache[0];
        }
        hammer_init_cursor(trans, &cursor, cachep, NULL);
        cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.key = 0;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor.key_beg.obj_type = 0;

        cursor.asof = iinfo.obj_asof;
        cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
                       HAMMER_CURSOR_ASOF;

        *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode
         * and cache the B-Tree node to improve future operations.
         */
        if (*errorp == 0) {
                ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
                ip->ino_data = cursor.data->inode;

                /*
                 * cache[0] tries to cache the location of the object inode.
                 * The assumption is that it is near the directory inode.
                 *
                 * cache[1] tries to cache the location of the object data.
                 * We might have something in the governing directory from
                 * scan optimizations (see the strategy code in
                 * hammer_vnops.c).
                 *
                 * We update dip->cache[2], if possible, with the location
                 * of the object inode for future directory shortcuts.
                 */
                hammer_cache_node(&ip->cache[0], cursor.node);
                if (dip) {
                        if (dip->cache[3].node) {
                                hammer_cache_node(&ip->cache[1],
                                                  dip->cache[3].node);
                        }
                        hammer_cache_node(&dip->cache[2], cursor.node);
                }

                /*
                 * The file should not contain any data past the file size
                 * stored in the inode.  Setting save_trunc_off to the
                 * file size instead of max reduces B-Tree lookup overheads
                 * on append by allowing the flusher to avoid checking for
                 * record overwrites.
                 */
                ip->save_trunc_off = ip->ino_data.size;

                /*
                 * Locate and assign the pseudofs management structure to
                 * the inode.
                 */
                if (dip && dip->obj_localization == ip->obj_localization) {
                        ip->pfsm = dip->pfsm;
                        hammer_ref(&ip->pfsm->lock);
                } else {
                        ip->pfsm = hammer_load_pseudofs(trans,
                                                        ip->obj_localization,
                                                        errorp);
                        *errorp = 0;    /* ignore ENOENT */
                }
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        hammer_done_cursor(&cursor);
                        goto loop;
                }
                ip->flags |= HAMMER_INODE_ONDISK;
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }

                hammer_free_inode(ip);
                ip = NULL;
        }
        hammer_done_cursor(&cursor);

        /*
         * NEWINODE is only set if the inode becomes dirty later,
         * setting it here just leads to unnecessary stalls.
         *
         * trans->flags |= HAMMER_TRANSF_NEWINODE;
         */
        return(ip);
}
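/*
 * Summary of the lookup flow above: (1) check the in-memory red-black
 * tree; (2) on a miss, allocate a new in-memory inode and locate the
 * on-disk inode record via a B-Tree cursor seeded from the directory's
 * node caches (dip->cache[]); (3) insert into the tree, retrying from
 * the top if another thread raced the instantiation.
 */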
/*
 * Get a dummy inode to placemark a broken directory entry.
 */
struct hammer_inode *
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a non-fake inode we return an error.  Only fake
         * inodes can be returned by this routine.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        *errorp = 0;
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Populate the dummy inode.  Leave everything zero'd out.
         *
         * (ip->ino_leaf and ip->ino_data)
         *
         * Make the dummy inode a FIFO object which most copy programs
         * will properly ignore.
         */
        ip->save_trunc_off = ip->ino_data.size;
        ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

        /*
         * Locate and assign the pseudofs management structure to
         * the inode.
         */
        if (dip && dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
        } else {
                ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
                                                errorp);
                *errorp = 0;    /* ignore ENOENT */
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         *
         * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        goto loop;
                }
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }
                hammer_free_inode(ip);
                ip = NULL;
        }
        trans->flags |= HAMMER_TRANSF_NEWINODE;
        return(ip);
}
/*
 * Return a referenced inode only if it is in our inode cache.
 *
 * Dummy inodes do not count.
 */
struct hammer_inode *
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
                  hammer_tid_t asof, u_int32_t localization)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;

        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY)
                        ip = NULL;
                else
                        hammer_ref(&ip->lock);
        }
        return(ip);
}
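/*
 * Unlike hammer_get_inode(), this routine never touches the media and
 * never instantiates a new in-memory inode; it only returns something
 * already present in hmp->rb_inos_root.
 */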
/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                    struct ucred *cred,
                    hammer_inode_t dip, const char *name, int namelen,
                    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
        hammer_mount_t hmp;
        hammer_inode_t ip;
        uid_t xuid;
        int error;
        int64_t namekey;
        u_int32_t dummy;

        hmp = trans->hmp;

        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        trans->flags |= HAMMER_TRANSF_NEWINODE;

        if (pfsm) {
                KKASSERT(pfsm->localization != 0);
                ip->obj_id = HAMMER_OBJID_ROOT;
                ip->obj_localization = pfsm->localization;
        } else {
                KKASSERT(dip != NULL);
                namekey = hammer_directory_namekey(dip, name, namelen, &dummy);
                ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
                ip->obj_localization = dip->obj_localization;
        }

        KKASSERT(ip->obj_id != 0);
        ip->obj_asof = hmp->asof;
        ip->hmp = hmp;
        ip->flush_state = HAMMER_FST_IDLE;
        ip->flags = HAMMER_INODE_DDIRTY |
                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;

        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        /* ip->save_trunc_off = 0; (already zero) */
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        ip->ino_data.atime = trans->time;
        ip->ino_data.mtime = trans->time;
        ip->ino_data.size = 0;
        ip->ino_data.nlinks = 0;

        /*
         * A nohistory designator on the parent directory is inherited by
         * the child.  We will do this even for pseudo-fs creation... the
         * sysad can turn it off.
         */
        if (dip) {
                ip->ino_data.uflags = dip->ino_data.uflags &
                                      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
        }

        ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        ip->ino_leaf.base.localization = ip->obj_localization +
                                         HAMMER_LOCALIZE_INODE;
        ip->ino_leaf.base.obj_id = ip->obj_id;
        ip->ino_leaf.base.key = 0;
        ip->ino_leaf.base.create_tid = 0;
        ip->ino_leaf.base.delete_tid = 0;
        ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
        ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

        ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
        ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
        ip->ino_data.mode = vap->va_mode;
        ip->ino_data.ctime = trans->time;

        /*
         * If we are running version 2 or greater directory entries are
         * inode-localized instead of data-localized.
         */
        if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
                if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
                        ip->ino_data.cap_flags |=
                                HAMMER_INODE_CAP_DIR_LOCAL_INO;
                }
        }

        /*
         * Setup the ".." pointer.  This only needs to be done for directories
         * but we do it for all objects as a recovery aid.
         */
        if (dip)
                ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;

        /*
         * The parent_obj_localization field only applies to pseudo-fs roots.
         * XXX this is no longer applicable, PFSs are no longer directly
         * tied into the parent's directory structure.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
            ip->obj_id == HAMMER_OBJID_ROOT) {
                ip->ino_data.ext.obj.parent_obj_localization =
                                                dip->obj_localization;
        }

        switch(ip->ino_leaf.base.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                ip->ino_data.rmajor = vap->va_rmajor;
                ip->ino_data.rminor = vap->va_rminor;
                break;
        default:
                break;
        }

        /*
         * Calculate default uid/gid and overwrite with information from
         * the vap.
         */
        if (dip) {
                xuid = hammer_to_unix_xid(&dip->ino_data.uid);
                xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
                                             xuid, cred, &vap->va_mode);
        } else {
                xuid = 0;
        }
        ip->ino_data.mode = vap->va_mode;

        if (vap->va_vaflags & VA_UID_UUID_VALID)
                ip->ino_data.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
        else
                hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

        if (vap->va_vaflags & VA_GID_UUID_VALID)
                ip->ino_data.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
        else if (dip)
                ip->ino_data.gid = dip->ino_data.gid;

        hammer_ref(&ip->lock);

        if (pfsm) {
                ip->pfsm = pfsm;
                hammer_ref(&pfsm->lock);
                error = 0;
        } else if (dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
                error = 0;
        } else {
                ip->pfsm = hammer_load_pseudofs(trans,
                                                ip->obj_localization,
                                                &error);
                error = 0;      /* ignore ENOENT */
        }

        if (error) {
                hammer_free_inode(ip);
                ip = NULL;
        } else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                panic("hammer_create_inode: duplicate obj_id %llx",
                      (long long)ip->obj_id);
                /* not reached */
                hammer_free_inode(ip);
        }
        *ipp = ip;
        return(error);
}
/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
        struct hammer_mount *hmp;

        hmp = ip->hmp;
        KKASSERT(hammer_oneref(&ip->lock));
        hammer_uncache_node(&ip->cache[0]);
        hammer_uncache_node(&ip->cache[1]);
        hammer_uncache_node(&ip->cache[2]);
        hammer_uncache_node(&ip->cache[3]);
        hammer_inode_wakereclaims(ip);
        if (ip->objid_cache)
                hammer_clear_objid(ip);
        --hammer_count_inodes;
        --hmp->count_inodes;
        if (ip->pfsm) {
                hammer_rel_pseudofs(hmp, ip->pfsm);
                ip->pfsm = NULL;
        }
        kfree(ip, hmp->m_inodes);
        ip = NULL;
}
/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
                     u_int32_t localization, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_inode_t ip;
        hammer_pseudofs_inmem_t pfsm;
        struct hammer_cursor cursor;
        int bytes;

retry:
        pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
        if (pfsm) {
                hammer_ref(&pfsm->lock);
                *errorp = 0;
                return(pfsm);
        }

        /*
         * PFS records are stored in the root inode (not the PFS root inode,
         * but the real root).  Avoid an infinite recursion if loading
         * the PFS for the real root.
         */
        if (localization) {
                ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
                                      HAMMER_MAX_TID,
                                      HAMMER_DEF_LOCALIZATION, 0, errorp);
        } else {
                ip = NULL;
        }

        pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
        pfsm->localization = localization;
        pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
        pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

        hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
        cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        if (ip)
                *errorp = hammer_ip_lookup(&cursor);
        else
                *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == 0) {
                *errorp = hammer_ip_resolve_data(&cursor);
                if (*errorp == 0) {
                        if (cursor.data->pfsd.mirror_flags &
                            HAMMER_PFSD_DELETED) {
                                *errorp = ENOENT;
                        } else {
                                bytes = cursor.leaf->data_len;
                                if (bytes > sizeof(pfsm->pfsd))
                                        bytes = sizeof(pfsm->pfsd);
                                bcopy(cursor.data, &pfsm->pfsd, bytes);
                        }
                }
        }
        hammer_done_cursor(&cursor);

        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_ref(&pfsm->lock);
        if (ip)
                hammer_rel_inode(ip, 0);
        if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
                kfree(pfsm, hmp->m_misc);
                goto retry;
        }
        return(pfsm);
}
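/*
 * Note the key layout used above: PFS records all live under the real
 * root inode (HAMMER_OBJID_ROOT at HAMMER_DEF_LOCALIZATION) with
 * rec_type HAMMER_RECTYPE_PFS, and the PFS's own localization value is
 * stored in key_beg.key.  That is why the localization != 0 guard is
 * needed to avoid recursing while loading the real root's own PFS.
 */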
/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
        struct hammer_cursor cursor;
        hammer_record_t record;
        hammer_inode_t ip;
        int error;

retry:
        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              HAMMER_DEF_LOCALIZATION, 0, &error);
        KKASSERT(error == 0);
        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = pfsm->localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        /*
         * Replace any in-memory version of the record.
         */
        error = hammer_ip_lookup(&cursor);
        if (error == 0 && hammer_cursor_inmem(&cursor)) {
                record = cursor.iprec;
                if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
                        KKASSERT(cursor.deadlk_rec == NULL);
                        hammer_ref(&record->lock);
                        cursor.deadlk_rec = record;
                        error = EDEADLK;
                } else {
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        error = 0;
                }
        }

        /*
         * Allocate replacement general record.  The backend flush will
         * delete any on-disk version of the record.
         */
        if (error == 0 || error == ENOENT) {
                record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
                record->type = HAMMER_MEM_RECORD_GENERAL;

                record->leaf.base.localization = ip->obj_localization +
                                                 HAMMER_LOCALIZE_MISC;
                record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
                record->leaf.base.key = pfsm->localization;
                record->leaf.data_len = sizeof(pfsm->pfsd);
                bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
                error = hammer_ip_add_record(trans, record);
        }
        hammer_done_cursor(&cursor);
        if (error == EDEADLK)
                goto retry;
        hammer_rel_inode(ip, 0);
        return(error);
}
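/*
 * The update above is a two-step dance because the frontend cannot
 * modify a record the backend has interlocked (HAMMER_RECF_INTERLOCK_BE):
 * any in-memory version is first marked deleted (or the lookup retried
 * via EDEADLK), then a fresh HAMMER_MEM_RECORD_GENERAL record carrying
 * the new pfsd is queued with hammer_ip_add_record().
 */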
/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
                       hammer_pseudofs_inmem_t pfsm)
{
        hammer_inode_t ip;
        struct vattr vap;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              pfsm->localization, 0, &error);
        if (ip == NULL) {
                vattr_null(&vap);
                vap.va_mode = 0755;
                vap.va_type = VDIR;
                error = hammer_create_inode(trans, &vap, cred,
                                            NULL, NULL, 0,
                                            pfsm, &ip);
                if (error == 0) {
                        ++ip->ino_data.nlinks;
                        hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
                }
        }
        if (ip)
                hammer_rel_inode(ip, 0);
        return(error);
}
/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
        int res;

        hammer_ref(&ip->lock);
        if (hammer_isactive(&ip->lock) == 2 && ip->vp)
                vclean_unlocked(ip->vp);
        if (hammer_isactive(&ip->lock) == 1 && ip->vp == NULL)
                res = 0;
        else
                res = -1;       /* stop, someone is using the inode */
        hammer_rel_inode(ip, 0);
        return(res);
}
int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
        int res;
        int try;

        for (try = res = 0; try < 4; ++try) {
                res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
                                           hammer_inode_pfs_cmp,
                                           hammer_unload_pseudofs_callback,
                                           &localization);
                if (res == 0 && try > 1)
                        break;
                hammer_flusher_sync(trans->hmp);
        }
        if (res != 0)
                res = ENOTEMPTY;
        return(res);
}
/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
        hammer_rel(&pfsm->lock);
        if (hammer_norefs(&pfsm->lock)) {
                RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
                kfree(pfsm, hmp->m_misc);
        }
}
/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        hammer_record_t record;
        int error;
        int redirty;

retry:
        error = 0;

        /*
         * If the inode has a presence on-disk then locate it and mark
         * it deleted, setting DELONDISK.
         *
         * The record may or may not be physically deleted, depending on
         * the retention policy.
         */
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
            HAMMER_INODE_ONDISK) {
                hammer_normalize_cursor(cursor);
                cursor->key_beg.localization = ip->obj_localization +
                                               HAMMER_LOCALIZE_INODE;
                cursor->key_beg.obj_id = ip->obj_id;
                cursor->key_beg.key = 0;
                cursor->key_beg.create_tid = 0;
                cursor->key_beg.delete_tid = 0;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
                cursor->key_beg.obj_type = 0;
                cursor->asof = ip->obj_asof;
                cursor->flags &= ~HAMMER_CURSOR_INITMASK;
                cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
                cursor->flags |= HAMMER_CURSOR_BACKEND;

                error = hammer_btree_lookup(cursor);
                if (hammer_debug_inode)
                        kprintf("IPDEL %p %08x %d", ip, ip->flags, error);

                if (error == 0) {
                        error = hammer_ip_delete_record(cursor, ip, trans->tid);
                        if (hammer_debug_inode)
                                kprintf(" error %d\n", error);
                        if (error == 0) {
                                ip->flags |= HAMMER_INODE_DELONDISK;
                        }
                        if (cursor->node)
                                hammer_cache_node(&ip->cache[0], cursor->node);
                }
                if (error == EDEADLK) {
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("IPDED %p %d\n", ip, error);
                        if (error == 0)
                                goto retry;
                }
        }

        /*
         * Ok, write out the initial record or a new record (after deleting
         * the old one), unless the DELETED flag is set.  This routine will
         * clear DELONDISK if it writes out a record.
         *
         * Update our inode statistics if this is the first application of
         * the inode on-disk.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
                /*
                 * Generate a record and write it to the media.  We clean-up
                 * the state before releasing so we do not have to set-up
                 * a flush_group.
                 */
                record = hammer_alloc_mem_record(ip, 0);
                record->type = HAMMER_MEM_RECORD_INODE;
                record->flush_state = HAMMER_FST_FLUSH;
                record->leaf = ip->sync_ino_leaf;
                record->leaf.base.create_tid = trans->tid;
                record->leaf.data_len = sizeof(ip->sync_ino_data);
                record->leaf.create_ts = trans->time32;
                record->data = (void *)&ip->sync_ino_data;
                record->flags |= HAMMER_RECF_INTERLOCK_BE;

                /*
                 * If this flag is set we cannot sync the new file size
                 * because we haven't finished related truncations.  The
                 * inode will be flushed in another flush group to finish
                 * the job.
                 */
                if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
                    ip->sync_ino_data.size != ip->ino_data.size) {
                        redirty = 1;
                        ip->sync_ino_data.size = ip->ino_data.size;
                } else {
                        redirty = 0;
                }

                for (;;) {
                        error = hammer_ip_sync_record_cursor(cursor, record);
                        if (hammer_debug_inode)
                                kprintf("GENREC %p rec %08x %d\n",
                                        ip, record->flags, error);
                        if (error != EDEADLK)
                                break;
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("GENREC reinit %d\n", error);
                        if (error)
                                break;
                }

                /*
                 * Note:  The record was never on the inode's record tree
                 * so just wave our hands importantly and destroy it.
                 */
                record->flags |= HAMMER_RECF_COMMITTED;
                record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
                record->flush_state = HAMMER_FST_IDLE;
                ++ip->rec_generation;
                hammer_rel_mem_record(record);

                if (error == 0) {
                        if (hammer_debug_inode)
                                kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
                        ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                            HAMMER_INODE_SDIRTY |
                                            HAMMER_INODE_ATIME |
                                            HAMMER_INODE_MTIME);
                        ip->flags &= ~HAMMER_INODE_DELONDISK;
                        if (redirty)
                                ip->sync_flags |= HAMMER_INODE_DDIRTY;

                        /*
                         * Root volume count of inodes
                         */
                        hammer_sync_lock_sh(trans);
                        if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
                                hammer_modify_volume_field(trans,
                                                           trans->rootvol,
                                                           vol0_stat_inodes);
                                ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                                hammer_modify_volume_done(trans->rootvol);
                                ip->flags |= HAMMER_INODE_ONDISK;
                                if (hammer_debug_inode)
                                        kprintf("NOWONDISK %p\n", ip);
                        }
                        hammer_sync_unlock(trans);
                }
        }

        /*
         * If the inode has been destroyed, clean out any left-over flags
         * that may have been set by the frontend.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                    HAMMER_INODE_SDIRTY |
                                    HAMMER_INODE_ATIME |
                                    HAMMER_INODE_MTIME);
        }
        return(error);
}
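/*
 * In other words, an inode update is a delete + re-insert at the B-Tree
 * level: the old inode record is marked deleted as of trans->tid
 * (DELONDISK), then a new record with create_tid = trans->tid is written
 * from the sync_ino_* snapshot.  The retention policy decides whether
 * the old record is physically destroyed or kept for history.
 */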
/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        int error;

retry:
        error = 0;
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
            HAMMER_INODE_ONDISK) {
                return(0);
        }

        hammer_normalize_cursor(cursor);
        cursor->key_beg.localization = ip->obj_localization +
                                       HAMMER_LOCALIZE_INODE;
        cursor->key_beg.obj_id = ip->obj_id;
        cursor->key_beg.key = 0;
        cursor->key_beg.create_tid = 0;
        cursor->key_beg.delete_tid = 0;
        cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor->key_beg.obj_type = 0;
        cursor->asof = ip->obj_asof;
        cursor->flags &= ~HAMMER_CURSOR_INITMASK;
        cursor->flags |= HAMMER_CURSOR_ASOF;
        cursor->flags |= HAMMER_CURSOR_GET_LEAF;
        cursor->flags |= HAMMER_CURSOR_GET_DATA;
        cursor->flags |= HAMMER_CURSOR_BACKEND;

        error = hammer_btree_lookup(cursor);
        if (error == 0) {
                hammer_cache_node(&ip->cache[0], cursor->node);
                if (ip->sync_flags & HAMMER_INODE_MTIME) {
                        /*
                         * Updating MTIME requires an UNDO.  Just cover
                         * both atime and mtime.
                         */
                        hammer_sync_lock_sh(trans);
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                     HAMMER_ITIMES_BASE(&cursor->data->inode),
                                     HAMMER_ITIMES_BYTES);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        cursor->data->inode.mtime = ip->sync_ino_data.mtime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                        hammer_sync_unlock(trans);
                } else if (ip->sync_flags & HAMMER_INODE_ATIME) {
                        /*
                         * Updating atime only can be done in-place with
                         * no UNDO.
                         */
                        hammer_sync_lock_sh(trans);
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                             NULL, 0);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                        hammer_sync_unlock(trans);
                }
                ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
        }
        if (error == EDEADLK) {
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor,
                                           &ip->cache[0], ip);
                if (error == 0)
                        goto retry;
        }
        return(error);
}
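/*
 * This works because atime/mtime live in the portion of the on-disk
 * inode covered by HAMMER_ITIMES_BASE/HAMMER_ITIMES_BYTES, which the
 * leaf CRC does not include: rewriting atime in-place with no UNDO
 * cannot produce a CRC failure after a crash.
 */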
/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
        /*hammer_mount_t hmp = ip->hmp;*/

        /*
         * Handle disposition when dropping the last ref.
         */
        for (;;) {
                if (hammer_oneref(&ip->lock)) {
                        /*
                         * Determine whether on-disk action is needed for
                         * the inode's final disposition.
                         */
                        KKASSERT(ip->vp == NULL);
                        hammer_inode_unloadable_check(ip, 0);
                        if (ip->flags & HAMMER_INODE_MODMASK) {
                                hammer_flush_inode(ip, 0);
                        } else if (hammer_oneref(&ip->lock)) {
                                hammer_unload_inode(ip);
                                break;
                        }
                } else {
                        if (flush)
                                hammer_flush_inode(ip, 0);

                        /*
                         * The inode still has multiple refs, try to drop
                         * one ref.
                         */
                        KKASSERT(hammer_isactive(&ip->lock) >= 1);
                        if (hammer_isactive(&ip->lock) > 1) {
                                hammer_rel(&ip->lock);
                                break;
                        }
                }
        }
}
/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * The inode must be completely clean.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
        hammer_mount_t hmp = ip->hmp;

        KASSERT(hammer_oneref(&ip->lock),
                ("hammer_unload_inode: %d refs\n", hammer_isactive(&ip->lock)));
        KKASSERT(ip->vp == NULL);
        KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
        KKASSERT(ip->cursor_ip_refs == 0);
        KKASSERT(hammer_notlocked(&ip->lock));
        KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

        KKASSERT(RB_EMPTY(&ip->rec_tree));
        KKASSERT(TAILQ_EMPTY(&ip->target_list));

        if (ip->flags & HAMMER_INODE_RDIRTY) {
                RB_REMOVE(hammer_redo_rb_tree, &hmp->rb_redo_root, ip);
                ip->flags &= ~HAMMER_INODE_RDIRTY;
        }
        RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

        hammer_free_inode(ip);
        return(0);
}
/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
{
        hammer_record_t rec;

        /*
         * Get rid of the inode's in-memory records, regardless of their
         * state, and clear the mod-mask.
         */
        while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
                TAILQ_REMOVE(&ip->target_list, rec, target_entry);
                rec->target_ip = NULL;
                if (rec->flush_state == HAMMER_FST_SETUP)
                        rec->flush_state = HAMMER_FST_IDLE;
        }
        while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
                if (rec->flush_state == HAMMER_FST_FLUSH)
                        --rec->flush_group->refs;
                else
                        hammer_ref(&rec->lock);
                KKASSERT(hammer_oneref(&rec->lock));
                rec->flush_state = HAMMER_FST_IDLE;
                rec->flush_group = NULL;
                rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
                rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
                ++ip->rec_generation;
                hammer_rel_mem_record(rec);
        }
        ip->flags &= ~HAMMER_INODE_MODMASK;
        ip->sync_flags &= ~HAMMER_INODE_MODMASK;
        KKASSERT(ip->vp == NULL);

        /*
         * Remove the inode from any flush group, force it idle.  FLUSH
         * and SETUP states have an inode ref.
         */
        switch(ip->flush_state) {
        case HAMMER_FST_FLUSH:
                RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
                --ip->flush_group->refs;
                ip->flush_group = NULL;
                /* fall through */
        case HAMMER_FST_SETUP:
                hammer_rel(&ip->lock);
                ip->flush_state = HAMMER_FST_IDLE;
                /* fall through */
        case HAMMER_FST_IDLE:
                break;
        }

        /*
         * There shouldn't be any associated vnode.  The unload needs at
         * least one ref, if we do have a vp steal its ip ref.
         */
        if (ip->vp) {
                kprintf("hammer_destroy_inode_callback: Unexpected "
                        "vnode association ip %p vp %p\n", ip, ip->vp);
                ip->vp->v_data = NULL;
                ip->vp = NULL;
        } else {
                hammer_ref(&ip->lock);
        }
        hammer_unload_inode(ip);
        return(0);
}
/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
        hammer_mount_t hmp = ip->hmp;

        if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
                ip->flags |= HAMMER_INODE_RO;
        else
                ip->flags &= ~HAMMER_INODE_RO;
        return(0);
}
/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated, not incl mtime/atime,
 *			and not including size changes due to write-append
 *			(but other size changes are included).
 * HAMMER_INODE_SDIRTY: Inode data has been updated, size changes due to
 *			write-append.
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
{
        /*
         * ronly of 0 or 2 does not trigger assertion.
         * 2 is a special error state
         */
        KKASSERT(ip->hmp->ronly != 1 ||
                  (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
                            HAMMER_INODE_SDIRTY |
                            HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
                            HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
        if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
                ip->flags |= HAMMER_INODE_RSV_INODES;
                ++ip->hmp->rsv_inodes;
        }

        /*
         * Set the NEWINODE flag in the transaction if the inode
         * transitions to a dirty state.  This is used to track
         * the load on the inode cache.
         */
        if (trans &&
            (ip->flags & HAMMER_INODE_MODMASK) == 0 &&
            (flags & HAMMER_INODE_MODMASK)) {
                trans->flags |= HAMMER_TRANSF_NEWINODE;
        }

        ip->flags |= flags;
}
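/*
 * Typical usage (illustrative, mirroring hammer_mkroot_pseudofs() above):
 *
 *	++ip->ino_data.nlinks;
 *	hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
 *
 * The flag accumulates in ip->flags and is snapshotted into
 * ip->sync_flags by hammer_flush_inode_core() when the inode enters a
 * flush group.
 */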
/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * Upon return if the inode could not be flushed due to a setup
 * dependency, then it will be automatically flushed when the dependency
 * is satisfied.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
        hammer_mount_t hmp;
        hammer_flush_group_t flg;
        int good;

        /*
         * next_flush_group is the first flush group we can place the inode
         * in.  It may be NULL.  If it becomes full we append a new flush
         * group and make that the next_flush_group.
         */
        hmp = ip->hmp;
        while ((flg = hmp->next_flush_group) != NULL) {
                KKASSERT(flg->running == 0);
                if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit)
                        break;
                hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
                hammer_flusher_async(ip->hmp, flg);
        }
        if (flg == NULL) {
                flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
                hmp->next_flush_group = flg;
                RB_INIT(&flg->flush_tree);
                TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
        }

        /*
         * Trivial 'nothing to flush' case.  If the inode is in a SETUP
         * state we have to put it back into an IDLE state so we can
         * drop the extra ref.
         *
         * If we have a parent dependency we must still fall through
         * so we can run it.
         */
        if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
                if (ip->flush_state == HAMMER_FST_SETUP &&
                    TAILQ_EMPTY(&ip->target_list)) {
                        ip->flush_state = HAMMER_FST_IDLE;
                        hammer_rel_inode(ip, 0);
                }
                if (ip->flush_state == HAMMER_FST_IDLE)
                        return;
        }

        /*
         * Our flush action will depend on the current state.
         */
        switch(ip->flush_state) {
        case HAMMER_FST_IDLE:
                /*
                 * We have no dependencies and can flush immediately.  Some
                 * of our children may not be flushable so we have to re-test
                 * with that additional knowledge.
                 */
                hammer_flush_inode_core(ip, flg, flags);
                break;
        case HAMMER_FST_SETUP:
                /*
                 * Recurse upwards through dependencies via target_list
                 * and start their flusher actions going if possible.
                 *
                 * 'good' is our connectivity.  -1 means we have none and
                 * can't flush, 0 means there weren't any dependencies, and
                 * 1 means we have good connectivity.
                 */
                good = hammer_setup_parent_inodes(ip, 0, flg);

                if (good >= 0) {
                        /*
                         * We can continue if good >= 0.  Determine how
                         * many records under our inode can be flushed (and
                         * mark them).
                         */
                        hammer_flush_inode_core(ip, flg, flags);
                } else {
                        /*
                         * Parent has no connectivity, tell it to flush
                         * us as soon as it does.
                         *
                         * The REFLUSH flag is also needed to trigger
                         * dependency wakeups.
                         */
                        ip->flags |= HAMMER_INODE_CONN_DOWN |
                                     HAMMER_INODE_REFLUSH;
                        if (flags & HAMMER_FLUSH_SIGNAL) {
                                ip->flags |= HAMMER_INODE_RESIGNAL;
                                hammer_flusher_async(ip->hmp, flg);
                        }
                }
                break;
        case HAMMER_FST_FLUSH:
                /*
                 * We are already flushing, flag the inode to reflush
                 * if needed after it completes its current flush.
                 *
                 * The REFLUSH flag is also needed to trigger
                 * dependency wakeups.
                 */
                if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
                        ip->flags |= HAMMER_INODE_REFLUSH;
                if (flags & HAMMER_FLUSH_SIGNAL) {
                        ip->flags |= HAMMER_INODE_RESIGNAL;
                        hammer_flusher_async(ip->hmp, flg);
                }
                break;
        }
}
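/*
 * Inode flush states form a small state machine:
 *
 *	IDLE  -> FLUSH	(no dependencies, flushed immediately)
 *	SETUP -> FLUSH	(parent dependencies resolved upward first)
 *	FLUSH -> FLUSH	(already flushing; REFLUSH requeues it)
 *
 * SETUP and FLUSH both hold an extra inode reference, dropped when the
 * inode returns to IDLE.
 */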
/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 *     so for now do not ref/deref the structures.  Note that if we use the
 *     ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
                           hammer_flush_group_t flg)
{
        hammer_record_t depend;
        int good;
        int r;

        /*
         * If we hit our recursion limit and we have parent dependencies
         * we cannot continue.  Returning < 0 will cause us to be flagged
         * for reflush.  Returning -2 cuts off additional dependency checks
         * because they are likely to also hit the depth limit.
         *
         * We cannot return < 0 if there are no dependencies or there might
         * not be anything to wakeup (ip).
         */
        if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
                kprintf("HAMMER Warning: depth limit reached on "
                        "setup recursion, inode %p %016llx\n",
                        ip, (long long)ip->obj_id);
                return(-2);
        }

        /*
         * Scan dependencies
         */
        good = 0;
        TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
                r = hammer_setup_parent_inodes_helper(depend, depth, flg);
                KKASSERT(depend->target_ip == ip);
                if (r < 0 && good == 0)
                        good = -1;
                if (r > 0)
                        good = 1;

                /*
                 * If we failed due to the recursion depth limit then stop
                 * now.
                 */
                if (r == -2)
                        break;
        }
        return(good);
}
/*
 * This helper function takes a record representing the dependency between
 * the parent inode and child inode.
 *
 * record->ip		= parent inode
 * record->target_ip	= child inode
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependency and there is no connectivity.
 */
static int
hammer_setup_parent_inodes_helper(hammer_record_t record, int depth,
                                  hammer_flush_group_t flg)
{
        hammer_inode_t pip;
        int good;

        KKASSERT(record->flush_state != HAMMER_FST_IDLE);
        pip = record->ip;

        /*
         * If the record is already flushing, is it in our flush group?
         *
         * If it is in our flush group but it is a general record or a
         * delete-on-disk, it does not improve our connectivity (return 0),
         * and if the target inode is not trying to destroy itself we can't
         * allow the operation yet anyway (the second return -1).
         */
        if (record->flush_state == HAMMER_FST_FLUSH) {
                /*
                 * If not in our flush group ask the parent to reflush
                 * us as soon as possible.
                 */
                if (record->flush_group != flg) {
                        pip->flags |= HAMMER_INODE_REFLUSH;
                        record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
                        return(-1);
                }

                /*
                 * If in our flush group everything is already set up,
                 * just return whether the record will improve our
                 * visibility or not.
                 */
                if (record->type == HAMMER_MEM_RECORD_ADD)
                        return(1);
                return(0);
        }

        /*
         * It must be a setup record.  Try to resolve the setup dependencies
         * by recursing upwards so we can place ip on the flush list.
         *
         * Limit ourselves to 20 levels of recursion to avoid blowing out
         * the kernel stack.  If we hit the recursion limit we can't flush
         * until the parent flushes.  The parent will flush independently
         * on its own and ultimately a deep recursion will be resolved.
         */
        KKASSERT(record->flush_state == HAMMER_FST_SETUP);

        good = hammer_setup_parent_inodes(pip, depth + 1, flg);

        /*
         * If good < 0 the parent has no connectivity and we cannot safely
         * flush the directory entry, which also means we can't flush our
         * ip.  Flag us for downward recursion once the parent's
         * connectivity is resolved.  Flag the parent for [re]flush or it
         * may not check for downward recursions.
         */
        if (good < 0) {
                pip->flags |= HAMMER_INODE_REFLUSH;
                record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
                return(good);
        }

        /*
         * We are go, place the parent inode in a flushing state so we can
         * place its record in a flushing state.  Note that the parent
         * may already be flushing.  The record must be in the same flush
         * group as the parent.
         */
        if (pip->flush_state != HAMMER_FST_FLUSH)
                hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
        KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
        KKASSERT(record->flush_state == HAMMER_FST_SETUP);

#if 0
        if (record->type == HAMMER_MEM_RECORD_DEL &&
            (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
                /*
                 * Regardless of flushing state we cannot sync this path if the
                 * record represents a delete-on-disk but the target inode
                 * is not ready to sync its own deletion.
                 *
                 * XXX need to count effective nlinks to determine whether
                 * the flush is ok, otherwise removing a hardlink will
                 * just leave the DEL record to rot.
                 */
                record->target_ip->flags |= HAMMER_INODE_REFLUSH;
                return(-1);
        } else
#endif
        if (pip->flush_group == flg) {
                /*
                 * Because we have not calculated nlinks yet we can just
                 * set records to the flush state if the parent is in
                 * the same flush group as we are.
                 */
                record->flush_state = HAMMER_FST_FLUSH;
                record->flush_group = flg;
                ++record->flush_group->refs;
                hammer_ref(&record->lock);

                /*
                 * A general directory-add contributes to our visibility.
                 *
                 * Otherwise it is probably a directory-delete or
                 * delete-on-disk record and does not contribute to our
                 * visibility (but we can still flush it).
                 */
                if (record->type == HAMMER_MEM_RECORD_ADD)
                        return(1);
                return(0);
        } else {
                /*
                 * If the parent is not in our flush group we cannot
                 * flush this record yet, there is no visibility.
                 * We tell the parent to reflush and mark ourselves
                 * so the parent knows it should flush us too.
                 */
                pip->flags |= HAMMER_INODE_REFLUSH;
                record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
                return(0);
        }
}
/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
{
        int go_count;

        /*
         * Set flush state and prevent the flusher from cycling into
         * the next flush group.  Do not place the ip on the list yet.
         * Inodes not in the idle state get an extra reference.
         */
        KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
        if (ip->flush_state == HAMMER_FST_IDLE)
                hammer_ref(&ip->lock);
        ip->flush_state = HAMMER_FST_FLUSH;
        ip->flush_group = flg;
        ++ip->hmp->flusher.group_lock;
        ++ip->hmp->count_iqueued;
        ++hammer_count_iqueued;
        ++flg->total_count;
        hammer_redo_fifo_start_flush(ip);

        /*
         * If the flush group reaches the autoflush limit we want to signal
         * the flusher.  This is particularly important for remove()s.
         *
         * If the default hammer_limit_reclaim is changed via sysctl
         * make sure we don't hit a degenerate case where we don't start
         * a flush but blocked on further inode ops.
         */
        if (flg->total_count == hammer_autoflush ||
            flg->total_count >= hammer_limit_reclaim / 4)
                flags |= HAMMER_FLUSH_SIGNAL;

        /*
         * We need to be able to vfsync/truncate from the backend.
         *
         * XXX Any truncation from the backend will acquire the vnode
         *     independently.
         */
        KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
        if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
                ip->flags |= HAMMER_INODE_VHELD;
                vref(ip->vp);
        }

        /*
         * Figure out how many in-memory records we can actually flush
         * (not including inode meta-data, buffers, etc).
         */
        KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
        if (flags & HAMMER_FLUSH_RECURSION) {
                /*
                 * If this is an upwards recursion we do not want to
                 * recurse down again!
                 */
                go_count = 1;
#if 0
        } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
                /*
                 * No new records are added if we must complete a flush
                 * from a previous cycle, but we do have to move the records
                 * from the previous cycle to the current one.
                 */
#if 0
                go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
                                   hammer_syncgrp_child_callback, NULL);
#endif
                go_count = 1;
#endif
        } else {
                /*
                 * Normal flush, scan records and bring them into the flush.
                 * Directory adds and deletes are usually skipped (they are
                 * grouped with the related inode rather than with the
                 * directory).
                 *
                 * go_count can be negative, which means the scan aborted
                 * due to the flush group being over-full and we should
                 * flush what we have.
                 */
                go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
                                   hammer_setup_child_callback, NULL);
        }

        /*
         * This is a more involved test that includes go_count.  If we
         * can't flush, flag the inode and return.  If go_count is 0 we
         * are unable to flush any records in our rec_tree and
         * must ignore the XDIRTY flag.
         */
        if (go_count == 0) {
                if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
                        --ip->hmp->count_iqueued;
                        --hammer_count_iqueued;

                        --flg->total_count;
                        ip->flush_state = HAMMER_FST_SETUP;
                        ip->flush_group = NULL;
                        if (ip->flags & HAMMER_INODE_VHELD) {
                                ip->flags &= ~HAMMER_INODE_VHELD;
                                vrele(ip->vp);
                        }

                        /*
                         * REFLUSH is needed to trigger dependency wakeups
                         * when an inode is in SETUP.
                         */
                        ip->flags |= HAMMER_INODE_REFLUSH;
                        if (flags & HAMMER_FLUSH_SIGNAL) {
                                ip->flags |= HAMMER_INODE_RESIGNAL;
                                hammer_flusher_async(ip->hmp, flg);
                        }
                        if (--ip->hmp->flusher.group_lock == 0)
                                wakeup(&ip->hmp->flusher.group_lock);
                        return;
                }
        }

        /*
         * Snapshot the state of the inode for the backend flusher.
         *
         * We continue to retain save_trunc_off even when all truncations
         * have been resolved as an optimization to determine if we can
         * skip the B-Tree lookup for overwrite deletions.
         *
         * NOTE: The DELETING flag is a mod flag, but it is also sticky,
         * and stays in ip->flags.  Once set, it stays set until the
         * inode is destroyed.
         */
        if (ip->flags & HAMMER_INODE_TRUNCATED) {
                KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
                ip->sync_trunc_off = ip->trunc_off;
                ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
                ip->flags &= ~HAMMER_INODE_TRUNCATED;
                ip->sync_flags |= HAMMER_INODE_TRUNCATED;

                /*
                 * The save_trunc_off used to cache whether the B-Tree
                 * holds any records past that point is not used until
                 * after the truncation has succeeded, so we can safely
                 * set it now.
                 */
                if (ip->save_trunc_off > ip->sync_trunc_off)
                        ip->save_trunc_off = ip->sync_trunc_off;
        }
        ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
                           ~HAMMER_INODE_TRUNCATED);
        ip->sync_ino_leaf = ip->ino_leaf;
        ip->sync_ino_data = ip->ino_data;
        ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
#ifdef DEBUG_TRUNCATE
        if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
                kprintf("truncateS %016llx\n", ip->sync_trunc_off);
#endif

        /*
         * The flusher list inherits our inode and reference.
         */
        KKASSERT(flg->running == 0);
        RB_INSERT(hammer_fls_rb_tree, &flg->flush_tree, ip);
        if (--ip->hmp->flusher.group_lock == 0)
                wakeup(&ip->hmp->flusher.group_lock);

        if (flags & HAMMER_FLUSH_SIGNAL) {
                hammer_flusher_async(ip->hmp, flg);
        }
}
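/*
 * go_count summary: > 0 means at least one record was placed in (or
 * found in) FST_FLUSH and the flush proceeds; 0 with no other dirty
 * state aborts the flush and drops the inode back to SETUP; negative
 * means the child scan aborted because the flush group was over-full,
 * in which case we still flush what was gathered.
 */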
2123 * Callback for scan of ip->rec_tree. Try to include each record in our
2124 * flush. ip->flush_group has been set but the inode has not yet been
2125 * moved into a flushing state.
2127 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
2130 * We return 1 for any record placed or found in FST_FLUSH, which prevents
2131 * the caller from shortcutting the flush.
2134 hammer_setup_child_callback(hammer_record_t rec
, void *data
)
2136 hammer_flush_group_t flg
;
2137 hammer_inode_t target_ip
;
	/*
	 * Records deleted or committed by the backend are ignored.
	 * Note that the flush detects deleted frontend records at
	 * multiple points to deal with races.  This is just the first
	 * line of defense.  The only time HAMMER_RECF_DELETED_FE cannot
	 * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it
	 * messes up link-count calculations.
	 *
	 * NOTE: Don't get confused between record deletion and, say,
	 * directory entry deletion.  The deletion of a directory entry
	 * which is on-media has nothing to do with the record deletion
	 * mechanics.
	 */
	if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
			  HAMMER_RECF_COMMITTED)) {
		if (rec->flush_state == HAMMER_FST_FLUSH) {
			KKASSERT(rec->flush_group == rec->ip->flush_group);
			r = 1;
		} else {
			r = 0;
		}
		return(r);
	}
	/*
	 * If the record is in an idle state it has no dependencies and
	 * can be flushed.
	 */
	ip = rec->ip;
	flg = ip->flush_group;
	r = 0;

	switch(rec->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * The record has no setup dependency, we can flush it.
		 */
		KKASSERT(rec->target_ip == NULL);
		rec->flush_state = HAMMER_FST_FLUSH;
		rec->flush_group = flg;
		++flg->refs;
		hammer_ref(&rec->lock);
		r = 1;
		break;
	case HAMMER_FST_SETUP:
		/*
		 * The record has a setup dependency.  These are typically
		 * directory entry adds and deletes.  Such entries will be
		 * flushed when their inodes are flushed so we do not
		 * usually have to add them to the flush here.  However,
		 * if the target_ip has set HAMMER_INODE_CONN_DOWN then
		 * it is asking us to flush this record (and it).
		 */
		target_ip = rec->target_ip;
		KKASSERT(target_ip != NULL);
		KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);

		/*
		 * If the target IP is already flushing in our group
		 * we could associate the record, but target_ip has
		 * already synced ino_data to sync_ino_data and we
		 * would also have to adjust nlinks.  Plus there are
		 * ordering issues for adds and deletes.
		 *
		 * Reflush downward if this is an ADD, and upward if
		 * this is a DEL.
		 */
		if (target_ip->flush_state == HAMMER_FST_FLUSH) {
			if (rec->type == HAMMER_MEM_RECORD_ADD)
				ip->flags |= HAMMER_INODE_REFLUSH;
			else
				target_ip->flags |= HAMMER_INODE_REFLUSH;
			break;
		}
		/*
		 * Target IP is not yet flushing.  This can get complex
		 * because we have to be careful about the recursion.
		 *
		 * Directories create an issue for us in that if a flush
		 * of a directory is requested the expectation is to flush
		 * any pending directory entries, but this will cause the
		 * related inodes to recursively flush as well.  We can't
		 * really defer the operation, so we just grab as many as
		 * we can.
		 */
		if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
		    (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
			/*
			 * We aren't reclaiming and the target ip was not
			 * previously prevented from flushing due to this
			 * record dependency.  Do not flush this record.
			 */
		} else if (flg->total_count + flg->refs >
			   ip->hmp->undo_rec_limit) {
			/*
			 * Our flush group is over-full and we risk blowing
			 * out the UNDO FIFO.  Stop the scan, flush what we
			 * have, then reflush the directory.
			 *
			 * The directory may be forced through multiple
			 * flush groups before it can be completely
			 * flushed.
			 */
			ip->flags |= HAMMER_INODE_RESIGNAL |
				     HAMMER_INODE_REFLUSH;
			r = -1;
		} else if (rec->type == HAMMER_MEM_RECORD_ADD) {
			/*
			 * If the target IP is not flushing we can force
			 * it to flush, even if it is unable to write out
			 * any of its own records; we have at least one in
			 * hand that we CAN deal with.
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = flg;
			++flg->refs;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip, flg,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		} else {
			/*
			 * General or delete-on-disk record.
			 *
			 * XXX this needs help.  If a delete-on-disk we could
			 * disconnect the target.  If the target has its own
			 * dependencies they really need to be flushed.
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = flg;
			++flg->refs;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip, flg,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * The flush_group should already match.
		 */
		KKASSERT(rec->flush_group == flg);
		r = 1;
		break;
	}
	return(r);
}
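/*
 * The record flush_state values above form a small state machine:
 * HAMMER_FST_IDLE records have no dependencies and are pulled directly
 * into the flush, HAMMER_FST_SETUP records carry a target_ip dependency
 * that may force a recursive inode flush, and HAMMER_FST_FLUSH records
 * already belong to a flush group.  The accumulated non-zero returns
 * become go_count in hammer_flush_inode_core().
 */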
/*
 * This version just moves records already in a flush state to the new
 * flush group and that is it.
 */
static int
hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
{
	hammer_inode_t ip = rec->ip;

	switch(rec->flush_state) {
	case HAMMER_FST_FLUSH:
		KKASSERT(rec->flush_group == ip->flush_group);
		break;
	default:
		break;
	}
	return(0);
}
/*
 * Wait for a previously queued flush to complete.
 *
 * If a critical error occurred we don't try to wait.
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
	hammer_flush_group_t flg;

	flg = NULL;
	if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
		while (ip->flush_state != HAMMER_FST_IDLE &&
		       (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
			if (ip->flush_state == HAMMER_FST_SETUP)
				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
			if (ip->flush_state != HAMMER_FST_IDLE) {
				ip->flags |= HAMMER_INODE_FLUSHW;
				tsleep(&ip->flags, 0, "hmrwin", 0);
			}
		}
	}
}
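/*
 * NOTE: The tsleep() on &ip->flags above pairs with the
 * wakeup(&ip->flags) issued by hammer_flush_inode_done() when it finds
 * HAMMER_INODE_FLUSHW set at flush completion.
 */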
/*
 * Called by the backend code when a flush has been completed.
 * The inode has already been removed from the flush list.
 *
 * A pipelined flush can occur, in which case we must re-enter the
 * inode on the list and re-copy its fields.
 */
void
hammer_flush_inode_done(hammer_inode_t ip, int error)
{
	hammer_mount_t hmp;
	int dorel;

	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

	hmp = ip->hmp;

	/*
	 * Auto-reflush if the backend could not completely flush
	 * the inode.  This fixes a case where a deferred buffer flush
	 * could cause fsync to return early.
	 */
	if (ip->sync_flags & HAMMER_INODE_MODMASK)
		ip->flags |= HAMMER_INODE_REFLUSH;
	/*
	 * Merge left-over flags back into the frontend and fix the state.
	 * Incomplete truncations are retained by the backend.
	 */
	ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
	ip->sync_flags &= HAMMER_INODE_TRUNCATED;
	/*
	 * The backend may have adjusted nlinks, so if the adjusted nlinks
	 * does not match the frontend set the frontend's DDIRTY flag again.
	 */
	if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
		ip->flags |= HAMMER_INODE_DDIRTY;
	/*
	 * Fix up the dirty buffer status.
	 */
	if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
		ip->flags |= HAMMER_INODE_BUFS;
	}
	hammer_redo_fifo_end_flush(ip);
	/*
	 * Re-set the XDIRTY flag if some of the inode's in-memory records
	 * could not be flushed.
	 */
	KKASSERT((RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
		 (!RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) != 0));
	/*
	 * Do not lose track of inodes which no longer have vnode
	 * associations, otherwise they may never get flushed again.
	 *
	 * The reflush flag can be set superfluously, causing extra pain
	 * for no reason.  If the inode is no longer modified it no longer
	 * needs to be flushed.
	 */
	if (ip->flags & HAMMER_INODE_MODMASK) {
		if (ip->vp == NULL)
			ip->flags |= HAMMER_INODE_REFLUSH;
	} else {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
	}
	/*
	 * Adjust the flush state.
	 */
	if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
		/*
		 * We were unable to flush out all our records, leave the
		 * inode in a flush state and in the current flush group.
		 * The flush group will be re-run.
		 *
		 * This occurs if the UNDO block gets too full or there is
		 * too much dirty meta-data and allows the flusher to
		 * finalize the UNDO block and then re-flush.
		 */
		ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
		dorel = 0;
	} else {
		/*
		 * Remove from the flush_group
		 */
		RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
		ip->flush_group = NULL;
		/*
		 * Clean up the vnode ref and tracking counts.
		 */
		if (ip->flags & HAMMER_INODE_VHELD) {
			ip->flags &= ~HAMMER_INODE_VHELD;
			vrele(ip->vp);
		}
		--hmp->count_iqueued;
		--hammer_count_iqueued;
		/*
		 * And adjust the state.
		 */
		if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
			ip->flush_state = HAMMER_FST_IDLE;
			dorel = 1;
		} else {
			ip->flush_state = HAMMER_FST_SETUP;
			dorel = 0;
		}
	}
	/*
	 * If the frontend is waiting for a flush to complete,
	 * wake it up.
	 */
	if (ip->flags & HAMMER_INODE_FLUSHW) {
		ip->flags &= ~HAMMER_INODE_FLUSHW;
		wakeup(&ip->flags);
	}
	/*
	 * If the frontend made more changes and requested another
	 * flush, then try to get it running.
	 *
	 * Reflushes are aborted when the inode is errored out.
	 */
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
	}
	/*
	 * If we have no parent dependencies we can clear CONN_DOWN
	 */
	if (TAILQ_EMPTY(&ip->target_list))
		ip->flags &= ~HAMMER_INODE_CONN_DOWN;
	/*
	 * If the inode is now clean drop the space reservation.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    (ip->flags & HAMMER_INODE_RSV_INODES)) {
		ip->flags &= ~HAMMER_INODE_RSV_INODES;
		--hmp->rsv_inodes;
	}

	if (dorel)
		hammer_rel_inode(ip, 0);
}
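/*
 * NOTE: Both re-entry paths are handled above: the WOULDBLOCK case
 * leaves the inode in its flush group for another flusher pass, while
 * the REFLUSH case re-queues the inode because the frontend modified
 * it again during the flush.
 */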
/*
 * Called from hammer_sync_inode() to synchronize in-memory records
 * to the media.
 */
static int
hammer_sync_record_callback(hammer_record_t record, void *data)
{
	hammer_cursor_t cursor = data;
	hammer_transaction_t trans = cursor->trans;
	hammer_mount_t hmp = trans->hmp;
	int error;
	/*
	 * Skip records that do not belong to the current flush.
	 */
	++hammer_stats_record_iterations;
	if (record->flush_state != HAMMER_FST_FLUSH)
		return(0);

	if (record->flush_group != record->ip->flush_group) {
		kprintf("sync_record %p ip %p bad flush group %p %p\n",
			record, record->ip, record->flush_group,
			record->ip->flush_group);
		if (hammer_debug_critical)
			Debugger("bad flush group");
		return(0);
	}
	KKASSERT(record->flush_group == record->ip->flush_group);
	/*
	 * Interlock the record using the BE flag.  Once BE is set the
	 * frontend cannot change the state of FE.
	 *
	 * NOTE: If FE is set prior to us setting BE we still sync the
	 * record out, but the flush completion code converts it to
	 * a delete-on-disk record instead of destroying it.
	 */
	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
	record->flags |= HAMMER_RECF_INTERLOCK_BE;
	/*
	 * The backend has already disposed of the record.
	 */
	if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
		error = 0;
		goto done;
	}
	/*
	 * If the whole inode is being deleted, all on-disk records will
	 * be deleted very soon and we can't sync any new records to disk
	 * because they would be deleted in the same transaction they were
	 * created in (delete_tid == create_tid), which will assert.
	 *
	 * XXX There may be a case with RECORD_ADD with DELETED_FE set
	 * that we currently panic on.
	 */
	if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
		switch(record->type) {
		case HAMMER_MEM_RECORD_DATA:
			/*
			 * We don't have to do anything, if the record was
			 * committed the space will have been accounted for
			 * anyway.
			 */
			/* fall through */
		case HAMMER_MEM_RECORD_GENERAL:
			/*
			 * Set the deleted-by-backend flag.  Do not set the
			 * backend committed flag, because we are throwing
			 * the record away.
			 */
			record->flags |= HAMMER_RECF_DELETED_BE;
			++record->ip->rec_generation;
			error = 0;
			goto done;
		case HAMMER_MEM_RECORD_ADD:
			panic("hammer_sync_record_callback: illegal add "
			      "during inode deletion record %p", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_INODE:
			panic("hammer_sync_record_callback: attempt to "
			      "sync inode record %p?", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_DEL:
			/*
			 * Follow through and issue the on-disk deletion
			 */
			break;
		}
	}
	/*
	 * If DELETED_FE is set special handling is needed for directory
	 * entries.  Dependent pieces related to the directory entry may
	 * have already been synced to disk.  If this occurs we have to
	 * sync the directory entry and then change the in-memory record
	 * from an ADD to a DELETE to cover the fact that it's been
	 * deleted by the frontend.
	 *
	 * A directory delete covering record (MEM_RECORD_DEL) can never
	 * be deleted by the frontend.
	 *
	 * Any other record type (aka DATA) can be deleted by the frontend.
	 * XXX At the moment the flusher must skip it because there may
	 * be another data record in the flush group for the same block,
	 * meaning that some frontend data changes can leak into the backend's
	 * synchronization point.
	 */
	if (record->flags & HAMMER_RECF_DELETED_FE) {
		if (record->type == HAMMER_MEM_RECORD_ADD) {
			/*
			 * Convert a front-end deleted directory-add to
			 * a directory-delete entry later.
			 */
			record->flags |= HAMMER_RECF_CONVERT_DELETE;
		} else {
			/*
			 * Dispose of the record (race case).  Mark as
			 * deleted by backend (and not committed).
			 */
			KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
			record->flags |= HAMMER_RECF_DELETED_BE;
			++record->ip->rec_generation;
			error = 0;
			goto done;
		}
	}
	/*
	 * Assign the create_tid for new records.  Deletions already
	 * have the record's entire key properly set up.
	 */
	if (record->type != HAMMER_MEM_RECORD_DEL) {
		record->leaf.base.create_tid = trans->tid;
		record->leaf.create_ts = trans->time32;
	}
	/*
	 * This actually moves the record to the on-media B-Tree.  We
	 * must also generate REDO_TERM entries in the UNDO/REDO FIFO
	 * indicating that the related REDO_WRITE(s) have been committed.
	 *
	 * During recovery any REDO_TERM's within the nominal recovery span
	 * are ignored since the related meta-data is being undone, causing
	 * any matching REDO_WRITEs to execute.  The REDO_TERMs outside
	 * the nominal recovery span will match against REDO_WRITEs and
	 * prevent them from being executed (because the meta-data has
	 * already been synchronized).
	 */
	if (record->flags & HAMMER_RECF_REDO) {
		KKASSERT(record->type == HAMMER_MEM_RECORD_DATA);
		hammer_generate_redo(trans, record->ip,
				     record->leaf.base.key -
					record->leaf.data_len,
				     HAMMER_REDO_TERM_WRITE,
				     NULL,
				     record->leaf.data_len);
	}
	for (;;) {
		error = hammer_ip_sync_record_cursor(cursor, record);
		if (error != EDEADLK)
			break;
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
					   record->ip);
		if (error)
			break;
	}
	record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
done:
	hammer_flush_record_done(record, error);
	/*
	 * Do partial finalization if we have built up too many dirty
	 * buffers.  Otherwise a buffer cache deadlock can occur when
	 * doing things like creating tens of thousands of tiny files.
	 *
	 * We must release our cursor lock to avoid a 3-way deadlock
	 * due to the exclusive sync lock the finalizer must get.
	 *
	 * WARNING: See warnings in hammer_unlock_cursor() function.
	 */
	if (hammer_flusher_meta_limit(hmp)) {
		hammer_unlock_cursor(cursor);
		hammer_flusher_finalize(trans, 0);
		hammer_lock_cursor(cursor);
	}
	return(error);
}
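/*
 * NOTE: The EDEADLK loop above simply re-initializes the cursor and
 * retries the B-Tree operation; cursor deadlocks are expected under
 * concurrent B-Tree activity and are not treated as hard errors.
 */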
/*
 * Backend function called by the flusher to sync an inode to media.
 */
int
hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	hammer_node_t tmp_node;
	hammer_record_t depend;
	hammer_record_t next;
	int error, tmp_error;
	u_int64_t nlinks;
	if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
		return(0);

	error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	if (error)
		return(error);
	/*
	 * Any directory records referencing this inode which are not in
	 * our current flush group must adjust our nlink count for the
	 * purposes of synchronizing to disk.
	 *
	 * Records which are in our flush group can be unlinked from our
	 * inode now, potentially allowing the inode to be physically
	 * deleted.
	 *
	 * This cannot block.
	 */
	nlinks = ip->ino_data.nlinks;
	next = TAILQ_FIRST(&ip->target_list);
	while ((depend = next) != NULL) {
		next = TAILQ_NEXT(depend, target_entry);
		if (depend->flush_state == HAMMER_FST_FLUSH &&
		    depend->flush_group == ip->flush_group) {
			/*
			 * If this is an ADD that was deleted by the frontend
			 * the frontend nlinks count will have already been
			 * decremented, but the backend is going to sync its
			 * directory entry and must account for it.  The
			 * record will be converted to a delete-on-disk when
			 * it gets synced.
			 *
			 * If the ADD was not deleted by the frontend we
			 * can remove the dependency from our target_list.
			 */
			if (depend->flags & HAMMER_RECF_DELETED_FE) {
				++nlinks;
			} else {
				TAILQ_REMOVE(&ip->target_list, depend,
					     target_entry);
				depend->target_ip = NULL;
			}
		} else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
			/*
			 * Not part of our flush group and not deleted by
			 * the front-end, adjust the link count synced to
			 * the media (undo what the frontend did when it
			 * queued the record).
			 */
			KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
			switch(depend->type) {
			case HAMMER_MEM_RECORD_ADD:
				--nlinks;
				break;
			case HAMMER_MEM_RECORD_DEL:
				++nlinks;
				break;
			default:
				break;
			}
		}
	}
	/*
	 * Set dirty if we had to modify the link count.
	 */
	if (ip->sync_ino_data.nlinks != nlinks) {
		KKASSERT((int64_t)nlinks >= 0);
		ip->sync_ino_data.nlinks = nlinks;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
	}
	/*
	 * If there is a truncation queued destroy any data past the (aligned)
	 * truncation point.  Userland will have dealt with the buffer
	 * containing the truncation point for us.
	 *
	 * We don't flush pending frontend data buffers until after we've
	 * dealt with the truncation.
	 */
	if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
		/*
		 * Interlock trunc_off.  The VOP front-end may continue to
		 * make adjustments to it while we are blocked.
		 */
		off_t trunc_off;
		off_t aligned_trunc_off;
		int blkmask;

		trunc_off = ip->sync_trunc_off;
		blkmask = hammer_blocksize(trunc_off) - 1;
		aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
		/*
		 * Delete any whole blocks on-media.  The front-end has
		 * already cleaned out any partial block and made it
		 * pending.  The front-end may have updated trunc_off
		 * while we were blocked so we only use sync_trunc_off.
		 *
		 * This operation can blow out the buffer cache, EWOULDBLOCK
		 * means we were unable to complete the deletion.  The
		 * deletion will update sync_trunc_off in that case.
		 */
		error = hammer_ip_delete_range(&cursor, ip,
					       aligned_trunc_off,
					       0x7FFFFFFFFFFFFFFFLL, 2);
		if (error == EWOULDBLOCK) {
			ip->flags |= HAMMER_INODE_WOULDBLOCK;
			error = 0;
			goto defer_buffer_flush;
		}
		/*
		 * Generate a REDO_TERM_TRUNC entry in the UNDO/REDO FIFO.
		 *
		 * XXX we do this even if we did not previously generate
		 * a REDO_TRUNC record.  This operation may enclose the
		 * range for multiple prior truncation entries in the REDO
		 * log.
		 */
		if (trans->hmp->version >= HAMMER_VOL_VERSION_FOUR &&
		    (ip->flags & HAMMER_INODE_RDIRTY)) {
			hammer_generate_redo(trans, ip, aligned_trunc_off,
					     HAMMER_REDO_TERM_TRUNC,
					     NULL, 0);
		}
		/*
		 * Clear the truncation flag on the backend after we have
		 * completed the deletions.  Backend data is now good again
		 * (including new records we are about to sync, below).
		 *
		 * Leave sync_trunc_off intact.  As we write additional
		 * records the backend will update sync_trunc_off.  This
		 * tells the backend whether it can skip the overwrite
		 * test.  This should work properly even when the backend
		 * writes full blocks where the truncation point straddles
		 * the block because the comparison is against the base
		 * offset of the record.
		 */
		ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
		/* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
	} else {
		error = 0;
	}
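	/*
	 * Worked example of the alignment above, assuming the 16KB
	 * small-block size returned by hammer_blocksize() for offsets
	 * in the small-block zone: a truncation to offset 0x6000 gives
	 * blkmask 0x3FFF and aligned_trunc_off 0x8000.  The partial
	 * block below the truncation point is left to the frontend
	 * buffer; only whole blocks at and beyond 0x8000 are deleted
	 * on-media.
	 */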
	/*
	 * Now sync related records.  These will typically be directory
	 * entries, records tracking direct-writes, or delete-on-disk records.
	 */
	tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
			    hammer_sync_record_callback, &cursor);
	if (tmp_error)
		error = tmp_error;
	hammer_cache_node(&ip->cache[1], cursor.node);
	/*
	 * Re-seek for inode update, assuming our cache hasn't been ripped
	 * out from under us.
	 */
	if (error == 0) {
		tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
		if (tmp_node) {
			hammer_cursor_downgrade(&cursor);
			hammer_lock_sh(&tmp_node->lock);
			if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
				hammer_cursor_seek(&cursor, tmp_node, 0);
			hammer_unlock(&tmp_node->lock);
			hammer_rel_node(tmp_node);
		}
		error = 0;
	}
	/*
	 * If we are deleting the inode the frontend had better not have
	 * any active references on elements making up the inode.
	 *
	 * The call to hammer_ip_delete_clean() cleans up auxiliary records
	 * but not DB or DATA records.  Those must have already been deleted
	 * by the normal truncation mechanic.
	 */
	if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
	    RB_EMPTY(&ip->rec_tree) &&
	    (ip->sync_flags & HAMMER_INODE_DELETING) &&
	    (ip->flags & HAMMER_INODE_DELETED) == 0) {
		int count1 = 0;

		error = hammer_ip_delete_clean(&cursor, ip, &count1);
		if (error == 0) {
			ip->flags |= HAMMER_INODE_DELETED;
			ip->sync_flags &= ~HAMMER_INODE_DELETING;
			ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
			KKASSERT(RB_EMPTY(&ip->rec_tree));
			/*
			 * Set delete_tid in both the frontend and backend
			 * copy of the inode record.  The DELETED flag handles
			 * this, do not set DDIRTY.
			 */
			ip->ino_leaf.base.delete_tid = trans->tid;
			ip->sync_ino_leaf.base.delete_tid = trans->tid;
			ip->ino_leaf.delete_ts = trans->time32;
			ip->sync_ino_leaf.delete_ts = trans->time32;
			/*
			 * Adjust the inode count in the volume header
			 */
			hammer_sync_lock_sh(trans);
			if (ip->flags & HAMMER_INODE_ONDISK) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
			}
			hammer_sync_unlock(trans);
		}
	}

	ip->sync_flags &= ~HAMMER_INODE_BUFS;
	/*
	 * Now update the inode's on-disk inode-data and/or on-disk record.
	 * DELETED and ONDISK are managed only in ip->flags.
	 *
	 * In the case of a deferred buffer flush we still update the on-disk
	 * inode to satisfy visibility requirements if there happen to be
	 * directory dependencies.
	 */
defer_buffer_flush:
	switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
		/*
		 * If deleted and on-disk, don't set any additional flags.
		 * The delete flag takes care of things.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_SDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		break;
	case HAMMER_INODE_DELETED:
		/*
		 * Take care of the case where a deleted inode was never
		 * flushed to the disk in the first place.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_SDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		while (RB_ROOT(&ip->rec_tree)) {
			hammer_record_t record = RB_ROOT(&ip->rec_tree);
			hammer_ref(&record->lock);
			KKASSERT(hammer_oneref(&record->lock));
			record->flags |= HAMMER_RECF_DELETED_BE;
			++record->ip->rec_generation;
			hammer_rel_mem_record(record);
		}
		break;
	case HAMMER_INODE_ONDISK:
		/*
		 * If already on-disk, do not set any additional flags.
		 */
		break;
	default:
		/*
		 * If not on-disk and not deleted, set DDIRTY to force
		 * an initial record to be written.
		 *
		 * Also set the create_tid in both the frontend and backend
		 * copy of the inode record.
		 */
		ip->ino_leaf.base.create_tid = trans->tid;
		ip->ino_leaf.create_ts = trans->time32;
		ip->sync_ino_leaf.base.create_tid = trans->tid;
		ip->sync_ino_leaf.create_ts = trans->time32;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
		break;
	}
	/*
	 * If DDIRTY or SDIRTY is set, write out a new record.
	 * If the inode is already on-disk the old record is marked as
	 * deleted.
	 *
	 * If DELETED is set hammer_update_inode() will delete the existing
	 * record without writing out a new one.
	 *
	 * If *ONLY* the ITIMES flag is set we can update the record in-place.
	 */
	if (ip->flags & HAMMER_INODE_DELETED) {
		error = hammer_update_inode(&cursor, ip);
	} else
	if (!(ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY)) &&
	    (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
		error = hammer_update_itimes(&cursor, ip);
	} else
	if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY |
			      HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
		error = hammer_update_inode(&cursor, ip);
	}
	if (error)
		hammer_critical_error(ip->hmp, ip, error,
				      "while syncing inode");
	hammer_done_cursor(&cursor);
	return(error);
}
/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
	struct vnode *vp;
	/*
	 * Set the DELETING flag when the link count drops to 0 and the
	 * OS no longer has any opens on the inode.
	 *
	 * The backend will clear DELETING (a mod flag) and set DELETED
	 * (a state flag) when it is actually able to perform the
	 * operation.
	 *
	 * Don't reflag the deletion if the flusher is currently syncing
	 * one that was already flagged.  A previously set DELETING flag
	 * may bounce around flags and sync_flags until the operation is
	 * completely done.
	 */
	if (ip->ino_data.nlinks == 0 &&
	    ((ip->flags | ip->sync_flags) & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
		ip->flags |= HAMMER_INODE_DELETING;
		ip->flags |= HAMMER_INODE_TRUNCATED;
		ip->trunc_off = 0;
		vp = NULL;
		if (getvp) {
			if (hammer_get_vnode(ip, &vp) != 0)
				return;
		}
		if (ip->vp)
			nvtruncbuf(ip->vp, 0, HAMMER_BUFSIZE, 0);
		if (getvp)
			vput(vp);
	}
}
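/*
 * NOTE: getvp is non-zero when the caller does not already hold a
 * vnode reference; a vnode is then acquired so the buffer-cache
 * truncation above has something to operate on, and released before
 * returning.
 */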
/*
 * After potentially resolving a dependency the inode is tested
 * to determine whether it needs to be reflushed.
 */
void
hammer_test_inode(hammer_inode_t ip)
{
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		hammer_ref(&ip->lock);
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
		hammer_rel_inode(ip, 0);
	}
}
/*
 * Clear the RECLAIM flag on an inode.  This occurs when the inode is
 * reassociated with a vp or just before it gets freed.
 *
 * Pipeline wakeups to threads blocked due to an excessive number of
 * detached inodes.  This typically occurs when atime updates accumulate
 * while scanning a directory tree.
 */
static void
hammer_inode_wakereclaims(hammer_inode_t ip)
{
	struct hammer_reclaim *reclaim;
	hammer_mount_t hmp = ip->hmp;

	if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
		return;

	--hammer_count_reclaiming;
	--hmp->inode_reclaims;
	ip->flags &= ~HAMMER_INODE_RECLAIM;

	while ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
		if (reclaim->count > 0 && --reclaim->count == 0) {
			TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
			wakeup(reclaim);
		}
		if (hmp->inode_reclaims > hammer_limit_reclaim / 2)
			break;
	}
}
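/*
 * NOTE: Each entry on hmp->reclaim_list is a stack-allocated
 * struct hammer_reclaim owned by a thread sleeping in
 * hammer_inode_waitreclaims().  Dropping an entry's count to zero and
 * issuing the wakeup() releases one waiter per reclaimed inode until
 * the backlog falls below half of hammer_limit_reclaim.
 */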
/*
 * Setup our reclaim pipeline.  We only let so many detached (and dirty)
 * inodes build up before we start blocking.  This routine is called
 * if a new inode is created or an inode is loaded from media.
 *
 * When we block we don't care *which* inode has finished reclaiming,
 * as long as one does.
 */
void
hammer_inode_waitreclaims(hammer_transaction_t trans)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_reclaim reclaim;
	int lower_limit;

	if (curthread->td_proc) {
		struct hammer_inostats *stats;

		stats = hammer_inode_inostats(hmp, curthread->td_proc->p_pid);
		if (stats->count > hammer_limit_reclaim / 2)
			stats->count = hammer_limit_reclaim / 2;
		lower_limit = hammer_limit_reclaim - stats->count;
		if (hammer_debug_general & 0x10000) {
			kprintf("pid %5d limit %d\n",
				(int)curthread->td_proc->p_pid, lower_limit);
		}
		if (hmp->inode_reclaims < lower_limit)
			return;
	} else {
		if (hmp->inode_reclaims < hammer_limit_reclaim)
			return;
	}
	reclaim.count = 1;
	TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
	tsleep(&reclaim, 0, "hmrrcm", hz);
	if (reclaim.count > 0)
		TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
}
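/*
 * Example of the per-process bias above (illustrative numbers): with
 * hammer_limit_reclaim at 4000, a process whose recent inode load has
 * pushed its stats->count to 1000 gets lower_limit 3000 and starts
 * blocking earlier, while a process with no recent load keeps the full
 * 4000-inode headroom.
 */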
static
struct hammer_inostats *
hammer_inode_inostats(hammer_mount_t hmp, pid_t pid)
{
	struct hammer_inostats *stats;
	int delta;
	int chain;

	for (chain = 0; chain < 4; ++chain) {
		stats = &hmp->inostats[(pid + chain) & HAMMER_INOSTATS_HMASK];
		if (stats->pid == pid)
			break;
	}
	if (chain == 4) {
		stats = &hmp->inostats[(pid + ticks) & HAMMER_INOSTATS_HMASK];
		stats->pid = pid;
	}

	if (stats->count && stats->ltick != ticks) {
		delta = ticks - stats->ltick;
		stats->ltick = ticks;
		if (delta <= 0 || delta > hz * 60)
			stats->count = 0;
		else
			stats->count = stats->count * hz / (hz + delta);
	}
	if (hammer_debug_general & 0x10000)
		kprintf("pid %5d stats %d\n", (int)pid, stats->count);
	return(stats);
}
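/*
 * Decay example: when delta equals hz (one second since the last
 * update) the scale factor hz / (hz + delta) is 1/2, so an idle pid's
 * count roughly halves per second of inactivity; anything idle longer
 * than a minute (delta > hz * 60) is zeroed outright.
 */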
/*
 * XXX not used, doesn't work very well due to the large batching nature
 * of flushes.
 *
 * A larger than normal backlog of inodes is sitting in the flusher,
 * enforce a general slowdown to let it catch up.  This routine is only
 * called on completion of a non-flusher-related transaction which
 * performed B-Tree node I/O.
 *
 * It is possible for the flusher to stall in a continuous load.
 * blogbench -i1000 -o seems to do a good job generating this sort of load.
 * If the flusher is unable to catch up the inode count can bloat until
 * we run out of kvm.
 *
 * This is a bit of a hack.
 */
void
hammer_inode_waithard(hammer_mount_t hmp)
{
	/*
	 * Hysteresis.
	 */
	if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
		if (hmp->inode_reclaims < hammer_limit_reclaim / 2 &&
		    hmp->count_iqueued < hmp->count_inodes / 20) {
			hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
			return;
		}
	} else {
		if (hmp->inode_reclaims < hammer_limit_reclaim ||
		    hmp->count_iqueued < hmp->count_inodes / 10) {
			return;
		}
		hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY;
	}

	/*
	 * Block for one flush cycle.
	 */
	hammer_flusher_wait_next(hmp);
}