/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.114 2008/09/24 00:53:51 dillon Exp $
 */

#include "hammer.h"
#include <vm/vm_extern.h>

static int	hammer_unload_inode(struct hammer_inode *ip);
static void	hammer_free_inode(hammer_inode_t ip);
static void	hammer_flush_inode_core(hammer_inode_t ip,
					hammer_flush_group_t flg, int flags);
static int	hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int	hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int	hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
					hammer_flush_group_t flg);
static int	hammer_setup_parent_inodes_helper(hammer_record_t record,
					int depth, hammer_flush_group_t flg);
static void	hammer_inode_wakereclaims(hammer_inode_t ip);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif
/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_localization < ip2->obj_localization)
		return(-1);
	if (ip1->obj_localization > ip2->obj_localization)
		return(1);
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}
/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_localization < ip->obj_localization)
		return(-1);
	if (info->obj_localization > ip->obj_localization)
		return(1);
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}
/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t info = data;

	if (ip->obj_localization > info->obj_localization)
		return(1);
	if (ip->obj_localization < info->obj_localization)
		return(-1);
	if (ip->obj_id > info->obj_id)
		return(1);
	if (ip->obj_id < info->obj_id)
		return(-1);
	return(0);
}
/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
	u_int32_t localization = *(u_int32_t *)data;

	if (ip->obj_localization > localization)
		return(1);
	if (ip->obj_localization < localization)
		return(-1);
	return(0);
}
/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
	if (p1->localization < p2->localization)
		return(-1);
	if (p1->localization > p2->localization)
		return(1);
	return(0);
}
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
	     hammer_pfs_rb_compare, u_int32_t, localization);
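
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the XLOOKUP variant generated above lets callers search the inode tree
 * with a light-weight hammer_inode_info key instead of a full hammer_inode,
 * which is exactly what hammer_get_inode() does below.  The names hmp,
 * obj_id, asof and localization are assumed to be in scope.
 */
#if 0
	struct hammer_inode_info iinfo;
	hammer_inode_t ip;

	iinfo.obj_id = obj_id;			/* 64-bit object id */
	iinfo.obj_asof = asof;			/* snapshot TID being viewed */
	iinfo.obj_localization = localization;	/* PFS localization */
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	/* keys compare in (localization, obj_id, asof) priority order */
#endif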
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(ap->a_vp);
		return(0);
	}

	/*
	 * If the inode no longer has visibility in the filesystem try to
	 * recycle it immediately, even if the inode is dirty.  Recycling
	 * it quickly allows the system to reclaim buffer cache and VM
	 * resources which can matter a lot in a heavily loaded system.
	 *
	 * This can deadlock in vfsync() if we aren't careful.
	 *
	 * Do not queue the inode to the flusher if we still have visibility,
	 * otherwise namespace calls such as chmod will unnecessarily generate
	 * multiple inode updates.
	 */
	if (ip->ino_data.nlinks == 0) {
		hammer_inode_unloadable_check(ip, 0);
		if (ip->flags & HAMMER_INODE_MODMASK)
			hammer_flush_inode(ip, 0);
		vrecycle(ap->a_vp);
	}
	return(0);
}
/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer_inode *ip;
	hammer_mount_t hmp;
	struct vnode *vp;

	vp = ap->a_vp;

	if ((ip = vp->v_data) != NULL) {
		hmp = ip->hmp;
		vp->v_data = NULL;
		ip->vp = NULL;

		if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
			++hammer_count_reclaiming;
			++hmp->inode_reclaims;
			ip->flags |= HAMMER_INODE_RECLAIM;
		}
		hammer_rel_inode(ip, 1);
	}
	return(0);
}
/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
	hammer_mount_t hmp;
	struct vnode *vp;
	int error = 0;
	u_int8_t obj_type;

	hmp = ip->hmp;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				hammer_unlock(&ip->lock);
				vp = *vpp;
				vp->v_type = VBAD;
				vx_put(vp);
				continue;
			}
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;

			obj_type = ip->ino_data.obj_type;
			vp->v_type = hammer_get_vnode_type(obj_type);

			hammer_inode_wakereclaims(ip);

			switch(ip->ino_data.obj_type) {
			case HAMMER_OBJTYPE_CDEV:
			case HAMMER_OBJTYPE_BDEV:
				vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
				addaliasu(vp, ip->ino_data.rmajor,
					  ip->ino_data.rminor);
				break;
			case HAMMER_OBJTYPE_FIFO:
				vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
				break;
			case HAMMER_OBJTYPE_REGFILE:
				break;
			default:
				break;
			}

			/*
			 * Only mark as the root vnode if the ip is not
			 * historical, otherwise the VFS cache will get
			 * confused.  The other half of the special handling
			 * is in hammer_vop_nlookupdotdot().
			 *
			 * Pseudo-filesystem roots can be accessed via
			 * non-root filesystem paths and setting VROOT may
			 * confuse the namecache.  Set VPFSROOT instead.
			 */
			if (ip->obj_id == HAMMER_OBJID_ROOT &&
			    ip->obj_asof == hmp->asof) {
				if (ip->obj_localization == 0)
					vsetflags(vp, VROOT);
				else
					vsetflags(vp, VPFSROOT);
			}

			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			if (vp->v_type == VREG)
				vinitvmio(vp, ip->ino_data.size);
			break;
		}

		/*
		 * loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp)
				break;
			vput(vp);
		}
	}
	*vpp = vp;
	return(error);
}
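
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * typical frontend usage pairs hammer_get_inode() with hammer_get_vnode().
 * The inode comes back referenced but unlocked; hammer_get_vnode() attaches
 * a locked vnode whose association holds its own inode reference, so the
 * caller may drop its own ref afterwards.  trans, dip, obj_id, asof,
 * localization and flags are assumed to be in scope.
 */
#if 0
	hammer_inode_t ip;
	struct vnode *vp;
	int error;

	ip = hammer_get_inode(trans, dip, obj_id, asof, localization,
			      flags, &error);
	if (ip) {
		error = hammer_get_vnode(ip, &vp);
		hammer_rel_inode(ip, 0);	/* vp association keeps a ref */
	}
#endif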
/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
			    int (*callback)(hammer_inode_t ip, void *data),
			    void *data)
{
	hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
				   hammer_inode_info_cmp_all_history,
				   callback, iinfo);
}
/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_node_cache *cachep;
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a dummy inode we return a failure so dounlink
	 * (which does another lookup) doesn't try to mess with the
	 * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
	 * to ref dummy inodes.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Locate the on-disk inode.  If this is a PFS root we always
	 * access the current version of the root inode and (if it is not
	 * a master) always access information under it with a snapshot
	 * TID.
	 *
	 * We cache recent inode lookups in this directory in dip->cache[2].
	 * If we can't find it we assume the inode we are looking for is
	 * close to the directory inode.
	 */
retry:
	cachep = NULL;
	if (dip) {
		if (dip->cache[2].node)
			cachep = &dip->cache[2];
		else
			cachep = &dip->cache[0];
	}
	hammer_init_cursor(trans, &cursor, cachep, NULL);
	cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;

	cursor.asof = iinfo.obj_asof;
	cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
		       HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	if (*errorp == 0) {
		ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
		ip->ino_data = cursor.data->inode;

		/*
		 * cache[0] tries to cache the location of the object inode.
		 * The assumption is that it is near the directory inode.
		 *
		 * cache[1] tries to cache the location of the object data.
		 * We might have something in the governing directory from
		 * scan optimizations (see the strategy code in
		 * hammer_vnops.c).
		 *
		 * We update dip->cache[2], if possible, with the location
		 * of the object inode for future directory shortcuts.
		 */
		hammer_cache_node(&ip->cache[0], cursor.node);
		if (dip) {
			if (dip->cache[3].node) {
				hammer_cache_node(&ip->cache[1],
						  dip->cache[3].node);
			}
			hammer_cache_node(&dip->cache[2], cursor.node);
		}

		/*
		 * The file should not contain any data past the file size
		 * stored in the inode.  Setting save_trunc_off to the
		 * file size instead of max reduces B-Tree lookup overheads
		 * on append by allowing the flusher to avoid checking for
		 * record overwrites.
		 */
		ip->save_trunc_off = ip->ino_data.size;

		/*
		 * Locate and assign the pseudofs management structure to
		 * the inode.
		 */
		if (dip && dip->obj_localization == ip->obj_localization) {
			ip->pfsm = dip->pfsm;
			hammer_ref(&ip->pfsm->lock);
		} else {
			ip->pfsm = hammer_load_pseudofs(trans,
							ip->obj_localization,
							errorp);
			*errorp = 0;	/* ignore ENOENT */
		}
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}

		hammer_free_inode(ip);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);
	trans->flags |= HAMMER_TRANSF_NEWINODE;
	return(ip);
}
/*
 * Get a dummy inode to placemark a broken directory entry.
 */
struct hammer_inode *
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a non-fake inode we return an error.  Only fake
	 * inodes can be returned by this routine.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	*errorp = 0;
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Populate the dummy inode.  Leave everything zero'd out.
	 *
	 * (ip->ino_leaf and ip->ino_data)
	 *
	 * Make the dummy inode a FIFO object which most copy programs
	 * will properly ignore.
	 */
	ip->save_trunc_off = ip->ino_data.size;
	ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

	/*
	 * Locate and assign the pseudofs management structure to
	 * the inode.
	 */
	if (dip && dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
	} else {
		ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
						errorp);
		*errorp = 0;	/* ignore ENOENT */
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 *
	 * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			goto loop;
		}
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}
		hammer_free_inode(ip);
		ip = NULL;
	}
	trans->flags |= HAMMER_TRANSF_NEWINODE;
	return(ip);
}
/*
 * Return a referenced inode only if it is in our inode cache.
 *
 * Dummy inodes do not count.
 */
struct hammer_inode *
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
		  hammer_tid_t asof, u_int32_t localization)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_inode *ip;

	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;

	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY)
			ip = NULL;
		else
			hammer_ref(&ip->lock);
	}
	return(ip);
}
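
/*
 * Editor's note (illustrative, not part of the original source): because
 * obj_asof participates in the RB-tree key, the same obj_id can be cached
 * more than once, one inode per snapshot TID being accessed.  A hedged
 * sketch, where some_old_tid stands in for a hypothetical historical TID:
 */
#if 0
	ip_cur  = hammer_find_inode(trans, obj_id, HAMMER_MAX_TID,
				    HAMMER_DEF_LOCALIZATION);
	ip_hist = hammer_find_inode(trans, obj_id, some_old_tid,
				    HAMMER_DEF_LOCALIZATION);
	/* if both are cached they are distinct hammer_inode structures */
#endif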
/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred,
		    hammer_inode_t dip, const char *name, int namelen,
		    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;
	uid_t xuid;
	int error;
	int64_t namekey;
	u_int32_t dummy;

	hmp = trans->hmp;

	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	trans->flags |= HAMMER_TRANSF_NEWINODE;

	if (pfsm) {
		KKASSERT(pfsm->localization != 0);
		ip->obj_id = HAMMER_OBJID_ROOT;
		ip->obj_localization = pfsm->localization;
	} else {
		KKASSERT(dip != NULL);
		namekey = hammer_directory_namekey(dip, name, namelen, &dummy);
		ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
		ip->obj_localization = dip->obj_localization;
	}

	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flush_state = HAMMER_FST_IDLE;
	ip->flags = HAMMER_INODE_DDIRTY |
		    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;

	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	/* ip->save_trunc_off = 0; (already zero) */
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);

	ip->ino_data.atime = trans->time;
	ip->ino_data.mtime = trans->time;
	ip->ino_data.size = 0;
	ip->ino_data.nlinks = 0;

	/*
	 * A nohistory designator on the parent directory is inherited by
	 * the child.  We will do this even for pseudo-fs creation... the
	 * sysad can turn it off.
	 */
	if (dip) {
		ip->ino_data.uflags = dip->ino_data.uflags &
				      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
	}

	ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_leaf.base.localization = ip->obj_localization +
					 HAMMER_LOCALIZE_INODE;
	ip->ino_leaf.base.obj_id = ip->obj_id;
	ip->ino_leaf.base.key = 0;
	ip->ino_leaf.base.create_tid = 0;
	ip->ino_leaf.base.delete_tid = 0;
	ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->time;

	/*
	 * If we are running version 2 or greater directory entries are
	 * inode-localized instead of data-localized.
	 */
	if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
		if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
			ip->ino_data.cap_flags |=
				HAMMER_INODE_CAP_DIR_LOCAL_INO;
		}
	}

	/*
	 * Setup the ".." pointer.  This only needs to be done for directories
	 * but we do it for all objects as a recovery aid.
	 */
	if (dip)
		ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;

	/*
	 * The parent_obj_localization field only applies to pseudo-fs roots.
	 * XXX this is no longer applicable, PFSs are no longer directly
	 * tied into the parent's directory structure.
	 */
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
	    ip->obj_id == HAMMER_OBJID_ROOT) {
		ip->ino_data.ext.obj.parent_obj_localization =
						dip->obj_localization;
	}

	switch(ip->ino_leaf.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	if (dip) {
		xuid = hammer_to_unix_xid(&dip->ino_data.uid);
		xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
					     xuid, cred, &vap->va_mode);
	} else {
		xuid = 0;
	}
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
	else
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
	else if (dip)
		ip->ino_data.gid = dip->ino_data.gid;

	hammer_ref(&ip->lock);

	if (pfsm) {
		ip->pfsm = pfsm;
		hammer_ref(&pfsm->lock);
		error = 0;
	} else if (dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
		error = 0;
	} else {
		ip->pfsm = hammer_load_pseudofs(trans,
						ip->obj_localization,
						&error);
		error = 0;	/* ignore ENOENT */
	}

	if (error) {
		hammer_free_inode(ip);
		ip = NULL;
	} else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		panic("hammer_create_inode: duplicate obj_id %llx",
		      (long long)ip->obj_id);
		/* not reached */
		hammer_free_inode(ip);
	}
	*ipp = ip;
	return(error);
}
/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
	struct hammer_mount *hmp;

	hmp = ip->hmp;
	KKASSERT(ip->lock.refs == 1);
	hammer_uncache_node(&ip->cache[0]);
	hammer_uncache_node(&ip->cache[1]);
	hammer_uncache_node(&ip->cache[2]);
	hammer_uncache_node(&ip->cache[3]);
	hammer_inode_wakereclaims(ip);
	if (ip->objid_cache)
		hammer_clear_objid(ip);
	--hammer_count_inodes;
	--hmp->count_inodes;
	if (ip->pfsm) {
		hammer_rel_pseudofs(hmp, ip->pfsm);
		ip->pfsm = NULL;
	}
	kfree(ip, hmp->m_inodes);
	ip = NULL;
}
/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
		     u_int32_t localization, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_inode_t ip;
	hammer_pseudofs_inmem_t pfsm;
	struct hammer_cursor cursor;
	int bytes;

retry:
	pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
	if (pfsm) {
		hammer_ref(&pfsm->lock);
		*errorp = 0;
		return(pfsm);
	}

	/*
	 * PFS records are stored in the root inode (not the PFS root inode,
	 * but the real root).  Avoid an infinite recursion if loading
	 * the PFS for the real root.
	 */
	if (localization) {
		ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
				      HAMMER_MAX_TID,
				      HAMMER_DEF_LOCALIZATION, 0, errorp);
	} else {
		ip = NULL;
	}

	pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
	pfsm->localization = localization;
	pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
	pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

	hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
	cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	if (ip)
		*errorp = hammer_ip_lookup(&cursor);
	else
		*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == 0) {
		*errorp = hammer_ip_resolve_data(&cursor);
		if (*errorp == 0) {
			if (cursor.data->pfsd.mirror_flags &
			    HAMMER_PFSD_DELETED) {
				*errorp = ENOENT;
			} else {
				bytes = cursor.leaf->data_len;
				if (bytes > sizeof(pfsm->pfsd))
					bytes = sizeof(pfsm->pfsd);
				bcopy(cursor.data, &pfsm->pfsd, bytes);
			}
		}
	}
	hammer_done_cursor(&cursor);

	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_ref(&pfsm->lock);
	if (ip)
		hammer_rel_inode(ip, 0);
	if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
		kfree(pfsm, hmp->m_misc);
		goto retry;
	}
	return(pfsm);
}
/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
	struct hammer_cursor cursor;
	hammer_record_t record;
	hammer_inode_t ip;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = pfsm->localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	/*
	 * Replace any in-memory version of the record.
	 */
	error = hammer_ip_lookup(&cursor);
	if (error == 0 && hammer_cursor_inmem(&cursor)) {
		record = cursor.iprec;
		if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
			KKASSERT(cursor.deadlk_rec == NULL);
			hammer_ref(&record->lock);
			cursor.deadlk_rec = record;
			error = EDEADLK;
		} else {
			record->flags |= HAMMER_RECF_DELETED_FE;
			error = 0;
		}
	}

	/*
	 * Allocate replacement general record.  The backend flush will
	 * delete any on-disk version of the record.
	 */
	if (error == 0 || error == ENOENT) {
		record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
		record->type = HAMMER_MEM_RECORD_GENERAL;

		record->leaf.base.localization = ip->obj_localization +
						 HAMMER_LOCALIZE_MISC;
		record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
		record->leaf.base.key = pfsm->localization;
		record->leaf.data_len = sizeof(pfsm->pfsd);
		bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
		error = hammer_ip_add_record(trans, record);
	}
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	hammer_rel_inode(ip, 0);
	return(error);
}
/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
		       hammer_pseudofs_inmem_t pfsm)
{
	hammer_inode_t ip;
	struct vattr vap;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      pfsm->localization, 0, &error);
	if (ip == NULL) {
		vattr_null(&vap);
		vap.va_mode = 0755;
		vap.va_type = VDIR;
		error = hammer_create_inode(trans, &vap, cred,
					    NULL, NULL, 0,
					    pfsm, &ip);
		if (error == 0) {
			++ip->ino_data.nlinks;
			hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (ip)
		hammer_rel_inode(ip, 0);
	return(error);
}
/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
	int res;

	hammer_ref(&ip->lock);
	if (ip->lock.refs == 2 && ip->vp)
		vclean_unlocked(ip->vp);
	if (ip->lock.refs == 1 && ip->vp == NULL)
		res = 0;
	else
		res = -1;	/* stop, someone is using the inode */
	hammer_rel_inode(ip, 0);
	return(res);
}
int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
	int res;
	int try;

	for (try = res = 0; try < 4; ++try) {
		res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
					   hammer_inode_pfs_cmp,
					   hammer_unload_pseudofs_callback,
					   &localization);
		if (res == 0 && try > 1)
			break;
		hammer_flusher_sync(trans->hmp);
	}
	if (res != 0)
		res = ENOTEMPTY;
	return(res);
}
/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
	hammer_unref(&pfsm->lock);
	if (pfsm->lock.refs == 0) {
		RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
		kfree(pfsm, hmp->m_misc);
	}
}
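
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * hammer_load_pseudofs() returns a referenced pfsm even on error (a default
 * template with *errorp set), so callers always pair it with
 * hammer_rel_pseudofs(), just as hammer_free_inode() does for ip->pfsm.
 * trans, hmp and localization are assumed to be in scope.
 */
#if 0
	hammer_pseudofs_inmem_t pfsm;
	int error;

	pfsm = hammer_load_pseudofs(trans, localization, &error);
	/* ... consult pfsm->pfsd ... */
	hammer_rel_pseudofs(hmp, pfsm);	/* drops the ref, frees on last */
#endif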
/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_record_t record;
	int error;
	int redirty;

retry:
	error = 0;

	/*
	 * If the inode has a presence on-disk then locate it and mark
	 * it deleted, setting DELONDISK.
	 *
	 * The record may or may not be physically deleted, depending on
	 * the retention policy.
	 */
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_normalize_cursor(cursor);
		cursor->key_beg.localization = ip->obj_localization +
					       HAMMER_LOCALIZE_INODE;
		cursor->key_beg.obj_id = ip->obj_id;
		cursor->key_beg.key = 0;
		cursor->key_beg.create_tid = 0;
		cursor->key_beg.delete_tid = 0;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor->key_beg.obj_type = 0;
		cursor->asof = ip->obj_asof;
		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
		cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
		cursor->flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(cursor);
		if (hammer_debug_inode)
			kprintf("IPDEL %p %08x %d", ip, ip->flags, error);

		if (error == 0) {
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
			if (hammer_debug_inode)
				kprintf(" error %d\n", error);
			if (error == 0) {
				ip->flags |= HAMMER_INODE_DELONDISK;
			}
			if (cursor->node)
				hammer_cache_node(&ip->cache[0], cursor->node);
		}
		if (error == EDEADLK) {
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("IPDED %p %d\n", ip, error);
			if (error == 0)
				goto retry;
		}
	}

	/*
	 * Ok, write out the initial record or a new record (after deleting
	 * the old one), unless the DELETED flag is set.  This routine will
	 * clear DELONDISK if it writes out a record.
	 *
	 * Update our inode statistics if this is the first application of
	 * the inode on-disk.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		/*
		 * Generate a record and write it to the media.  We clean-up
		 * the state before releasing so we do not have to set-up
		 * a flush_group.
		 */
		record = hammer_alloc_mem_record(ip, 0);
		record->type = HAMMER_MEM_RECORD_INODE;
		record->flush_state = HAMMER_FST_FLUSH;
		record->leaf = ip->sync_ino_leaf;
		record->leaf.base.create_tid = trans->tid;
		record->leaf.data_len = sizeof(ip->sync_ino_data);
		record->leaf.create_ts = trans->time32;
		record->data = (void *)&ip->sync_ino_data;
		record->flags |= HAMMER_RECF_INTERLOCK_BE;

		/*
		 * If this flag is set we cannot sync the new file size
		 * because we haven't finished related truncations.  The
		 * inode will be flushed in another flush group to finish
		 * the job.
		 */
		if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
		    ip->sync_ino_data.size != ip->ino_data.size) {
			redirty = 1;
			ip->sync_ino_data.size = ip->ino_data.size;
		} else {
			redirty = 0;
		}

		for (;;) {
			error = hammer_ip_sync_record_cursor(cursor, record);
			if (hammer_debug_inode)
				kprintf("GENREC %p rec %08x %d\n",
					ip, record->flags, error);
			if (error != EDEADLK)
				break;
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("GENREC reinit %d\n", error);
			if (error)
				break;
		}

		/*
		 * Note:  The record was never on the inode's record tree
		 * so just wave our hands importantly and destroy it.
		 */
		record->flags |= HAMMER_RECF_COMMITTED;
		record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
		record->flush_state = HAMMER_FST_IDLE;
		++ip->rec_generation;
		hammer_rel_mem_record(record);

		/*
		 * Finish up.
		 */
		if (error == 0) {
			if (hammer_debug_inode)
				kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
			ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
					    HAMMER_INODE_ATIME |
					    HAMMER_INODE_MTIME);
			ip->flags &= ~HAMMER_INODE_DELONDISK;
			if (redirty)
				ip->sync_flags |= HAMMER_INODE_DDIRTY;

			/*
			 * Root volume count of inodes
			 */
			hammer_sync_lock_sh(trans);
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
				ip->flags |= HAMMER_INODE_ONDISK;
				if (hammer_debug_inode)
					kprintf("NOWONDISK %p\n", ip);
			}
			hammer_sync_unlock(trans);
		}
	}

	/*
	 * If the inode has been destroyed, clean out any left-over flags
	 * that may have been set by the frontend.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
				    HAMMER_INODE_ATIME |
				    HAMMER_INODE_MTIME);
	}
	return(error);
}
/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	int error;

retry:
	error = 0;
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
	    HAMMER_INODE_ONDISK) {
		return(0);
	}

	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_INODE;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.key = 0;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor->key_beg.obj_type = 0;
	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_GET_LEAF;
	cursor->flags |= HAMMER_CURSOR_GET_DATA;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(cursor);
	if (error == 0) {
		hammer_cache_node(&ip->cache[0], cursor->node);
		if (ip->sync_flags & HAMMER_INODE_MTIME) {
			/*
			 * Updating MTIME requires an UNDO.  Just cover
			 * both atime and mtime.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
				     HAMMER_ITIMES_BASE(&cursor->data->inode),
				     HAMMER_ITIMES_BYTES);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			cursor->data->inode.mtime = ip->sync_ino_data.mtime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		} else if (ip->sync_flags & HAMMER_INODE_ATIME) {
			/*
			 * Updating atime only can be done in-place with
			 * no UNDO.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
					     NULL, 0);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		}
		ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor,
					   &ip->cache[0], ip);
		if (error == 0)
			goto retry;
	}
	return(error);
}
/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
	/*hammer_mount_t hmp = ip->hmp;*/

	/*
	 * Handle disposition when dropping the last ref.
	 */
	for (;;) {
		if (ip->lock.refs == 1) {
			/*
			 * Determine whether on-disk action is needed for
			 * the inode's final disposition.
			 */
			KKASSERT(ip->vp == NULL);
			hammer_inode_unloadable_check(ip, 0);
			if (ip->flags & HAMMER_INODE_MODMASK) {
				hammer_flush_inode(ip, 0);
			} else if (ip->lock.refs == 1) {
				hammer_unload_inode(ip);
				break;
			}
		} else {
			if (flush)
				hammer_flush_inode(ip, 0);

			/*
			 * The inode still has multiple refs, try to drop
			 * one ref.
			 */
			KKASSERT(ip->lock.refs >= 1);
			if (ip->lock.refs > 1) {
				hammer_unref(&ip->lock);
				break;
			}
		}
	}
}
/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * The inode must be completely clean.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
	hammer_mount_t hmp = ip->hmp;

	KASSERT(ip->lock.refs == 1,
		("hammer_unload_inode: %d refs\n", ip->lock.refs));
	KKASSERT(ip->vp == NULL);
	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
	KKASSERT(ip->cursor_ip_refs == 0);
	KKASSERT(hammer_notlocked(&ip->lock));
	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

	KKASSERT(RB_EMPTY(&ip->rec_tree));
	KKASSERT(TAILQ_EMPTY(&ip->target_list));

	RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

	hammer_free_inode(ip);
	return(0);
}
/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
{
	hammer_record_t rec;

	/*
	 * Get rid of the inodes in-memory records, regardless of their
	 * state, and clear the mod-mask.
	 */
	while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
		TAILQ_REMOVE(&ip->target_list, rec, target_entry);
		rec->target_ip = NULL;
		if (rec->flush_state == HAMMER_FST_SETUP)
			rec->flush_state = HAMMER_FST_IDLE;
	}
	while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
		if (rec->flush_state == HAMMER_FST_FLUSH)
			--rec->flush_group->refs;
		else
			hammer_ref(&rec->lock);
		KKASSERT(rec->lock.refs == 1);
		rec->flush_state = HAMMER_FST_IDLE;
		rec->flush_group = NULL;
		rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
		rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
		++ip->rec_generation;
		hammer_rel_mem_record(rec);
	}
	ip->flags &= ~HAMMER_INODE_MODMASK;
	ip->sync_flags &= ~HAMMER_INODE_MODMASK;
	KKASSERT(ip->vp == NULL);

	/*
	 * Remove the inode from any flush group, force it idle.  FLUSH
	 * and SETUP states have an inode ref.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_FLUSH:
		RB_REMOVE(hammer_fls_rb_tree,
			  &ip->flush_group->flush_tree, ip);
		--ip->flush_group->refs;
		ip->flush_group = NULL;
		/* fall through */
	case HAMMER_FST_SETUP:
		hammer_unref(&ip->lock);
		ip->flush_state = HAMMER_FST_IDLE;
		/* fall through */
	case HAMMER_FST_IDLE:
		break;
	}

	/*
	 * There shouldn't be any associated vnode.  The unload needs at
	 * least one ref, if we do have a vp steal its ip ref.
	 */
	if (ip->vp) {
		kprintf("hammer_destroy_inode_callback: Unexpected "
			"vnode association ip %p vp %p\n", ip, ip->vp);
		ip->vp->v_data = NULL;
		ip->vp = NULL;
	} else {
		hammer_ref(&ip->lock);
	}
	hammer_unload_inode(ip);
	return(0);
}
/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
	hammer_mount_t hmp = ip->hmp;

	if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
		ip->flags |= HAMMER_INODE_RO;
	else
		ip->flags &= ~HAMMER_INODE_RO;
	return(0);
}
/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
	/*
	 * ronly of 0 or 2 does not trigger assertion.
	 * 2 is a special error state
	 */
	KKASSERT(ip->hmp->ronly != 1 ||
		  (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
			    HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
			    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
	if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
		ip->flags |= HAMMER_INODE_RSV_INODES;
		++ip->hmp->rsv_inodes;
	}

	ip->flags |= flags;
}
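
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * a typical frontend sequence dirties the in-memory inode, marks it
 * modified, and optionally queues it to the flusher --
 * hammer_mkroot_pseudofs() earlier in this file does exactly this when
 * bumping nlinks.
 */
#if 0
	++ip->ino_data.nlinks;
	hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
	hammer_flush_inode(ip, 0);	/* optional, see below */
#endif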
/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * Upon return if the inode could not be flushed due to a setup
 * dependency, then it will be automatically flushed when the dependency
 * is satisfied.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
	hammer_mount_t hmp;
	hammer_flush_group_t flg;
	int good;

	/*
	 * next_flush_group is the first flush group we can place the inode
	 * in.  It may be NULL.  If it becomes full we append a new flush
	 * group and make that the next_flush_group.
	 */
	hmp = ip->hmp;
	while ((flg = hmp->next_flush_group) != NULL) {
		KKASSERT(flg->running == 0);
		if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit)
			break;
		hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
		hammer_flusher_async(ip->hmp, flg);
	}
	if (flg == NULL) {
		flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
		hmp->next_flush_group = flg;
		RB_INIT(&flg->flush_tree);
		TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
	}

	/*
	 * Trivial 'nothing to flush' case.  If the inode is in a SETUP
	 * state we have to put it back into an IDLE state so we can
	 * drop the extra ref.
	 *
	 * If we have a parent dependency we must still fall through
	 * so we can run it.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
		if (ip->flush_state == HAMMER_FST_SETUP &&
		    TAILQ_EMPTY(&ip->target_list)) {
			ip->flush_state = HAMMER_FST_IDLE;
			hammer_rel_inode(ip, 0);
		}
		if (ip->flush_state == HAMMER_FST_IDLE)
			return;
	}

	/*
	 * Our flush action will depend on the current state.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * We have no dependencies and can flush immediately.  Some
		 * of our children may not be flushable so we have to re-test
		 * with that additional knowledge.
		 */
		hammer_flush_inode_core(ip, flg, flags);
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Recurse upwards through dependencies via target_list
		 * and start their flusher actions going if possible.
		 *
		 * 'good' is our connectivity.  -1 means we have none and
		 * can't flush, 0 means there weren't any dependencies, and
		 * 1 means we have good connectivity.
		 */
		good = hammer_setup_parent_inodes(ip, 0, flg);

		if (good >= 0) {
			/*
			 * We can continue if good >= 0.  Determine how
			 * many records under our inode can be flushed (and
			 * mark them).
			 */
			hammer_flush_inode_core(ip, flg, flags);
		} else {
			/*
			 * Parent has no connectivity, tell it to flush
			 * us as soon as it does.
			 *
			 * The REFLUSH flag is also needed to trigger
			 * dependency wakeups.
			 */
			ip->flags |= HAMMER_INODE_CONN_DOWN |
				     HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * We are already flushing, flag the inode to reflush
		 * if needed after it completes its current flush.
		 *
		 * The REFLUSH flag is also needed to trigger
		 * dependency wakeups.
		 */
		if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
			ip->flags |= HAMMER_INODE_REFLUSH;
		if (flags & HAMMER_FLUSH_SIGNAL) {
			ip->flags |= HAMMER_INODE_RESIGNAL;
			hammer_flusher_async(ip->hmp, flg);
		}
		break;
	}
}
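
/*
 * Editor's summary (derived from the code above and from
 * hammer_flush_inode_core(), for orientation; not part of the original
 * source).  The flush state machine, roughly:
 *
 *	FST_IDLE  --(modified, core placement)------> FST_FLUSH
 *	FST_SETUP --(parent connectivity good >= 0)--> FST_FLUSH
 *	FST_SETUP --(no connectivity, good < 0)------> stays SETUP,
 *			flagged CONN_DOWN|REFLUSH until the parent flushes
 *	FST_FLUSH --(flush requested again)----------> flagged REFLUSH,
 *			re-queued after the current flush completes
 *	FST_FLUSH --(go_count == 0, nothing to do)---> back to FST_SETUP
 */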
/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 *     so for now do not ref/deref the structures.  Note that if we use the
 *     ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
			   hammer_flush_group_t flg)
{
	hammer_record_t depend;
	int good;
	int r;

	/*
	 * If we hit our recursion limit and we have parent dependencies
	 * we cannot continue.  Returning < 0 will cause us to be flagged
	 * for reflush.  Returning -2 cuts off additional dependency checks
	 * because they are likely to also hit the depth limit.
	 *
	 * We cannot return < 0 if there are no dependencies or there might
	 * not be anything to wakeup (ip).
	 */
	if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
		kprintf("HAMMER Warning: depth limit reached on "
			"setup recursion, inode %p %016llx\n",
			ip, (long long)ip->obj_id);
		return(-2);
	}

	/*
	 * Scan dependencies
	 */
	good = 0;
	TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
		r = hammer_setup_parent_inodes_helper(depend, depth, flg);
		KKASSERT(depend->target_ip == ip);
		if (r < 0 && good == 0)
			good = -1;
		if (r > 0)
			good = 1;

		/*
		 * If we failed due to the recursion depth limit then stop
		 * now.
		 */
		if (r == -2)
			break;
	}
	return(good);
}
1730 * If we failed due to the recursion depth limit then stop
1740 * This helper function takes a record representing the dependancy between
1741 * the parent inode and child inode.
1743 * record->ip = parent inode
1744 * record->target_ip = child inode
1746 * We are asked to recurse upwards and convert the record from SETUP
1747 * to FLUSH if possible.
1749 * Return 1 if the record gives us connectivity
1751 * Return 0 if the record is not relevant
1753 * Return -1 if we can't resolve the dependancy and there is no connectivity.
1756 hammer_setup_parent_inodes_helper(hammer_record_t record
, int depth
,
1757 hammer_flush_group_t flg
)
1763 KKASSERT(record
->flush_state
!= HAMMER_FST_IDLE
);
1768 * If the record is already flushing, is it in our flush group?
1770 * If it is in our flush group but it is a general record or a
1771 * delete-on-disk, it does not improve our connectivity (return 0),
1772 * and if the target inode is not trying to destroy itself we can't
1773 * allow the operation yet anyway (the second return -1).
1775 if (record
->flush_state
== HAMMER_FST_FLUSH
) {
1777 * If not in our flush group ask the parent to reflush
1778 * us as soon as possible.
1780 if (record
->flush_group
!= flg
) {
1781 pip
->flags
|= HAMMER_INODE_REFLUSH
;
1782 record
->target_ip
->flags
|= HAMMER_INODE_CONN_DOWN
;
1787 * If in our flush group everything is already set up,
1788 * just return whether the record will improve our
1789 * visibility or not.
1791 if (record
->type
== HAMMER_MEM_RECORD_ADD
)
1797 * It must be a setup record. Try to resolve the setup dependancies
1798 * by recursing upwards so we can place ip on the flush list.
1800 * Limit ourselves to 20 levels of recursion to avoid blowing out
1801 * the kernel stack. If we hit the recursion limit we can't flush
1802 * until the parent flushes. The parent will flush independantly
1803 * on its own and ultimately a deep recursion will be resolved.
1805 KKASSERT(record
->flush_state
== HAMMER_FST_SETUP
);
1807 good
= hammer_setup_parent_inodes(pip
, depth
+ 1, flg
);
1810 * If good < 0 the parent has no connectivity and we cannot safely
1811 * flush the directory entry, which also means we can't flush our
1812 * ip. Flag us for downward recursion once the parent's
1813 * connectivity is resolved. Flag the parent for [re]flush or it
1814 * may not check for downward recursions.
1817 pip
->flags
|= HAMMER_INODE_REFLUSH
;
1818 record
->target_ip
->flags
|= HAMMER_INODE_CONN_DOWN
;
1823 * We are go, place the parent inode in a flushing state so we can
1824 * place its record in a flushing state. Note that the parent
1825 * may already be flushing. The record must be in the same flush
1826 * group as the parent.
1828 if (pip
->flush_state
!= HAMMER_FST_FLUSH
)
1829 hammer_flush_inode_core(pip
, flg
, HAMMER_FLUSH_RECURSION
);
1830 KKASSERT(pip
->flush_state
== HAMMER_FST_FLUSH
);
1831 KKASSERT(record
->flush_state
== HAMMER_FST_SETUP
);
1834 if (record
->type
== HAMMER_MEM_RECORD_DEL
&&
1835 (record
->target_ip
->flags
& (HAMMER_INODE_DELETED
|HAMMER_INODE_DELONDISK
)) == 0) {
1837 * Regardless of flushing state we cannot sync this path if the
1838 * record represents a delete-on-disk but the target inode
1839 * is not ready to sync its own deletion.
1841 * XXX need to count effective nlinks to determine whether
1842 * the flush is ok, otherwise removing a hardlink will
1843 * just leave the DEL record to rot.
1845 record
->target_ip
->flags
|= HAMMER_INODE_REFLUSH
;
1849 if (pip
->flush_group
== flg
) {
1851 * Because we have not calculated nlinks yet we can just
1852 * set records to the flush state if the parent is in
1853 * the same flush group as we are.
1855 record
->flush_state
= HAMMER_FST_FLUSH
;
1856 record
->flush_group
= flg
;
1857 ++record
->flush_group
->refs
;
1858 hammer_ref(&record
->lock
);
1861 * A general directory-add contributes to our visibility.
1863 * Otherwise it is probably a directory-delete or
1864 * delete-on-disk record and does not contribute to our
1865 * visbility (but we can still flush it).
1867 if (record
->type
== HAMMER_MEM_RECORD_ADD
)
1872 * If the parent is not in our flush group we cannot
1873 * flush this record yet, there is no visibility.
1874 * We tell the parent to reflush and mark ourselves
1875 * so the parent knows it should flush us too.
1877 pip
->flags
|= HAMMER_INODE_REFLUSH
;
1878 record
->target_ip
->flags
|= HAMMER_INODE_CONN_DOWN
;
/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
{
	int go_count;

	/*
	 * Set flush state and prevent the flusher from cycling into
	 * the next flush group.  Do not place the ip on the list yet.
	 * Inodes not in the idle state get an extra reference.
	 */
	KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
	if (ip->flush_state == HAMMER_FST_IDLE)
		hammer_ref(&ip->lock);
	ip->flush_state = HAMMER_FST_FLUSH;
	ip->flush_group = flg;
	++ip->hmp->flusher.group_lock;
	++ip->hmp->count_iqueued;
	++hammer_count_iqueued;
	++flg->total_count;

	/*
	 * If the flush group reaches the autoflush limit we want to signal
	 * the flusher.  This is particularly important for remove()s.
	 */
	if (flg->total_count == hammer_autoflush)
		flags |= HAMMER_FLUSH_SIGNAL;

	/*
	 * We need to be able to vfsync/truncate from the backend.
	 */
	KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
	if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
		ip->flags |= HAMMER_INODE_VHELD;
		vref(ip->vp);
	}

	/*
	 * Figure out how many in-memory records we can actually flush
	 * (not including inode meta-data, buffers, etc).
	 */
	KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
	if (flags & HAMMER_FLUSH_RECURSION) {
		/*
		 * If this is an upwards recursion we do not want to
		 * recurse down again!
		 */
		go_count = 1;
#if 0
	} else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
		/*
		 * No new records are added if we must complete a flush
		 * from a previous cycle, but we do have to move the records
		 * from the previous cycle to the current one.
		 */
#if 0
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_syncgrp_child_callback, NULL);
#endif
		go_count = 1;
#endif
	} else {
		/*
		 * Normal flush, scan records and bring them into the flush.
		 * Directory adds and deletes are usually skipped (they are
		 * grouped with the related inode rather than with the
		 * directory).
		 *
		 * go_count can be negative, which means the scan aborted
		 * due to the flush group being over-full and we should
		 * flush what we have.
		 */
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_setup_child_callback, NULL);
	}

	/*
	 * This is a more involved test that includes go_count.  If we
	 * can't flush, flag the inode and return.  If go_count is 0 we
	 * are unable to flush any records in our rec_tree and
	 * must ignore the XDIRTY flag.
	 */
	if (go_count == 0) {
		if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
			--ip->hmp->count_iqueued;
			--hammer_count_iqueued;

			--flg->total_count;
			ip->flush_state = HAMMER_FST_SETUP;
			ip->flush_group = NULL;
			if (ip->flags & HAMMER_INODE_VHELD) {
				ip->flags &= ~HAMMER_INODE_VHELD;
				vrele(ip->vp);
			}

			/*
			 * REFLUSH is needed to trigger dependency wakeups
			 * when an inode is in SETUP.
			 */
			ip->flags |= HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
			if (--ip->hmp->flusher.group_lock == 0)
				wakeup(&ip->hmp->flusher.group_lock);
			return;
		}
	}

	/*
	 * Snapshot the state of the inode for the backend flusher.
	 *
	 * We continue to retain save_trunc_off even when all truncations
	 * have been resolved as an optimization to determine if we can
	 * skip the B-Tree lookup for overwrite deletions.
	 *
	 * NOTE: The DELETING flag is a mod flag, but it is also sticky,
	 * and stays in ip->flags.  Once set, it stays set until the
	 * inode is destroyed.
	 */
	if (ip->flags & HAMMER_INODE_TRUNCATED) {
		KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
		ip->sync_trunc_off = ip->trunc_off;
		ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
		ip->flags &= ~HAMMER_INODE_TRUNCATED;
		ip->sync_flags |= HAMMER_INODE_TRUNCATED;

		/*
		 * The save_trunc_off used to cache whether the B-Tree
		 * holds any records past that point is not used until
		 * after the truncation has succeeded, so we can safely
		 * set it now.
		 */
		if (ip->save_trunc_off > ip->sync_trunc_off)
			ip->save_trunc_off = ip->sync_trunc_off;
	}
	ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
			   ~HAMMER_INODE_TRUNCATED);
	ip->sync_ino_leaf = ip->ino_leaf;
	ip->sync_ino_data = ip->ino_data;
	ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
#ifdef DEBUG_TRUNCATE
	if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
		kprintf("truncateS %016llx\n", ip->sync_trunc_off);
#endif

	/*
	 * The flusher list inherits our inode and reference.
	 */
	KKASSERT(flg->running == 0);
	RB_INSERT(hammer_fls_rb_tree, &flg->flush_tree, ip);
	if (--ip->hmp->flusher.group_lock == 0)
		wakeup(&ip->hmp->flusher.group_lock);

	if (flags & HAMMER_FLUSH_SIGNAL) {
		hammer_flusher_async(ip->hmp, flg);
	}
}
/*
 * Callback for scan of ip->rec_tree.  Try to include each record in our
 * flush.  ip->flush_group has been set but the inode has not yet been
 * moved into a flushing state.
 *
 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
 * both inodes.
 *
 * We return 1 for any record placed or found in FST_FLUSH, which prevents
 * the caller from shortcutting the flush.
 */
static int
hammer_setup_child_callback(hammer_record_t rec, void *data)
{
	hammer_flush_group_t flg;
	hammer_inode_t target_ip;
	hammer_inode_t ip;
	int r;

	/*
	 * Records deleted or committed by the backend are ignored.
	 * Note that the flush detects deleted frontend records at
	 * multiple points to deal with races.  This is just the first
	 * line of defense.  The only time HAMMER_RECF_DELETED_FE cannot
	 * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it
	 * messes up link-count calculations.
	 *
	 * NOTE: Don't get confused between record deletion and, say,
	 * directory entry deletion.  The deletion of a directory entry
	 * which is on-media has nothing to do with the record deletion.
	 */
	if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
			  HAMMER_RECF_COMMITTED)) {
		if (rec->flush_state == HAMMER_FST_FLUSH) {
			KKASSERT(rec->flush_group == rec->ip->flush_group);
			r = 1;
		} else {
			r = 0;
		}
		return(r);
	}

	/*
	 * If the record is in an idle state it has no dependencies and
	 * can be flushed.
	 */
	ip = rec->ip;
	flg = ip->flush_group;
	r = 0;

	switch(rec->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * The record has no setup dependency, we can flush it.
		 */
		KKASSERT(rec->target_ip == NULL);
		rec->flush_state = HAMMER_FST_FLUSH;
		rec->flush_group = flg;
		++flg->refs;
		hammer_ref(&rec->lock);
		r = 1;
		break;
	case HAMMER_FST_SETUP:
		/*
		 * The record has a setup dependency.  These are typically
		 * directory entry adds and deletes.  Such entries will be
		 * flushed when their inodes are flushed so we do not
		 * usually have to add them to the flush here.  However,
		 * if the target_ip has set HAMMER_INODE_CONN_DOWN then
		 * it is asking us to flush this record (and it).
		 */
		target_ip = rec->target_ip;
		KKASSERT(target_ip != NULL);
		KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);

		/*
		 * If the target IP is already flushing in our group
		 * we could associate the record, but target_ip has
		 * already synced ino_data to sync_ino_data and we
		 * would also have to adjust nlinks.  Plus there are
		 * ordering issues for adds and deletes.
		 *
		 * Reflush downward if this is an ADD, and upward if
		 * this is a DEL.
		 */
		if (target_ip->flush_state == HAMMER_FST_FLUSH) {
			if (rec->type == HAMMER_MEM_RECORD_ADD)
				ip->flags |= HAMMER_INODE_REFLUSH;
			else
				target_ip->flags |= HAMMER_INODE_REFLUSH;
			break;
		}

		/*
		 * Target IP is not yet flushing.  This can get complex
		 * because we have to be careful about the recursion.
		 *
		 * Directories create an issue for us in that if a flush
		 * of a directory is requested the expectation is to flush
		 * any pending directory entries, but this will cause the
		 * related inodes to recursively flush as well.  We can't
		 * really defer the operation so just get as many as we
		 * can and let the reflush logic pick up the rest.
		 */
		if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
		    (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
			/*
			 * We aren't reclaiming and the target ip was not
			 * previously prevented from flushing due to this
			 * record dependency.  Do not flush this record.
			 */
			/*r = 0;*/
		} else
		if (flg->total_count + flg->refs >
		    ip->hmp->undo_rec_limit) {
			/*
			 * Our flush group is over-full and we risk blowing
			 * out the UNDO FIFO.  Stop the scan, flush what we
			 * have, then reflush the directory.
			 *
			 * The directory may be forced through multiple
			 * flush groups before it can be completely
			 * flushed.
			 */
			ip->flags |= HAMMER_INODE_RESIGNAL |
				     HAMMER_INODE_REFLUSH;
			r = -1;
		} else if (rec->type == HAMMER_MEM_RECORD_ADD) {
			/*
			 * If the target IP is not flushing we can force
			 * it to flush, even if it is unable to write out
			 * any of its own records we have at least one in
			 * hand that we CAN deal with.
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = flg;
			++flg->refs;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip, flg,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		} else {
			/*
			 * General or delete-on-disk record.
			 *
			 * XXX this needs help.  If a delete-on-disk we could
			 * disconnect the target.  If the target has its own
			 * dependencies they really need to be flushed.
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = flg;
			++flg->refs;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip, flg,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * The flush_group should already match.
		 */
		KKASSERT(rec->flush_group == flg);
		r = 1;
		break;
	}
	return(r);
}
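/*
 * A minimal standalone sketch of the scan-callback return protocol used
 * by hammer_setup_child_callback() above: positive returns are
 * accumulated by the caller, a negative return aborts the scan (which
 * is how an over-full flush group stops record collection early).
 * demo_scan() and struct demo_rec are hypothetical stand-ins for
 * RB_SCAN and the record tree; compiled out.
 */
#if 0
struct demo_rec {
	struct demo_rec *next;
};

static int
demo_scan(struct demo_rec *list,
	  int (*callback)(struct demo_rec *rec, void *data), void *data)
{
	struct demo_rec *rec;
	int count = 0;
	int r;

	for (rec = list; rec; rec = rec->next) {
		r = callback(rec, data);
		if (r < 0)
			return(r);	/* abort the scan */
		count += r;		/* records placed in the flush */
	}
	return(count);
}
#endif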
/*
 * This version just moves records already in a flush state to the new
 * flush group and that is it.
 */
static int
hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
{
	hammer_inode_t ip = rec->ip;

	switch(rec->flush_state) {
	case HAMMER_FST_FLUSH:
		KKASSERT(rec->flush_group == ip->flush_group);
		break;
	default:
		break;
	}
	return(0);
}
/*
 * Wait for a previously queued flush to complete.
 *
 * If a critical error occurred we don't try to wait.
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
	hammer_flush_group_t flg;

	flg = NULL;
	if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
		while (ip->flush_state != HAMMER_FST_IDLE &&
		       (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
			if (ip->flush_state == HAMMER_FST_SETUP)
				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
			if (ip->flush_state != HAMMER_FST_IDLE) {
				ip->flags |= HAMMER_INODE_FLUSHW;
				tsleep(&ip->flags, 0, "hmrwin", 0);
			}
		}
	}
}
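/*
 * A minimal userland analog of the FLUSHW handshake above, using a
 * pthread mutex/condvar in place of tsleep()/wakeup() on &ip->flags:
 * the waiter sets a "wake me" flag and sleeps, the completion path
 * clears it and broadcasts.  All demo_* names are hypothetical;
 * compiled out.
 */
#if 0
#include <pthread.h>

struct demo_flush {
	pthread_mutex_t	mtx;
	pthread_cond_t	cond;
	int		flush_state;	/* non-zero while flushing */
	int		wantwake;	/* analog of HAMMER_INODE_FLUSHW */
};

static void
demo_wait_idle(struct demo_flush *df)
{
	pthread_mutex_lock(&df->mtx);
	while (df->flush_state != 0) {
		df->wantwake = 1;
		pthread_cond_wait(&df->cond, &df->mtx);
	}
	pthread_mutex_unlock(&df->mtx);
}

static void
demo_flush_done(struct demo_flush *df)
{
	pthread_mutex_lock(&df->mtx);
	df->flush_state = 0;
	if (df->wantwake) {
		df->wantwake = 0;
		pthread_cond_broadcast(&df->cond);
	}
	pthread_mutex_unlock(&df->mtx);
}
#endif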
/*
 * Called by the backend code when a flush has been completed.
 * The inode has already been removed from the flush list.
 *
 * A pipelined flush can occur, in which case we must re-enter the
 * inode on the list and re-copy its fields.
 */
void
hammer_flush_inode_done(hammer_inode_t ip, int error)
{
	hammer_mount_t hmp;
	int dorel;

	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

	hmp = ip->hmp;

	/*
	 * Auto-reflush if the backend could not completely flush
	 * the inode.  This fixes a case where a deferred buffer flush
	 * could cause fsync to return early.
	 */
	if (ip->sync_flags & HAMMER_INODE_MODMASK)
		ip->flags |= HAMMER_INODE_REFLUSH;

	/*
	 * Merge left-over flags back into the frontend and fix the state.
	 * Incomplete truncations are retained by the backend.
	 */
	ip->error = error;
	ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
	ip->sync_flags &= HAMMER_INODE_TRUNCATED;

	/*
	 * The backend may have adjusted nlinks, so if the adjusted nlinks
	 * does not match the frontend set the frontend's DDIRTY flag again.
	 */
	if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
		ip->flags |= HAMMER_INODE_DDIRTY;

	/*
	 * Fix up the dirty buffer status.
	 */
	if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
		ip->flags |= HAMMER_INODE_BUFS;
	}

	/*
	 * Re-set the XDIRTY flag if some of the inode's in-memory records
	 * could not be flushed.
	 */
	KKASSERT((RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
		 (!RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) != 0));

	/*
	 * Do not lose track of inodes which no longer have vnode
	 * associations, otherwise they may never get flushed again.
	 *
	 * The reflush flag can be set superfluously, causing extra pain
	 * for no reason.  If the inode is no longer modified it no longer
	 * needs to be flushed.
	 */
	if (ip->flags & HAMMER_INODE_MODMASK) {
		if (ip->vp == NULL)
			ip->flags |= HAMMER_INODE_REFLUSH;
	} else {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
	}

	/*
	 * Adjust the flush state.
	 */
	if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
		/*
		 * We were unable to flush out all our records, leave the
		 * inode in a flush state and in the current flush group.
		 * The flush group will be re-run.
		 *
		 * This occurs if the UNDO block gets too full or there is
		 * too much dirty meta-data and allows the flusher to
		 * finalize the UNDO block and then re-flush.
		 */
		ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
		dorel = 0;
	} else {
		/*
		 * Remove from the flush_group
		 */
		RB_REMOVE(hammer_fls_rb_tree,
			  &ip->flush_group->flush_tree, ip);
		ip->flush_group = NULL;

		/*
		 * Clean up the vnode ref and tracking counts.
		 */
		if (ip->flags & HAMMER_INODE_VHELD) {
			ip->flags &= ~HAMMER_INODE_VHELD;
			vrele(ip->vp);
		}
		--hmp->count_iqueued;
		--hammer_count_iqueued;

		/*
		 * And adjust the state.
		 */
		if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
			ip->flush_state = HAMMER_FST_IDLE;
			dorel = 1;
		} else {
			ip->flush_state = HAMMER_FST_SETUP;
			dorel = 0;
		}

		/*
		 * If the frontend is waiting for a flush to complete,
		 * wake it up.
		 */
		if (ip->flags & HAMMER_INODE_FLUSHW) {
			ip->flags &= ~HAMMER_INODE_FLUSHW;
			wakeup(&ip->flags);
		}

		/*
		 * If the frontend made more changes and requested another
		 * flush, then try to get it running.
		 *
		 * Reflushes are aborted when the inode is errored out.
		 */
		if (ip->flags & HAMMER_INODE_REFLUSH) {
			ip->flags &= ~HAMMER_INODE_REFLUSH;
			if (ip->flags & HAMMER_INODE_RESIGNAL) {
				ip->flags &= ~HAMMER_INODE_RESIGNAL;
				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
			} else {
				hammer_flush_inode(ip, 0);
			}
		}
	}

	/*
	 * If we have no parent dependencies we can clear CONN_DOWN
	 */
	if (TAILQ_EMPTY(&ip->target_list))
		ip->flags &= ~HAMMER_INODE_CONN_DOWN;

	/*
	 * If the inode is now clean drop the space reservation.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    (ip->flags & HAMMER_INODE_RSV_INODES)) {
		ip->flags &= ~HAMMER_INODE_RSV_INODES;
		--hmp->rsv_inodes;
	}

	if (dorel)
		hammer_rel_inode(ip, 0);
}
/*
 * Called from hammer_sync_inode() to synchronize in-memory records
 * to the media.
 */
static int
hammer_sync_record_callback(hammer_record_t record, void *data)
{
	hammer_cursor_t cursor = data;
	hammer_transaction_t trans = cursor->trans;
	hammer_mount_t hmp = trans->hmp;
	int error;

	/*
	 * Skip records that do not belong to the current flush.
	 */
	++hammer_stats_record_iterations;
	if (record->flush_state != HAMMER_FST_FLUSH)
		return(0);

#if 1
	if (record->flush_group != record->ip->flush_group) {
		kprintf("sync_record %p ip %p bad flush group %p %p\n",
			record, record->ip, record->flush_group,
			record->ip->flush_group);
		if (hammer_debug_critical)
			Debugger("blah2");
		return(0);
	}
#endif
	KKASSERT(record->flush_group == record->ip->flush_group);

	/*
	 * Interlock the record using the BE flag.  Once BE is set the
	 * frontend cannot change the state of FE.
	 *
	 * NOTE: If FE is set prior to us setting BE we still sync the
	 * record out, but the flush completion code converts it to
	 * a delete-on-disk record instead of destroying it.
	 */
	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
	record->flags |= HAMMER_RECF_INTERLOCK_BE;

	/*
	 * The backend has already disposed of the record.
	 */
	if (record->flags & (HAMMER_RECF_DELETED_BE |
			     HAMMER_RECF_COMMITTED)) {
		error = 0;
		goto done;
	}

	/*
	 * If the whole inode is being deleted all on-disk records will
	 * be deleted very soon, we can't sync any new records to disk
	 * because they will be deleted in the same transaction they were
	 * created in (delete_tid == create_tid), which will assert.
	 *
	 * XXX There may be a case with RECORD_ADD with DELETED_FE set
	 * that we currently panic on.
	 */
	if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
		switch(record->type) {
		case HAMMER_MEM_RECORD_DATA:
			/*
			 * We don't have to do anything, if the record was
			 * committed the space will have been accounted for
			 * anyway.
			 */
			/* fall through */
		case HAMMER_MEM_RECORD_GENERAL:
			/*
			 * Set deleted-by-backend flag.  Do not set the
			 * backend committed flag, because we are throwing
			 * the record away.
			 */
			record->flags |= HAMMER_RECF_DELETED_BE;
			++record->ip->rec_generation;
			error = 0;
			goto done;
		case HAMMER_MEM_RECORD_ADD:
			panic("hammer_sync_record_callback: illegal add "
			      "during inode deletion record %p", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_INODE:
			panic("hammer_sync_record_callback: attempt to "
			      "sync inode record %p?", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_DEL:
			/*
			 * Follow through and issue the on-disk deletion
			 */
			break;
		}
	}

	/*
	 * If DELETED_FE is set special handling is needed for directory
	 * entries.  Dependent pieces related to the directory entry may
	 * have already been synced to disk.  If this occurs we have to
	 * sync the directory entry and then change the in-memory record
	 * from an ADD to a DELETE to cover the fact that it's been
	 * deleted by the frontend.
	 *
	 * A directory delete covering record (MEM_RECORD_DEL) can never
	 * be deleted by the frontend.
	 *
	 * Any other record type (aka DATA) can be deleted by the frontend.
	 * XXX At the moment the flusher must skip it because there may
	 * be another data record in the flush group for the same block,
	 * meaning that some frontend data changes can leak into the backend's
	 * synchronization point.
	 */
	if (record->flags & HAMMER_RECF_DELETED_FE) {
		if (record->type == HAMMER_MEM_RECORD_ADD) {
			/*
			 * Convert a front-end deleted directory-add to
			 * a directory-delete entry later.
			 */
			record->flags |= HAMMER_RECF_CONVERT_DELETE;
		} else {
			/*
			 * Dispose of the record (race case).  Mark as
			 * deleted by backend (and not committed).
			 */
			KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
			record->flags |= HAMMER_RECF_DELETED_BE;
			++record->ip->rec_generation;
			error = 0;
			goto done;
		}
	}

	/*
	 * Assign the create_tid for new records.  Deletions already
	 * have the record's entire key properly set up.
	 */
	if (record->type != HAMMER_MEM_RECORD_DEL) {
		record->leaf.base.create_tid = trans->tid;
		record->leaf.create_ts = trans->time32;
	}
	for (;;) {
		error = hammer_ip_sync_record_cursor(cursor, record);
		if (error != EDEADLK)
			break;
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor,
					   &record->ip->cache[0],
					   record->ip);
		if (error)
			break;
	}
	record->flags &= ~HAMMER_RECF_CONVERT_DELETE;

	if (error)
		error = -error;
done:
	hammer_flush_record_done(record, error);

	/*
	 * Do partial finalization if we have built up too many dirty
	 * buffers.  Otherwise a buffer cache deadlock can occur when
	 * doing things like creating tens of thousands of tiny files.
	 *
	 * We must release our cursor lock to avoid a 3-way deadlock
	 * due to the exclusive sync lock the finalizer must get.
	 *
	 * WARNING: See warnings in hammer_unlock_cursor() function.
	 */
	if (hammer_flusher_meta_limit(hmp)) {
		hammer_unlock_cursor(cursor);
		hammer_flusher_finalize(trans, 0);
		hammer_lock_cursor(cursor);
	}
	return(error);
}
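/*
 * A minimal standalone sketch of the EDEADLK retry loop used above:
 * when a B-Tree operation deadlocks, the cursor is torn down and
 * re-initialized and the operation is retried from scratch.  demo_op()
 * and demo_reset() are hypothetical stand-ins for
 * hammer_ip_sync_record_cursor() and the done/init cursor pair;
 * compiled out.
 */
#if 0
#include <errno.h>

static int
demo_retry_op(int (*op)(void *ctx), int (*reset)(void *ctx), void *ctx)
{
	int error;

	for (;;) {
		error = op(ctx);
		if (error != EDEADLK)
			break;		/* success or hard failure */
		error = reset(ctx);	/* drop and re-acquire resources */
		if (error)
			break;
	}
	return(error);
}
#endif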
/*
 * Backend function called by the flusher to sync an inode to media.
 */
int
hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	hammer_node_t tmp_node;
	hammer_record_t depend;
	hammer_record_t next;
	int error, tmp_error;
	u_int64_t nlinks;

	if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
		return(0);

	error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	if (error)
		return(error);

	/*
	 * Any directory records referencing this inode which are not in
	 * our current flush group must adjust our nlink count for the
	 * purposes of synchronization to disk.
	 *
	 * Records which are in our flush group can be unlinked from our
	 * inode now, potentially allowing the inode to be physically
	 * deleted.
	 *
	 * This cannot block.
	 */
	nlinks = ip->ino_data.nlinks;
	next = TAILQ_FIRST(&ip->target_list);
	while ((depend = next) != NULL) {
		next = TAILQ_NEXT(depend, target_entry);
		if (depend->flush_state == HAMMER_FST_FLUSH &&
		    depend->flush_group == ip->flush_group) {
			/*
			 * If this is an ADD that was deleted by the frontend
			 * the frontend nlinks count will have already been
			 * decremented, but the backend is going to sync its
			 * directory entry and must account for it.  The
			 * record will be converted to a delete-on-disk when
			 * it gets synced.
			 *
			 * If the ADD was not deleted by the frontend we
			 * can remove the dependency from our target_list.
			 */
			if (depend->flags & HAMMER_RECF_DELETED_FE) {
				++nlinks;
			} else {
				TAILQ_REMOVE(&ip->target_list, depend,
					     target_entry);
				depend->target_ip = NULL;
			}
		} else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
			/*
			 * Not part of our flush group and not deleted by
			 * the front-end, adjust the link count synced to
			 * the media (undo what the frontend did when it
			 * queued the record).
			 */
			KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
			switch(depend->type) {
			case HAMMER_MEM_RECORD_ADD:
				--nlinks;
				break;
			case HAMMER_MEM_RECORD_DEL:
				++nlinks;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Set dirty if we had to modify the link count.
	 */
	if (ip->sync_ino_data.nlinks != nlinks) {
		KKASSERT((int64_t)nlinks >= 0);
		ip->sync_ino_data.nlinks = nlinks;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
	}

	/*
	 * If there is a truncation queued destroy any data past the (aligned)
	 * truncation point.  Userland will have dealt with the buffer
	 * containing the truncation point for us.
	 *
	 * We don't flush pending frontend data buffers until after we've
	 * dealt with the truncation.
	 */
	if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
		/*
		 * Interlock trunc_off.  The VOP front-end may continue to
		 * make adjustments to it while we are blocked.
		 */
		off_t trunc_off;
		off_t aligned_trunc_off;
		int blkmask;

		trunc_off = ip->sync_trunc_off;
		blkmask = hammer_blocksize(trunc_off) - 1;
		aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;

		/*
		 * Delete any whole blocks on-media.  The front-end has
		 * already cleaned out any partial block and made it
		 * pending.  The front-end may have updated trunc_off
		 * while we were blocked so we only use sync_trunc_off.
		 *
		 * This operation can blow out the buffer cache, EWOULDBLOCK
		 * means we were unable to complete the deletion.  The
		 * deletion will update sync_trunc_off in that case.
		 */
		error = hammer_ip_delete_range(&cursor, ip,
					       aligned_trunc_off,
					       0x7FFFFFFFFFFFFFFFLL, 2);
		if (error == EWOULDBLOCK) {
			ip->flags |= HAMMER_INODE_WOULDBLOCK;
			error = 0;
			goto defer_buffer_flush;
		}

		/*
		 * Clear the truncation flag on the backend after we have
		 * complete the deletions.  Backend data is now good again
		 * (including new records we are about to sync, below).
		 *
		 * Leave sync_trunc_off intact.  As we write additional
		 * records the backend will update sync_trunc_off.  This
		 * tells the backend whether it can skip the overwrite
		 * test.  This should work properly even when the backend
		 * writes full blocks where the truncation point straddles
		 * the block because the comparison is against the base
		 * offset of the record.
		 */
		ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
		/* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
	} else {
		error = 0;
	}

	/*
	 * Now sync related records.  These will typically be directory
	 * entries, records tracking direct-writes, or delete-on-disk records.
	 */
	tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
			    hammer_sync_record_callback, &cursor);
	if (tmp_error < 0)
		tmp_error = -tmp_error;
	if (tmp_error)
		error = tmp_error;

	hammer_cache_node(&ip->cache[1], cursor.node);

	/*
	 * Re-seek for inode update, assuming our cache hasn't been ripped
	 * out from under us.
	 */
	if (error == 0) {
		tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
		if (tmp_node) {
			hammer_cursor_downgrade(&cursor);
			hammer_lock_sh(&tmp_node->lock);
			if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
				hammer_cursor_seek(&cursor, tmp_node, 0);
			hammer_unlock(&tmp_node->lock);
			hammer_rel_node(tmp_node);
		}
		error = 0;
	}

	/*
	 * If we are deleting the inode the frontend had better not have
	 * any active references on elements making up the inode.
	 *
	 * The call to hammer_ip_delete_clean() cleans up auxiliary records
	 * but not DB or DATA records.  Those must have already been deleted
	 * by the normal truncation mechanic.
	 */
	if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
	    RB_EMPTY(&ip->rec_tree) &&
	    (ip->sync_flags & HAMMER_INODE_DELETING) &&
	    (ip->flags & HAMMER_INODE_DELETED) == 0) {
		int count1 = 0;

		error = hammer_ip_delete_clean(&cursor, ip, &count1);
		if (error == 0) {
			ip->flags |= HAMMER_INODE_DELETED;
			ip->sync_flags &= ~HAMMER_INODE_DELETING;
			ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
			KKASSERT(RB_EMPTY(&ip->rec_tree));

			/*
			 * Set delete_tid in both the frontend and backend
			 * copy of the inode record.  The DELETED flag handles
			 * this, do not set DDIRTY.
			 */
			ip->ino_leaf.base.delete_tid = trans->tid;
			ip->sync_ino_leaf.base.delete_tid = trans->tid;
			ip->ino_leaf.delete_ts = trans->time32;
			ip->sync_ino_leaf.delete_ts = trans->time32;

			/*
			 * Adjust the inode count in the volume header
			 */
			hammer_sync_lock_sh(trans);
			if (ip->flags & HAMMER_INODE_ONDISK) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
			}
			hammer_sync_unlock(trans);
		}
	}

	if (error)
		goto done;
	ip->sync_flags &= ~HAMMER_INODE_BUFS;

defer_buffer_flush:
	/*
	 * Now update the inode's on-disk inode-data and/or on-disk record.
	 * DELETED and ONDISK are managed only in ip->flags.
	 *
	 * In the case of a deferred buffer flush we still update the on-disk
	 * inode to satisfy visibility requirements if there happen to be
	 * directory dependencies.
	 */
	switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
		/*
		 * If deleted and on-disk, don't set any additional flags.
		 * the delete flag takes care of things.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		break;
	case HAMMER_INODE_DELETED:
		/*
		 * Take care of the case where a deleted inode was never
		 * flushed to the disk in the first place.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		while (RB_ROOT(&ip->rec_tree)) {
			hammer_record_t record = RB_ROOT(&ip->rec_tree);
			hammer_ref(&record->lock);
			KKASSERT(record->lock.refs == 1);
			record->flags |= HAMMER_RECF_DELETED_BE;
			++record->ip->rec_generation;
			hammer_rel_mem_record(record);
		}
		break;
	case HAMMER_INODE_ONDISK:
		/*
		 * If already on-disk, do not set any additional flags.
		 */
		break;
	default:
		/*
		 * If not on-disk and not deleted, set DDIRTY to force
		 * an initial record to be written.
		 *
		 * Also set the create_tid in both the frontend and backend
		 * copy of the inode record.
		 */
		ip->ino_leaf.base.create_tid = trans->tid;
		ip->ino_leaf.create_ts = trans->time32;
		ip->sync_ino_leaf.base.create_tid = trans->tid;
		ip->sync_ino_leaf.create_ts = trans->time32;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
		break;
	}

	/*
	 * If DDIRTY is set, write out a new record.  If the inode
	 * is already on-disk the old record is marked as deleted.
	 *
	 * If DELETED is set hammer_update_inode() will delete the existing
	 * record without writing out a new one.
	 *
	 * If *ONLY* the ITIMES flag is set we can update the record in-place.
	 */
	if (ip->flags & HAMMER_INODE_DELETED) {
		error = hammer_update_inode(&cursor, ip);
	} else
	if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
	    (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
		error = hammer_update_itimes(&cursor, ip);
	} else
	if (ip->sync_flags & (HAMMER_INODE_DDIRTY |
			      HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
		error = hammer_update_inode(&cursor, ip);
	}
done:
	if (error) {
		hammer_critical_error(ip->hmp, ip, error,
				      "while syncing inode");
	}
	hammer_done_cursor(&cursor);
	return(error);
}
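/*
 * A worked example of the block round-up used by the truncation path
 * in hammer_sync_inode() above: (off + blkmask) & ~blkmask rounds a
 * truncation offset up to the next block boundary.  With a 16K block
 * (blkmask 0x3FFF): 0x6001 -> 0x8000 and 0x8000 stays 0x8000.
 * Standalone demo code, compiled out.
 */
#if 0
#include <assert.h>

static long long
demo_align_up(long long off, long long blksize)
{
	long long blkmask = blksize - 1;

	return((off + blkmask) & ~blkmask);
}

static void
demo_align_test(void)
{
	assert(demo_align_up(0x6001LL, 0x4000LL) == 0x8000LL);
	assert(demo_align_up(0x8000LL, 0x4000LL) == 0x8000LL);
	assert(demo_align_up(0LL, 0x4000LL) == 0LL);
}
#endif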
/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
	struct vnode *vp;

	/*
	 * Set the DELETING flag when the link count drops to 0 and the
	 * OS no longer has any opens on the inode.
	 *
	 * The backend will clear DELETING (a mod flag) and set DELETED
	 * (a state flag) when it is actually able to perform the
	 * operation.
	 *
	 * Don't reflag the deletion if the flusher is currently syncing
	 * one that was already flagged.  A previously set DELETING flag
	 * may bounce around flags and sync_flags until the operation is
	 * completely done.
	 */
	if (ip->ino_data.nlinks == 0 &&
	    ((ip->flags | ip->sync_flags) &
	     (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
		ip->flags |= HAMMER_INODE_DELETING;
		ip->flags |= HAMMER_INODE_TRUNCATED;
		ip->trunc_off = 0;
		vp = NULL;
		if (getvp) {
			if (hammer_get_vnode(ip, &vp) != 0)
				return;
		}

		if (ip->vp) {
			vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
			vnode_pager_setsize(ip->vp, 0);
		}
		if (getvp) {
			vput(vp);
		}
	}
}
/*
 * After potentially resolving a dependency the inode is tested
 * to determine whether it needs to be reflushed.
 */
void
hammer_test_inode(hammer_inode_t ip)
{
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		hammer_ref(&ip->lock);
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
		hammer_rel_inode(ip, 0);
	}
}
/*
 * Clear the RECLAIM flag on an inode.  This occurs when the inode is
 * reassociated with a vp or just before it gets freed.
 *
 * Pipeline wakeups to threads blocked due to an excessive number of
 * detached inodes.  This typically occurs when atime updates accumulate
 * while scanning a directory tree.
 */
static void
hammer_inode_wakereclaims(hammer_inode_t ip)
{
	struct hammer_reclaim *reclaim;
	hammer_mount_t hmp = ip->hmp;

	if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
		return;

	--hammer_count_reclaiming;
	--hmp->inode_reclaims;
	ip->flags &= ~HAMMER_INODE_RECLAIM;

	while ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
		if (reclaim->count > 0 && --reclaim->count == 0) {
			TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
			wakeup(reclaim);
		}
		if (hmp->inode_reclaims > hammer_limit_reclaim / 2)
			break;
	}
}
/*
 * Setup our reclaim pipeline.  We only let so many detached (and dirty)
 * inodes build up before we start blocking.  This routine is called
 * if a new inode is created or an inode is loaded from media.
 *
 * When we block we don't care *which* inode has finished reclaiming,
 * as long as one does.
 */
void
hammer_inode_waitreclaims(hammer_mount_t hmp)
{
	struct hammer_reclaim reclaim;

	if (hmp->inode_reclaims < hammer_limit_reclaim)
		return;
	reclaim.count = 1;
	TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
	tsleep(&reclaim, 0, "hmrrcm", hz);
	if (reclaim.count > 0)
		TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
}
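/*
 * A minimal userland analog of the reclaim ticket pipeline above: each
 * blocked thread queues a stack-allocated ticket, and the wakeup side
 * marks the ticket done, dequeues it, and broadcasts.  The real code
 * uses tsleep() with a timeout and removes its own ticket if it was
 * never serviced; this sketch omits the timeout.  All demo_* names are
 * hypothetical; compiled out.
 */
#if 0
#include <pthread.h>
#include <sys/queue.h>

struct demo_ticket {
	TAILQ_ENTRY(demo_ticket) entry;
	int count;
};

TAILQ_HEAD(demo_ticket_list, demo_ticket);

static struct demo_ticket_list demo_tickets =
	TAILQ_HEAD_INITIALIZER(demo_tickets);
static pthread_mutex_t demo_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t demo_cond = PTHREAD_COND_INITIALIZER;

static void
demo_throttle_block(void)
{
	struct demo_ticket ticket;

	ticket.count = 1;
	pthread_mutex_lock(&demo_mtx);
	TAILQ_INSERT_TAIL(&demo_tickets, &ticket, entry);
	while (ticket.count > 0)
		pthread_cond_wait(&demo_cond, &demo_mtx);
	pthread_mutex_unlock(&demo_mtx);
}

static void
demo_throttle_wakeone(void)
{
	struct demo_ticket *ticket;

	pthread_mutex_lock(&demo_mtx);
	if ((ticket = TAILQ_FIRST(&demo_tickets)) != NULL) {
		ticket->count = 0;
		TAILQ_REMOVE(&demo_tickets, ticket, entry);
		pthread_cond_broadcast(&demo_cond);
	}
	pthread_mutex_unlock(&demo_mtx);
}
#endif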
/*
 * XXX not used, doesn't work very well due to the large batching nature
 * of flushes.
 *
 * A larger-than-normal backlog of inodes is sitting in the flusher,
 * enforce a general slowdown to let it catch up.  This routine is only
 * called on completion of a non-flusher-related transaction which
 * performed B-Tree node I/O.
 *
 * It is possible for the flusher to stall in a continuous load.
 * blogbench -i1000 -o seems to do a good job generating this sort of load.
 * If the flusher is unable to catch up the inode count can bloat until
 * we run out of kvm.
 *
 * This is a bit of a hack.
 */
void
hammer_inode_waithard(hammer_mount_t hmp)
{
	/*
	 * Hysteresis.
	 */
	if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
		if (hmp->inode_reclaims < hammer_limit_reclaim / 2 &&
		    hmp->count_iqueued < hmp->count_inodes / 20) {
			hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
			return;
		}
	} else {
		if (hmp->inode_reclaims < hammer_limit_reclaim ||
		    hmp->count_iqueued < hmp->count_inodes / 10) {
			return;
		}
		hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY;
	}

	/*
	 * Block for one flush cycle.
	 */
	hammer_flusher_wait_next(hmp);
}
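/*
 * A minimal standalone sketch of the two-threshold hysteresis used by
 * hammer_inode_waithard() above: the slowdown engages at a high
 * watermark and disengages only at a much lower one, so it cannot flap
 * on and off around a single threshold.  All demo_* names are
 * hypothetical; compiled out.
 */
#if 0
static int demo_recovery;	/* analog of HAMMER_MOUNT_FLUSH_RECOVERY */

static int
demo_should_throttle(int backlog, int high_wm)
{
	if (demo_recovery) {
		if (backlog < high_wm / 2) {
			demo_recovery = 0;	/* low watermark: disengage */
			return(0);
		}
		return(1);
	}
	if (backlog < high_wm)
		return(0);
	demo_recovery = 1;			/* high watermark: engage */
	return(1);
}
#endif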