/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.114 2008/09/24 00:53:51 dillon Exp $
 */
#include "hammer.h"
#include <vm/vm_extern.h>
static int	hammer_unload_inode(struct hammer_inode *ip);
static void	hammer_free_inode(hammer_inode_t ip);
static void	hammer_flush_inode_core(hammer_inode_t ip,
					hammer_flush_group_t flg, int flags);
static int	hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int	hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int	hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
					hammer_flush_group_t flg);
static int	hammer_setup_parent_inodes_helper(hammer_record_t record,
					int depth, hammer_flush_group_t flg);
static void	hammer_inode_wakereclaims(hammer_inode_t ip);
#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif
/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_localization < ip2->obj_localization)
		return(-1);
	if (ip1->obj_localization > ip2->obj_localization)
		return(1);
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}
int
hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->redo_fifo_start < ip2->redo_fifo_start)
		return(-1);
	if (ip1->redo_fifo_start > ip2->redo_fifo_start)
		return(1);
	return(0);
}
/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_localization < ip->obj_localization)
		return(-1);
	if (info->obj_localization > ip->obj_localization)
		return(1);
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}
/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t info = data;

	if (ip->obj_localization > info->obj_localization)
		return(1);
	if (ip->obj_localization < info->obj_localization)
		return(-1);
	if (ip->obj_id > info->obj_id)
		return(1);
	if (ip->obj_id < info->obj_id)
		return(-1);
	return(0);
}
/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
	u_int32_t localization = *(u_int32_t *)data;

	if (ip->obj_localization > localization)
		return(1);
	if (ip->obj_localization < localization)
		return(-1);
	return(0);
}
/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
	if (p1->localization < p2->localization)
		return(-1);
	if (p1->localization > p2->localization)
		return(1);
	return(0);
}
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
	     hammer_pfs_rb_compare, u_int32_t, localization);
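/*
 * NOTE: The RB_GENERATE* macros above expand into the tree functions
 * used throughout this file; e.g. the XLOOKUP variant produces
 * hammer_ino_rb_tree_RB_LOOKUP_INFO(), which locates an in-memory
 * inode from an (obj_id, localization, asof) triplet via
 * hammer_inode_info_cmp().  See sys/tree.h for the exact expansion.
 */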
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(ap->a_vp);
		return(0);
	}

	/*
	 * If the inode no longer has visibility in the filesystem try to
	 * recycle it immediately, even if the inode is dirty.  Recycling
	 * it quickly allows the system to reclaim buffer cache and VM
	 * resources which can matter a lot in a heavily loaded system.
	 *
	 * This can deadlock in vfsync() if we aren't careful.
	 *
	 * Do not queue the inode to the flusher if we still have visibility,
	 * otherwise namespace calls such as chmod will unnecessarily generate
	 * multiple inode updates.
	 */
	if (ip->ino_data.nlinks == 0) {
		hammer_inode_unloadable_check(ip, 0);
		if (ip->flags & HAMMER_INODE_MODMASK)
			hammer_flush_inode(ip, 0);
		vrecycle(ap->a_vp);
	}
	return(0);
}
/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 *
 * We must interlock ip->vp so hammer_get_vnode() can avoid races.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer_inode *ip;
	hammer_mount_t hmp;
	struct vnode *vp;

	vp = ap->a_vp;

	if ((ip = vp->v_data) != NULL) {
		hmp = ip->hmp;
		hammer_lock_ex(&ip->lock);
		vp->v_data = NULL;
		ip->vp = NULL;

		if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
			++hammer_count_reclaiming;
			++hmp->inode_reclaims;
			ip->flags |= HAMMER_INODE_RECLAIM;
		}
		hammer_unlock(&ip->lock);
		hammer_rel_inode(ip, 1);
	}
	return(0);
}
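/*
 * NOTE: hammer_vop_inactive() fires when the kernel stops actively
 * using a vnode, while hammer_vop_reclaim() severs the vnode<->inode
 * association entirely.  The hammer_count_reclaiming / inode_reclaims
 * counters bumped above feed the reclaim throttle (see
 * hammer_inode_waitreclaims()), which slows new inode instantiation
 * when too many reclaimed inodes still await their final flush.
 */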
/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
	hammer_mount_t hmp;
	struct vnode *vp;
	int error = 0;
	u_int8_t obj_type;

	hmp = ip->hmp;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				hammer_unlock(&ip->lock);
				vp = *vpp;
				vp->v_type = VBAD;
				vx_put(vp);
				continue;
			}
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;

			obj_type = ip->ino_data.obj_type;
			vp->v_type = hammer_get_vnode_type(obj_type);

			hammer_inode_wakereclaims(ip);

			switch(ip->ino_data.obj_type) {
			case HAMMER_OBJTYPE_CDEV:
			case HAMMER_OBJTYPE_BDEV:
				vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
				addaliasu(vp, ip->ino_data.rmajor,
					  ip->ino_data.rminor);
				break;
			case HAMMER_OBJTYPE_FIFO:
				vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
				break;
			case HAMMER_OBJTYPE_REGFILE:
				break;
			default:
				break;
			}

			/*
			 * Only mark as the root vnode if the ip is not
			 * historical, otherwise the VFS cache will get
			 * confused.  The other half of the special handling
			 * is in hammer_vop_nlookupdotdot().
			 *
			 * Pseudo-filesystem roots can be accessed via
			 * non-root filesystem paths and setting VROOT may
			 * confuse the namecache.  Set VPFSROOT instead.
			 */
			if (ip->obj_id == HAMMER_OBJID_ROOT &&
			    ip->obj_asof == hmp->asof) {
				if (ip->obj_localization == 0)
					vsetflags(vp, VROOT);
				else
					vsetflags(vp, VPFSROOT);
			}

			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			if (vp->v_type == VREG) {
				vinitvmio(vp, ip->ino_data.size,
					  hammer_blocksize(ip->ino_data.size),
					  hammer_blockoff(ip->ino_data.size));
			}
			break;
		}

		/*
		 * Interlock vnode clearing.  This does not prevent the
		 * vnode from going into a reclaimed state but it does
		 * prevent it from being destroyed or reused so the vget()
		 * will properly fail.
		 */
		hammer_lock_ex(&ip->lock);
		if ((vp = ip->vp) == NULL) {
			hammer_unlock(&ip->lock);
			continue;
		}
		vhold_interlocked(vp);
		hammer_unlock(&ip->lock);

		/*
		 * loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp) {
				vdrop(vp);
				break;
			}
			vput(vp);
		}
		vdrop(vp);
	}
	*vpp = vp;
	return(error);
}
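/*
 * NOTE: The vhold/vget sequence above is the usual interlock pattern
 * for attaching to a vnode that may be undergoing reclamation:
 * ip->lock guarantees ip->vp cannot be ripped out while we acquire a
 * hold, and the subsequent vget() is allowed to fail or race, in
 * which case the caller simply retries the loop.
 */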
/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
			    int (*callback)(hammer_inode_t ip, void *data),
			    void *data)
{
	hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
				   hammer_inode_info_cmp_all_history,
				   callback, iinfo);
}
/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_node_cache *cachep;
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a dummy inode we return a failure so dounlink
	 * (which does another lookup) doesn't try to mess with the
	 * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
	 * to ref dummy inodes.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Locate the on-disk inode.  If this is a PFS root we always
	 * access the current version of the root inode and (if it is not
	 * a master) always access information under it with a snapshot
	 * TID.
	 *
	 * We cache recent inode lookups in this directory in dip->cache[2].
	 * If we can't find it we assume the inode we are looking for is
	 * close to the directory inode.
	 */
retry:
	cachep = NULL;
	if (dip) {
		if (dip->cache[2].node)
			cachep = &dip->cache[2];
		else
			cachep = &dip->cache[0];
	}
	hammer_init_cursor(trans, &cursor, cachep, NULL);
	cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;

	cursor.asof = iinfo.obj_asof;
	cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
		       HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	if (*errorp == 0) {
		ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
		ip->ino_data = cursor.data->inode;

		/*
		 * cache[0] tries to cache the location of the object inode.
		 * The assumption is that it is near the directory inode.
		 *
		 * cache[1] tries to cache the location of the object data.
		 * We might have something in the governing directory from
		 * scan optimizations (see the strategy code in
		 * hammer_vnops.c).
		 *
		 * We update dip->cache[2], if possible, with the location
		 * of the object inode for future directory shortcuts.
		 */
		hammer_cache_node(&ip->cache[0], cursor.node);
		if (dip) {
			if (dip->cache[3].node) {
				hammer_cache_node(&ip->cache[1],
						  dip->cache[3].node);
			}
			hammer_cache_node(&dip->cache[2], cursor.node);
		}

		/*
		 * The file should not contain any data past the file size
		 * stored in the inode.  Setting save_trunc_off to the
		 * file size instead of max reduces B-Tree lookup overheads
		 * on append by allowing the flusher to avoid checking for
		 * record overwrites.
		 */
		ip->save_trunc_off = ip->ino_data.size;

		/*
		 * Locate and assign the pseudofs management structure to
		 * the inode.
		 */
		if (dip && dip->obj_localization == ip->obj_localization) {
			ip->pfsm = dip->pfsm;
			hammer_ref(&ip->pfsm->lock);
		} else {
			ip->pfsm = hammer_load_pseudofs(trans,
							ip->obj_localization,
							errorp);
			*errorp = 0;	/* ignore ENOENT */
		}
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}

		hammer_free_inode(ip);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);
	trans->flags |= HAMMER_TRANSF_NEWINODE;
	return(ip);
}
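/*
 * Typical frontend usage of the above (sketch only, not a verbatim
 * caller; see hammer_vnops.c for the real call sites):
 *
 *	ip = hammer_get_inode(&trans, dip, obj_id, hmp->asof,
 *			      localization, 0, &error);
 *	if (ip) {
 *		error = hammer_get_vnode(ip, &vp);
 *		hammer_rel_inode(ip, 0);
 *	}
 *
 * i.e. the inode reference and the vnode association are managed
 * independently.
 */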
/*
 * Get a dummy inode to placemark a broken directory entry.
 */
struct hammer_inode *
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a non-fake inode we return an error.  Only fake
	 * inodes can be returned by this routine.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	*errorp = 0;
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Populate the dummy inode.  Leave everything zero'd out.
	 *
	 * (ip->ino_leaf and ip->ino_data)
	 *
	 * Make the dummy inode a FIFO object which most copy programs
	 * will properly ignore.
	 */
	ip->save_trunc_off = ip->ino_data.size;
	ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

	/*
	 * Locate and assign the pseudofs management structure to
	 * the inode.
	 */
	if (dip && dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
	} else {
		ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
						errorp);
		*errorp = 0;	/* ignore ENOENT */
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 *
	 * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			goto loop;
		}
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}
		hammer_free_inode(ip);
		ip = NULL;
	}
	trans->flags |= HAMMER_TRANSF_NEWINODE;
	return(ip);
}
/*
 * Return a referenced inode only if it is in our inode cache.
 *
 * Dummy inodes do not count.
 */
struct hammer_inode *
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
		  hammer_tid_t asof, u_int32_t localization)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_inode *ip;

	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;

	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY)
			ip = NULL;
		else
			hammer_ref(&ip->lock);
	}
	return(ip);
}
/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred,
		    hammer_inode_t dip, const char *name, int namelen,
		    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;
	uid_t xuid;
	int error;
	int64_t namekey;
	u_int32_t dummy;

	hmp = trans->hmp;

	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	trans->flags |= HAMMER_TRANSF_NEWINODE;

	if (pfsm) {
		KKASSERT(pfsm->localization != 0);
		ip->obj_id = HAMMER_OBJID_ROOT;
		ip->obj_localization = pfsm->localization;
	} else {
		KKASSERT(dip != NULL);
		namekey = hammer_directory_namekey(dip, name, namelen, &dummy);
		ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
		ip->obj_localization = dip->obj_localization;
	}

	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flush_state = HAMMER_FST_IDLE;
	ip->flags = HAMMER_INODE_DDIRTY |
		    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;

	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	/* ip->save_trunc_off = 0; (already zero) */
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);

	ip->ino_data.atime = trans->time;
	ip->ino_data.mtime = trans->time;
	ip->ino_data.size = 0;
	ip->ino_data.nlinks = 0;

	/*
	 * A nohistory designator on the parent directory is inherited by
	 * the child.  We will do this even for pseudo-fs creation... the
	 * sysad can turn it off.
	 */
	if (dip) {
		ip->ino_data.uflags = dip->ino_data.uflags &
				      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
	}

	ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_leaf.base.localization = ip->obj_localization +
					 HAMMER_LOCALIZE_INODE;
	ip->ino_leaf.base.obj_id = ip->obj_id;
	ip->ino_leaf.base.key = 0;
	ip->ino_leaf.base.create_tid = 0;
	ip->ino_leaf.base.delete_tid = 0;
	ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->time;

	/*
	 * If we are running version 2 or greater directory entries are
	 * inode-localized instead of data-localized.
	 */
	if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
		if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
			ip->ino_data.cap_flags |=
				HAMMER_INODE_CAP_DIR_LOCAL_INO;
		}
	}

	/*
	 * Setup the ".." pointer.  This only needs to be done for directories
	 * but we do it for all objects as a recovery aid.
	 */
	if (dip)
		ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;

	/*
	 * The parent_obj_localization field only applies to pseudo-fs roots.
	 * XXX this is no longer applicable, PFSs are no longer directly
	 * tied into the parent's directory structure.
	 */
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
	    ip->obj_id == HAMMER_OBJID_ROOT) {
		ip->ino_data.ext.obj.parent_obj_localization =
			dip->obj_localization;
	}

	switch(ip->ino_leaf.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	if (dip) {
		xuid = hammer_to_unix_xid(&dip->ino_data.uid);
		xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
					     xuid, cred, &vap->va_mode);
	} else {
		xuid = 0;
	}
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
	else
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
	else if (dip)
		ip->ino_data.gid = dip->ino_data.gid;

	hammer_ref(&ip->lock);

	if (pfsm) {
		ip->pfsm = pfsm;
		hammer_ref(&pfsm->lock);
		error = 0;
	} else if (dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
		error = 0;
	} else {
		ip->pfsm = hammer_load_pseudofs(trans,
						ip->obj_localization,
						&error);
		error = 0;	/* ignore ENOENT */
	}

	if (error) {
		hammer_free_inode(ip);
		ip = NULL;
	} else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		panic("hammer_create_inode: duplicate obj_id %llx",
		      (long long)ip->obj_id);
		/* not reached */
		hammer_free_inode(ip);
	}
	*ipp = ip;
	return(error);
}
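/*
 * NOTE: uid/gid selection above follows the usual VOP creation rules:
 * an explicit UUID supplied in the vattr wins, then a numeric uid/gid
 * from the caller, and finally the computed/inherited default (xuid
 * derived from the parent directory, gid inherited from dip).
 */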
/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
	struct hammer_mount *hmp;

	hmp = ip->hmp;
	KKASSERT(ip->lock.refs == 1);
	hammer_uncache_node(&ip->cache[0]);
	hammer_uncache_node(&ip->cache[1]);
	hammer_uncache_node(&ip->cache[2]);
	hammer_uncache_node(&ip->cache[3]);
	hammer_inode_wakereclaims(ip);
	if (ip->objid_cache)
		hammer_clear_objid(ip);
	--hammer_count_inodes;
	--hmp->count_inodes;
	if (ip->pfsm) {
		hammer_rel_pseudofs(hmp, ip->pfsm);
		ip->pfsm = NULL;
	}
	kfree(ip, hmp->m_inodes);
}
/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
		     u_int32_t localization, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_inode_t ip;
	hammer_pseudofs_inmem_t pfsm;
	struct hammer_cursor cursor;
	int bytes;

retry:
	pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
	if (pfsm) {
		hammer_ref(&pfsm->lock);
		*errorp = 0;
		return(pfsm);
	}

	/*
	 * PFS records are stored in the root inode (not the PFS root inode,
	 * but the real root).  Avoid an infinite recursion if loading
	 * the PFS for the real root.
	 */
	if (localization) {
		ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
				      HAMMER_MAX_TID,
				      HAMMER_DEF_LOCALIZATION, 0, errorp);
	} else {
		ip = NULL;
	}

	pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
	pfsm->localization = localization;
	pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
	pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

	hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
	cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	if (ip)
		*errorp = hammer_ip_lookup(&cursor);
	else
		*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == 0) {
		*errorp = hammer_ip_resolve_data(&cursor);
		if (*errorp == 0) {
			if (cursor.data->pfsd.mirror_flags &
			    HAMMER_PFSD_DELETED) {
				*errorp = ENOENT;
			} else {
				bytes = cursor.leaf->data_len;
				if (bytes > sizeof(pfsm->pfsd))
					bytes = sizeof(pfsm->pfsd);
				bcopy(cursor.data, &pfsm->pfsd, bytes);
			}
		}
	}
	hammer_done_cursor(&cursor);

	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_ref(&pfsm->lock);
	if (ip)
		hammer_rel_inode(ip, 0);
	if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
		kfree(pfsm, hmp->m_misc);
		goto retry;
	}
	return(pfsm);
}
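/*
 * NOTE: PFS configuration records all live under the real root inode
 * (HAMMER_DEF_LOCALIZATION), keyed by the PFS's localization, which
 * is why the lookup above always cursors through HAMMER_OBJID_ROOT
 * rather than the PFS's own root inode.  Losing the RB_INSERT race
 * at the end simply frees our copy and retries, letting the winning
 * instantiation stand.
 */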
/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
	struct hammer_cursor cursor;
	hammer_record_t record;
	hammer_inode_t ip;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = pfsm->localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	/*
	 * Replace any in-memory version of the record.
	 */
	error = hammer_ip_lookup(&cursor);
	if (error == 0 && hammer_cursor_inmem(&cursor)) {
		record = cursor.iprec;
		if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
			KKASSERT(cursor.deadlk_rec == NULL);
			hammer_ref(&record->lock);
			cursor.deadlk_rec = record;
			error = EDEADLK;
		} else {
			record->flags |= HAMMER_RECF_DELETED_FE;
			error = 0;
		}
	}

	/*
	 * Allocate replacement general record.  The backend flush will
	 * delete any on-disk version of the record.
	 */
	if (error == 0 || error == ENOENT) {
		record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
		record->type = HAMMER_MEM_RECORD_GENERAL;

		record->leaf.base.localization = ip->obj_localization +
						 HAMMER_LOCALIZE_MISC;
		record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
		record->leaf.base.key = pfsm->localization;
		record->leaf.data_len = sizeof(pfsm->pfsd);
		bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
		error = hammer_ip_add_record(trans, record);
	}
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	hammer_rel_inode(ip, 0);
	return(error);
}
/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
		       hammer_pseudofs_inmem_t pfsm)
{
	hammer_inode_t ip;
	struct vattr vap;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      pfsm->localization, 0, &error);
	if (ip == NULL) {
		vattr_null(&vap);
		vap.va_mode = 0755;
		vap.va_type = VDIR;
		error = hammer_create_inode(trans, &vap, cred,
					    NULL, NULL, 0,
					    pfsm, &ip);
		if (error == 0) {
			++ip->ino_data.nlinks;
			hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (ip)
		hammer_rel_inode(ip, 0);
	return(error);
}
/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
	int res;

	hammer_ref(&ip->lock);
	if (ip->lock.refs == 2 && ip->vp)
		vclean_unlocked(ip->vp);
	if (ip->lock.refs == 1 && ip->vp == NULL)
		res = 0;
	else
		res = -1;	/* stop, someone is using the inode */
	hammer_rel_inode(ip, 0);
	return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
	int res;
	int try;

	for (try = res = 0; try < 4; ++try) {
		res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
						 hammer_inode_pfs_cmp,
						 hammer_unload_pseudofs_callback,
						 &localization);
		if (res == 0 && try > 1)
			break;
		hammer_flusher_sync(trans->hmp);
	}
	if (res != 0)
		res = ENOTEMPTY;
	return(res);
}
/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
	hammer_unref(&pfsm->lock);
	if (pfsm->lock.refs == 0) {
		RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
		kfree(pfsm, hmp->m_misc);
	}
}
/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_record_t record;
	int error;
	int redirty;

retry:
	error = 0;

	/*
	 * If the inode has a presence on-disk then locate it and mark
	 * it deleted, setting DELONDISK.
	 *
	 * The record may or may not be physically deleted, depending on
	 * the retention policy.
	 */
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_normalize_cursor(cursor);
		cursor->key_beg.localization = ip->obj_localization +
					       HAMMER_LOCALIZE_INODE;
		cursor->key_beg.obj_id = ip->obj_id;
		cursor->key_beg.key = 0;
		cursor->key_beg.create_tid = 0;
		cursor->key_beg.delete_tid = 0;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor->key_beg.obj_type = 0;
		cursor->asof = ip->obj_asof;
		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
		cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
		cursor->flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(cursor);
		if (hammer_debug_inode)
			kprintf("IPDEL %p %08x %d", ip, ip->flags, error);

		if (error == 0) {
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
			if (hammer_debug_inode)
				kprintf(" error %d\n", error);
			if (error == 0)
				ip->flags |= HAMMER_INODE_DELONDISK;
			if (cursor->node)
				hammer_cache_node(&ip->cache[0], cursor->node);
		}
		if (error == EDEADLK) {
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("IPDED %p %d\n", ip, error);
			if (error == 0)
				goto retry;
		}
	}

	/*
	 * Ok, write out the initial record or a new record (after deleting
	 * the old one), unless the DELETED flag is set.  This routine will
	 * clear DELONDISK if it writes out a record.
	 *
	 * Update our inode statistics if this is the first application of
	 * the inode on-disk.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		/*
		 * Generate a record and write it to the media.  We clean-up
		 * the state before releasing so we do not have to set-up
		 * a flush_group.
		 */
		record = hammer_alloc_mem_record(ip, 0);
		record->type = HAMMER_MEM_RECORD_INODE;
		record->flush_state = HAMMER_FST_FLUSH;
		record->leaf = ip->sync_ino_leaf;
		record->leaf.base.create_tid = trans->tid;
		record->leaf.data_len = sizeof(ip->sync_ino_data);
		record->leaf.create_ts = trans->time32;
		record->data = (void *)&ip->sync_ino_data;
		record->flags |= HAMMER_RECF_INTERLOCK_BE;

		/*
		 * If this flag is set we cannot sync the new file size
		 * because we haven't finished related truncations.  The
		 * inode will be flushed in another flush group to finish
		 * the job.
		 */
		if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
		    ip->sync_ino_data.size != ip->ino_data.size) {
			redirty = 1;
			ip->sync_ino_data.size = ip->ino_data.size;
		} else {
			redirty = 0;
		}

		for (;;) {
			error = hammer_ip_sync_record_cursor(cursor, record);
			if (hammer_debug_inode)
				kprintf("GENREC %p rec %08x %d\n",
					ip, record->flags, error);
			if (error != EDEADLK)
				break;
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("GENREC reinit %d\n", error);
			if (error)
				break;
		}

		/*
		 * Note:  The record was never on the inode's record tree
		 * so just wave our hands importantly and destroy it.
		 */
		record->flags |= HAMMER_RECF_COMMITTED;
		record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
		record->flush_state = HAMMER_FST_IDLE;
		++ip->rec_generation;
		hammer_rel_mem_record(record);

		/*
		 * Finish up.
		 */
		if (error == 0) {
			if (hammer_debug_inode)
				kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
			ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
					    HAMMER_INODE_SDIRTY |
					    HAMMER_INODE_ATIME |
					    HAMMER_INODE_MTIME);
			ip->flags &= ~HAMMER_INODE_DELONDISK;
			if (redirty)
				ip->sync_flags |= HAMMER_INODE_DDIRTY;

			/*
			 * Root volume count of inodes
			 */
			hammer_sync_lock_sh(trans);
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
				ip->flags |= HAMMER_INODE_ONDISK;
				if (hammer_debug_inode)
					kprintf("NOWONDISK %p\n", ip);
			}
			hammer_sync_unlock(trans);
		}
	}

	/*
	 * If the inode has been destroyed, clean out any left-over flags
	 * that may have been set by the frontend.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
				    HAMMER_INODE_SDIRTY |
				    HAMMER_INODE_ATIME |
				    HAMMER_INODE_MTIME);
	}
	return(error);
}
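/*
 * NOTE: The EDEADLK handling above is the standard backend pattern:
 * a B-Tree deadlock tears the cursor down, re-initializes it
 * (possibly blocking), and retries the entire update from scratch.
 */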
/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	int error;

retry:
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
	    HAMMER_INODE_ONDISK) {
		return(0);
	}

	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_INODE;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.key = 0;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor->key_beg.obj_type = 0;
	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_GET_LEAF;
	cursor->flags |= HAMMER_CURSOR_GET_DATA;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(cursor);
	if (error == 0) {
		hammer_cache_node(&ip->cache[0], cursor->node);
		if (ip->sync_flags & HAMMER_INODE_MTIME) {
			/*
			 * Updating MTIME requires an UNDO.  Just cover
			 * both atime and mtime.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
				     HAMMER_ITIMES_BASE(&cursor->data->inode),
				     HAMMER_ITIMES_BYTES);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			cursor->data->inode.mtime = ip->sync_ino_data.mtime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		} else if (ip->sync_flags & HAMMER_INODE_ATIME) {
			/*
			 * Updating atime only can be done in-place with
			 * no UNDO.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
					     NULL, 0);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		}
		ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor,
					   &ip->cache[0], ip);
		if (error == 0)
			goto retry;
	}
	return(error);
}
/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
	/*hammer_mount_t hmp = ip->hmp;*/

	/*
	 * Handle disposition when dropping the last ref.
	 */
	for (;;) {
		if (ip->lock.refs == 1) {
			/*
			 * Determine whether on-disk action is needed for
			 * the inode's final disposition.
			 */
			KKASSERT(ip->vp == NULL);
			hammer_inode_unloadable_check(ip, 0);
			if (ip->flags & HAMMER_INODE_MODMASK) {
				hammer_flush_inode(ip, 0);
			} else if (ip->lock.refs == 1) {
				hammer_unload_inode(ip);
				break;
			}
			if (flush)
				hammer_flush_inode(ip, 0);
		} else {
			if (flush)
				hammer_flush_inode(ip, 0);

			/*
			 * The inode still has multiple refs, try to drop
			 * one ref.
			 */
			KKASSERT(ip->lock.refs >= 1);
			if (ip->lock.refs > 1) {
				hammer_unref(&ip->lock);
				break;
			}
		}
	}
}
/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * The inode must be completely clean.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
	hammer_mount_t hmp = ip->hmp;

	KASSERT(ip->lock.refs == 1,
		("hammer_unload_inode: %d refs\n", ip->lock.refs));
	KKASSERT(ip->vp == NULL);
	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
	KKASSERT(ip->cursor_ip_refs == 0);
	KKASSERT(hammer_notlocked(&ip->lock));
	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

	KKASSERT(RB_EMPTY(&ip->rec_tree));
	KKASSERT(TAILQ_EMPTY(&ip->target_list));

	if (ip->flags & HAMMER_INODE_RDIRTY) {
		RB_REMOVE(hammer_redo_rb_tree, &hmp->rb_redo_root, ip);
		ip->flags &= ~HAMMER_INODE_RDIRTY;
	}
	RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

	hammer_free_inode(ip);
	return(0);
}
/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
{
	hammer_record_t rec;

	/*
	 * Get rid of the inodes in-memory records, regardless of their
	 * state, and clear the mod-mask.
	 */
	while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
		TAILQ_REMOVE(&ip->target_list, rec, target_entry);
		rec->target_ip = NULL;
		if (rec->flush_state == HAMMER_FST_SETUP)
			rec->flush_state = HAMMER_FST_IDLE;
	}
	while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
		if (rec->flush_state == HAMMER_FST_FLUSH)
			--rec->flush_group->refs;
		else
			hammer_ref(&rec->lock);
		KKASSERT(rec->lock.refs == 1);
		rec->flush_state = HAMMER_FST_IDLE;
		rec->flush_group = NULL;
		rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
		rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
		++ip->rec_generation;
		hammer_rel_mem_record(rec);
	}
	ip->flags &= ~HAMMER_INODE_MODMASK;
	ip->sync_flags &= ~HAMMER_INODE_MODMASK;
	KKASSERT(ip->vp == NULL);

	/*
	 * Remove the inode from any flush group, force it idle.  FLUSH
	 * and SETUP states have an inode ref.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_FLUSH:
		RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
		--ip->flush_group->refs;
		ip->flush_group = NULL;
		/* fall through */
	case HAMMER_FST_SETUP:
		hammer_unref(&ip->lock);
		ip->flush_state = HAMMER_FST_IDLE;
		/* fall through */
	case HAMMER_FST_IDLE:
		break;
	}

	/*
	 * There shouldn't be any associated vnode.  The unload needs at
	 * least one ref, if we do have a vp steal its ip ref.
	 */
	if (ip->vp) {
		kprintf("hammer_destroy_inode_callback: Unexpected "
			"vnode association ip %p vp %p\n", ip, ip->vp);
		ip->vp->v_data = NULL;
		ip->vp = NULL;
	} else {
		hammer_ref(&ip->lock);
	}
	hammer_unload_inode(ip);
	return(0);
}
/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
	hammer_mount_t hmp = ip->hmp;

	if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
		ip->flags |= HAMMER_INODE_RO;
	else
		ip->flags &= ~HAMMER_INODE_RO;
	return(0);
}
/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated, not incl mtime/atime,
 *			and not including size changes due to write-append
 *			(but other size changes are included).
 * HAMMER_INODE_SDIRTY: Inode data has been updated, size changes due to
 *			write-append.
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
	/*
	 * ronly of 0 or 2 does not trigger assertion.
	 * 2 is a special error state
	 */
	KKASSERT(ip->hmp->ronly != 1 ||
		  (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
			    HAMMER_INODE_SDIRTY |
			    HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
			    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
	if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
		ip->flags |= HAMMER_INODE_RSV_INODES;
		++ip->hmp->rsv_inodes;
	}

	ip->flags |= flags;
}
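/*
 * Callers pair a frontend modification with the matching flag, e.g.
 * (sketch, mirroring the pattern used by hammer_vop_setattr()):
 *
 *	ip->ino_data.mode = cur_mode;
 *	hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
 *
 * so the flusher knows which portions of the in-memory inode must be
 * synchronized to the media.
 */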
/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * Upon return if the inode could not be flushed due to a setup
 * dependency, then it will be automatically flushed when the dependency
 * is satisfied.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
	hammer_mount_t hmp;
	hammer_flush_group_t flg;
	int good;

	/*
	 * next_flush_group is the first flush group we can place the inode
	 * in.  It may be NULL.  If it becomes full we append a new flush
	 * group and make that the next_flush_group.
	 */
	hmp = ip->hmp;
	while ((flg = hmp->next_flush_group) != NULL) {
		KKASSERT(flg->running == 0);
		if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit)
			break;
		hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
		hammer_flusher_async(ip->hmp, flg);
	}
	if (flg == NULL) {
		flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
		hmp->next_flush_group = flg;
		RB_INIT(&flg->flush_tree);
		TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
	}

	/*
	 * Trivial 'nothing to flush' case.  If the inode is in a SETUP
	 * state we have to put it back into an IDLE state so we can
	 * drop the extra ref.
	 *
	 * If we have a parent dependency we must still fall through
	 * so we can run it.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
		if (ip->flush_state == HAMMER_FST_SETUP &&
		    TAILQ_EMPTY(&ip->target_list)) {
			ip->flush_state = HAMMER_FST_IDLE;
			hammer_rel_inode(ip, 0);
		}
		if (ip->flush_state == HAMMER_FST_IDLE)
			return;
	}

	/*
	 * Our flush action will depend on the current state.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * We have no dependencies and can flush immediately.  Some
		 * of our children may not be flushable so we have to re-test
		 * with that additional knowledge.
		 */
		hammer_flush_inode_core(ip, flg, flags);
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Recurse upwards through dependencies via target_list
		 * and start their flusher actions going if possible.
		 *
		 * 'good' is our connectivity.  -1 means we have none and
		 * can't flush, 0 means there weren't any dependencies, and
		 * 1 means we have good connectivity.
		 */
		good = hammer_setup_parent_inodes(ip, 0, flg);

		if (good >= 0) {
			/*
			 * We can continue if good >= 0.  Determine how
			 * many records under our inode can be flushed (and
			 * mark them).
			 */
			hammer_flush_inode_core(ip, flg, flags);
		} else {
			/*
			 * Parent has no connectivity, tell it to flush
			 * us as soon as it does.
			 *
			 * The REFLUSH flag is also needed to trigger
			 * dependency wakeups.
			 */
			ip->flags |= HAMMER_INODE_CONN_DOWN |
				     HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * We are already flushing, flag the inode to reflush
		 * if needed after it completes its current flush.
		 *
		 * The REFLUSH flag is also needed to trigger
		 * dependency wakeups.
		 */
		if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
			ip->flags |= HAMMER_INODE_REFLUSH;
		if (flags & HAMMER_FLUSH_SIGNAL) {
			ip->flags |= HAMMER_INODE_RESIGNAL;
			hammer_flusher_async(ip->hmp, flg);
		}
		break;
	}
}
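/*
 * Summary of the state machine driven above and by the helpers below:
 *
 *	HAMMER_FST_IDLE  -> HAMMER_FST_FLUSH	(no dependencies)
 *	HAMMER_FST_SETUP -> HAMMER_FST_FLUSH	(if parents connect)
 *	HAMMER_FST_FLUSH -> REFLUSH flagged	(already running)
 *
 * SETUP and FLUSH states each hold an extra reference on the inode.
 */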
/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 *     so for now do not ref/deref the structures.  Note that if we use the
 *     ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
			   hammer_flush_group_t flg)
{
	hammer_record_t depend;
	int good;
	int r;

	/*
	 * If we hit our recursion limit and we have parent dependencies
	 * we cannot continue.  Returning < 0 will cause us to be flagged
	 * for reflush.  Returning -2 cuts off additional dependency checks
	 * because they are likely to also hit the depth limit.
	 *
	 * We cannot return < 0 if there are no dependencies or there might
	 * not be anything to wakeup (ip).
	 */
	if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
		kprintf("HAMMER Warning: depth limit reached on "
			"setup recursion, inode %p %016llx\n",
			ip, (long long)ip->obj_id);
		return(-2);
	}

	/*
	 * Scan dependencies
	 */
	good = 0;
	TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
		r = hammer_setup_parent_inodes_helper(depend, depth, flg);
		KKASSERT(depend->target_ip == ip);
		if (r < 0 && good == 0)
			good = -1;
		if (r > 0)
			good = 1;

		/*
		 * If we failed due to the recursion depth limit then stop
		 * now.
		 */
		if (r == -2)
			break;
	}
	return(good);
}
/*
 * This helper function takes a record representing the dependency between
 * the parent inode and child inode.
 *
 * record->ip		= parent inode
 * record->target_ip	= child inode
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependency and there is no connectivity.
 */
static int
hammer_setup_parent_inodes_helper(hammer_record_t record, int depth,
				  hammer_flush_group_t flg)
{
	hammer_inode_t pip;
	int good;

	KKASSERT(record->flush_state != HAMMER_FST_IDLE);
	pip = record->ip;

	/*
	 * If the record is already flushing, is it in our flush group?
	 *
	 * If it is in our flush group but it is a general record or a
	 * delete-on-disk, it does not improve our connectivity (return 0),
	 * and if the target inode is not trying to destroy itself we can't
	 * allow the operation yet anyway (the second return -1).
	 */
	if (record->flush_state == HAMMER_FST_FLUSH) {
		/*
		 * If not in our flush group ask the parent to reflush
		 * us as soon as possible.
		 */
		if (record->flush_group != flg) {
			pip->flags |= HAMMER_INODE_REFLUSH;
			record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
			return(-1);
		}

		/*
		 * If in our flush group everything is already set up,
		 * just return whether the record will improve our
		 * visibility or not.
		 */
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);
		return(0);
	}

	/*
	 * It must be a setup record.  Try to resolve the setup dependencies
	 * by recursing upwards so we can place ip on the flush list.
	 *
	 * Limit ourselves to 20 levels of recursion to avoid blowing out
	 * the kernel stack.  If we hit the recursion limit we can't flush
	 * until the parent flushes.  The parent will flush independently
	 * on its own and ultimately a deep recursion will be resolved.
	 */
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

	good = hammer_setup_parent_inodes(pip, depth + 1, flg);

	/*
	 * If good < 0 the parent has no connectivity and we cannot safely
	 * flush the directory entry, which also means we can't flush our
	 * ip.  Flag us for downward recursion once the parent's
	 * connectivity is resolved.  Flag the parent for [re]flush or it
	 * may not check for downward recursions.
	 */
	if (good < 0) {
		pip->flags |= HAMMER_INODE_REFLUSH;
		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
		return(good);
	}

	/*
	 * We are go, place the parent inode in a flushing state so we can
	 * place its record in a flushing state.  Note that the parent
	 * may already be flushing.  The record must be in the same flush
	 * group as the parent.
	 */
	if (pip->flush_state != HAMMER_FST_FLUSH)
		hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
	KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

	if (record->type == HAMMER_MEM_RECORD_DEL &&
	    (record->target_ip->flags &
	     (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
		/*
		 * Regardless of flushing state we cannot sync this path if the
		 * record represents a delete-on-disk but the target inode
		 * is not ready to sync its own deletion.
		 *
		 * XXX need to count effective nlinks to determine whether
		 * the flush is ok, otherwise removing a hardlink will
		 * just leave the DEL record to rot.
		 */
		record->target_ip->flags |= HAMMER_INODE_REFLUSH;
		return(-1);
	}

	if (pip->flush_group == flg) {
		/*
		 * Because we have not calculated nlinks yet we can just
		 * set records to the flush state if the parent is in
		 * the same flush group as we are.
		 */
		record->flush_state = HAMMER_FST_FLUSH;
		record->flush_group = flg;
		++record->flush_group->refs;
		hammer_ref(&record->lock);

		/*
		 * A general directory-add contributes to our visibility.
		 *
		 * Otherwise it is probably a directory-delete or
		 * delete-on-disk record and does not contribute to our
		 * visibility (but we can still flush it).
		 */
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);
		return(0);
	} else {
		/*
		 * If the parent is not in our flush group we cannot
		 * flush this record yet, there is no visibility.
		 * We tell the parent to reflush and mark ourselves
		 * so the parent knows it should flush us too.
		 */
		pip->flags |= HAMMER_INODE_REFLUSH;
		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
		return(-1);
	}
}
/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
{
	int go_count;

	/*
	 * Set flush state and prevent the flusher from cycling into
	 * the next flush group.  Do not place the ip on the list yet.
	 * Inodes not in the idle state get an extra reference.
	 */
	KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
	if (ip->flush_state == HAMMER_FST_IDLE)
		hammer_ref(&ip->lock);
	ip->flush_state = HAMMER_FST_FLUSH;
	ip->flush_group = flg;
	++ip->hmp->flusher.group_lock;
	++ip->hmp->count_iqueued;
	++hammer_count_iqueued;
	++flg->total_count;
	hammer_redo_fifo_start_flush(ip);

	/*
	 * If the flush group reaches the autoflush limit we want to signal
	 * the flusher.  This is particularly important for remove()s.
	 *
	 * If the default hammer_limit_reclaim is changed via sysctl
	 * make sure we don't hit a degenerate case where we don't start
	 * a flush but blocked on further inode ops.
	 */
	if (flg->total_count == hammer_autoflush ||
	    flg->total_count >= hammer_limit_reclaim / 4)
		flags |= HAMMER_FLUSH_SIGNAL;

	/*
	 * We need to be able to vfsync/truncate from the backend.
	 *
	 * XXX Any truncation from the backend will acquire the vnode
	 *     independently.
	 */
	KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
	if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
		ip->flags |= HAMMER_INODE_VHELD;
		vref(ip->vp);
	}

	/*
	 * Figure out how many in-memory records we can actually flush
	 * (not including inode meta-data, buffers, etc).
	 */
	KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
	if (flags & HAMMER_FLUSH_RECURSION) {
		/*
		 * If this is an upwards recursion we do not want to
		 * recurse down again!
		 */
		go_count = 1;
	} else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
		/*
		 * No new records are added if we must complete a flush
		 * from a previous cycle, but we do have to move the records
		 * from the previous cycle to the current one.
		 */
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_syncgrp_child_callback, NULL);
	} else {
		/*
		 * Normal flush, scan records and bring them into the flush.
		 * Directory adds and deletes are usually skipped (they are
		 * grouped with the related inode rather than with the
		 * directory).
		 *
		 * go_count can be negative, which means the scan aborted
		 * due to the flush group being over-full and we should
		 * flush what we have.
		 */
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_setup_child_callback, NULL);
	}

	/*
	 * This is a more involved test that includes go_count.  If we
	 * can't flush, flag the inode and return.  If go_count is 0 we
	 * are unable to flush any records in our rec_tree and
	 * must ignore the XDIRTY flag.
	 */
	if (go_count == 0) {
		if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
			--ip->hmp->count_iqueued;
			--hammer_count_iqueued;

			--flg->total_count;
			ip->flush_state = HAMMER_FST_SETUP;
			ip->flush_group = NULL;
			if (ip->flags & HAMMER_INODE_VHELD) {
				ip->flags &= ~HAMMER_INODE_VHELD;
				vrele(ip->vp);
			}

			/*
			 * REFLUSH is needed to trigger dependency wakeups
			 * when an inode is in SETUP.
			 */
			ip->flags |= HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
			if (--ip->hmp->flusher.group_lock == 0)
				wakeup(&ip->hmp->flusher.group_lock);
			return;
		}
	}

	/*
	 * Snapshot the state of the inode for the backend flusher.
	 *
	 * We continue to retain save_trunc_off even when all truncations
	 * have been resolved as an optimization to determine if we can
	 * skip the B-Tree lookup for overwrite deletions.
	 *
	 * NOTE: The DELETING flag is a mod flag, but it is also sticky,
	 * and stays in ip->flags.  Once set, it stays set until the
	 * inode is destroyed.
	 */
	if (ip->flags & HAMMER_INODE_TRUNCATED) {
		KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
		ip->sync_trunc_off = ip->trunc_off;
		ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
		ip->flags &= ~HAMMER_INODE_TRUNCATED;
		ip->sync_flags |= HAMMER_INODE_TRUNCATED;

		/*
		 * The save_trunc_off used to cache whether the B-Tree
		 * holds any records past that point is not used until
		 * after the truncation has succeeded, so we can safely
		 * set it now.
		 */
		if (ip->save_trunc_off > ip->sync_trunc_off)
			ip->save_trunc_off = ip->sync_trunc_off;
	}
	ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
			   ~HAMMER_INODE_TRUNCATED);
	ip->sync_ino_leaf = ip->ino_leaf;
	ip->sync_ino_data = ip->ino_data;
	ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
#ifdef DEBUG_TRUNCATE
	if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
		kprintf("truncateS %016llx\n", ip->sync_trunc_off);
#endif

	/*
	 * The flusher list inherits our inode and reference.
	 */
	KKASSERT(flg->running == 0);
	RB_INSERT(hammer_fls_rb_tree, &flg->flush_tree, ip);
	if (--ip->hmp->flusher.group_lock == 0)
		wakeup(&ip->hmp->flusher.group_lock);

	if (flags & HAMMER_FLUSH_SIGNAL) {
		hammer_flusher_async(ip->hmp, flg);
	}
}
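/*
 * NOTE: go_count, computed above, is the number of in-memory records
 * that could be attached to the flush group: 0 means nothing under
 * this inode is flushable (so it falls back to SETUP), and a negative
 * value means the scan aborted with a partial set because the flush
 * group filled up.
 */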
/*
 * Callback for scan of ip->rec_tree.  Try to include each record in our
 * flush.  ip->flush_group has been set but the inode has not yet been
 * moved into a flushing state.
 *
 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
 * both inodes.
 *
 * We return 1 for any record placed or found in FST_FLUSH, which prevents
 * the caller from shortcutting the flush.
 */
static int
hammer_setup_child_callback(hammer_record_t rec, void *data)
{
	hammer_flush_group_t flg;
	hammer_inode_t target_ip;
	hammer_inode_t ip;
	int r;

	/*
	 * Records deleted or committed by the backend are ignored.
	 * Note that the flush detects deleted frontend records at
	 * multiple points to deal with races.  This is just the first
	 * line of defense.  The only time HAMMER_RECF_DELETED_FE cannot
	 * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it
	 * messes up link-count calculations.
	 *
	 * NOTE: Don't get confused between record deletion and, say,
	 * directory entry deletion.  The deletion of a directory entry
	 * which is on-media has nothing to do with the record deletion
	 * semantics.
	 */
	if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
			  HAMMER_RECF_COMMITTED)) {
		if (rec->flush_state == HAMMER_FST_FLUSH) {
			KKASSERT(rec->flush_group == rec->ip->flush_group);
			r = 1;
		} else {
			r = 0;
		}
		return(r);
	}

	/*
	 * If the record is in an idle state it has no dependencies and
	 * can be flushed.
	 */
	ip = rec->ip;
	flg = ip->flush_group;
	r = 0;

	switch(rec->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * The record has no setup dependency, we can flush it.
		 */
		KKASSERT(rec->target_ip == NULL);
		rec->flush_state = HAMMER_FST_FLUSH;
		rec->flush_group = flg;
		++flg->refs;
		hammer_ref(&rec->lock);
		r = 1;
		break;
	case HAMMER_FST_SETUP:
		/*
		 * The record has a setup dependency.  These are typically
		 * directory entry adds and deletes.  Such entries will be
		 * flushed when their inodes are flushed so we do not
		 * usually have to add them to the flush here.  However,
		 * if the target_ip has set HAMMER_INODE_CONN_DOWN then
		 * it is asking us to flush this record (and it).
		 */
		target_ip = rec->target_ip;
		KKASSERT(target_ip != NULL);
		KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);

		/*
		 * If the target IP is already flushing in our group
		 * we could associate the record, but target_ip has
		 * already synced ino_data to sync_ino_data and we
		 * would also have to adjust nlinks.  Plus there are
		 * ordering issues for adds and deletes.
		 *
		 * Reflush downward if this is an ADD, and upward if
		 * this is a DEL.
		 */
		if (target_ip->flush_state == HAMMER_FST_FLUSH) {
			if (rec->type == HAMMER_MEM_RECORD_ADD)
				ip->flags |= HAMMER_INODE_REFLUSH;
			else
				target_ip->flags |= HAMMER_INODE_REFLUSH;
			break;
		}

		/*
		 * Target IP is not yet flushing.  This can get complex
		 * because we have to be careful about the recursion.
		 *
		 * Directories create an issue for us in that if a flush
		 * of a directory is requested the expectation is to flush
		 * any pending directory entries, but this will cause the
		 * related inodes to recursively flush as well.  We can't
		 * really defer the operation so just get as many as we
		 * can and flush them.
		 */
		if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
		    (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
			/*
			 * We aren't reclaiming and the target ip was not
			 * previously prevented from flushing due to this
			 * record dependency.  Do not flush this record.
			 */
			/*r = 0;*/
		} else if (flg->total_count + flg->refs >
			   ip->hmp->undo_rec_limit) {
			/*
			 * Our flush group is over-full and we risk blowing
			 * out the UNDO FIFO.  Stop the scan, flush what we
			 * have, then reflush the directory.
			 *
			 * The directory may be forced through multiple
			 * flush groups before it can be completely
			 * flushed.
			 */
			ip->flags |= HAMMER_INODE_RESIGNAL |
				     HAMMER_INODE_REFLUSH;
			r = -1;
		} else if (rec->type == HAMMER_MEM_RECORD_ADD) {
			/*
			 * If the target IP is not flushing we can force
			 * it to flush, even if it is unable to write out
			 * any of its own records we have at least one in
			 * hand that we CAN deal with.
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = flg;
			++flg->refs;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip, flg,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		} else {
			/*
			 * General or delete-on-disk record.
			 *
			 * XXX this needs help.  If a delete-on-disk we could
			 * disconnect the target.  If the target has its own
			 * dependencies they really need to be flushed.
			 *
			 * XXX
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = flg;
			++flg->refs;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip, flg,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * The flush_group should already match.
		 */
		KKASSERT(rec->flush_group == flg);
		r = 1;
		break;
	}
	return(r);
}
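
/*
 * Illustrative note on the callback contract above, a sketch rather
 * than authoritative documentation: the RB_SCAN is aborted when a
 * callback returns a negative value (the over-full flush group case),
 * while the non-negative returns feed the go_count test seen earlier
 * in this file, where zero means no record could be placed in the
 * flush group and the flush can be shortcut.  Hypothetical caller
 * shape:
 */
#if 0
	int go_count;

	go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
			   hammer_setup_child_callback, NULL);
	if (go_count == 0) {
		/* no records could be placed in the flush group */
	}
#endif
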
/*
 * This version just moves records already in a flush state to the new
 * flush group and that is it.
 */
static int
hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
{
	hammer_inode_t ip = rec->ip;

	switch(rec->flush_state) {
	case HAMMER_FST_FLUSH:
		KKASSERT(rec->flush_group == ip->flush_group);
		break;
	default:
		break;
	}
	return(0);
}

/*
 * Wait for a previously queued flush to complete.
 *
 * If a critical error occurred we don't try to wait.
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
	hammer_flush_group_t flg;

	flg = NULL;
	if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
		while (ip->flush_state != HAMMER_FST_IDLE &&
		       (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
			if (ip->flush_state == HAMMER_FST_SETUP)
				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
			if (ip->flush_state != HAMMER_FST_IDLE) {
				ip->flags |= HAMMER_INODE_FLUSHW;
				tsleep(&ip->flags, 0, "hmrwin", 0);
			}
		}
	}
}
/*
 * Called by the backend code when a flush has been completed.
 * The inode has already been removed from the flush list.
 *
 * A pipelined flush can occur, in which case we must re-enter the
 * inode on the list and re-copy its fields.
 */
void
hammer_flush_inode_done(hammer_inode_t ip, int error)
{
	hammer_mount_t hmp;
	int dorel;

	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

	hmp = ip->hmp;

	/*
	 * Auto-reflush if the backend could not completely flush
	 * the inode.  This fixes a case where a deferred buffer flush
	 * could cause fsync to return early.
	 */
	if (ip->sync_flags & HAMMER_INODE_MODMASK)
		ip->flags |= HAMMER_INODE_REFLUSH;

	/*
	 * Merge left-over flags back into the frontend and fix the state.
	 * Incomplete truncations are retained by the backend.
	 */
	ip->error = error;
	ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
	ip->sync_flags &= HAMMER_INODE_TRUNCATED;

	/*
	 * The backend may have adjusted nlinks, so if the adjusted nlinks
	 * does not match the frontend set the frontend's DDIRTY flag again.
	 */
	if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
		ip->flags |= HAMMER_INODE_DDIRTY;

	/*
	 * Fix up the dirty buffer status.
	 */
	if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
		ip->flags |= HAMMER_INODE_BUFS;
	}
	hammer_redo_fifo_end_flush(ip);

	/*
	 * Re-set the XDIRTY flag if some of the inode's in-memory records
	 * could not be flushed.
	 */
	KKASSERT((RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
		 (!RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) != 0));

	/*
	 * Do not lose track of inodes which no longer have vnode
	 * associations, otherwise they may never get flushed again.
	 *
	 * The reflush flag can be set superfluously, causing extra pain
	 * for no reason.  If the inode is no longer modified it no longer
	 * needs to be flushed.
	 */
	if (ip->flags & HAMMER_INODE_MODMASK) {
		if (ip->vp == NULL)
			ip->flags |= HAMMER_INODE_REFLUSH;
	} else {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
	}

	/*
	 * Adjust the flush state.
	 */
	if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
		/*
		 * We were unable to flush out all our records, leave the
		 * inode in a flush state and in the current flush group.
		 * The flush group will be re-run.
		 *
		 * This occurs if the UNDO block gets too full or there is
		 * too much dirty meta-data and allows the flusher to
		 * finalize the UNDO block and then re-flush.
		 */
		ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
		dorel = 0;
	} else {
		/*
		 * Remove from the flush_group
		 */
		RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
		ip->flush_group = NULL;

		/*
		 * Clean up the vnode ref and tracking counts.
		 */
		if (ip->flags & HAMMER_INODE_VHELD) {
			ip->flags &= ~HAMMER_INODE_VHELD;
			vrele(ip->vp);
		}
		--hmp->count_iqueued;
		--hammer_count_iqueued;

		/*
		 * And adjust the state.
		 */
		if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
			ip->flush_state = HAMMER_FST_IDLE;
			dorel = 1;
		} else {
			ip->flush_state = HAMMER_FST_SETUP;
			dorel = 0;
		}

		/*
		 * If the frontend is waiting for a flush to complete,
		 * wake it up.
		 */
		if (ip->flags & HAMMER_INODE_FLUSHW) {
			ip->flags &= ~HAMMER_INODE_FLUSHW;
			wakeup(&ip->flags);
		}

		/*
		 * If the frontend made more changes and requested another
		 * flush, then try to get it running.
		 *
		 * Reflushes are aborted when the inode is errored out.
		 */
		if (ip->flags & HAMMER_INODE_REFLUSH) {
			ip->flags &= ~HAMMER_INODE_REFLUSH;
			if (ip->flags & HAMMER_INODE_RESIGNAL) {
				ip->flags &= ~HAMMER_INODE_RESIGNAL;
				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
			} else {
				hammer_flush_inode(ip, 0);
			}
		}
	}

	/*
	 * If we have no parent dependencies we can clear CONN_DOWN
	 */
	if (TAILQ_EMPTY(&ip->target_list))
		ip->flags &= ~HAMMER_INODE_CONN_DOWN;

	/*
	 * If the inode is now clean drop the space reservation.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    (ip->flags & HAMMER_INODE_RSV_INODES)) {
		ip->flags &= ~HAMMER_INODE_RSV_INODES;
		--hmp->rsv_inodes;
	}

	if (dorel)
		hammer_rel_inode(ip, 0);
}
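
/*
 * Worked example for the flag merge-back above (illustrative only):
 * if the backend finished with sync_flags == (DDIRTY | TRUNCATED)
 * because a truncation could not be completed, then after
 *
 *	ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
 *	ip->sync_flags &= HAMMER_INODE_TRUNCATED;
 *
 * the frontend re-owns DDIRTY while TRUNCATED alone remains staged in
 * sync_flags for the next backend pass.
 */
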
/*
 * Called from hammer_sync_inode() to synchronize in-memory records
 * to the media.
 */
static int
hammer_sync_record_callback(hammer_record_t record, void *data)
{
	hammer_cursor_t cursor = data;
	hammer_transaction_t trans = cursor->trans;
	hammer_mount_t hmp = trans->hmp;
	int error;

	/*
	 * Skip records that do not belong to the current flush.
	 */
	++hammer_stats_record_iterations;
	if (record->flush_state != HAMMER_FST_FLUSH)
		return(0);

#if 1
	if (record->flush_group != record->ip->flush_group) {
		kprintf("sync_record %p ip %p bad flush group %p %p\n",
			record, record->ip, record->flush_group,
			record->ip->flush_group);
		if (hammer_debug_critical)
			Debugger("blah2");
		return(0);
	}
#endif
	KKASSERT(record->flush_group == record->ip->flush_group);

	/*
	 * Interlock the record using the BE flag.  Once BE is set the
	 * frontend cannot change the state of FE.
	 *
	 * NOTE: If FE is set prior to us setting BE we still sync the
	 *	 record out, but the flush completion code converts it to
	 *	 a delete-on-disk record instead of destroying it.
	 */
	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
	record->flags |= HAMMER_RECF_INTERLOCK_BE;

	/*
	 * The backend has already disposed of the record.
	 */
	if (record->flags & (HAMMER_RECF_DELETED_BE |
			     HAMMER_RECF_COMMITTED)) {
		error = 0;
		goto done;
	}

	/*
	 * If the whole inode is being deleted all on-disk records will
	 * be deleted very soon, we can't sync any new records to disk
	 * because they will be deleted in the same transaction they were
	 * created in (delete_tid == create_tid), which will assert.
	 *
	 * XXX There may be a case with RECORD_ADD with DELETED_FE set
	 * that we currently panic on.
	 */
	if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
		switch(record->type) {
		case HAMMER_MEM_RECORD_DATA:
			/*
			 * We don't have to do anything, if the record was
			 * committed the space will have been accounted for
			 * anyway.
			 */
			/* fall through */
		case HAMMER_MEM_RECORD_GENERAL:
			/*
			 * Set deleted-by-backend flag.  Do not set the
			 * backend committed flag, because we are throwing
			 * the record away.
			 */
			record->flags |= HAMMER_RECF_DELETED_BE;
			++record->ip->rec_generation;
			error = 0;
			goto done;
		case HAMMER_MEM_RECORD_ADD:
			panic("hammer_sync_record_callback: illegal add "
			      "during inode deletion record %p", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_INODE:
			panic("hammer_sync_record_callback: attempt to "
			      "sync inode record %p?", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_DEL:
			/*
			 * Follow through and issue the on-disk deletion
			 */
			break;
		}
	}

	/*
	 * If DELETED_FE is set special handling is needed for directory
	 * entries.  Dependent pieces related to the directory entry may
	 * have already been synced to disk.  If this occurs we have to
	 * sync the directory entry and then change the in-memory record
	 * from an ADD to a DELETE to cover the fact that it's been
	 * deleted by the frontend.
	 *
	 * A directory delete covering record (MEM_RECORD_DEL) can never
	 * be deleted by the frontend.
	 *
	 * Any other record type (aka DATA) can be deleted by the frontend.
	 * XXX At the moment the flusher must skip it because there may
	 * be another data record in the flush group for the same block,
	 * meaning that some frontend data changes can leak into the backend's
	 * synchronization point.
	 */
	if (record->flags & HAMMER_RECF_DELETED_FE) {
		if (record->type == HAMMER_MEM_RECORD_ADD) {
			/*
			 * Convert a front-end deleted directory-add to
			 * a directory-delete entry later.
			 */
			record->flags |= HAMMER_RECF_CONVERT_DELETE;
		} else {
			/*
			 * Dispose of the record (race case).  Mark as
			 * deleted by backend (and not committed).
			 */
			KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
			record->flags |= HAMMER_RECF_DELETED_BE;
			++record->ip->rec_generation;
			error = 0;
			goto done;
		}
	}

	/*
	 * Assign the create_tid for new records.  Deletions already
	 * have the record's entire key properly set up.
	 */
	if (record->type != HAMMER_MEM_RECORD_DEL) {
		record->leaf.base.create_tid = trans->tid;
		record->leaf.create_ts = trans->time32;
	}

	/*
	 * This actually moves the record to the on-media B-Tree.  We
	 * must also generate REDO_TERM entries in the UNDO/REDO FIFO
	 * indicating that the related REDO_WRITE(s) have been committed.
	 *
	 * During recovery any REDO_TERM's within the nominal recovery span
	 * are ignored since the related meta-data is being undone, causing
	 * any matching REDO_WRITEs to execute.  The REDO_TERMs outside
	 * the nominal recovery span will match against REDO_WRITEs and
	 * prevent them from being executed (because the meta-data has
	 * already been synchronized).
	 */
	if (record->flags & HAMMER_RECF_REDO) {
		KKASSERT(record->type == HAMMER_MEM_RECORD_DATA);
		hammer_generate_redo(trans, record->ip,
				     record->leaf.base.key -
					record->leaf.data_len,
				     HAMMER_REDO_TERM_WRITE,
				     NULL,
				     record->leaf.data_len);
	}
	for (;;) {
		error = hammer_ip_sync_record_cursor(cursor, record);
		if (error != EDEADLK)
			break;
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
					   record->ip);
		if (error)
			break;
	}
	record->flags &= ~HAMMER_RECF_CONVERT_DELETE;

	if (error)
		error = -error;
done:
	hammer_flush_record_done(record, error);

	/*
	 * Do partial finalization if we have built up too many dirty
	 * buffers.  Otherwise a buffer cache deadlock can occur when
	 * doing things like creating tens of thousands of tiny files.
	 *
	 * We must release our cursor lock to avoid a 3-way deadlock
	 * due to the exclusive sync lock the finalizer must get.
	 *
	 * WARNING: See warnings in hammer_unlock_cursor() function.
	 */
	if (hammer_flusher_meta_limit(hmp)) {
		hammer_unlock_cursor(cursor);
		hammer_flusher_finalize(trans, 0);
		hammer_lock_cursor(cursor);
	}
	return(error);
}
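
/*
 * Illustrative sketch of the EDEADLK recovery pattern used above, a
 * sketch of the general shape rather than additional kernel code: a
 * B-Tree operation that deadlocks cannot simply be retried on the same
 * cursor; the cursor is torn down and re-initialized first.  The
 * operation name below is hypothetical.
 */
#if 0
	for (;;) {
		error = some_btree_operation(cursor);	/* hypothetical op */
		if (error != EDEADLK)
			break;
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor,
					   &ip->cache[0], ip);
		if (error)
			break;
	}
#endif
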
/*
 * Backend function called by the flusher to sync an inode to media.
 */
int
hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	hammer_node_t tmp_node;
	hammer_record_t depend;
	hammer_record_t next;
	int error, tmp_error;
	u_int64_t nlinks;

	if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
		return(0);

	error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	if (error)
		return(error);

	/*
	 * Any directory records referencing this inode which are not in
	 * our current flush group must adjust our nlink count for the
	 * purposes of synchronizing to disk.
	 *
	 * Records which are in our flush group can be unlinked from our
	 * inode now, potentially allowing the inode to be physically
	 * deleted.
	 *
	 * This cannot block.
	 */
	nlinks = ip->ino_data.nlinks;
	next = TAILQ_FIRST(&ip->target_list);
	while ((depend = next) != NULL) {
		next = TAILQ_NEXT(depend, target_entry);
		if (depend->flush_state == HAMMER_FST_FLUSH &&
		    depend->flush_group == ip->flush_group) {
			/*
			 * If this is an ADD that was deleted by the frontend
			 * the frontend nlinks count will have already been
			 * decremented, but the backend is going to sync its
			 * directory entry and must account for it.  The
			 * record will be converted to a delete-on-disk when
			 * it gets synced.
			 *
			 * If the ADD was not deleted by the frontend we
			 * can remove the dependency from our target_list.
			 */
			if (depend->flags & HAMMER_RECF_DELETED_FE) {
				++nlinks;
			} else {
				TAILQ_REMOVE(&ip->target_list, depend,
					     target_entry);
				depend->target_ip = NULL;
			}
		} else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
			/*
			 * Not part of our flush group and not deleted by
			 * the front-end, adjust the link count synced to
			 * the media (undo what the frontend did when it
			 * queued the record).
			 */
			KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
			switch(depend->type) {
			case HAMMER_MEM_RECORD_ADD:
				--nlinks;
				break;
			case HAMMER_MEM_RECORD_DEL:
				++nlinks;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Set dirty if we had to modify the link count.
	 */
	if (ip->sync_ino_data.nlinks != nlinks) {
		KKASSERT((int64_t)nlinks >= 0);
		ip->sync_ino_data.nlinks = nlinks;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
	}

	/*
	 * If there is a truncation queued destroy any data past the (aligned)
	 * truncation point.  Userland will have dealt with the buffer
	 * containing the truncation point for us.
	 *
	 * We don't flush pending frontend data buffers until after we've
	 * dealt with the truncation.
	 */
	if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
		/*
		 * Interlock trunc_off.  The VOP front-end may continue to
		 * make adjustments to it while we are blocked.
		 */
		off_t trunc_off;
		off_t aligned_trunc_off;
		int blkmask;

		trunc_off = ip->sync_trunc_off;
		blkmask = hammer_blocksize(trunc_off) - 1;
		aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;

		/*
		 * Delete any whole blocks on-media.  The front-end has
		 * already cleaned out any partial block and made it
		 * pending.  The front-end may have updated trunc_off
		 * while we were blocked so we only use sync_trunc_off.
		 *
		 * This operation can blow out the buffer cache, EWOULDBLOCK
		 * means we were unable to complete the deletion.  The
		 * deletion will update sync_trunc_off in that case.
		 */
		error = hammer_ip_delete_range(&cursor, ip,
						aligned_trunc_off,
						0x7FFFFFFFFFFFFFFFLL, 2);
		if (error == EWOULDBLOCK) {
			ip->flags |= HAMMER_INODE_WOULDBLOCK;
			error = 0;
			goto defer_buffer_flush;
		}
		if (error)
			goto done;

		/*
		 * Generate a REDO_TERM_TRUNC entry in the UNDO/REDO FIFO.
		 *
		 * XXX we do this even if we did not previously generate
		 * a REDO_TRUNC record.  This operation may enclose the
		 * range for multiple prior truncation entries in the REDO
		 * log.
		 */
		if (trans->hmp->version >= HAMMER_VOL_VERSION_FOUR &&
		    (ip->flags & HAMMER_INODE_RDIRTY)) {
			hammer_generate_redo(trans, ip, aligned_trunc_off,
					     HAMMER_REDO_TERM_TRUNC,
					     NULL, 0);
		}

		/*
		 * Clear the truncation flag on the backend after we have
		 * completed the deletions.  Backend data is now good again
		 * (including new records we are about to sync, below).
		 *
		 * Leave sync_trunc_off intact.  As we write additional
		 * records the backend will update sync_trunc_off.  This
		 * tells the backend whether it can skip the overwrite
		 * test.  This should work properly even when the backend
		 * writes full blocks where the truncation point straddles
		 * the block because the comparison is against the base
		 * offset of the record.
		 */
		ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
		/* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
	} else {
		error = 0;
	}

	/*
	 * Now sync related records.  These will typically be directory
	 * entries, records tracking direct-writes, or delete-on-disk records.
	 */
	if (error == 0) {
		tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				    hammer_sync_record_callback, &cursor);
		if (tmp_error < 0)
			tmp_error = -tmp_error;
		if (tmp_error)
			error = tmp_error;
	}
	hammer_cache_node(&ip->cache[1], cursor.node);

	/*
	 * Re-seek for inode update, assuming our cache hasn't been ripped
	 * out from under us.
	 */
	if (error == 0) {
		tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
		if (tmp_node) {
			hammer_cursor_downgrade(&cursor);
			hammer_lock_sh(&tmp_node->lock);
			if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
				hammer_cursor_seek(&cursor, tmp_node, 0);
			hammer_unlock(&tmp_node->lock);
			hammer_rel_node(tmp_node);
		}
		error = 0;
	}

	/*
	 * If we are deleting the inode the frontend had better not have
	 * any active references on elements making up the inode.
	 *
	 * The call to hammer_ip_delete_clean() cleans up auxiliary records
	 * but not DB or DATA records.  Those must have already been deleted
	 * by the normal truncation mechanic.
	 */
	if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
	    RB_EMPTY(&ip->rec_tree) &&
	    (ip->sync_flags & HAMMER_INODE_DELETING) &&
	    (ip->flags & HAMMER_INODE_DELETED) == 0) {
		int count1 = 0;

		error = hammer_ip_delete_clean(&cursor, ip, &count1);
		if (error == 0) {
			ip->flags |= HAMMER_INODE_DELETED;
			ip->sync_flags &= ~HAMMER_INODE_DELETING;
			ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
			KKASSERT(RB_EMPTY(&ip->rec_tree));

			/*
			 * Set delete_tid in both the frontend and backend
			 * copy of the inode record.  The DELETED flag handles
			 * this, do not set DDIRTY.
			 */
			ip->ino_leaf.base.delete_tid = trans->tid;
			ip->sync_ino_leaf.base.delete_tid = trans->tid;
			ip->ino_leaf.delete_ts = trans->time32;
			ip->sync_ino_leaf.delete_ts = trans->time32;

			/*
			 * Adjust the inode count in the volume header
			 */
			hammer_sync_lock_sh(trans);
			if (ip->flags & HAMMER_INODE_ONDISK) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
			}
			hammer_sync_unlock(trans);
		}
	}

	if (error)
		goto done;
	ip->sync_flags &= ~HAMMER_INODE_BUFS;

defer_buffer_flush:
	/*
	 * Now update the inode's on-disk inode-data and/or on-disk record.
	 * DELETED and ONDISK are managed only in ip->flags.
	 *
	 * In the case of a deferred buffer flush we still update the on-disk
	 * inode to satisfy visibility requirements if there happen to be
	 * directory dependencies.
	 */
	switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
		/*
		 * If deleted and on-disk, don't set any additional flags.
		 * The delete flag takes care of things.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_SDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		break;
	case HAMMER_INODE_DELETED:
		/*
		 * Take care of the case where a deleted inode was never
		 * flushed to the disk in the first place.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_SDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		while (RB_ROOT(&ip->rec_tree)) {
			hammer_record_t record = RB_ROOT(&ip->rec_tree);
			hammer_ref(&record->lock);
			KKASSERT(record->lock.refs == 1);
			record->flags |= HAMMER_RECF_DELETED_BE;
			++record->ip->rec_generation;
			hammer_rel_mem_record(record);
		}
		break;
	case HAMMER_INODE_ONDISK:
		/*
		 * If already on-disk, do not set any additional flags.
		 */
		break;
	default:
		/*
		 * If not on-disk and not deleted, set DDIRTY to force
		 * an initial record to be written.
		 *
		 * Also set the create_tid in both the frontend and backend
		 * copy of the inode record.
		 */
		ip->ino_leaf.base.create_tid = trans->tid;
		ip->ino_leaf.create_ts = trans->time32;
		ip->sync_ino_leaf.base.create_tid = trans->tid;
		ip->sync_ino_leaf.create_ts = trans->time32;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
		break;
	}

	/*
	 * If DDIRTY or SDIRTY is set, write out a new record.
	 * If the inode is already on-disk the old record is marked as
	 * deleted.
	 *
	 * If DELETED is set hammer_update_inode() will delete the existing
	 * record without writing out a new one.
	 *
	 * If *ONLY* the ITIMES flag is set we can update the record in-place.
	 */
	if (ip->flags & HAMMER_INODE_DELETED) {
		error = hammer_update_inode(&cursor, ip);
	} else
	if (!(ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY)) &&
	    (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
		error = hammer_update_itimes(&cursor, ip);
	} else
	if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY |
			      HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
		error = hammer_update_inode(&cursor, ip);
	}
done:
	if (error) {
		hammer_critical_error(ip->hmp, ip, error,
				      "while syncing inode");
	}
	hammer_done_cursor(&cursor);
	return(error);
}
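
/*
 * Worked example for the truncation alignment above (illustrative
 * only): with a 16KB block, blkmask is 0x3FFF.  A truncation to
 * offset 0x5000 yields
 *
 *	aligned_trunc_off = (0x5000 + 0x3FFF) & ~0x3FFF = 0x8000
 *
 * so whole blocks at and beyond 0x8000 are deleted on-media while the
 * frontend deals with the partial block containing the truncation
 * point.
 */
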
/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
	struct vnode *vp;

	/*
	 * Set the DELETING flag when the link count drops to 0 and the
	 * OS no longer has any opens on the inode.
	 *
	 * The backend will clear DELETING (a mod flag) and set DELETED
	 * (a state flag) when it is actually able to perform the
	 * operation.
	 *
	 * Don't reflag the deletion if the flusher is currently syncing
	 * one that was already flagged.  A previously set DELETING flag
	 * may bounce around flags and sync_flags until the operation is
	 * completely done.
	 */
	if (ip->ino_data.nlinks == 0 &&
	    ((ip->flags | ip->sync_flags) &
	     (HAMMER_INODE_DELETING | HAMMER_INODE_DELETED)) == 0) {
		ip->flags |= HAMMER_INODE_DELETING;
		ip->flags |= HAMMER_INODE_TRUNCATED;
		ip->trunc_off = 0;
		vp = NULL;
		if (getvp) {
			if (hammer_get_vnode(ip, &vp) != 0)
				return;
		}

		/*
		 * Final cleanup
		 */
		if (ip->vp) {
			nvtruncbuf(ip->vp, 0, HAMMER_BUFSIZE, 0);
		}
		if (getvp) {
			vput(vp);
		}
	}
}

/*
 * After potentially resolving a dependency the inode is tested
 * to determine whether it needs to be reflushed.
 */
void
hammer_test_inode(hammer_inode_t ip)
{
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		hammer_ref(&ip->lock);
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
		hammer_rel_inode(ip, 0);
	}
}

/*
 * Clear the RECLAIM flag on an inode.  This occurs when the inode is
 * reassociated with a vp or just before it gets freed.
 *
 * Pipeline wakeups to threads blocked due to an excessive number of
 * detached inodes.  This typically occurs when atime updates accumulate
 * while scanning a directory tree.
 */
static void
hammer_inode_wakereclaims(hammer_inode_t ip)
{
	struct hammer_reclaim *reclaim;
	hammer_mount_t hmp = ip->hmp;

	if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
		return;

	--hammer_count_reclaiming;
	--hmp->inode_reclaims;
	ip->flags &= ~HAMMER_INODE_RECLAIM;

	while ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
		if (reclaim->count > 0 && --reclaim->count == 0) {
			TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
			wakeup(reclaim);
		}
		if (hmp->inode_reclaims > hammer_limit_reclaim / 2)
			break;
	}
}
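
/*
 * Design note (illustrative, editorial): each blocked thread enqueues a
 * counted ticket on its own stack (see hammer_inode_waitreclaims()
 * below).  The loop above decrements the head ticket as inodes finish
 * reclaiming and wakes its owner when the count reaches zero.  While
 * the backlog is still above half the limit only one ticket is
 * serviced per reclaimed inode, pipelining the wakeups instead of
 * releasing every waiter at once; at or below half the limit the queue
 * is drained.
 */
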
/*
 * Setup our reclaim pipeline.  We only let so many detached (and dirty)
 * inodes build up before we start blocking.  This routine is called
 * if a new inode is created or an inode is loaded from media.
 *
 * When we block we don't care *which* inode has finished reclaiming,
 * as long as one does.
 */
static void
hammer_inode_waitreclaims(hammer_mount_t hmp)
{
	struct hammer_reclaim reclaim;

	if (hmp->inode_reclaims < hammer_limit_reclaim)
		return;
	reclaim.count = 1;
	TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
	tsleep(&reclaim, 0, "hmrrcm", hz);
	if (reclaim.count > 0)
		TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
}

/*
 * XXX not used, doesn't work very well due to the large batching nature
 * of flushes.
 *
 * A larger than normal backlog of inodes is sitting in the flusher,
 * enforce a general slowdown to let it catch up.  This routine is only
 * called on completion of a non-flusher-related transaction which
 * performed B-Tree node I/O.
 *
 * It is possible for the flusher to stall in a continuous load.
 * blogbench -i1000 -o seems to do a good job generating this sort of load.
 * If the flusher is unable to catch up the inode count can bloat until
 * we run out of kvm.
 *
 * This is a bit of a hack.
 */
void
hammer_inode_waithard(hammer_mount_t hmp)
{
	/*
	 * Hysteresis.
	 */
	if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
		if (hmp->inode_reclaims < hammer_limit_reclaim / 2 &&
		    hmp->count_iqueued < hmp->count_inodes / 20) {
			hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
			return;
		}
	} else {
		if (hmp->inode_reclaims < hammer_limit_reclaim ||
		    hmp->count_iqueued < hmp->count_inodes / 10) {
			return;
		}
		hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY;
	}

	/*
	 * Block for one flush cycle.
	 */
	hammer_flusher_wait_next(hmp);
}