/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.114 2008/09/24 00:53:51 dillon Exp $
 */
#include "hammer.h"
#include <vm/vm_extern.h>
static int	hammer_unload_inode(struct hammer_inode *ip);
static void	hammer_free_inode(hammer_inode_t ip);
static void	hammer_flush_inode_core(hammer_inode_t ip,
					hammer_flush_group_t flg, int flags);
static int	hammer_setup_child_callback(hammer_record_t rec, void *data);
static int	hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
static int	hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
					hammer_flush_group_t flg);
static int	hammer_setup_parent_inodes_helper(hammer_record_t record,
					int depth, hammer_flush_group_t flg);
static void	hammer_inode_wakereclaims(hammer_inode_t ip, int dowake);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif
/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_localization < ip2->obj_localization)
                return(-1);
        if (ip1->obj_localization > ip2->obj_localization)
                return(1);
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}
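
/*
 * Note that the comparison above orders inodes by localization first, then
 * obj_id, then obj_asof, so all historical (as-of) copies of a given object
 * sort adjacently in the tree and differ only in their as-of TID.
 */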
/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_localization < ip->obj_localization)
                return(-1);
        if (info->obj_localization > ip->obj_localization)
                return(1);
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}
/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
        hammer_inode_info_t info = data;

        if (ip->obj_localization > info->obj_localization)
                return(1);
        if (ip->obj_localization < info->obj_localization)
                return(-1);
        if (ip->obj_id > info->obj_id)
                return(1);
        if (ip->obj_id < info->obj_id)
                return(-1);
        return(0);
}
/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
        u_int32_t localization = *(u_int32_t *)data;

        if (ip->obj_localization > localization)
                return(1);
        if (ip->obj_localization < localization)
                return(-1);
        return(0);
}
/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
        if (p1->localization < p2->localization)
                return(-1);
        if (p1->localization > p2->localization)
                return(1);
        return(0);
}
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
             hammer_pfs_rb_compare, u_int32_t, localization);
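
/*
 * The RB_GENERATE*() macros above expand into the tree functions used
 * throughout this file: hammer_ino_rb_tree_RB_LOOKUP_INFO() (the XLOOKUP
 * variant keyed on hammer_inode_info_t), hammer_ino_rb_tree_RB_SCAN(),
 * RB_INSERT(), RB_REMOVE() and RB_LOOKUP().
 */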
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);

        /*
         * If the inode no longer has visibility in the filesystem try to
         * recycle it immediately, even if the inode is dirty.  Recycling
         * it quickly allows the system to reclaim buffer cache and VM
         * resources which can matter a lot in a heavily loaded system.
         *
         * This can deadlock in vfsync() if we aren't careful.
         *
         * Do not queue the inode to the flusher if we still have visibility,
         * otherwise namespace calls such as chmod will unnecessarily generate
         * multiple inode updates.
         */
        hammer_inode_unloadable_check(ip, 0);
        if (ip->ino_data.nlinks == 0) {
                if (ip->flags & HAMMER_INODE_MODMASK)
                        hammer_flush_inode(ip, 0);
                vrecycle(ap->a_vp);
        }
        return(0);
}
/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct vnode *vp;

        vp = ap->a_vp;

        if ((ip = vp->v_data) != NULL) {
                hmp = ip->hmp;
                vp->v_data = NULL;
                ip->vp = NULL;

                if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
                        ++hammer_count_reclaiming;
                        ++hmp->inode_reclaims;
                        ip->flags |= HAMMER_INODE_RECLAIM;
                }
                hammer_rel_inode(ip, 1);
        }
        return(0);
}
/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
        hammer_mount_t hmp;
        struct vnode *vp;
        int error = 0;
        u_int8_t obj_type;

        hmp = ip->hmp;

        for (;;) {
                if ((vp = ip->vp) == NULL) {
                        error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
                        if (error)
                                break;
                        hammer_lock_ex(&ip->lock);
                        if (ip->vp != NULL) {
                                hammer_unlock(&ip->lock);
                                vp = *vpp;
                                vp->v_type = VBAD;
                                vx_put(vp);
                                continue;
                        }
                        hammer_ref(&ip->lock);
                        vp = *vpp;
                        ip->vp = vp;

                        obj_type = ip->ino_data.obj_type;
                        vp->v_type = hammer_get_vnode_type(obj_type);

                        hammer_inode_wakereclaims(ip, 0);

                        switch(ip->ino_data.obj_type) {
                        case HAMMER_OBJTYPE_CDEV:
                        case HAMMER_OBJTYPE_BDEV:
                                vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
                                addaliasu(vp, ip->ino_data.rmajor,
                                          ip->ino_data.rminor);
                                break;
                        case HAMMER_OBJTYPE_FIFO:
                                vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
                                break;
                        default:
                                break;
                        }

                        /*
                         * Only mark as the root vnode if the ip is not
                         * historical, otherwise the VFS cache will get
                         * confused.  The other half of the special handling
                         * is in hammer_vop_nlookupdotdot().
                         *
                         * Pseudo-filesystem roots can be accessed via
                         * non-root filesystem paths and setting VROOT may
                         * confuse the namecache.  Set VPFSROOT instead.
                         */
                        if (ip->obj_id == HAMMER_OBJID_ROOT &&
                            ip->obj_asof == hmp->asof) {
                                if (ip->obj_localization == 0)
                                        vp->v_flag |= VROOT;
                                else
                                        vp->v_flag |= VPFSROOT;
                        }

                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
                        /* make related vnode dirty if inode dirty? */
                        hammer_unlock(&ip->lock);
                        if (vp->v_type == VREG)
                                vinitvmio(vp, ip->ino_data.size);
                        break;
                }

                /*
                 * loop if the vget fails (aka races), or if the vp
                 * no longer matches ip->vp.
                 */
                if (vget(vp, LK_EXCLUSIVE) == 0) {
                        if (vp == ip->vp)
                                break;
                        vput(vp);
                }
        }
        *vpp = vp;
        return(error);
}
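
/*
 * Typical front-end usage (illustrative sketch only): resolve or create the
 * in-memory inode first, then attach a vnode to hand back to the kernel:
 *
 *	ip = hammer_get_inode(trans, dip, obj_id, asof, localization,
 *			      flags, &error);
 *	if (ip) {
 *		error = hammer_get_vnode(ip, &vp);
 *		hammer_rel_inode(ip, 0);
 *	}
 */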
/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
                            int (*callback)(hammer_inode_t ip, void *data),
                            void *data)
{
        hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
                                   hammer_inode_info_cmp_all_history,
                                   callback, iinfo);
}
/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_node_cache *cachep;
        struct hammer_inode_info iinfo;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a dummy inode we return a failure so dounlink
         * (which does another lookup) doesn't try to mess with the
         * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
         * to ref dummy inodes.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                *errorp = 0;
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags & HAMMER_INODE_RO;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        if (hmp->ronly)
                ip->flags |= HAMMER_INODE_RO;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Locate the on-disk inode.  If this is a PFS root we always
         * access the current version of the root inode and (if it is not
         * a master) always access information under it with a snapshot
         * TID.
         *
         * We cache recent inode lookups in this directory in dip->cache[2].
         * If we can't find it we assume the inode we are looking for is
         * close to the directory inode.
         */
retry:
        cachep = NULL;
        if (dip) {
                if (dip->cache[2].node)
                        cachep = &dip->cache[2];
                else
                        cachep = &dip->cache[0];
        }
        hammer_init_cursor(trans, &cursor, cachep, NULL);
        cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.key = 0;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor.key_beg.obj_type = 0;

        cursor.asof = iinfo.obj_asof;
        cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
                       HAMMER_CURSOR_ASOF;

        *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode
         * and cache the B-Tree node to improve future operations.
         */
        if (*errorp == 0) {
                ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
                ip->ino_data = cursor.data->inode;

                /*
                 * cache[0] tries to cache the location of the object inode.
                 * The assumption is that it is near the directory inode.
                 *
                 * cache[1] tries to cache the location of the object data.
                 * We might have something in the governing directory from
                 * scan optimizations (see the strategy code in
                 * hammer_vnops.c).
                 *
                 * We update dip->cache[2], if possible, with the location
                 * of the object inode for future directory shortcuts.
                 */
                hammer_cache_node(&ip->cache[0], cursor.node);
                if (dip) {
                        if (dip->cache[3].node) {
                                hammer_cache_node(&ip->cache[1],
                                                  dip->cache[3].node);
                        }
                        hammer_cache_node(&dip->cache[2], cursor.node);
                }

                /*
                 * The file should not contain any data past the file size
                 * stored in the inode.  Setting save_trunc_off to the
                 * file size instead of max reduces B-Tree lookup overheads
                 * on append by allowing the flusher to avoid checking for
                 * record overwrites.
                 */
                ip->save_trunc_off = ip->ino_data.size;

                /*
                 * Locate and assign the pseudofs management structure to
                 * the inode.
                 */
                if (dip && dip->obj_localization == ip->obj_localization) {
                        ip->pfsm = dip->pfsm;
                        hammer_ref(&ip->pfsm->lock);
                } else {
                        ip->pfsm = hammer_load_pseudofs(trans,
                                                        ip->obj_localization,
                                                        errorp);
                        *errorp = 0;    /* ignore ENOENT */
                }
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        hammer_done_cursor(&cursor);
                        goto loop;
                }
                ip->flags |= HAMMER_INODE_ONDISK;
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }

                hammer_free_inode(ip);
                ip = NULL;
        }
        hammer_done_cursor(&cursor);
        trans->flags |= HAMMER_TRANSF_NEWINODE;
        return(ip);
}
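
/*
 * Summary of the per-inode B-Tree node caches used above (informal): cache[0]
 * remembers the node holding the object's inode record and cache[1] the node
 * holding its data, while a directory's cache[2]/cache[3] act as shortcuts
 * seeded from recent lookups so a child's inode/data can be located without
 * a full B-Tree descent.
 */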
/*
 * Get a dummy inode to placemark a broken directory entry.
 */
struct hammer_inode *
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a non-fake inode we return an error.  Only fake
         * inodes can be returned by this routine.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                *errorp = 0;
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Populate the dummy inode.  Leave everything zero'd out.
         *
         * (ip->ino_leaf and ip->ino_data)
         *
         * Make the dummy inode a FIFO object which most copy programs
         * will properly ignore.
         */
        ip->save_trunc_off = ip->ino_data.size;
        ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

        /*
         * Locate and assign the pseudofs management structure to
         * the inode.
         */
        if (dip && dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
        } else {
                ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
                                                errorp);
                *errorp = 0;    /* ignore ENOENT */
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         *
         * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        goto loop;
                }
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }
                hammer_free_inode(ip);
                ip = NULL;
        }
        trans->flags |= HAMMER_TRANSF_NEWINODE;
        return(ip);
}
/*
 * Return a referenced inode only if it is in our inode cache.
 *
 * Dummy inodes do not count.
 */
struct hammer_inode *
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
                  hammer_tid_t asof, u_int32_t localization)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;

        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY)
                        ip = NULL;
                else
                        hammer_ref(&ip->lock);
        }
        return(ip);
}
/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                    struct ucred *cred,
                    hammer_inode_t dip, const char *name, int namelen,
                    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
        hammer_mount_t hmp;
        hammer_inode_t ip;
        uid_t xuid;
        int error;
        int64_t namekey;
        u_int32_t dummy;

        hmp = trans->hmp;

        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        trans->flags |= HAMMER_TRANSF_NEWINODE;

        if (pfsm) {
                KKASSERT(pfsm->localization != 0);
                ip->obj_id = HAMMER_OBJID_ROOT;
                ip->obj_localization = pfsm->localization;
        } else {
                KKASSERT(dip != NULL);
                namekey = hammer_directory_namekey(dip, name, namelen, &dummy);
                ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
                ip->obj_localization = dip->obj_localization;
        }

        KKASSERT(ip->obj_id != 0);
        ip->obj_asof = hmp->asof;
        ip->hmp = hmp;
        ip->flush_state = HAMMER_FST_IDLE;
        ip->flags = HAMMER_INODE_DDIRTY |
                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;

        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        /* ip->save_trunc_off = 0; (already zero) */
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        ip->ino_data.atime = trans->time;
        ip->ino_data.mtime = trans->time;
        ip->ino_data.size = 0;
        ip->ino_data.nlinks = 0;

        /*
         * A nohistory designator on the parent directory is inherited by
         * the child.  We will do this even for pseudo-fs creation... the
         * sysad can turn it off.
         */
        if (dip) {
                ip->ino_data.uflags = dip->ino_data.uflags &
                                      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
        }

        ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        ip->ino_leaf.base.localization = ip->obj_localization +
                                         HAMMER_LOCALIZE_INODE;
        ip->ino_leaf.base.obj_id = ip->obj_id;
        ip->ino_leaf.base.key = 0;
        ip->ino_leaf.base.create_tid = 0;
        ip->ino_leaf.base.delete_tid = 0;
        ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
        ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

        ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
        ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
        ip->ino_data.mode = vap->va_mode;
        ip->ino_data.ctime = trans->time;

        /*
         * If we are running version 2 or greater we use dirhash algorithm #1
         * which is semi-sorted.  Algorithm #0 was just a pure crc.
         */
        if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
                if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
                        ip->ino_data.cap_flags |= HAMMER_INODE_CAP_DIRHASH_ALG1;
                }
        }

        /*
         * Setup the ".." pointer.  This only needs to be done for directories
         * but we do it for all objects as a recovery aid.
         */
        if (dip)
                ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;

        /*
         * The parent_obj_localization field only applies to pseudo-fs roots.
         * XXX this is no longer applicable, PFSs are no longer directly
         * tied into the parent's directory structure.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
            ip->obj_id == HAMMER_OBJID_ROOT) {
                ip->ino_data.ext.obj.parent_obj_localization =
                                                dip->obj_localization;
        }

        switch(ip->ino_leaf.base.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                ip->ino_data.rmajor = vap->va_rmajor;
                ip->ino_data.rminor = vap->va_rminor;
                break;
        default:
                break;
        }

        /*
         * Calculate default uid/gid and overwrite with information from
         * the vap.
         */
        if (dip) {
                xuid = hammer_to_unix_xid(&dip->ino_data.uid);
                xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
                                             xuid, cred, &vap->va_mode);
        } else {
                xuid = 0;
        }
        ip->ino_data.mode = vap->va_mode;

        if (vap->va_vaflags & VA_UID_UUID_VALID)
                ip->ino_data.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
        else
                hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

        if (vap->va_vaflags & VA_GID_UUID_VALID)
                ip->ino_data.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
        else if (dip)
                ip->ino_data.gid = dip->ino_data.gid;

        hammer_ref(&ip->lock);

        if (pfsm) {
                ip->pfsm = pfsm;
                hammer_ref(&pfsm->lock);
                error = 0;
        } else if (dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
                error = 0;
        } else {
                ip->pfsm = hammer_load_pseudofs(trans,
                                                ip->obj_localization,
                                                &error);
                error = 0;      /* ignore ENOENT */
        }

        if (error) {
                hammer_free_inode(ip);
                ip = NULL;
        } else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                panic("hammer_create_inode: duplicate obj_id %llx",
                      (long long)ip->obj_id);
                /* not reached */
                hammer_free_inode(ip);
        }
        *ipp = ip;
        return(error);
}
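
/*
 * A minimal caller of hammer_create_inode() can be seen in
 * hammer_mkroot_pseudofs() below: the new inode is created in-memory, the
 * caller then bumps nlinks and marks the inode modified so the flusher will
 * write it to the media.
 */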
/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
        struct hammer_mount *hmp;

        hmp = ip->hmp;
        KKASSERT(ip->lock.refs == 1);
        hammer_uncache_node(&ip->cache[0]);
        hammer_uncache_node(&ip->cache[1]);
        hammer_uncache_node(&ip->cache[2]);
        hammer_uncache_node(&ip->cache[3]);
        hammer_inode_wakereclaims(ip, 1);
        if (ip->objid_cache)
                hammer_clear_objid(ip);
        --hammer_count_inodes;
        --hmp->count_inodes;
        if (ip->pfsm) {
                hammer_rel_pseudofs(hmp, ip->pfsm);
                ip->pfsm = NULL;
        }
        kfree(ip, hmp->m_inodes);
}
/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
                     u_int32_t localization, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_inode_t ip;
        hammer_pseudofs_inmem_t pfsm;
        struct hammer_cursor cursor;
        int bytes;

retry:
        pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
        if (pfsm) {
                hammer_ref(&pfsm->lock);
                *errorp = 0;
                return(pfsm);
        }

        /*
         * PFS records are stored in the root inode (not the PFS root inode,
         * but the real root).  Avoid an infinite recursion if loading
         * the PFS for the real root.
         */
        if (localization) {
                ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
                                      HAMMER_MAX_TID,
                                      HAMMER_DEF_LOCALIZATION, 0, errorp);
        } else {
                ip = NULL;
        }

        pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
        pfsm->localization = localization;
        pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
        pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

        hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
        cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        if (ip)
                *errorp = hammer_ip_lookup(&cursor);
        else
                *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == 0) {
                *errorp = hammer_ip_resolve_data(&cursor);
                if (*errorp == 0) {
                        if (cursor.data->pfsd.mirror_flags &
                            HAMMER_PFSD_DELETED) {
                                *errorp = ENOENT;
                        } else {
                                bytes = cursor.leaf->data_len;
                                if (bytes > sizeof(pfsm->pfsd))
                                        bytes = sizeof(pfsm->pfsd);
                                bcopy(cursor.data, &pfsm->pfsd, bytes);
                        }
                }
        }
        hammer_done_cursor(&cursor);

        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_ref(&pfsm->lock);
        if (ip)
                hammer_rel_inode(ip, 0);
        if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
                kfree(pfsm, hmp->m_misc);
                goto retry;
        }
        return(pfsm);
}
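
/*
 * Note on the lookup above: PFS configuration records all live in the real
 * root inode under HAMMER_RECTYPE_PFS, keyed by the PFS localization value,
 * which is why hammer_load_pseudofs() and hammer_save_pseudofs() both build
 * their cursors against HAMMER_OBJID_ROOT rather than the PFS root inode
 * itself.
 */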
/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
        struct hammer_cursor cursor;
        hammer_record_t record;
        hammer_inode_t ip;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = pfsm->localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        /*
         * Replace any in-memory version of the record.
         */
        error = hammer_ip_lookup(&cursor);
        if (error == 0 && hammer_cursor_inmem(&cursor)) {
                record = cursor.iprec;
                if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
                        KKASSERT(cursor.deadlk_rec == NULL);
                        hammer_ref(&record->lock);
                        cursor.deadlk_rec = record;
                        error = EDEADLK;
                } else {
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        error = 0;
                }
        }

        /*
         * Allocate replacement general record.  The backend flush will
         * delete any on-disk version of the record.
         */
        if (error == 0 || error == ENOENT) {
                record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
                record->type = HAMMER_MEM_RECORD_GENERAL;

                record->leaf.base.localization = ip->obj_localization +
                                                 HAMMER_LOCALIZE_MISC;
                record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
                record->leaf.base.key = pfsm->localization;
                record->leaf.data_len = sizeof(pfsm->pfsd);
                bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
                error = hammer_ip_add_record(trans, record);
        }
        hammer_done_cursor(&cursor);
        if (error == EDEADLK)
                goto retry;
        hammer_rel_inode(ip, 0);
        return(error);
}
/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
                       hammer_pseudofs_inmem_t pfsm)
{
        hammer_inode_t ip;
        struct vattr vap;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              pfsm->localization, 0, &error);
        if (ip == NULL) {
                vattr_null(&vap);
                vap.va_mode = 0755;
                vap.va_type = VDIR;
                error = hammer_create_inode(trans, &vap, cred,
                                            NULL, NULL, 0,
                                            pfsm, &ip);
                if (error == 0) {
                        ++ip->ino_data.nlinks;
                        hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
                }
        }
        if (ip)
                hammer_rel_inode(ip, 0);
        return(error);
}
/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
        int res;

        hammer_ref(&ip->lock);
        if (ip->lock.refs == 2 && ip->vp)
                vclean_unlocked(ip->vp);
        if (ip->lock.refs == 1 && ip->vp == NULL)
                res = 0;
        else
                res = -1;       /* stop, someone is using the inode */
        hammer_rel_inode(ip, 0);
        return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
        int res;
        int try;

        for (try = res = 0; try < 4; ++try) {
                res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
                                                 hammer_inode_pfs_cmp,
                                                 hammer_unload_pseudofs_callback,
                                                 &localization);
                if (res == 0 && try > 1)
                        break;
                hammer_flusher_sync(trans->hmp);
        }
        if (res != 0)
                res = ENOTEMPTY;
        return(res);
}
/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
        hammer_unref(&pfsm->lock);
        if (pfsm->lock.refs == 0) {
                RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
                kfree(pfsm, hmp->m_misc);
        }
}
/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        hammer_record_t record;
        int error;
        int redirty;

retry:
        error = 0;

        /*
         * If the inode has a presence on-disk then locate it and mark
         * it deleted, setting DELONDISK.
         *
         * The record may or may not be physically deleted, depending on
         * the retention policy.
         */
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
            HAMMER_INODE_ONDISK) {
                hammer_normalize_cursor(cursor);
                cursor->key_beg.localization = ip->obj_localization +
                                               HAMMER_LOCALIZE_INODE;
                cursor->key_beg.obj_id = ip->obj_id;
                cursor->key_beg.key = 0;
                cursor->key_beg.create_tid = 0;
                cursor->key_beg.delete_tid = 0;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
                cursor->key_beg.obj_type = 0;
                cursor->asof = ip->obj_asof;
                cursor->flags &= ~HAMMER_CURSOR_INITMASK;
                cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
                cursor->flags |= HAMMER_CURSOR_BACKEND;

                error = hammer_btree_lookup(cursor);
                if (hammer_debug_inode)
                        kprintf("IPDEL %p %08x %d", ip, ip->flags, error);

                if (error == 0) {
                        error = hammer_ip_delete_record(cursor, ip, trans->tid);
                        if (hammer_debug_inode)
                                kprintf(" error %d\n", error);
                        if (error == 0) {
                                ip->flags |= HAMMER_INODE_DELONDISK;
                        }
                        if (cursor->node)
                                hammer_cache_node(&ip->cache[0], cursor->node);
                }
                if (error == EDEADLK) {
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("IPDED %p %d\n", ip, error);
                        if (error == 0)
                                goto retry;
                }
        }

        /*
         * Ok, write out the initial record or a new record (after deleting
         * the old one), unless the DELETED flag is set.  This routine will
         * clear DELONDISK if it writes out a record.
         *
         * Update our inode statistics if this is the first application of
         * the inode on-disk.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
                /*
                 * Generate a record and write it to the media.  We clean-up
                 * the state before releasing so we do not have to set-up
                 * a flush_group.
                 */
                record = hammer_alloc_mem_record(ip, 0);
                record->type = HAMMER_MEM_RECORD_INODE;
                record->flush_state = HAMMER_FST_FLUSH;
                record->leaf = ip->sync_ino_leaf;
                record->leaf.base.create_tid = trans->tid;
                record->leaf.data_len = sizeof(ip->sync_ino_data);
                record->leaf.create_ts = trans->time32;
                record->data = (void *)&ip->sync_ino_data;
                record->flags |= HAMMER_RECF_INTERLOCK_BE;

                /*
                 * If this flag is set we cannot sync the new file size
                 * because we haven't finished related truncations.  The
                 * inode will be flushed in another flush group to finish
                 * the job.
                 */
                if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
                    ip->sync_ino_data.size != ip->ino_data.size) {
                        redirty = 1;
                        ip->sync_ino_data.size = ip->ino_data.size;
                } else {
                        redirty = 0;
                }

                for (;;) {
                        error = hammer_ip_sync_record_cursor(cursor, record);
                        if (hammer_debug_inode)
                                kprintf("GENREC %p rec %08x %d\n",
                                        ip, record->flags, error);
                        if (error != EDEADLK)
                                break;
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("GENREC reinit %d\n", error);
                        if (error)
                                break;
                }

                /*
                 * Note:  The record was never on the inode's record tree
                 * so just wave our hands importantly and destroy it.
                 */
                record->flags |= HAMMER_RECF_COMMITTED;
                record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
                record->flush_state = HAMMER_FST_IDLE;
                ++ip->rec_generation;
                hammer_rel_mem_record(record);

                if (error == 0) {
                        if (hammer_debug_inode)
                                kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
                        ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                            HAMMER_INODE_ATIME |
                                            HAMMER_INODE_MTIME);
                        ip->flags &= ~HAMMER_INODE_DELONDISK;
                        if (redirty)
                                ip->sync_flags |= HAMMER_INODE_DDIRTY;

                        /*
                         * Root volume count of inodes
                         */
                        hammer_sync_lock_sh(trans);
                        if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
                                hammer_modify_volume_field(trans,
                                                           trans->rootvol,
                                                           vol0_stat_inodes);
                                ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                                hammer_modify_volume_done(trans->rootvol);
                                ip->flags |= HAMMER_INODE_ONDISK;
                                if (hammer_debug_inode)
                                        kprintf("NOWONDISK %p\n", ip);
                        }
                        hammer_sync_unlock(trans);
                }
        }

        /*
         * If the inode has been destroyed, clean out any left-over flags
         * that may have been set by the frontend.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                    HAMMER_INODE_ATIME |
                                    HAMMER_INODE_MTIME);
        }
        return(error);
}
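
/*
 * State summary for the above: an inode with ONDISK set has a record on the
 * media; DELONDISK means that record has been deleted as part of replacing
 * it; DELETED means the inode itself is being destroyed and no replacement
 * record should be written.
 */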
/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        int error;

retry:
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
            HAMMER_INODE_ONDISK) {
                return(0);
        }

        hammer_normalize_cursor(cursor);
        cursor->key_beg.localization = ip->obj_localization +
                                       HAMMER_LOCALIZE_INODE;
        cursor->key_beg.obj_id = ip->obj_id;
        cursor->key_beg.key = 0;
        cursor->key_beg.create_tid = 0;
        cursor->key_beg.delete_tid = 0;
        cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor->key_beg.obj_type = 0;
        cursor->asof = ip->obj_asof;
        cursor->flags &= ~HAMMER_CURSOR_INITMASK;
        cursor->flags |= HAMMER_CURSOR_ASOF;
        cursor->flags |= HAMMER_CURSOR_GET_LEAF;
        cursor->flags |= HAMMER_CURSOR_GET_DATA;
        cursor->flags |= HAMMER_CURSOR_BACKEND;

        error = hammer_btree_lookup(cursor);
        if (error == 0) {
                hammer_cache_node(&ip->cache[0], cursor->node);
                if (ip->sync_flags & HAMMER_INODE_MTIME) {
                        /*
                         * Updating MTIME requires an UNDO.  Just cover
                         * both atime and mtime.
                         */
                        hammer_sync_lock_sh(trans);
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                     HAMMER_ITIMES_BASE(&cursor->data->inode),
                                     HAMMER_ITIMES_BYTES);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        cursor->data->inode.mtime = ip->sync_ino_data.mtime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                        hammer_sync_unlock(trans);
                } else if (ip->sync_flags & HAMMER_INODE_ATIME) {
                        /*
                         * Updating atime only can be done in-place with
                         * no UNDO.
                         */
                        hammer_sync_lock_sh(trans);
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                             NULL, 0);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                        hammer_sync_unlock(trans);
                }
                ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
        }
        if (error == EDEADLK) {
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor,
                                           &ip->cache[0], ip);
                if (error == 0)
                        goto retry;
        }
        return(error);
}
/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
        /*hammer_mount_t hmp = ip->hmp;*/

        /*
         * Handle disposition when dropping the last ref.
         */
        for (;;) {
                if (ip->lock.refs == 1) {
                        /*
                         * Determine whether on-disk action is needed for
                         * the inode's final disposition.
                         */
                        KKASSERT(ip->vp == NULL);
                        hammer_inode_unloadable_check(ip, 0);
                        if (ip->flags & HAMMER_INODE_MODMASK) {
                                hammer_flush_inode(ip, 0);
                        } else if (ip->lock.refs == 1) {
                                hammer_unload_inode(ip);
                                break;
                        }
                } else {
                        if (flush)
                                hammer_flush_inode(ip, 0);

                        /*
                         * The inode still has multiple refs, try to drop
                         * one ref.
                         */
                        KKASSERT(ip->lock.refs >= 1);
                        if (ip->lock.refs > 1) {
                                hammer_unref(&ip->lock);
                                break;
                        }
                }
        }
}
/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * The inode must be completely clean.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
        hammer_mount_t hmp = ip->hmp;

        KASSERT(ip->lock.refs == 1,
                ("hammer_unload_inode: %d refs\n", ip->lock.refs));
        KKASSERT(ip->vp == NULL);
        KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
        KKASSERT(ip->cursor_ip_refs == 0);
        KKASSERT(ip->lock.lockcount == 0);
        KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

        KKASSERT(RB_EMPTY(&ip->rec_tree));
        KKASSERT(TAILQ_EMPTY(&ip->target_list));

        RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

        hammer_free_inode(ip);
        return(0);
}
/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
{
        hammer_record_t rec;

        /*
         * Get rid of the inodes in-memory records, regardless of their
         * state, and clear the mod-mask.
         */
        while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
                TAILQ_REMOVE(&ip->target_list, rec, target_entry);
                rec->target_ip = NULL;
                if (rec->flush_state == HAMMER_FST_SETUP)
                        rec->flush_state = HAMMER_FST_IDLE;
        }
        while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
                if (rec->flush_state == HAMMER_FST_FLUSH)
                        --rec->flush_group->refs;
                else
                        hammer_ref(&rec->lock);
                KKASSERT(rec->lock.refs == 1);
                rec->flush_state = HAMMER_FST_IDLE;
                rec->flush_group = NULL;
                rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
                rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
                ++ip->rec_generation;
                hammer_rel_mem_record(rec);
        }
        ip->flags &= ~HAMMER_INODE_MODMASK;
        ip->sync_flags &= ~HAMMER_INODE_MODMASK;
        KKASSERT(ip->vp == NULL);

        /*
         * Remove the inode from any flush group, force it idle.  FLUSH
         * and SETUP states have an inode ref.
         */
        switch(ip->flush_state) {
        case HAMMER_FST_FLUSH:
                TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
                --ip->flush_group->refs;
                ip->flush_group = NULL;
                /* fall through */
        case HAMMER_FST_SETUP:
                hammer_unref(&ip->lock);
                ip->flush_state = HAMMER_FST_IDLE;
                /* fall through */
        case HAMMER_FST_IDLE:
                break;
        }

        /*
         * There shouldn't be any associated vnode.  The unload needs at
         * least one ref, if we do have a vp steal its ip ref.
         */
        if (ip->vp) {
                kprintf("hammer_destroy_inode_callback: Unexpected "
                        "vnode association ip %p vp %p\n", ip, ip->vp);
                ip->vp->v_data = NULL;
                ip->vp = NULL;
        } else {
                hammer_ref(&ip->lock);
        }
        hammer_unload_inode(ip);
        return(0);
}
/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
        hammer_mount_t hmp = ip->hmp;

        if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
                ip->flags |= HAMMER_INODE_RO;
        else
                ip->flags &= ~HAMMER_INODE_RO;
        return(0);
}
/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
        /*
         * ronly of 0 or 2 does not trigger assertion.
         * 2 is a special error state
         */
        KKASSERT(ip->hmp->ronly != 1 ||
                  (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
                            HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
                            HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
        if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
                ip->flags |= HAMMER_INODE_RSV_INODES;
                ++ip->hmp->rsv_inodes;
        }

        ip->flags |= flags;
}
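
/*
 * Typical usage (illustrative): after bumping nlinks on a new PFS root,
 * hammer_mkroot_pseudofs() calls
 *
 *	hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
 *
 * which reserves flusher resources via rsv_inodes and records the fact that
 * the in-memory inode data must be written back.
 */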
/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * Upon return if the inode could not be flushed due to a setup
 * dependancy, then it will be automatically flushed when the dependancy
 * is satisfied.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
        hammer_mount_t hmp;
        hammer_flush_group_t flg;
        int good;

        /*
         * next_flush_group is the first flush group we can place the inode
         * in.  It may be NULL.  If it becomes full we append a new flush
         * group and make that the next_flush_group.
         */
        hmp = ip->hmp;
        while ((flg = hmp->next_flush_group) != NULL) {
                KKASSERT(flg->running == 0);
                if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit)
                        break;
                hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
                hammer_flusher_async(ip->hmp, flg);
        }
        if (flg == NULL) {
                flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
                hmp->next_flush_group = flg;
                TAILQ_INIT(&flg->flush_list);
                TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
        }

        /*
         * Trivial 'nothing to flush' case.  If the inode is in a SETUP
         * state we have to put it back into an IDLE state so we can
         * drop the extra ref.
         *
         * If we have a parent dependancy we must still fall through
         * so we can run it.
         */
        if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
                if (ip->flush_state == HAMMER_FST_SETUP &&
                    TAILQ_EMPTY(&ip->target_list)) {
                        ip->flush_state = HAMMER_FST_IDLE;
                        hammer_rel_inode(ip, 0);
                }
                if (ip->flush_state == HAMMER_FST_IDLE)
                        return;
        }

        /*
         * Our flush action will depend on the current state.
         */
        switch(ip->flush_state) {
        case HAMMER_FST_IDLE:
                /*
                 * We have no dependancies and can flush immediately.  Some
                 * our children may not be flushable so we have to re-test
                 * with that additional knowledge.
                 */
                hammer_flush_inode_core(ip, flg, flags);
                break;
        case HAMMER_FST_SETUP:
                /*
                 * Recurse upwards through dependancies via target_list
                 * and start their flusher actions going if possible.
                 *
                 * 'good' is our connectivity.  -1 means we have none and
                 * can't flush, 0 means there weren't any dependancies, and
                 * 1 means we have good connectivity.
                 */
                good = hammer_setup_parent_inodes(ip, 0, flg);

                if (good >= 0) {
                        /*
                         * We can continue if good >= 0.  Determine how
                         * many records under our inode can be flushed (and
                         * mark them).
                         */
                        hammer_flush_inode_core(ip, flg, flags);
                } else {
                        /*
                         * Parent has no connectivity, tell it to flush
                         * us as soon as it does.
                         *
                         * The REFLUSH flag is also needed to trigger
                         * dependancy wakeups.
                         */
                        ip->flags |= HAMMER_INODE_CONN_DOWN |
                                     HAMMER_INODE_REFLUSH;
                        if (flags & HAMMER_FLUSH_SIGNAL) {
                                ip->flags |= HAMMER_INODE_RESIGNAL;
                                hammer_flusher_async(ip->hmp, flg);
                        }
                }
                break;
        case HAMMER_FST_FLUSH:
                /*
                 * We are already flushing, flag the inode to reflush
                 * if needed after it completes its current flush.
                 *
                 * The REFLUSH flag is also needed to trigger
                 * dependancy wakeups.
                 */
                if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
                        ip->flags |= HAMMER_INODE_REFLUSH;
                if (flags & HAMMER_FLUSH_SIGNAL) {
                        ip->flags |= HAMMER_INODE_RESIGNAL;
                        hammer_flusher_async(ip->hmp, flg);
                }
                break;
        }
}
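
/*
 * Note on flush group sizing: a group is considered full once
 * flg->total_count + flg->refs exceeds hmp->undo_rec_limit, at which point
 * the group is handed to the flusher and a fresh next_flush_group is
 * allocated, bounding how much UNDO any single flush cycle can generate.
 */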
/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 *     so for now do not ref/deref the structures.  Note that if we use the
 *     ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
                           hammer_flush_group_t flg)
{
        hammer_record_t depend;
        int good;
        int r;

        /*
         * If we hit our recursion limit and we have parent dependencies
         * We cannot continue.  Returning < 0 will cause us to be flagged
         * for reflush.  Returning -2 cuts off additional dependency checks
         * because they are likely to also hit the depth limit.
         *
         * We cannot return < 0 if there are no dependencies or there might
         * not be anything to wakeup (ip).
         */
        if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
                kprintf("HAMMER Warning: depth limit reached on "
                        "setup recursion, inode %p %016llx\n",
                        ip, (long long)ip->obj_id);
                return(-2);
        }

        /*
         * Scan dependencies
         */
        good = 0;
        TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
                r = hammer_setup_parent_inodes_helper(depend, depth, flg);
                KKASSERT(depend->target_ip == ip);
                if (r < 0 && good == 0)
                        good = -1;
                if (r > 0)
                        good = 1;

                /*
                 * If we failed due to the recursion depth limit then stop
                 * now.
                 */
                if (r == -2)
                        break;
        }
        return(good);
}
/*
 * This helper function takes a record representing the dependancy between
 * the parent inode and child inode.
 *
 * record->ip		= parent inode
 * record->target_ip	= child inode
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependancy and there is no connectivity.
 */
static int
hammer_setup_parent_inodes_helper(hammer_record_t record, int depth,
                                  hammer_flush_group_t flg)
{
        hammer_inode_t pip;
        int good;

        KKASSERT(record->flush_state != HAMMER_FST_IDLE);
        pip = record->ip;

        /*
         * If the record is already flushing, is it in our flush group?
         *
         * If it is in our flush group but it is a general record or a
         * delete-on-disk, it does not improve our connectivity (return 0),
         * and if the target inode is not trying to destroy itself we can't
         * allow the operation yet anyway (the second return -1).
         */
        if (record->flush_state == HAMMER_FST_FLUSH) {
                /*
                 * If not in our flush group ask the parent to reflush
                 * us as soon as possible.
                 */
                if (record->flush_group != flg) {
                        pip->flags |= HAMMER_INODE_REFLUSH;
                        record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
                        return(-1);
                }

                /*
                 * If in our flush group everything is already set up,
                 * just return whether the record will improve our
                 * visibility or not.
                 */
                if (record->type == HAMMER_MEM_RECORD_ADD)
                        return(1);
                return(0);
        }

        /*
         * It must be a setup record.  Try to resolve the setup dependancies
         * by recursing upwards so we can place ip on the flush list.
         *
         * Limit ourselves to 20 levels of recursion to avoid blowing out
         * the kernel stack.  If we hit the recursion limit we can't flush
         * until the parent flushes.  The parent will flush independantly
         * on its own and ultimately a deep recursion will be resolved.
         */
        KKASSERT(record->flush_state == HAMMER_FST_SETUP);

        good = hammer_setup_parent_inodes(pip, depth + 1, flg);

        /*
         * If good < 0 the parent has no connectivity and we cannot safely
         * flush the directory entry, which also means we can't flush our
         * ip.  Flag us for downward recursion once the parent's
         * connectivity is resolved.  Flag the parent for [re]flush or it
         * may not check for downward recursions.
         */
        if (good < 0) {
                pip->flags |= HAMMER_INODE_REFLUSH;
                record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
                return(good);
        }

        /*
         * We are go, place the parent inode in a flushing state so we can
         * place its record in a flushing state.  Note that the parent
         * may already be flushing.  The record must be in the same flush
         * group as the parent.
         */
        if (pip->flush_state != HAMMER_FST_FLUSH)
                hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
        KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
        KKASSERT(record->flush_state == HAMMER_FST_SETUP);

        if (record->type == HAMMER_MEM_RECORD_DEL &&
            (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
                /*
                 * Regardless of flushing state we cannot sync this path if the
                 * record represents a delete-on-disk but the target inode
                 * is not ready to sync its own deletion.
                 *
                 * XXX need to count effective nlinks to determine whether
                 * the flush is ok, otherwise removing a hardlink will
                 * just leave the DEL record to rot.
                 */
                record->target_ip->flags |= HAMMER_INODE_REFLUSH;
                return(-1);
        }

        if (pip->flush_group == flg) {
                /*
                 * Because we have not calculated nlinks yet we can just
                 * set records to the flush state if the parent is in
                 * the same flush group as we are.
                 */
                record->flush_state = HAMMER_FST_FLUSH;
                record->flush_group = flg;
                ++record->flush_group->refs;
                hammer_ref(&record->lock);

                /*
                 * A general directory-add contributes to our visibility.
                 *
                 * Otherwise it is probably a directory-delete or
                 * delete-on-disk record and does not contribute to our
                 * visibility (but we can still flush it).
                 */
                if (record->type == HAMMER_MEM_RECORD_ADD)
                        return(1);
                return(0);
        } else {
                /*
                 * If the parent is not in our flush group we cannot
                 * flush this record yet, there is no visibility.
                 * We tell the parent to reflush and mark ourselves
                 * so the parent knows it should flush us too.
                 */
                pip->flags |= HAMMER_INODE_REFLUSH;
                record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
                return(-1);
        }
}
/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
{
        int go_count;

        /*
         * Set flush state and prevent the flusher from cycling into
         * the next flush group.  Do not place the ip on the list yet.
         * Inodes not in the idle state get an extra reference.
         */
        KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
        if (ip->flush_state == HAMMER_FST_IDLE)
                hammer_ref(&ip->lock);
        ip->flush_state = HAMMER_FST_FLUSH;
        ip->flush_group = flg;
        ++ip->hmp->flusher.group_lock;
        ++ip->hmp->count_iqueued;
        ++hammer_count_iqueued;
        ++flg->total_count;

        /*
         * If the flush group reaches the autoflush limit we want to signal
         * the flusher.  This is particularly important for remove()s.
         */
        if (flg->total_count == hammer_autoflush)
                flags |= HAMMER_FLUSH_SIGNAL;

        /*
         * We need to be able to vfsync/truncate from the backend.
         */
        KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
        if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
                ip->flags |= HAMMER_INODE_VHELD;
                vref(ip->vp);
        }

        /*
         * Figure out how many in-memory records we can actually flush
         * (not including inode meta-data, buffers, etc).
         */
        KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
        if (flags & HAMMER_FLUSH_RECURSION) {
                /*
                 * If this is an upwards recursion we do not want to
                 * recurse down again!
                 */
                go_count = 1;
        } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
                /*
                 * No new records are added if we must complete a flush
                 * from a previous cycle, but we do have to move the records
                 * from the previous cycle to the current one.
                 */
                go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
                                   hammer_syncgrp_child_callback, NULL);
                go_count = 1;
        } else {
                /*
                 * Normal flush, scan records and bring them into the flush.
                 * Directory adds and deletes are usually skipped (they are
                 * grouped with the related inode rather than with the
                 * directory).
                 *
                 * go_count can be negative, which means the scan aborted
                 * due to the flush group being over-full and we should
                 * flush what we have.
                 */
                go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
                                   hammer_setup_child_callback, NULL);
        }

        /*
         * This is a more involved test that includes go_count.  If we
         * can't flush, flag the inode and return.  If go_count is 0 we
         * are unable to flush any records in our rec_tree and
         * must ignore the XDIRTY flag.
         */
        if (go_count == 0) {
                if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
                        --ip->hmp->count_iqueued;
                        --hammer_count_iqueued;

                        --flg->total_count;
                        ip->flush_state = HAMMER_FST_SETUP;
                        ip->flush_group = NULL;
                        if (ip->flags & HAMMER_INODE_VHELD) {
                                ip->flags &= ~HAMMER_INODE_VHELD;
                                vrele(ip->vp);
                        }

                        /*
                         * REFLUSH is needed to trigger dependancy wakeups
                         * when an inode is in SETUP.
                         */
                        ip->flags |= HAMMER_INODE_REFLUSH;
                        if (flags & HAMMER_FLUSH_SIGNAL) {
                                ip->flags |= HAMMER_INODE_RESIGNAL;
                                hammer_flusher_async(ip->hmp, flg);
                        }
                        if (--ip->hmp->flusher.group_lock == 0)
                                wakeup(&ip->hmp->flusher.group_lock);
                        return;
                }
        }

        /*
         * Snapshot the state of the inode for the backend flusher.
         *
         * We continue to retain save_trunc_off even when all truncations
         * have been resolved as an optimization to determine if we can
         * skip the B-Tree lookup for overwrite deletions.
         *
         * NOTE: The DELETING flag is a mod flag, but it is also sticky,
         * and stays in ip->flags.  Once set, it stays set until the
         * inode is destroyed.
         */
        if (ip->flags & HAMMER_INODE_TRUNCATED) {
                KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
                ip->sync_trunc_off = ip->trunc_off;
                ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
                ip->flags &= ~HAMMER_INODE_TRUNCATED;
                ip->sync_flags |= HAMMER_INODE_TRUNCATED;

                /*
                 * The save_trunc_off used to cache whether the B-Tree
                 * holds any records past that point is not used until
                 * after the truncation has succeeded, so we can safely
                 * set it now.
                 */
                if (ip->save_trunc_off > ip->sync_trunc_off)
                        ip->save_trunc_off = ip->sync_trunc_off;
        }
        ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
                           ~HAMMER_INODE_TRUNCATED);
        ip->sync_ino_leaf = ip->ino_leaf;
        ip->sync_ino_data = ip->ino_data;
        ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
#ifdef DEBUG_TRUNCATE
        if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
                kprintf("truncateS %016llx\n", ip->sync_trunc_off);
#endif

        /*
         * The flusher list inherits our inode and reference.
         */
        KKASSERT(flg->running == 0);
        TAILQ_INSERT_TAIL(&flg->flush_list, ip, flush_entry);
        if (--ip->hmp->flusher.group_lock == 0)
                wakeup(&ip->hmp->flusher.group_lock);

        if (flags & HAMMER_FLUSH_SIGNAL) {
                hammer_flusher_async(ip->hmp, flg);
        }
}
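
/*
 * go_count semantics, summarized: a positive value means at least one record
 * was placed in the flush and the inode moves to FST_FLUSH; 0 with no other
 * dirty state aborts back to FST_SETUP; a negative value means the child
 * scan stopped early because the flush group filled up, but we still flush
 * what was gathered.
 */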

/*
 * Callback for scan of ip->rec_tree.  Try to include each record in our
 * flush.  ip->flush_group has been set but the inode has not yet been
 * moved into a flushing state.
 *
 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
 * both inodes.
 *
 * We return 1 for any record placed or found in FST_FLUSH, which prevents
 * the caller from shortcutting the flush.
 */
static int
hammer_setup_child_callback(hammer_record_t rec, void *data)
{
        hammer_flush_group_t flg;
        hammer_inode_t target_ip;
        hammer_inode_t ip;
        int r;

        /*
         * Records deleted or committed by the backend are ignored.
         * Note that the flush detects deleted frontend records at
         * multiple points to deal with races.  This is just the first
         * line of defense.  The only time HAMMER_RECF_DELETED_FE cannot
         * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it
         * messes up link-count calculations.
         *
         * NOTE: Don't get confused between record deletion and, say,
         * directory entry deletion.  The deletion of a directory entry
         * which is on-media has nothing to do with the record deletion
         * semantics.
         */
        if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
                          HAMMER_RECF_COMMITTED)) {
                if (rec->flush_state == HAMMER_FST_FLUSH) {
                        KKASSERT(rec->flush_group == rec->ip->flush_group);
                        r = 1;
                } else {
                        r = 0;
                }
                return(r);
        }

        /*
         * If the record is in an idle state it has no dependencies and
         * can be flushed.
         */
        ip = rec->ip;
        flg = ip->flush_group;
        r = 0;

        switch(rec->flush_state) {
        case HAMMER_FST_IDLE:
                /*
                 * The record has no setup dependency, we can flush it.
                 */
                KKASSERT(rec->target_ip == NULL);
                rec->flush_state = HAMMER_FST_FLUSH;
                rec->flush_group = flg;
                hammer_ref(&rec->lock);
                r = 1;
                break;
        case HAMMER_FST_SETUP:
                /*
                 * The record has a setup dependency.  These are typically
                 * directory entry adds and deletes.  Such entries will be
                 * flushed when their inodes are flushed so we do not
                 * usually have to add them to the flush here.  However,
                 * if the target_ip has set HAMMER_INODE_CONN_DOWN then
                 * it is asking us to flush this record (and it).
                 */
                target_ip = rec->target_ip;
                KKASSERT(target_ip != NULL);
                KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);

                /*
                 * If the target IP is already flushing in our group
                 * we could associate the record, but target_ip has
                 * already synced ino_data to sync_ino_data and we
                 * would also have to adjust nlinks.  Plus there are
                 * ordering issues for adds and deletes.
                 *
                 * Reflush downward if this is an ADD, and upward if
                 * this is a DEL.
                 */
                if (target_ip->flush_state == HAMMER_FST_FLUSH) {
                        if (rec->flush_state == HAMMER_MEM_RECORD_ADD)
                                ip->flags |= HAMMER_INODE_REFLUSH;
                        else
                                target_ip->flags |= HAMMER_INODE_REFLUSH;
                        break;
                }

                /*
                 * Target IP is not yet flushing.  This can get complex
                 * because we have to be careful about the recursion.
                 *
                 * Directories create an issue for us in that if a flush
                 * of a directory is requested the expectation is to flush
                 * any pending directory entries, but this will cause the
                 * related inodes to recursively flush as well.  We can't
                 * really defer the operation so just get as many as we
                 * can.
                 */
                if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
                    (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
                        /*
                         * We aren't reclaiming and the target ip was not
                         * previously prevented from flushing due to this
                         * record dependency.  Do not flush this record.
                         */
                } else if (flg->total_count + flg->refs >
                           ip->hmp->undo_rec_limit) {
                        /*
                         * Our flush group is over-full and we risk blowing
                         * out the UNDO FIFO.  Stop the scan, flush what we
                         * have, then reflush the directory.
                         *
                         * The directory may be forced through multiple
                         * flush groups before it can be completely
                         * flushed.
                         */
                        ip->flags |= HAMMER_INODE_RESIGNAL |
                                     HAMMER_INODE_REFLUSH;
                        r = -1;
                } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
                        /*
                         * If the target IP is not flushing we can force
                         * it to flush, even if it is unable to write out
                         * any of its own records we have at least one in
                         * hand that we CAN deal with.
                         */
                        rec->flush_state = HAMMER_FST_FLUSH;
                        rec->flush_group = flg;
                        hammer_ref(&rec->lock);
                        hammer_flush_inode_core(target_ip, flg,
                                                HAMMER_FLUSH_RECURSION);
                        r = 1;
                } else {
                        /*
                         * General or delete-on-disk record.
                         *
                         * XXX this needs help.  If a delete-on-disk we could
                         * disconnect the target.  If the target has its own
                         * dependencies they really need to be flushed.
                         */
                        rec->flush_state = HAMMER_FST_FLUSH;
                        rec->flush_group = flg;
                        hammer_ref(&rec->lock);
                        hammer_flush_inode_core(target_ip, flg,
                                                HAMMER_FLUSH_RECURSION);
                        r = 1;
                }
                break;
        case HAMMER_FST_FLUSH:
                /*
                 * The flush_group should already match.
                 */
                KKASSERT(rec->flush_group == flg);
                r = 1;
                break;
        }
        return(r);
}
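
/*
 * Illustration (not part of the original source): a child callback such as
 * the one above is driven over the inode's in-memory record tree by the
 * flush setup path, typically with something along the lines of
 *
 *      go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
 *                         hammer_setup_child_callback, NULL);
 *
 * Under that convention a return of 1 marks a record placed or found in
 * FST_FLUSH (so the caller cannot shortcut the flush), 0 leaves the record
 * for a later flush group, and the negative return used in the over-full
 * case stops the scan early.  The caller and variable names shown here are
 * illustrative only.
 */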

/*
 * This version just moves records already in a flush state to the new
 * flush group and that is it.
 */
static int
hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
{
        hammer_inode_t ip = rec->ip;

        switch(rec->flush_state) {
        case HAMMER_FST_FLUSH:
                KKASSERT(rec->flush_group == ip->flush_group);
                break;
        default:
                break;
        }
        return(0);
}

/*
 * Wait for a previously queued flush to complete.
 *
 * If a critical error occurred we don't try to wait.
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
        hammer_flush_group_t flg;

        if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
                while (ip->flush_state != HAMMER_FST_IDLE &&
                       (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
                        if (ip->flush_state == HAMMER_FST_SETUP)
                                hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                        if (ip->flush_state != HAMMER_FST_IDLE) {
                                ip->flags |= HAMMER_INODE_FLUSHW;
                                tsleep(&ip->flags, 0, "hmrwin", 0);
                        }
                }
        }
}

/*
 * Called by the backend code when a flush has been completed.
 * The inode has already been removed from the flush list.
 *
 * A pipelined flush can occur, in which case we must re-enter the
 * inode on the list and re-copy its fields.
 */
void
hammer_flush_inode_done(hammer_inode_t ip, int error)
{
        hammer_mount_t hmp;
        int dorel;

        KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

        hmp = ip->hmp;

        /*
         * Auto-reflush if the backend could not completely flush
         * the inode.  This fixes a case where a deferred buffer flush
         * could cause fsync to return early.
         */
        if (ip->sync_flags & HAMMER_INODE_MODMASK)
                ip->flags |= HAMMER_INODE_REFLUSH;

        /*
         * Merge left-over flags back into the frontend and fix the state.
         * Incomplete truncations are retained by the backend.
         */
        ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
        ip->sync_flags &= HAMMER_INODE_TRUNCATED;

        /*
         * The backend may have adjusted nlinks, so if the adjusted nlinks
         * does not match the frontend set the frontend's RDIRTY flag again.
         */
        if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
                ip->flags |= HAMMER_INODE_DDIRTY;

        /*
         * Fix up the dirty buffer status.
         */
        if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
                ip->flags |= HAMMER_INODE_BUFS;
        }

        /*
         * Re-set the XDIRTY flag if some of the inode's in-memory records
         * could not be flushed.
         */
        KKASSERT((RB_EMPTY(&ip->rec_tree) &&
                  (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
                 (!RB_EMPTY(&ip->rec_tree) &&
                  (ip->flags & HAMMER_INODE_XDIRTY) != 0));

        /*
         * Do not lose track of inodes which no longer have vnode
         * associations, otherwise they may never get flushed again.
         *
         * The reflush flag can be set superfluously, causing extra pain
         * for no reason.  If the inode is no longer modified it no longer
         * needs to be flushed.
         */
        if (ip->flags & HAMMER_INODE_MODMASK) {
                if (ip->vp == NULL)
                        ip->flags |= HAMMER_INODE_REFLUSH;
        } else {
                ip->flags &= ~HAMMER_INODE_REFLUSH;
        }

        /*
         * Adjust the flush state.
         */
        if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
                /*
                 * We were unable to flush out all our records, leave the
                 * inode in a flush state and in the current flush group.
                 * The flush group will be re-run.
                 *
                 * This occurs if the UNDO block gets too full or there is
                 * too much dirty meta-data and allows the flusher to
                 * finalize the UNDO block and then re-flush.
                 */
                ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
                dorel = 0;
        } else {
                /*
                 * Remove from the flush_group
                 */
                TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
                ip->flush_group = NULL;

                /*
                 * Clean up the vnode ref and tracking counts.
                 */
                if (ip->flags & HAMMER_INODE_VHELD) {
                        ip->flags &= ~HAMMER_INODE_VHELD;
                        vrele(ip->vp);
                }
                --hmp->count_iqueued;
                --hammer_count_iqueued;

                /*
                 * And adjust the state.
                 */
                if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
                        ip->flush_state = HAMMER_FST_IDLE;
                        dorel = 1;
                } else {
                        ip->flush_state = HAMMER_FST_SETUP;
                        dorel = 0;
                }

                /*
                 * If the frontend is waiting for a flush to complete,
                 * wake it up.
                 */
                if (ip->flags & HAMMER_INODE_FLUSHW) {
                        ip->flags &= ~HAMMER_INODE_FLUSHW;
                        wakeup(&ip->flags);
                }

                /*
                 * If the frontend made more changes and requested another
                 * flush, then try to get it running.
                 *
                 * Reflushes are aborted when the inode is errored out.
                 */
                if (ip->flags & HAMMER_INODE_REFLUSH) {
                        ip->flags &= ~HAMMER_INODE_REFLUSH;
                        if (ip->flags & HAMMER_INODE_RESIGNAL) {
                                ip->flags &= ~HAMMER_INODE_RESIGNAL;
                                hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                        } else {
                                hammer_flush_inode(ip, 0);
                        }
                }
        }

        /*
         * If we have no parent dependencies we can clear CONN_DOWN
         */
        if (TAILQ_EMPTY(&ip->target_list))
                ip->flags &= ~HAMMER_INODE_CONN_DOWN;

        /*
         * If the inode is now clean drop the space reservation.
         */
        if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
            (ip->flags & HAMMER_INODE_RSV_INODES)) {
                ip->flags &= ~HAMMER_INODE_RSV_INODES;
                --hmp->rsv_inodes;
        }

        if (dorel)
                hammer_rel_inode(ip, 0);
}

/*
 * Called from hammer_sync_inode() to synchronize in-memory records
 * to the media.
 */
static int
hammer_sync_record_callback(hammer_record_t record, void *data)
{
        hammer_cursor_t cursor = data;
        hammer_transaction_t trans = cursor->trans;
        hammer_mount_t hmp = trans->hmp;
        int error;

        /*
         * Skip records that do not belong to the current flush.
         */
        ++hammer_stats_record_iterations;
        if (record->flush_state != HAMMER_FST_FLUSH)
                return(0);

        if (record->flush_group != record->ip->flush_group) {
                kprintf("sync_record %p ip %p bad flush group %p %p\n",
                        record, record->ip, record->flush_group,
                        record->ip->flush_group);
                return(0);
        }
        KKASSERT(record->flush_group == record->ip->flush_group);

        /*
         * Interlock the record using the BE flag.  Once BE is set the
         * frontend cannot change the state of FE.
         *
         * NOTE: If FE is set prior to us setting BE we still sync the
         * record out, but the flush completion code converts it to
         * a delete-on-disk record instead of destroying it.
         */
        KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
        record->flags |= HAMMER_RECF_INTERLOCK_BE;

        /*
         * The backend has already disposed of the record.
         */
        if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
                error = 0;
                goto done;
        }

        /*
         * If the whole inode is being deleted all on-disk records will
         * be deleted very soon, we can't sync any new records to disk
         * because they will be deleted in the same transaction they were
         * created in (delete_tid == create_tid), which will assert.
         *
         * XXX There may be a case with RECORD_ADD with DELETED_FE set
         * that we currently panic on.
         */
        if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
                switch(record->type) {
                case HAMMER_MEM_RECORD_DATA:
                        /*
                         * We don't have to do anything, if the record was
                         * committed the space will have been accounted for
                         * anyway.
                         */
                        /* fall through */
                case HAMMER_MEM_RECORD_GENERAL:
                        /*
                         * Set deleted-by-backend flag.  Do not set the
                         * backend committed flag, because we are throwing
                         * the record away.
                         */
                        record->flags |= HAMMER_RECF_DELETED_BE;
                        ++record->ip->rec_generation;
                        error = 0;
                        goto done;
                case HAMMER_MEM_RECORD_ADD:
                        panic("hammer_sync_record_callback: illegal add "
                              "during inode deletion record %p", record);
                        break; /* NOT REACHED */
                case HAMMER_MEM_RECORD_INODE:
                        panic("hammer_sync_record_callback: attempt to "
                              "sync inode record %p?", record);
                        break; /* NOT REACHED */
                case HAMMER_MEM_RECORD_DEL:
                        /*
                         * Follow through and issue the on-disk deletion
                         */
                        break;
                }
        }

        /*
         * If DELETED_FE is set special handling is needed for directory
         * entries.  Dependent pieces related to the directory entry may
         * have already been synced to disk.  If this occurs we have to
         * sync the directory entry and then change the in-memory record
         * from an ADD to a DELETE to cover the fact that it's been
         * deleted by the frontend.
         *
         * A directory delete covering record (MEM_RECORD_DEL) can never
         * be deleted by the frontend.
         *
         * Any other record type (aka DATA) can be deleted by the frontend.
         * XXX At the moment the flusher must skip it because there may
         * be another data record in the flush group for the same block,
         * meaning that some frontend data changes can leak into the backend's
         * synchronization point.
         */
        if (record->flags & HAMMER_RECF_DELETED_FE) {
                if (record->type == HAMMER_MEM_RECORD_ADD) {
                        /*
                         * Convert a front-end deleted directory-add to
                         * a directory-delete entry later.
                         */
                        record->flags |= HAMMER_RECF_CONVERT_DELETE;
                } else {
                        /*
                         * Dispose of the record (race case).  Mark as
                         * deleted by backend (and not committed).
                         */
                        KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
                        record->flags |= HAMMER_RECF_DELETED_BE;
                        ++record->ip->rec_generation;
                        error = 0;
                        goto done;
                }
        }

        /*
         * Assign the create_tid for new records.  Deletions already
         * have the record's entire key properly set up.
         */
        if (record->type != HAMMER_MEM_RECORD_DEL) {
                record->leaf.base.create_tid = trans->tid;
                record->leaf.create_ts = trans->time32;
        }
        for (;;) {
                error = hammer_ip_sync_record_cursor(cursor, record);
                if (error != EDEADLK)
                        break;
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
                                           record->ip);
                if (error)
                        break;
        }
        record->flags &= ~HAMMER_RECF_CONVERT_DELETE;

done:
        hammer_flush_record_done(record, error);

        /*
         * Do partial finalization if we have built up too many dirty
         * buffers.  Otherwise a buffer cache deadlock can occur when
         * doing things like creating tens of thousands of tiny files.
         *
         * We must release our cursor lock to avoid a 3-way deadlock
         * due to the exclusive sync lock the finalizer must get.
         */
        if (hammer_flusher_meta_limit(hmp)) {
                hammer_unlock_cursor(cursor);
                hammer_flusher_finalize(trans, 0);
                hammer_lock_cursor(cursor);
        }

        return(error);
}
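
/*
 * Illustration (not part of the original source): the EDEADLK handling
 * above is the general HAMMER cursor retry idiom.  A B-Tree cursor that
 * loses a deadlock race is torn down and rebuilt before the operation is
 * retried, e.g.:
 *
 *      for (;;) {
 *              error = some_btree_operation(cursor, ...);
 *              if (error != EDEADLK)
 *                      break;
 *              hammer_done_cursor(cursor);
 *              error = hammer_init_cursor(trans, cursor, cache, ip);
 *              if (error)
 *                      break;
 *      }
 *
 * some_btree_operation() and the cache/ip arguments are placeholders; only
 * the retry structure is the point of the sketch.
 */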

/*
 * Backend function called by the flusher to sync an inode to media.
 */
int
hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
{
        struct hammer_cursor cursor;
        hammer_node_t tmp_node;
        hammer_record_t depend;
        hammer_record_t next;
        int error, tmp_error;
        u_int64_t nlinks;

        if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
                return(0);

        error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
        if (error)
                return(error);

        /*
         * Any directory records referencing this inode which are not in
         * our current flush group must adjust our nlink count for the
         * purposes of synchronization to disk.
         *
         * Records which are in our flush group can be unlinked from our
         * inode now, potentially allowing the inode to be physically
         * deleted.
         *
         * This cannot block.
         */
        nlinks = ip->ino_data.nlinks;
        next = TAILQ_FIRST(&ip->target_list);
        while ((depend = next) != NULL) {
                next = TAILQ_NEXT(depend, target_entry);
                if (depend->flush_state == HAMMER_FST_FLUSH &&
                    depend->flush_group == ip->flush_group) {
                        /*
                         * If this is an ADD that was deleted by the frontend
                         * the frontend nlinks count will have already been
                         * decremented, but the backend is going to sync its
                         * directory entry and must account for it.  The
                         * record will be converted to a delete-on-disk when
                         * it gets synced.
                         *
                         * If the ADD was not deleted by the frontend we
                         * can remove the dependency from our target_list.
                         */
                        if (depend->flags & HAMMER_RECF_DELETED_FE) {
                                ++nlinks;
                        } else {
                                TAILQ_REMOVE(&ip->target_list, depend,
                                             target_entry);
                                depend->target_ip = NULL;
                        }
                } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
                        /*
                         * Not part of our flush group and not deleted by
                         * the front-end, adjust the link count synced to
                         * the media (undo what the frontend did when it
                         * queued the record).
                         */
                        KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
                        switch(depend->type) {
                        case HAMMER_MEM_RECORD_ADD:
                                --nlinks;
                                break;
                        case HAMMER_MEM_RECORD_DEL:
                                ++nlinks;
                                break;
                        default:
                                break;
                        }
                }
        }

        /*
         * Set dirty if we had to modify the link count.
         */
        if (ip->sync_ino_data.nlinks != nlinks) {
                KKASSERT((int64_t)nlinks >= 0);
                ip->sync_ino_data.nlinks = nlinks;
                ip->sync_flags |= HAMMER_INODE_DDIRTY;
        }

        /*
         * If there is a truncation queued destroy any data past the (aligned)
         * truncation point.  Userland will have dealt with the buffer
         * containing the truncation point for us.
         *
         * We don't flush pending frontend data buffers until after we've
         * dealt with the truncation.
         */
        if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
                /*
                 * Interlock trunc_off.  The VOP front-end may continue to
                 * make adjustments to it while we are blocked.
                 */
                off_t trunc_off;
                off_t aligned_trunc_off;
                int blkmask;

                trunc_off = ip->sync_trunc_off;
                blkmask = hammer_blocksize(trunc_off) - 1;
                aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;

                /*
                 * Delete any whole blocks on-media.  The front-end has
                 * already cleaned out any partial block and made it
                 * pending.  The front-end may have updated trunc_off
                 * while we were blocked so we only use sync_trunc_off.
                 *
                 * This operation can blow out the buffer cache, EWOULDBLOCK
                 * means we were unable to complete the deletion.  The
                 * deletion will update sync_trunc_off in that case.
                 */
                error = hammer_ip_delete_range(&cursor, ip,
                                               aligned_trunc_off,
                                               0x7FFFFFFFFFFFFFFFLL, 2);
                if (error == EWOULDBLOCK) {
                        ip->flags |= HAMMER_INODE_WOULDBLOCK;
                        error = 0;
                        goto defer_buffer_flush;
                }

                /*
                 * Clear the truncation flag on the backend after we have
                 * completed the deletions.  Backend data is now good again
                 * (including new records we are about to sync, below).
                 *
                 * Leave sync_trunc_off intact.  As we write additional
                 * records the backend will update sync_trunc_off.  This
                 * tells the backend whether it can skip the overwrite
                 * test.  This should work properly even when the backend
                 * writes full blocks where the truncation point straddles
                 * the block because the comparison is against the base
                 * offset of the record.
                 */
                ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
                /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
        } else {
                error = 0;
        }

        /*
         * Now sync related records.  These will typically be directory
         * entries, records tracking direct-writes, or delete-on-disk records.
         */
        if (error == 0) {
                tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
                                    hammer_sync_record_callback, &cursor);
                if (tmp_error)
                        error = tmp_error;
        }
        hammer_cache_node(&ip->cache[1], cursor.node);

        /*
         * Re-seek for inode update, assuming our cache hasn't been ripped
         * out from under us.
         */
        if (error == 0) {
                tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
                if (tmp_node) {
                        hammer_cursor_downgrade(&cursor);
                        hammer_lock_sh(&tmp_node->lock);
                        if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
                                hammer_cursor_seek(&cursor, tmp_node, 0);
                        hammer_unlock(&tmp_node->lock);
                        hammer_rel_node(tmp_node);
                }
                error = 0;
        }

        /*
         * If we are deleting the inode the frontend had better not have
         * any active references on elements making up the inode.
         *
         * The call to hammer_ip_delete_clean() cleans up auxiliary records
         * but not DB or DATA records.  Those must have already been deleted
         * by the normal truncation mechanic.
         */
        if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
            RB_EMPTY(&ip->rec_tree) &&
            (ip->sync_flags & HAMMER_INODE_DELETING) &&
            (ip->flags & HAMMER_INODE_DELETED) == 0) {
                int count1 = 0;

                error = hammer_ip_delete_clean(&cursor, ip, &count1);
                if (error == 0) {
                        ip->flags |= HAMMER_INODE_DELETED;
                        ip->sync_flags &= ~HAMMER_INODE_DELETING;
                        ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
                        KKASSERT(RB_EMPTY(&ip->rec_tree));

                        /*
                         * Set delete_tid in both the frontend and backend
                         * copy of the inode record.  The DELETED flag handles
                         * this, do not set RDIRTY.
                         */
                        ip->ino_leaf.base.delete_tid = trans->tid;
                        ip->sync_ino_leaf.base.delete_tid = trans->tid;
                        ip->ino_leaf.delete_ts = trans->time32;
                        ip->sync_ino_leaf.delete_ts = trans->time32;

                        /*
                         * Adjust the inode count in the volume header
                         */
                        hammer_sync_lock_sh(trans);
                        if (ip->flags & HAMMER_INODE_ONDISK) {
                                hammer_modify_volume_field(trans,
                                                           trans->rootvol,
                                                           vol0_stat_inodes);
                                --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                                hammer_modify_volume_done(trans->rootvol);
                        }
                        hammer_sync_unlock(trans);
                }
        }

        ip->sync_flags &= ~HAMMER_INODE_BUFS;

defer_buffer_flush:
        /*
         * Now update the inode's on-disk inode-data and/or on-disk record.
         * DELETED and ONDISK are managed only in ip->flags.
         *
         * In the case of a deferred buffer flush we still update the on-disk
         * inode to satisfy visibility requirements if there happen to be
         * directory dependencies.
         */
        switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
        case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
                /*
                 * If deleted and on-disk, don't set any additional flags.
                 * the delete flag takes care of things.
                 *
                 * Clear flags which may have been set by the frontend.
                 */
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
                                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
                                    HAMMER_INODE_DELETING);
                break;
        case HAMMER_INODE_DELETED:
                /*
                 * Take care of the case where a deleted inode was never
                 * flushed to the disk in the first place.
                 *
                 * Clear flags which may have been set by the frontend.
                 */
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
                                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
                                    HAMMER_INODE_DELETING);
                while (RB_ROOT(&ip->rec_tree)) {
                        hammer_record_t record = RB_ROOT(&ip->rec_tree);
                        hammer_ref(&record->lock);
                        KKASSERT(record->lock.refs == 1);
                        record->flags |= HAMMER_RECF_DELETED_BE;
                        ++record->ip->rec_generation;
                        hammer_rel_mem_record(record);
                }
                break;
        case HAMMER_INODE_ONDISK:
                /*
                 * If already on-disk, do not set any additional flags.
                 */
                break;
        default:
                /*
                 * If not on-disk and not deleted, set DDIRTY to force
                 * an initial record to be written.
                 *
                 * Also set the create_tid in both the frontend and backend
                 * copy of the inode record.
                 */
                ip->ino_leaf.base.create_tid = trans->tid;
                ip->ino_leaf.create_ts = trans->time32;
                ip->sync_ino_leaf.base.create_tid = trans->tid;
                ip->sync_ino_leaf.create_ts = trans->time32;
                ip->sync_flags |= HAMMER_INODE_DDIRTY;
                break;
        }

        /*
         * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
         * is already on-disk the old record is marked as deleted.
         *
         * If DELETED is set hammer_update_inode() will delete the existing
         * record without writing out a new one.
         *
         * If *ONLY* the ITIMES flag is set we can update the record in-place.
         */
        if (ip->flags & HAMMER_INODE_DELETED) {
                error = hammer_update_inode(&cursor, ip);
        } else
        if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
            (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
                error = hammer_update_itimes(&cursor, ip);
        } else
        if (ip->sync_flags & (HAMMER_INODE_DDIRTY |
                              HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
                error = hammer_update_inode(&cursor, ip);
        }
        if (error) {
                hammer_critical_error(ip->hmp, ip, error,
                                      "while syncing inode");
        }
        hammer_done_cursor(&cursor);
        return(error);
}
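
/*
 * Illustration (not part of the original source): the truncation alignment
 * above rounds the frontend's truncation offset up to the next block
 * boundary before deleting whole blocks on-media.  For example, if
 * hammer_blocksize() reports a 16KiB block for the offset in question and
 * sync_trunc_off is 20480 (0x5000):
 *
 *      blkmask           = 16384 - 1                    = 0x3FFF
 *      aligned_trunc_off = (0x5000 + 0x3FFF) & ~0x3FFF  = 0x8000 (32768)
 *
 * so hammer_ip_delete_range() only removes records from 32768 onward, while
 * the partially truncated block below that boundary was already handled by
 * the frontend.  The numbers are illustrative only.
 */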

/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
        struct vnode *vp;

        /*
         * Set the DELETING flag when the link count drops to 0 and the
         * OS no longer has any opens on the inode.
         *
         * The backend will clear DELETING (a mod flag) and set DELETED
         * (a state flag) when it is actually able to perform the
         * operation.
         *
         * Don't reflag the deletion if the flusher is currently syncing
         * one that was already flagged.  A previously set DELETING flag
         * may bounce around flags and sync_flags until the operation is
         * completely done.
         */
        if (ip->ino_data.nlinks == 0 &&
            ((ip->flags | ip->sync_flags) &
             (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
                ip->flags |= HAMMER_INODE_DELETING;
                ip->flags |= HAMMER_INODE_TRUNCATED;
                ip->trunc_off = 0;
                vp = NULL;
                if (getvp) {
                        if (hammer_get_vnode(ip, &vp) != 0)
                                return;
                }

                if (ip->vp) {
                        vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
                        vnode_pager_setsize(ip->vp, 0);
                }
                if (getvp) {
                        vput(vp);
                }
        }
}
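
/*
 * Illustration (not part of the original source): the getvp argument simply
 * tells the routine whether it must obtain its own vnode reference before
 * clearing out the buffers.  A caller that already holds the vnode (for
 * example an inactive-vnode path) would pass 0, while a caller dropping the
 * last inode reference without a vnode in hand would pass 1 so that the
 * hammer_get_vnode()/vput() pair above runs.  The specific callers named
 * here are illustrative.
 */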

/*
 * After potentially resolving a dependency the inode is tested
 * to determine whether it needs to be reflushed.
 */
void
hammer_test_inode(hammer_inode_t ip)
{
        if (ip->flags & HAMMER_INODE_REFLUSH) {
                ip->flags &= ~HAMMER_INODE_REFLUSH;
                hammer_ref(&ip->lock);
                if (ip->flags & HAMMER_INODE_RESIGNAL) {
                        ip->flags &= ~HAMMER_INODE_RESIGNAL;
                        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                } else {
                        hammer_flush_inode(ip, 0);
                }
                hammer_rel_inode(ip, 0);
        }
}

/*
 * Clear the RECLAIM flag on an inode.  This occurs when the inode is
 * reassociated with a vp or just before it gets freed.
 *
 * Pipeline wakeups to threads blocked due to an excessive number of
 * detached inodes.  The reclaim count generates a bit of negative
 * feedback.
 */
static void
hammer_inode_wakereclaims(hammer_inode_t ip, int dowake)
{
        struct hammer_reclaim *reclaim;
        hammer_mount_t hmp = ip->hmp;

        if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
                return;

        --hammer_count_reclaiming;
        --hmp->inode_reclaims;
        ip->flags &= ~HAMMER_INODE_RECLAIM;

        if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT || dowake) {
                reclaim = TAILQ_FIRST(&hmp->reclaim_list);
                if (reclaim && reclaim->count > 0 && --reclaim->count == 0) {
                        TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
                        wakeup(reclaim);
                }
        }
}

/*
 * Setup our reclaim pipeline.  We only let so many detached (and dirty)
 * inodes build up before we start blocking.
 *
 * When we block we don't care *which* inode has finished reclaiming,
 * as long as one does.  This is somewhat heuristic... we also put a
 * cap on how long we are willing to wait.
 */
void
hammer_inode_waitreclaims(hammer_mount_t hmp)
{
        struct hammer_reclaim reclaim;
        int delay;

        if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT)
                return;
        delay = (hmp->inode_reclaims - HAMMER_RECLAIM_WAIT) * hz /
                (HAMMER_RECLAIM_WAIT * 3) + 1;
        reclaim.count = 1;
        TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
        tsleep(&reclaim, 0, "hmrrcm", delay);
        if (reclaim.count > 0)
                TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
}

/*
 * A larger than normal backlog of inodes is sitting in the flusher,
 * enforce a general slowdown to let it catch up.  This routine is only
 * called on completion of a non-flusher-related transaction which
 * performed B-Tree node I/O.
 *
 * It is possible for the flusher to stall in a continuous load.
 * blogbench -i1000 -o seems to do a good job generating this sort of load.
 * If the flusher is unable to catch up the inode count can bloat until
 * we run out of kvm.
 *
 * This is a bit of a hack.
 */
void
hammer_inode_waithard(hammer_mount_t hmp)
{
        /*
         * Hysteresis.
         */
        if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
                if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT / 2 &&
                    hmp->count_iqueued < hmp->count_inodes / 20) {
                        hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
                        return;
                }
        } else {
                if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT ||
                    hmp->count_iqueued < hmp->count_inodes / 10) {
                        return;
                }
                hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY;
        }

        /*
         * Block for one flush cycle.
         */
        hammer_flusher_wait_next(hmp);
}
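
/*
 * Illustration (not part of the original source): the two threshold pairs
 * above form the hysteresis.  With, say, 10000 in-memory inodes the
 * slowdown does not engage until at least 1000 of them are queued to the
 * flusher (count_inodes / 10) and the reclaim backlog has reached
 * HAMMER_RECLAIM_WAIT; once HAMMER_MOUNT_FLUSH_RECOVERY is set it stays set
 * until the queue drops below 500 (count_inodes / 20) and the backlog falls
 * below half the wait threshold, preventing the mode from flapping on and
 * off.  The inode counts are example numbers only.
 */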