/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.76 2008/06/17 04:02:38 dillon Exp $
 */
#include "hammer.h"
#include <vm/vm_extern.h>
static int hammer_unload_inode(struct hammer_inode *ip);
static void hammer_flush_inode_core(hammer_inode_t ip, int flags);
static int hammer_setup_child_callback(hammer_record_t rec, void *data);
static int hammer_setup_parent_inodes(hammer_inode_t ip);
static int hammer_setup_parent_inodes_helper(hammer_record_t record);
static void hammer_inode_wakereclaims(hammer_inode_t ip);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);
        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(ap->a_vp);
                return(0);
        }

        /*
         * If the inode no longer has visibility in the filesystem and is
         * fairly clean, try to recycle it immediately.  This can deadlock
         * in vfsync() if we aren't careful.
         *
         * Do not queue the inode to the flusher if we still have visibility,
         * otherwise namespace calls such as chmod will unnecessarily generate
         * multiple inode updates.
         */
        hammer_inode_unloadable_check(ip, 0);
        if (ip->ino_data.nlinks == 0) {
                if (ip->flags & HAMMER_INODE_MODMASK)
                        hammer_flush_inode(ip, 0);
                else
                        vrecycle(ap->a_vp);
        }
        return(0);
}
/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_reclaim reclaim;
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct vnode *vp;
        int delay;
        vp = ap->a_vp;

        if ((ip = vp->v_data) != NULL) {
                hmp = ip->hmp;
                vp->v_data = NULL;
                ip->vp = NULL;

                /*
                 * Setup our reclaim pipeline.  We only let so many detached
                 * (and dirty) inodes build up before we start blocking.  Do
                 * not bother tracking the immediate increment/decrement if
                 * the inode is not actually dirty.
                 *
                 * When we block we don't care *which* inode has finished
                 * reclaiming, as long as one does.
                 */
                if ((ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
                    ((ip->flags|ip->sync_flags) & HAMMER_INODE_MODMASK)) {
                        ++hammer_count_reclaiming;
                        ++hmp->inode_reclaims;
                        ip->flags |= HAMMER_INODE_RECLAIM;
                        if (hmp->inode_reclaims > HAMMER_RECLAIM_PIPESIZE) {
                                reclaim.okydoky = 0;
                                TAILQ_INSERT_TAIL(&hmp->reclaim_list,
                                                  &reclaim, entry);
                        } else {
                                reclaim.okydoky = 1;
                        }
                } else {
                        reclaim.okydoky = 1;
                }
                hammer_rel_inode(ip, 1);
                /*
                 * Reclaim pipeline.  We can't let too many reclaimed inodes
                 * build-up in the flusher or the flusher loses its locality
                 * of reference, or worse blows out our memory.  Once we have
                 * exceeded the reclaim pipe size start slowing down.  Our
                 * imposed delay can be cut short if the flusher catches up
                 * to us.
                 */
                if (reclaim.okydoky == 0) {
                        delay = (hmp->inode_reclaims -
                                 HAMMER_RECLAIM_PIPESIZE) * hz /
                                HAMMER_RECLAIM_PIPESIZE;
                        if (delay <= 0)
                                delay = 1;
                        hammer_flusher_async(hmp);
                        if (reclaim.okydoky == 0) {
                                tsleep(&reclaim, 0, "hmrrcm", delay);
                        }
                        if (reclaim.okydoky == 0) {
                                TAILQ_REMOVE(&hmp->reclaim_list, &reclaim,
                                             entry);
                        }
                }
        }
        return(0);
}
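
/*
 * Illustration of the delay computation above (the actual value of
 * HAMMER_RECLAIM_PIPESIZE lives in hammer.h; 1000 is used here purely
 * as an example):  with a pipe size of 1000 and 1500 inodes currently
 * undergoing reclamation,
 *
 *      delay = (1500 - 1000) * hz / 1000 = hz / 2
 *
 * i.e. we sleep for up to half a second, and the tsleep() is cut short
 * early if the flusher finishes reclaiming an inode first.
 */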
/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
        hammer_mount_t hmp;
        struct vnode *vp;
        int error = 0;

        hmp = ip->hmp;

        for (;;) {
                if ((vp = ip->vp) == NULL) {
                        error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
                        if (error)
                                break;
                        hammer_lock_ex(&ip->lock);
                        if (ip->vp != NULL) {
                                hammer_unlock(&ip->lock);
                                vp = *vpp;
                                vp->v_type = VBAD;
                                vx_put(vp);
                                continue;
                        }
                        hammer_ref(&ip->lock);
                        vp = *vpp;
                        ip->vp = vp;
                        vp->v_type =
                                hammer_get_vnode_type(ip->ino_data.obj_type);

                        hammer_inode_wakereclaims(ip);

                        switch(ip->ino_data.obj_type) {
                        case HAMMER_OBJTYPE_CDEV:
                        case HAMMER_OBJTYPE_BDEV:
                                vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
                                addaliasu(vp, ip->ino_data.rmajor,
                                          ip->ino_data.rminor);
                                break;
                        case HAMMER_OBJTYPE_FIFO:
                                vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
                                break;
                        default:
                                break;
                        }

                        /*
                         * Only mark as the root vnode if the ip is not
                         * historical, otherwise the VFS cache will get
                         * confused.  The other half of the special handling
                         * is in hammer_vop_nlookupdotdot().
                         */
                        if (ip->obj_id == HAMMER_OBJID_ROOT &&
                            ip->obj_asof == hmp->asof) {
                                vp->v_flag |= VROOT;
                        }

                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
                        /* make related vnode dirty if inode dirty? */
                        hammer_unlock(&ip->lock);
                        if (vp->v_type == VREG)
                                vinitvmio(vp, ip->ino_data.size);
                        break;
                }

                /*
                 * loop if the vget fails (aka races), or if the vp
                 * no longer matches ip->vp.
                 */
                if (vget(vp, LK_EXCLUSIVE) == 0) {
                        if (vp == ip->vp)
                                break;
                        vput(vp);
                }
        }
        *vpp = vp;
        return(error);
}
/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, struct hammer_node **cache,
                 u_int64_t obj_id, hammer_tid_t asof, int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
loop:
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                hammer_ref(&ip->lock);
                *errorp = 0;
                return(ip);
        }
        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->hmp = hmp;
        ip->flags = flags & HAMMER_INODE_RO;
        if (hmp->ronly)
                ip->flags |= HAMMER_INODE_RO;
        ip->sync_trunc_off = ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        /*
         * Locate the on-disk inode.
         */
retry:
        hammer_init_cursor(trans, &cursor, cache, NULL);
        cursor.key_beg.localization = HAMMER_LOCALIZE_INODE;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.key = 0;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor.key_beg.obj_type = 0;
        cursor.asof = iinfo.obj_asof;
        cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
                       HAMMER_CURSOR_ASOF;

        *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }
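
/*
 * Note on the lookup above: inodes are keyed by (localization, obj_id,
 * rec_type) and resolved as-of a transaction id.  Because cursor.asof is
 * set from the inode's own asof and HAMMER_CURSOR_ASOF is passed, the
 * same code path serves both current and historical (snapshot) accesses;
 * only the asof TID differs.
 */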
        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode
         * and cache the B-Tree node to improve future operations.
         */
        if (*errorp == 0) {
                ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
                ip->ino_data = cursor.data->inode;
                hammer_cache_node(cursor.node, &ip->cache[0]);
                if (cache)
                        hammer_cache_node(cursor.node, cache);

                /*
                 * The file should not contain any data past the file size
                 * stored in the inode.  Setting sync_trunc_off to the
                 * file size instead of max reduces B-Tree lookup overheads
                 * on append by allowing the flusher to avoid checking for
                 * record overwrites.
                 */
                ip->sync_trunc_off = ip->ino_data.size;
        }
        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         */
        if (*errorp == 0) {
                hammer_ref(&ip->lock);
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_uncache_node(&ip->cache[0]);
                        hammer_uncache_node(&ip->cache[1]);
                        KKASSERT(ip->lock.refs == 1);
                        --hammer_count_inodes;
                        --hmp->count_inodes;
                        kfree(ip, M_HAMMER);
                        hammer_done_cursor(&cursor);
                        goto loop;
                }
                ip->flags |= HAMMER_INODE_ONDISK;
        } else {
                /*
                 * Do not panic on read-only accesses which fail, particularly
                 * historical accesses where the snapshot might not have
                 * complete connectivity.
                 */
                if ((flags & HAMMER_INODE_RO) == 0) {
                        kprintf("hammer_get_inode: failed ip %p obj_id %016llx cursor %p error %d\n",
                                ip, ip->obj_id, &cursor, *errorp);
                }
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }
                hmp->rsv_databufs -= ip->rsv_databufs;
                ip->rsv_databufs = 0;                          /* sanity */

                --hammer_count_inodes;
                --hmp->count_inodes;
                kfree(ip, M_HAMMER);
                ip = NULL;
        }
        hammer_done_cursor(&cursor);
        return(ip);
}
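
/*
 * Design note: hammer_get_inode() allocates the in-memory inode
 * optimistically instead of holding a lock across the B-Tree lookup.
 * The RB_INSERT failure path above resolves instantiation races; the
 * loser simply frees its copy and jumps back to 'loop', where it finds
 * the winner's inode in the red-black tree.
 */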
/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.
 *
 * The inode is created in-memory.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                    struct ucred *cred, hammer_inode_t dip,
                    struct hammer_inode **ipp)
{
        hammer_mount_t hmp;
        hammer_inode_t ip;
        uid_t xuid;

        hmp = trans->hmp;
        ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = hammer_alloc_objid(trans, dip);
        KKASSERT(ip->obj_id != 0);
        ip->obj_asof = hmp->asof;
        ip->hmp = hmp;
        ip->flush_state = HAMMER_FST_IDLE;
        ip->flags = HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES;
        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        ip->ino_leaf.atime = trans->time;
        ip->ino_data.mtime = trans->time;
        ip->ino_data.size = 0;
        ip->ino_data.nlinks = 0;
        /*
         * A nohistory designator on the parent directory is inherited by
         * the child.
         */
        ip->ino_data.uflags = dip->ino_data.uflags &
                              (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
        ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        ip->ino_leaf.base.localization = HAMMER_LOCALIZE_INODE;
        ip->ino_leaf.base.obj_id = ip->obj_id;
        ip->ino_leaf.base.key = 0;
        ip->ino_leaf.base.create_tid = 0;
        ip->ino_leaf.base.delete_tid = 0;
        ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
        ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

        ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
        ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
        ip->ino_data.mode = vap->va_mode;
        ip->ino_data.ctime = trans->time;
        ip->ino_data.parent_obj_id = (dip) ? dip->ino_leaf.base.obj_id : 0;
        switch(ip->ino_leaf.base.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                ip->ino_data.rmajor = vap->va_rmajor;
                ip->ino_data.rminor = vap->va_rminor;
                break;
        default:
                break;
        }
        /*
         * Calculate default uid/gid and overwrite with information from
         * the vap.
         */
        xuid = hammer_to_unix_xid(&dip->ino_data.uid);
        xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
                                     &vap->va_mode);
        ip->ino_data.mode = vap->va_mode;

        if (vap->va_vaflags & VA_UID_UUID_VALID)
                ip->ino_data.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
        else
                hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

        if (vap->va_vaflags & VA_GID_UUID_VALID)
                ip->ino_data.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
        else
                ip->ino_data.gid = dip->ino_data.gid;
        hammer_ref(&ip->lock);
        if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                hammer_unref(&ip->lock);
                panic("hammer_create_inode: duplicate obj_id %llx",
                      ip->obj_id);
        }
        *ipp = ip;
        return(0);
}
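
/*
 * Note: at this point the new object exists only in memory.  No B-Tree
 * or media records have been allocated; the initial inode record is not
 * written until the flusher runs hammer_update_inode() on it, so a crash
 * prior to the first flush simply loses the in-memory object.
 */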
/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        hammer_record_t record;
        int error;

retry:
        error = 0;
        /*
         * If the inode has a presence on-disk then locate it and mark
         * it deleted, setting DELONDISK.
         *
         * The record may or may not be physically deleted, depending on
         * the retention policy.
         */
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
            HAMMER_INODE_ONDISK) {
                hammer_normalize_cursor(cursor);
                cursor->key_beg.localization = HAMMER_LOCALIZE_INODE;
                cursor->key_beg.obj_id = ip->obj_id;
                cursor->key_beg.key = 0;
                cursor->key_beg.create_tid = 0;
                cursor->key_beg.delete_tid = 0;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
                cursor->key_beg.obj_type = 0;
                cursor->asof = ip->obj_asof;
                cursor->flags &= ~HAMMER_CURSOR_INITMASK;
                cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
                cursor->flags |= HAMMER_CURSOR_BACKEND;

                error = hammer_btree_lookup(cursor);
                if (hammer_debug_inode)
                        kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
                if (error) {
                        kprintf("error %d\n", error);
                        Debugger("hammer_update_inode");
                }
                if (error == 0) {
                        error = hammer_ip_delete_record(cursor, ip,
                                                        trans->tid);
                        if (hammer_debug_inode)
                                kprintf(" error %d\n", error);
                        if (error && error != EDEADLK) {
                                kprintf("error %d\n", error);
                                Debugger("hammer_update_inode2");
                        }
                        if (error == 0)
                                ip->flags |= HAMMER_INODE_DELONDISK;
                        if (cursor->node)
                                hammer_cache_node(cursor->node, &ip->cache[0]);
                }
                if (error == EDEADLK) {
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("IPDED %p %d\n", ip, error);
                        if (error == 0)
                                goto retry;
                }
        }
        /*
         * Ok, write out the initial record or a new record (after deleting
         * the old one), unless the DELETED flag is set.  This routine will
         * clear DELONDISK if it writes out a record.
         *
         * Update our inode statistics if this is the first application of
         * the inode on-disk.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
                /*
                 * Generate a record and write it to the media
                 */
                record = hammer_alloc_mem_record(ip, 0);
                record->type = HAMMER_MEM_RECORD_INODE;
                record->flush_state = HAMMER_FST_FLUSH;
                record->leaf = ip->sync_ino_leaf;
                record->leaf.base.create_tid = trans->tid;
                record->leaf.data_len = sizeof(ip->sync_ino_data);
                record->data = (void *)&ip->sync_ino_data;
                record->flags |= HAMMER_RECF_INTERLOCK_BE;
                for (;;) {
                        error = hammer_ip_sync_record_cursor(cursor, record);
                        if (hammer_debug_inode)
                                kprintf("GENREC %p rec %08x %d\n",
                                        ip, record->flags, error);
                        if (error != EDEADLK)
                                break;
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("GENREC reinit %d\n", error);
                        if (error)
                                break;
                }
                if (error) {
                        kprintf("error %d\n", error);
                        Debugger("hammer_update_inode3");
                }

                /*
                 * The record isn't managed by the inode's record tree,
                 * destroy it whether we succeed or fail.
                 */
                record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
                record->flags |= HAMMER_RECF_DELETED_FE;
                record->flush_state = HAMMER_FST_IDLE;
                hammer_rel_mem_record(record);
                /*
                 * Finish up.
                 */
                if (error == 0) {
                        if (hammer_debug_inode)
                                kprintf("CLEANDELOND %p %08x\n",
                                        ip, ip->flags);
                        ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                            HAMMER_INODE_ITIMES);
                        ip->flags &= ~HAMMER_INODE_DELONDISK;

                        /*
                         * Root volume count of inodes
                         */
                        if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
                                hammer_modify_volume_field(trans,
                                                           trans->rootvol,
                                                           vol0_stat_inodes);
                                ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                                hammer_modify_volume_done(trans->rootvol);
                                ip->flags |= HAMMER_INODE_ONDISK;
                                if (hammer_debug_inode)
                                        kprintf("NOWONDISK %p\n", ip);
                        }
                }
        }
        /*
         * If the inode has been destroyed, clean out any left-over flags
         * that may have been set by the frontend.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                    HAMMER_INODE_ITIMES);
        }
        return(error);
}
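
/*
 * Flag summary for the routine above: ONDISK means an inode record
 * exists on-media, DELONDISK means that record has been deleted in
 * preparation for writing an updated copy.  An inode record is never
 * modified in place (except for itimes, below); updates are expressed
 * as a delete followed by a re-write, so historical versions remain
 * subject to the retention policy.
 */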
/*
 * Update only the itimes fields.  This is done no-historically.  The
 * record is updated in-place on the disk.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        struct hammer_btree_leaf_elm *leaf;
        int error;

retry:
        error = 0;
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
            HAMMER_INODE_ONDISK) {
                hammer_normalize_cursor(cursor);
                cursor->key_beg.localization = HAMMER_LOCALIZE_INODE;
                cursor->key_beg.obj_id = ip->obj_id;
                cursor->key_beg.key = 0;
                cursor->key_beg.create_tid = 0;
                cursor->key_beg.delete_tid = 0;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
                cursor->key_beg.obj_type = 0;
                cursor->asof = ip->obj_asof;
                cursor->flags &= ~HAMMER_CURSOR_INITMASK;
                cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
                cursor->flags |= HAMMER_CURSOR_BACKEND;

                error = hammer_btree_lookup(cursor);
                if (error) {
                        kprintf("error %d\n", error);
                        Debugger("hammer_update_itimes1");
                }
                if (error == 0) {
                        /*
                         * Do not generate UNDO records for atime updates.
                         */
                        leaf = cursor->leaf;
                        hammer_modify_node(trans, cursor->node,
                                           &leaf->atime, sizeof(leaf->atime));
                        leaf->atime = ip->sync_ino_leaf.atime;
                        hammer_modify_node_done(cursor->node);
                        /*rec->ino_mtime = ip->sync_ino_rec.ino_mtime;*/
                        ip->sync_flags &= ~HAMMER_INODE_ITIMES;
                        /* XXX recalculate crc */
                        hammer_cache_node(cursor->node, &ip->cache[0]);
                }
                if (error == EDEADLK) {
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (error == 0)
                                goto retry;
                }
        }
        return(error);
}
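
/*
 * The in-place, no-UNDO atime update above is a deliberate exception to
 * the delete-and-rewrite rule: generating a full historical inode record
 * for every access-time change would be prohibitively expensive, and
 * losing a recent atime across a crash is harmless.
 */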
/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
        hammer_mount_t hmp = ip->hmp;

        /*
         * Handle disposition when dropping the last ref.
         */
        for (;;) {
                if (ip->lock.refs == 1) {
                        /*
                         * Determine whether on-disk action is needed for
                         * the inode's final disposition.
                         */
                        KKASSERT(ip->vp == NULL);
                        hammer_inode_unloadable_check(ip, 0);
                        if (ip->flags & HAMMER_INODE_MODMASK) {
                                if (hmp->rsv_inodes > desiredvnodes) {
                                        hammer_flush_inode(ip,
                                                        HAMMER_FLUSH_SIGNAL);
                                } else {
                                        hammer_flush_inode(ip, 0);
                                }
                        } else if (ip->lock.refs == 1) {
                                hammer_unload_inode(ip);
                                break;
                        }
                } else {
                        if (flush)
                                hammer_flush_inode(ip, 0);

                        /*
                         * The inode still has multiple refs, try to drop
                         * one ref.
                         */
                        KKASSERT(ip->lock.refs >= 1);
                        if (ip->lock.refs > 1) {
                                hammer_unref(&ip->lock);
                                break;
                        }
                }
        }
}
/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * This can only be called in the context of the flusher.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
        hammer_mount_t hmp = ip->hmp;

        KASSERT(ip->lock.refs == 1,
                ("hammer_unload_inode: %d refs\n", ip->lock.refs));
        KKASSERT(ip->vp == NULL);
        KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
        KKASSERT(ip->cursor_ip_refs == 0);
        KKASSERT(ip->lock.lockcount == 0);
        KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

        KKASSERT(RB_EMPTY(&ip->rec_tree));
        KKASSERT(TAILQ_EMPTY(&ip->target_list));

        RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

        hammer_uncache_node(&ip->cache[0]);
        hammer_uncache_node(&ip->cache[1]);
        if (ip->objid_cache)
                hammer_clear_objid(ip);
        --hammer_count_inodes;
        --hmp->count_inodes;

        hammer_inode_wakereclaims(ip);
        kfree(ip, M_HAMMER);

        return(0);
}
/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
        hammer_mount_t hmp = ip->hmp;

        if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
                ip->flags |= HAMMER_INODE_RO;
        else
                ip->flags &= ~HAMMER_INODE_RO;
        return(0);
}
/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ITIMES: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
        KKASSERT((ip->flags & HAMMER_INODE_RO) == 0 ||
                 (flags & (HAMMER_INODE_DDIRTY |
                           HAMMER_INODE_XDIRTY | HAMMER_INODE_BUFS |
                           HAMMER_INODE_DELETED |
                           HAMMER_INODE_ITIMES)) == 0);
        if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
                ip->flags |= HAMMER_INODE_RSV_INODES;
                ++ip->hmp->rsv_inodes;
        }

        ip->flags |= flags;
}
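
/*
 * Typical frontend usage (illustrative only):  a setattr-style operation
 * updates ip->ino_data and then calls
 *
 *      hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
 *
 * The first modification also reserves flusher resources via rsv_inodes.
 * The flags accumulate in ip->flags until hammer_flush_inode_core()
 * snapshots them into ip->sync_flags for the backend.
 */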
/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * If the HAMMER_FLUSH_SYNCHRONOUS flag is specified we will attempt to
 * flush the inode synchronously using the caller's context.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
        int good;
        /*
         * Trivial 'nothing to flush' case.  If the inode is in a SETUP
         * state we have to put it back into an IDLE state so we can
         * drop the extra ref.
         */
        if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
                if (ip->flush_state == HAMMER_FST_SETUP) {
                        ip->flush_state = HAMMER_FST_IDLE;
                        hammer_rel_inode(ip, 0);
                }
                return;
        }

        /*
         * Our flush action will depend on the current state.
         */
        switch(ip->flush_state) {
        case HAMMER_FST_IDLE:
                /*
                 * We have no dependancies and can flush immediately.  Some
                 * of our children may not be flushable so we have to re-test
                 * with that additional knowledge.
                 */
                hammer_flush_inode_core(ip, flags);
                break;
        case HAMMER_FST_SETUP:
                /*
                 * Recurse upwards through dependancies via target_list
                 * and start their flusher actions going if possible.
                 *
                 * 'good' is our connectivity.  -1 means we have none and
                 * can't flush, 0 means there weren't any dependancies, and
                 * 1 means we have good connectivity.
                 */
                good = hammer_setup_parent_inodes(ip);

                /*
                 * We can continue if good >= 0.  Determine how many records
                 * under our inode can be flushed (and mark them).
                 */
                if (good >= 0) {
                        hammer_flush_inode_core(ip, flags);
                } else {
                        ip->flags |= HAMMER_INODE_REFLUSH;
                        if (flags & HAMMER_FLUSH_SIGNAL) {
                                ip->flags |= HAMMER_INODE_RESIGNAL;
                                hammer_flusher_async(ip->hmp);
                        }
                }
                break;
        default:
                /*
                 * We are already flushing, flag the inode to reflush
                 * if needed after it completes its current flush.
                 */
                if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
                        ip->flags |= HAMMER_INODE_REFLUSH;
                if (flags & HAMMER_FLUSH_SIGNAL) {
                        ip->flags |= HAMMER_INODE_RESIGNAL;
                        hammer_flusher_async(ip->hmp);
                }
                break;
        }
}
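
/*
 * The flush state machine in brief:
 *
 *      IDLE  -> FLUSH  no dependancies, flush immediately
 *      SETUP -> FLUSH  parent dependancies resolved via target_list
 *      FLUSH -> FLUSH  already flushing; REFLUSH queues another pass
 *
 * RESIGNAL is recorded alongside REFLUSH so that a signaled (async)
 * request is not lost when the flush has to be deferred.
 */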
/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 * so for now do not ref/deref the structures.  Note that if we use the
 * ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip)
{
        hammer_record_t depend;
#if 0
        hammer_record_t next;
        hammer_inode_t pip;
#endif
        int good;
        int r;

        good = 0;
        TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
                r = hammer_setup_parent_inodes_helper(depend);
                KKASSERT(depend->target_ip == ip);
                if (r < 0 && good == 0)
                        good = -1;
                if (r > 0)
                        good = 1;
        }
        return(good);

#if 0
        /*
         * Disabled variant of the scan above which refs/rels the records
         * and inodes while recursing (see the XXX note in the function
         * comment).
         */
        good = 0;
        next = TAILQ_FIRST(&ip->target_list);
        if (next) {
                hammer_ref(&next->lock);
                hammer_ref(&next->ip->lock);
        }
        while ((depend = next) != NULL) {
                if (depend->target_ip == NULL) {
                        pip = depend->ip;
                        hammer_rel_mem_record(depend);
                        hammer_rel_inode(pip, 0);
                        break;
                }
                KKASSERT(depend->target_ip == ip);
                next = TAILQ_NEXT(depend, target_entry);
                if (next) {
                        hammer_ref(&next->lock);
                        hammer_ref(&next->ip->lock);
                }
                r = hammer_setup_parent_inodes_helper(depend);
                if (r < 0 && good == 0)
                        good = -1;
                if (r > 0)
                        good = 1;
                pip = depend->ip;
                hammer_rel_mem_record(depend);
                hammer_rel_inode(pip, 0);
        }
        return(good);
#endif
}
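
/*
 * Aggregation rule used by the scan above: one parent that can be
 * flushed (r > 0) gives the child connectivity even if other parents
 * cannot be resolved; only when a dependancy fails (r < 0) and nothing
 * positive is found does the scan report -1.
 */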
/*
 * This helper function takes a record representing the dependancy between
 * the parent inode and child inode.
 *
 * record->ip           = parent inode
 * record->target_ip    = child inode
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependancy and there is no connectivity.
 */
static int
hammer_setup_parent_inodes_helper(hammer_record_t record)
{
        hammer_mount_t hmp;
        hammer_inode_t pip;
        int good;

        KKASSERT(record->flush_state != HAMMER_FST_IDLE);
        pip = record->ip;
        hmp = pip->hmp;
        /*
         * If the record is already flushing, is it in our flush group?
         *
         * If it is in our flush group but it is a general record or a
         * delete-on-disk, it does not improve our connectivity (return 0),
         * and if the target inode is not trying to destroy itself we can't
         * allow the operation yet anyway (the second return -1).
         */
        if (record->flush_state == HAMMER_FST_FLUSH) {
                if (record->flush_group != hmp->flusher.next) {
                        pip->flags |= HAMMER_INODE_REFLUSH;
                        return(-1);
                }
                if (record->type == HAMMER_MEM_RECORD_ADD)
                        return(1);
                /* GENERAL or DEL */
                return(0);
        }

        /*
         * It must be a setup record.  Try to resolve the setup dependancies
         * by recursing upwards so we can place ip on the flush list.
         */
        KKASSERT(record->flush_state == HAMMER_FST_SETUP);

        good = hammer_setup_parent_inodes(pip);

        /*
         * We can't flush ip because it has no connectivity (XXX also check
         * nlinks for pre-existing connectivity!).  Flag it so any resolution
         * recurses back down.
         */
        if (good < 0) {
                pip->flags |= HAMMER_INODE_REFLUSH;
                return(good);
        }

        /*
         * We are go, place the parent inode in a flushing state so we can
         * place its record in a flushing state.  Note that the parent
         * may already be flushing.  The record must be in the same flush
         * group as the parent.
         */
        if (pip->flush_state != HAMMER_FST_FLUSH)
                hammer_flush_inode_core(pip, HAMMER_FLUSH_RECURSION);
        KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
        KKASSERT(record->flush_state == HAMMER_FST_SETUP);

        if (record->type == HAMMER_MEM_RECORD_DEL &&
            (record->target_ip->flags &
             (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
                /*
                 * Regardless of flushing state we cannot sync this path if
                 * the record represents a delete-on-disk but the target
                 * inode is not ready to sync its own deletion.
                 *
                 * XXX need to count effective nlinks to determine whether
                 * the flush is ok, otherwise removing a hardlink will
                 * just leave the DEL record to rot.
                 */
                record->target_ip->flags |= HAMMER_INODE_REFLUSH;
                return(-1);
        }

        if (pip->flush_group == pip->hmp->flusher.next) {
                /*
                 * This is the record we wanted to synchronize.  If the
                 * record went into a flush state while we blocked it
                 * had better be in the correct flush group.
                 */
                if (record->flush_state != HAMMER_FST_FLUSH) {
                        record->flush_state = HAMMER_FST_FLUSH;
                        record->flush_group = pip->flush_group;
                        hammer_ref(&record->lock);
                } else {
                        KKASSERT(record->flush_group == pip->flush_group);
                }
                if (record->type == HAMMER_MEM_RECORD_ADD)
                        return(1);

                /*
                 * A general or delete-on-disk record does not contribute
                 * to our visibility.  We can still flush it, however.
                 */
                return(0);
        }

        /*
         * We couldn't resolve the dependancies, request that the
         * inode be flushed when the dependancies can be resolved.
         */
        pip->flags |= HAMMER_INODE_REFLUSH;
        return(-1);
}
/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, int flags)
{
        int go_count;

        /*
         * Set flush state and prevent the flusher from cycling into
         * the next flush group.  Do not place the ip on the list yet.
         * Inodes not in the idle state get an extra reference.
         */
        KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
        if (ip->flush_state == HAMMER_FST_IDLE)
                hammer_ref(&ip->lock);
        ip->flush_state = HAMMER_FST_FLUSH;
        ip->flush_group = ip->hmp->flusher.next;
        ++ip->hmp->flusher.group_lock;
        ++ip->hmp->count_iqueued;
        ++hammer_count_iqueued;
        /*
         * We need to be able to vfsync/truncate from the backend.
         */
        KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
        if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
                ip->flags |= HAMMER_INODE_VHELD;
                vref(ip->vp);
        }

        /*
         * Figure out how many in-memory records we can actually flush
         * (not including inode meta-data, buffers, etc).
         */
        if (flags & HAMMER_FLUSH_RECURSION) {
                go_count = 1;
        } else {
                go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
                                   hammer_setup_child_callback, NULL);
        }

        /*
         * This is a more involved test that includes go_count.  If we
         * can't flush, flag the inode and return.  If go_count is 0 we
         * were unable to flush any records in our rec_tree and
         * must ignore the XDIRTY flag.
         */
        if (go_count == 0) {
                if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
                        ip->flags |= HAMMER_INODE_REFLUSH;

                        --ip->hmp->count_iqueued;
                        --hammer_count_iqueued;

                        ip->flush_state = HAMMER_FST_SETUP;
                        if (ip->flags & HAMMER_INODE_VHELD) {
                                ip->flags &= ~HAMMER_INODE_VHELD;
                                vrele(ip->vp);
                        }
                        if (flags & HAMMER_FLUSH_SIGNAL) {
                                ip->flags |= HAMMER_INODE_RESIGNAL;
                                hammer_flusher_async(ip->hmp);
                        }
                        if (--ip->hmp->flusher.group_lock == 0)
                                wakeup(&ip->hmp->flusher.group_lock);
                        return;
                }
        }
        /*
         * Snapshot the state of the inode for the backend flusher.
         *
         * The truncation must be retained in the frontend until after
         * we've actually performed the record deletion.
         *
         * We continue to retain sync_trunc_off even when all truncations
         * have been resolved as an optimization to determine if we can
         * skip the B-Tree lookup for overwrite deletions.
         *
         * NOTE: The DELETING flag is a mod flag, but it is also sticky,
         * and stays in ip->flags.  Once set, it stays set until the
         * inode is destroyed.
         */
        ip->sync_flags = (ip->flags & HAMMER_INODE_MODMASK);
        if (ip->sync_flags & HAMMER_INODE_TRUNCATED)
                ip->sync_trunc_off = ip->trunc_off;
        ip->sync_ino_leaf = ip->ino_leaf;
        ip->sync_ino_data = ip->ino_data;
        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        ip->flags &= ~HAMMER_INODE_MODMASK;
#ifdef DEBUG_TRUNCATE
        if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
                kprintf("truncateS %016llx\n", ip->sync_trunc_off);
#endif

        /*
         * The flusher list inherits our inode and reference.
         */
        TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
        if (--ip->hmp->flusher.group_lock == 0)
                wakeup(&ip->hmp->flusher.group_lock);

        if (flags & HAMMER_FLUSH_SIGNAL) {
                hammer_flusher_async(ip->hmp);
        }
}
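
/*
 * The copies made above (sync_ino_leaf, sync_ino_data, sync_flags and
 * sync_trunc_off) are what the backend flusher operates on.  The
 * frontend is immediately free to continue modifying ip->ino_data and
 * ip->flags without interlocking against media I/O; this frontend/
 * backend split is the central concurrency idea in this file.
 */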
/*
 * Callback for scan of ip->rec_tree.  Try to include each record in our
 * flush.  ip->flush_group has been set but the inode has not yet been
 * moved into a flushing state.
 *
 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
 * both inodes.
 *
 * We return 1 for any record placed or found in FST_FLUSH, which prevents
 * the caller from shortcutting the flush.
 */
static int
hammer_setup_child_callback(hammer_record_t rec, void *data)
{
        hammer_inode_t target_ip;
        hammer_inode_t ip;
        int r;
        /*
         * Deleted records are ignored.  Note that the flush detects deleted
         * front-end records at multiple points to deal with races.  This is
         * just the first line of defense.  The only time DELETED_FE cannot
         * be set is when HAMMER_RECF_INTERLOCK_BE is set.
         *
         * Don't get confused between record deletion and, say, directory
         * entry deletion.  The deletion of a directory entry that is on
         * the media has nothing to do with the record deletion flags.
         */
        if (rec->flags & (HAMMER_RECF_DELETED_FE|HAMMER_RECF_DELETED_BE))
                return(0);

        /*
         * If the record is in an idle state it has no dependancies and
         * can be flushed.
         */
        ip = rec->ip;
        r = 0;

        switch(rec->flush_state) {
        case HAMMER_FST_IDLE:
                /*
                 * Record has no setup dependancy, we can flush it.
                 */
                KKASSERT(rec->target_ip == NULL);
                rec->flush_state = HAMMER_FST_FLUSH;
                rec->flush_group = ip->flush_group;
                hammer_ref(&rec->lock);
                r = 1;
                break;
        case HAMMER_FST_SETUP:
                /*
                 * Record has a setup dependancy.  Try to include the
                 * target ip in the flush.
                 *
                 * We have to be careful here, if we do not do the right
                 * thing we can lose track of dirty inodes and the system
                 * will lockup trying to allocate buffers.
                 */
                target_ip = rec->target_ip;
                KKASSERT(target_ip != NULL);
                KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
                if (target_ip->flush_state == HAMMER_FST_FLUSH) {
                        /*
                         * If the target IP is already flushing in our group
                         * we are golden, otherwise make sure the target
                         * reflushes.
                         */
                        if (target_ip->flush_group == ip->flush_group) {
                                rec->flush_state = HAMMER_FST_FLUSH;
                                rec->flush_group = ip->flush_group;
                                hammer_ref(&rec->lock);
                                r = 1;
                        } else {
                                target_ip->flags |= HAMMER_INODE_REFLUSH;
                        }
                } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
                        /*
                         * If the target IP is not flushing we can force
                         * it to flush, even if it is unable to write out
                         * any of its own records we have at least one in
                         * hand that we CAN deal with.
                         */
                        rec->flush_state = HAMMER_FST_FLUSH;
                        rec->flush_group = ip->flush_group;
                        hammer_ref(&rec->lock);
                        hammer_flush_inode_core(target_ip,
                                                HAMMER_FLUSH_RECURSION);
                        r = 1;
                } else {
                        /*
                         * General or delete-on-disk record.
                         *
                         * XXX this needs help.  If a delete-on-disk we could
                         * disconnect the target.  If the target has its own
                         * dependancies they really need to be flushed.
                         *
                         * XXX
                         */
                        rec->flush_state = HAMMER_FST_FLUSH;
                        rec->flush_group = ip->flush_group;
                        hammer_ref(&rec->lock);
                        hammer_flush_inode_core(target_ip,
                                                HAMMER_FLUSH_RECURSION);
                        r = 1;
                }
                break;
        case HAMMER_FST_FLUSH:
                /*
                 * Record already associated with a flush group.  It had
                 * better be ours.
                 */
                KKASSERT(rec->flush_group == ip->flush_group);
                r = 1;
                break;
        }
        return(r);
}
/*
 * Wait for a previously queued flush to complete
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
        while (ip->flush_state != HAMMER_FST_IDLE) {
                if (ip->flush_state == HAMMER_FST_SETUP) {
                        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                } else {
                        ip->flags |= HAMMER_INODE_FLUSHW;
                        tsleep(&ip->flags, 0, "hmrwin", 0);
                }
        }
}
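
/*
 * The FLUSHW handshake: hammer_flush_inode_done() tests FLUSHW once a
 * flush completes and issues a wakeup() on &ip->flags, pairing with the
 * tsleep() above.
 */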
/*
 * Wait for records to drain
 */
void
hammer_wait_inode_recs(hammer_inode_t ip)
{
        while (ip->rsv_recs > hammer_limit_irecs) {
                hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                if (ip->rsv_recs > hammer_limit_irecs) {
                        ip->flags |= HAMMER_INODE_PARTIALW;
                        tsleep(&ip->flags, 0, "hmrwpp", 0);
                }
        }
}
/*
 * Called by the backend code when a flush has been completed.
 * The inode has already been removed from the flush list.
 *
 * A pipelined flush can occur, in which case we must re-enter the
 * inode on the list and re-copy its fields.
 */
void
hammer_flush_inode_done(hammer_inode_t ip)
{
        hammer_mount_t hmp;
        int dorel;

        KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

        hmp = ip->hmp;
        /*
         * Merge left-over flags back into the frontend and fix the state.
         */
        ip->flags |= ip->sync_flags;

        /*
         * The backend may have adjusted nlinks, so if the adjusted nlinks
         * does not match the frontend set the frontend's DDIRTY flag again.
         */
        if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
                ip->flags |= HAMMER_INODE_DDIRTY;

        /*
         * Fix up the dirty buffer status.  IO completions will also
         * try to clean up rsv_databufs.
         */
        if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
                ip->flags |= HAMMER_INODE_BUFS;
        } else {
                hmp->rsv_databufs -= ip->rsv_databufs;
                ip->rsv_databufs = 0;
        }

        /*
         * Re-set the XDIRTY flag if some of the inode's in-memory records
         * could not be flushed.
         */
        KKASSERT((RB_EMPTY(&ip->rec_tree) &&
                  (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
                 (!RB_EMPTY(&ip->rec_tree) &&
                  (ip->flags & HAMMER_INODE_XDIRTY) != 0));

        /*
         * Do not lose track of inodes which no longer have vnode
         * assocations, otherwise they may never get flushed again.
         */
        if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
                ip->flags |= HAMMER_INODE_REFLUSH;

        /*
         * Adjust flush_state.  The target state (idle or setup) shouldn't
         * be terribly important since we will reflush if we really need
         * to do anything. XXX
         */
        if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
                ip->flush_state = HAMMER_FST_IDLE;
                dorel = 1;
        } else {
                ip->flush_state = HAMMER_FST_SETUP;
                dorel = 0;
        }

        --hmp->count_iqueued;
        --hammer_count_iqueued;

        /*
         * Clean up the vnode ref
         */
        if (ip->flags & HAMMER_INODE_VHELD) {
                ip->flags &= ~HAMMER_INODE_VHELD;
                vrele(ip->vp);
        }

        /*
         * If the frontend made more changes and requested another flush,
         * then try to get it running.
         */
        if (ip->flags & HAMMER_INODE_REFLUSH) {
                ip->flags &= ~HAMMER_INODE_REFLUSH;
                if (ip->flags & HAMMER_INODE_RESIGNAL) {
                        ip->flags &= ~HAMMER_INODE_RESIGNAL;
                        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                } else {
                        hammer_flush_inode(ip, 0);
                }
        }

        /*
         * If the inode is now clean drop the space reservation.
         */
        if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
            (ip->flags & HAMMER_INODE_RSV_INODES)) {
                ip->flags &= ~HAMMER_INODE_RSV_INODES;
                --hmp->rsv_inodes;
        }

        /*
         * Finally, if the frontend is waiting for a flush to complete,
         * wake it up.
         */
        if (ip->flush_state != HAMMER_FST_FLUSH) {
                if (ip->flags & HAMMER_INODE_FLUSHW) {
                        ip->flags &= ~HAMMER_INODE_FLUSHW;
                        wakeup(&ip->flags);
                }
        }
        if (dorel)
                hammer_rel_inode(ip, 0);
}
/*
 * Called from hammer_sync_inode() to synchronize in-memory records
 * to the media.
 */
static int
hammer_sync_record_callback(hammer_record_t record, void *data)
{
        hammer_cursor_t cursor = data;
        hammer_transaction_t trans = cursor->trans;
        int error;
        /*
         * Skip records that do not belong to the current flush.
         */
        ++hammer_stats_record_iterations;
        if (record->flush_state != HAMMER_FST_FLUSH)
                return(0);

        if (record->flush_group != record->ip->flush_group) {
                kprintf("sync_record %p ip %p bad flush group %d %d\n",
                        record, record->ip, record->flush_group,
                        record->ip->flush_group);
                return(0);
        }
        KKASSERT(record->flush_group == record->ip->flush_group);

        /*
         * Interlock the record using the BE flag.  Once BE is set the
         * frontend cannot change the state of FE.
         *
         * NOTE: If FE is set prior to us setting BE we still sync the
         * record out, but the flush completion code converts it to
         * a delete-on-disk record instead of destroying it.
         */
        KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
        record->flags |= HAMMER_RECF_INTERLOCK_BE;

        /*
         * The backend may have already disposed of the record.
         */
        if (record->flags & HAMMER_RECF_DELETED_BE) {
                error = 0;
                goto done;
        }

        /*
         * If the whole inode is being deleted all on-disk records will
         * be deleted very soon, we can't sync any new records to disk
         * because they will be deleted in the same transaction they were
         * created in (delete_tid == create_tid), which will assert.
         *
         * XXX There may be a case with RECORD_ADD with DELETED_FE set
         * that we currently panic on.
         */
        if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
                switch(record->type) {
                case HAMMER_MEM_RECORD_DATA:
                        /*
                         * We don't have to do anything, if the record was
                         * committed the space will have been accounted for
                         * anyway.
                         */
                        /* fall through */
                case HAMMER_MEM_RECORD_GENERAL:
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        record->flags |= HAMMER_RECF_DELETED_BE;
                        error = 0;
                        goto done;
                case HAMMER_MEM_RECORD_ADD:
                        panic("hammer_sync_record_callback: illegal add "
                              "during inode deletion record %p", record);
                        break; /* NOT REACHED */
                case HAMMER_MEM_RECORD_INODE:
                        panic("hammer_sync_record_callback: attempt to "
                              "sync inode record %p?", record);
                        break; /* NOT REACHED */
                case HAMMER_MEM_RECORD_DEL:
                        /*
                         * Follow through and issue the on-disk deletion
                         */
                        break;
                }
        }

        /*
         * If DELETED_FE is set special handling is needed for directory
         * entries.  Dependant pieces related to the directory entry may
         * have already been synced to disk.  If this occurs we have to
         * sync the directory entry and then change the in-memory record
         * from an ADD to a DELETE to cover the fact that it's been
         * deleted by the frontend.
         *
         * A directory delete covering record (MEM_RECORD_DEL) can never
         * be deleted by the frontend.
         *
         * Any other record type (aka DATA) can be deleted by the frontend.
         * XXX At the moment the flusher must skip it because there may
         * be another data record in the flush group for the same block,
         * meaning that some frontend data changes can leak into the backend's
         * synchronization point.
         */
        if (record->flags & HAMMER_RECF_DELETED_FE) {
                if (record->type == HAMMER_MEM_RECORD_ADD) {
                        record->flags |= HAMMER_RECF_CONVERT_DELETE;
                } else {
                        KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
                        record->flags |= HAMMER_RECF_DELETED_BE;
                        error = 0;
                        goto done;
                }
        }

        /*
         * Assign the create_tid for new records.  Deletions already
         * have the record's entire key properly set up.
         */
        if (record->type != HAMMER_MEM_RECORD_DEL)
                record->leaf.base.create_tid = trans->tid;
        for (;;) {
                error = hammer_ip_sync_record_cursor(cursor, record);
                if (error != EDEADLK)
                        break;
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor,
                                           &record->ip->cache[0],
                                           record->ip);
                if (error)
                        break;
        }
        record->flags &= ~HAMMER_RECF_CONVERT_DELETE;

        if (error) {
                error = -error;
                if (error != -ENOSPC) {
                        kprintf("hammer_sync_record_callback: sync failed rec "
                                "%p, error %d\n", record, error);
                        Debugger("sync failed rec");
                }
        }
done:
        hammer_flush_record_done(record, error);
        return(error);
}
/*
 * XXX error handling
 */
int
hammer_sync_inode(hammer_inode_t ip)
{
        struct hammer_transaction trans;
        struct hammer_cursor cursor;
        hammer_node_t tmp_node;
        hammer_record_t depend;
        hammer_record_t next;
        int error, tmp_error;
        u_int64_t nlinks;

        if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
                return(0);

        hammer_start_transaction_fls(&trans, ip->hmp);
        error = hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
        if (error)
                goto done;

        /*
         * Any directory records referencing this inode which are not in
         * our current flush group must adjust our nlink count for the
         * purposes of synchronization to disk.
         *
         * Records which are in our flush group can be unlinked from our
         * inode now, potentially allowing the inode to be physically
         * deleted.
         *
         * This cannot block.
         */
        nlinks = ip->ino_data.nlinks;
        next = TAILQ_FIRST(&ip->target_list);
        while ((depend = next) != NULL) {
                next = TAILQ_NEXT(depend, target_entry);
                if (depend->flush_state == HAMMER_FST_FLUSH &&
                    depend->flush_group == ip->hmp->flusher.act) {
                        /*
                         * If this is an ADD that was deleted by the frontend
                         * the frontend nlinks count will have already been
                         * decremented, but the backend is going to sync its
                         * directory entry and must account for it.  The
                         * record will be converted to a delete-on-disk when
                         * it gets synced.
                         *
                         * If the ADD was not deleted by the frontend we
                         * can remove the dependancy from our target_list.
                         */
                        if (depend->flags & HAMMER_RECF_DELETED_FE) {
                                ++nlinks;
                        } else {
                                TAILQ_REMOVE(&ip->target_list, depend,
                                             target_entry);
                                depend->target_ip = NULL;
                        }
                } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
                        /*
                         * Not part of our flush group
                         */
                        KKASSERT((depend->flags &
                                  HAMMER_RECF_DELETED_BE) == 0);
                        switch(depend->type) {
                        case HAMMER_MEM_RECORD_ADD:
                                --nlinks;
                                break;
                        case HAMMER_MEM_RECORD_DEL:
                                ++nlinks;
                                break;
                        default:
                                break;
                        }
                }
        }

        /*
         * Set dirty if we had to modify the link count.
         */
        if (ip->sync_ino_data.nlinks != nlinks) {
                KKASSERT((int64_t)nlinks >= 0);
                ip->sync_ino_data.nlinks = nlinks;
                ip->sync_flags |= HAMMER_INODE_DDIRTY;
        }
        /*
         * If there is a truncation queued destroy any data past the
         * (aligned) truncation point.  Userland will have dealt with the
         * buffer containing the truncation point for us.
         *
         * We don't flush pending frontend data buffers until after we've
         * dealt with the truncation.
         */
        if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
                /*
                 * Interlock trunc_off.  The VOP front-end may continue to
                 * make adjustments to it while we are blocked.
                 */
                off_t trunc_off;
                off_t aligned_trunc_off;

                trunc_off = ip->sync_trunc_off;
                aligned_trunc_off = (trunc_off + HAMMER_BUFMASK) &
                                    ~HAMMER_BUFMASK64;

                /*
                 * Delete any whole blocks on-media.  The front-end has
                 * already cleaned out any partial block and made it
                 * pending.  The front-end may have updated trunc_off
                 * while we were blocked so we only use sync_trunc_off.
                 */
                error = hammer_ip_delete_range(&cursor, ip,
                                               aligned_trunc_off,
                                               0x7FFFFFFFFFFFFFFFLL, 1);
                if (error)
                        Debugger("hammer_ip_delete_range errored");

                /*
                 * Clear the truncation flag on the backend after we have
                 * completed the deletions.  Backend data is now good again
                 * (including new records we are about to sync, below).
                 *
                 * Leave sync_trunc_off intact.  As we write additional
                 * records the backend will update sync_trunc_off.  This
                 * tells the backend whether it can skip the overwrite
                 * test.  This should work properly even when the backend
                 * writes full blocks where the truncation point straddles
                 * the block because the comparison is against the base
                 * offset of the record.
                 */
                ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
                /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
        } else {
                error = 0;
        }
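
        /*
         * Alignment example for the computation above (assuming 16KB
         * HAMMER buffers, i.e. HAMMER_BUFMASK == 0x3fff):  truncating
         * to offset 10000 gives
         *
         *      aligned_trunc_off = (10000 + 0x3fff) & ~0x3fff = 16384
         *
         * so the backend deletes whole blocks in [16384, MAX) while the
         * frontend's dirty partial buffer covers [0, 16384).
         */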
        /*
         * Now sync related records.  These will typically be directory
         * entries or delete-on-disk records.
         *
         * Not all records will be flushed, but clear XDIRTY anyway.  We
         * will set it again in the frontend hammer_flush_inode_done()
         * if records remain.
         */
        if (error == 0) {
                tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
                                    hammer_sync_record_callback, &cursor);
                if (tmp_error < 0)
                        tmp_error = -tmp_error;
                if (tmp_error)
                        error = tmp_error;
                if (RB_EMPTY(&ip->rec_tree))
                        hammer_cache_node(cursor.node, &ip->cache[1]);
        }

        /*
         * Re-seek for inode update.
         */
        if (error == 0) {
                tmp_node = hammer_ref_node_safe(ip->hmp, &ip->cache[0],
                                                &error);
                if (tmp_node) {
                        hammer_cursor_seek(&cursor, tmp_node, 0);
                        hammer_rel_node(tmp_node);
                }
                error = 0;
        }
        /*
         * If we are deleting the inode the frontend had better not have
         * any active references on elements making up the inode.
         */
        if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
            RB_EMPTY(&ip->rec_tree) &&
            (ip->sync_flags & HAMMER_INODE_DELETING) &&
            (ip->flags & HAMMER_INODE_DELETED) == 0) {
                int count1 = 0;

                ip->flags |= HAMMER_INODE_DELETED;
                error = hammer_ip_delete_range_all(&cursor, ip, &count1);
                if (error == 0) {
                        ip->sync_flags &= ~HAMMER_INODE_DELETING;
                        ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
                        KKASSERT(RB_EMPTY(&ip->rec_tree));

                        /*
                         * Set delete_tid in both the frontend and backend
                         * copy of the inode record.  The DELETED flag handles
                         * this, do not set RDIRTY.
                         */
                        ip->ino_leaf.base.delete_tid = trans.tid;
                        ip->sync_ino_leaf.base.delete_tid = trans.tid;

                        /*
                         * Adjust the inode count in the volume header
                         */
                        if (ip->flags & HAMMER_INODE_ONDISK) {
                                hammer_modify_volume_field(&trans,
                                                           trans.rootvol,
                                                           vol0_stat_inodes);
                                --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                                hammer_modify_volume_done(trans.rootvol);
                        }
                } else {
                        ip->flags &= ~HAMMER_INODE_DELETED;
                        Debugger("hammer_ip_delete_range_all errored");
                }
        }

        ip->sync_flags &= ~HAMMER_INODE_BUFS;

        if (error)
                Debugger("RB_SCAN errored");
        /*
         * Now update the inode's on-disk inode-data and/or on-disk record.
         * DELETED and ONDISK are managed only in ip->flags.
         */
        switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
        case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
                /*
                 * If deleted and on-disk, don't set any additional flags.
                 * the delete flag takes care of things.
                 *
                 * Clear flags which may have been set by the frontend.
                 */
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                    HAMMER_INODE_XDIRTY |
                                    HAMMER_INODE_ITIMES |
                                    HAMMER_INODE_DELETING);
                break;
        case HAMMER_INODE_DELETED:
                /*
                 * Take care of the case where a deleted inode was never
                 * flushed to the disk in the first place.
                 *
                 * Clear flags which may have been set by the frontend.
                 */
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                    HAMMER_INODE_XDIRTY |
                                    HAMMER_INODE_ITIMES |
                                    HAMMER_INODE_DELETING);
                while (RB_ROOT(&ip->rec_tree)) {
                        hammer_record_t record = RB_ROOT(&ip->rec_tree);
                        hammer_ref(&record->lock);
                        KKASSERT(record->lock.refs == 1);
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        record->flags |= HAMMER_RECF_DELETED_BE;
                        hammer_rel_mem_record(record);
                }
                break;
        case HAMMER_INODE_ONDISK:
                /*
                 * If already on-disk, do not set any additional flags.
                 */
                break;
        default:
                /*
                 * If not on-disk and not deleted, set both dirty flags
                 * to force an initial record to be written.  Also set
                 * the create_tid for the inode.
                 *
                 * Set create_tid in both the frontend and backend
                 * copy of the inode record.
                 */
                ip->ino_leaf.base.create_tid = trans.tid;
                ip->sync_ino_leaf.base.create_tid = trans.tid;
                ip->sync_flags |= HAMMER_INODE_DDIRTY;
                break;
        }

        /*
         * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
         * is already on-disk the old record is marked as deleted.
         *
         * If DELETED is set hammer_update_inode() will delete the existing
         * record without writing out a new one.
         *
         * If *ONLY* the ITIMES flag is set we can update the record in-place.
         */
        if (ip->flags & HAMMER_INODE_DELETED) {
                error = hammer_update_inode(&cursor, ip);
        } else
        if ((ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) ==
            HAMMER_INODE_ITIMES) {
                error = hammer_update_itimes(&cursor, ip);
        } else
        if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) {
                error = hammer_update_inode(&cursor, ip);
        }
        if (error)
                Debugger("hammer_update_itimes/inode errored");
done:
        /*
         * Save the TID we used to sync the inode with to make sure we
         * do not improperly reuse it.
         */
        hammer_done_cursor(&cursor);
        hammer_done_transaction(&trans);
        return(error);
}
/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
        struct vnode *vp;

        /*
         * Set the DELETING flag when the link count drops to 0 and the
         * OS no longer has any opens on the inode.
         *
         * The backend will clear DELETING (a mod flag) and set DELETED
         * (a state flag) when it is actually able to perform the
         * operation.
         */
        if (ip->ino_data.nlinks == 0 &&
            (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
                ip->flags |= HAMMER_INODE_DELETING;
                ip->flags |= HAMMER_INODE_TRUNCATED;
                ip->trunc_off = 0;
                vp = NULL;
                if (getvp) {
                        if (hammer_get_vnode(ip, &vp) != 0)
                                return;
                }

                if (ip->vp) {
                        vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
                        vnode_pager_setsize(ip->vp, 0);
                }
                if (getvp)
                        vput(vp);
        }
}
/*
 * Re-test an inode when a dependancy had gone away to see if we
 * can chain flush it.
 */
void
hammer_test_inode(hammer_inode_t ip)
{
        if (ip->flags & HAMMER_INODE_REFLUSH) {
                ip->flags &= ~HAMMER_INODE_REFLUSH;
                hammer_ref(&ip->lock);
                if (ip->flags & HAMMER_INODE_RESIGNAL) {
                        ip->flags &= ~HAMMER_INODE_RESIGNAL;
                        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                } else {
                        hammer_flush_inode(ip, 0);
                }
                hammer_rel_inode(ip, 0);
        }
}
/*
 * Clear the RECLAIM flag on an inode.  This occurs when the inode is
 * reassociated with a vp or just before it gets freed.
 *
 * Wakeup one thread blocked waiting on reclaims to complete.  Note that
 * the inode the thread is waiting on behalf of is a different inode than
 * the inode we are called with.  This is to create a pipeline.
 */
static void
hammer_inode_wakereclaims(hammer_inode_t ip)
{
        struct hammer_reclaim *reclaim;
        hammer_mount_t hmp = ip->hmp;

        if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
                return;

        --hammer_count_reclaiming;
        --hmp->inode_reclaims;
        ip->flags &= ~HAMMER_INODE_RECLAIM;

        if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
                TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
                reclaim->okydoky = 1;
                wakeup(reclaim);
        }
}
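
/*
 * Together with hammer_vop_reclaim() this forms a simple pipeline:
 * once more than HAMMER_RECLAIM_PIPESIZE inodes are being reclaimed,
 * each new reclaimer enqueues a hammer_reclaim entry and sleeps, and
 * every inode that finishes reclaiming wakes exactly one waiter, so
 * waiters drain in FIFO order as capacity frees up.
 */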