/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.84 2008/06/24 17:38:17 dillon Exp $
 */
#include "hammer.h"
#include <vm/vm_extern.h>

static int	hammer_unload_inode(struct hammer_inode *ip);
static void	hammer_flush_inode_core(hammer_inode_t ip, int flags);
static int	hammer_setup_child_callback(hammer_record_t rec, void *data);
static int	hammer_setup_parent_inodes(hammer_inode_t ip);
static int	hammer_setup_parent_inodes_helper(hammer_record_t record);
static void	hammer_inode_wakereclaims(hammer_inode_t ip);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif
/*
 * Red-Black tree support for inode structures.
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_localization < ip2->obj_localization)
		return(-1);
	if (ip1->obj_localization > ip2->obj_localization)
		return(1);
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}
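/*
 * NOTE: Illustrative sketch, not part of the original source.  The
 * comparator above sorts on the composite key (obj_localization, obj_id,
 * obj_asof), so all historical snapshots of an object are adjacent in the
 * tree and a fully-specified triple resolves a single inode, e.g.:
 *
 *	iinfo.obj_id = obj_id;
 *	iinfo.obj_asof = asof;
 *	iinfo.obj_localization = localization;
 *	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
 */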
int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_localization < ip->obj_localization)
		return(-1);
	if (info->obj_localization > ip->obj_localization)
		return(1);
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}
/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t info = data;

	if (ip->obj_localization > info->obj_localization)
		return(1);
	if (ip->obj_localization < info->obj_localization)
		return(-1);
	if (ip->obj_id > info->obj_id)
		return(1);
	if (ip->obj_id < info->obj_id)
		return(-1);
	return(0);
}
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	/*
	 * If the inode no longer has visibility in the filesystem try to
	 * recycle it immediately, even if the inode is dirty.  Recycling
	 * it quickly allows the system to reclaim buffer cache and VM
	 * resources which can matter a lot in a heavily loaded system.
	 *
	 * This can deadlock in vfsync() if we aren't careful.
	 *
	 * Do not queue the inode to the flusher if we still have visibility,
	 * otherwise namespace calls such as chmod will unnecessarily generate
	 * multiple inode updates.
	 */
	hammer_inode_unloadable_check(ip, 0);
	if (ip->ino_data.nlinks == 0) {
		if (ip->flags & HAMMER_INODE_MODMASK)
			hammer_flush_inode(ip, 0);
		vrecycle(ap->a_vp);
	}
	return(0);
}
/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer_inode *ip;
	hammer_mount_t hmp;
	struct vnode *vp;

	vp = ap->a_vp;

	if ((ip = vp->v_data) != NULL) {
		hmp = ip->hmp;
		vp->v_data = NULL;
		ip->vp = NULL;

		if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
			++hammer_count_reclaiming;
			++hmp->inode_reclaims;
			ip->flags |= HAMMER_INODE_RECLAIM;
			if (hmp->inode_reclaims > HAMMER_RECLAIM_FLUSH &&
			    (hmp->inode_reclaims & 255) == 0) {
				hammer_flusher_async(hmp);
			}
		}
		hammer_rel_inode(ip, 1);
	}
	return(0);
}
/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
	hammer_mount_t hmp;
	struct vnode *vp;
	int error = 0;

	hmp = ip->hmp;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				hammer_unlock(&ip->lock);
				vp = *vpp;
				vp->v_type = VBAD;
				vx_put(vp);
				continue;
			}
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;
			vp->v_type =
				hammer_get_vnode_type(ip->ino_data.obj_type);

			hammer_inode_wakereclaims(ip);

			switch(ip->ino_data.obj_type) {
			case HAMMER_OBJTYPE_CDEV:
			case HAMMER_OBJTYPE_BDEV:
				vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
				addaliasu(vp, ip->ino_data.rmajor,
					  ip->ino_data.rminor);
				break;
			case HAMMER_OBJTYPE_FIFO:
				vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
				break;
			default:
				break;
			}

			/*
			 * Only mark as the root vnode if the ip is not
			 * historical, otherwise the VFS cache will get
			 * confused.  The other half of the special handling
			 * is in hammer_vop_nlookupdotdot().
			 *
			 * Pseudo-filesystem roots also do not count.
			 */
			if (ip->obj_id == HAMMER_OBJID_ROOT &&
			    ip->obj_asof == hmp->asof &&
			    ip->obj_localization == 0) {
				vp->v_flag |= VROOT;
			}

			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			if (vp->v_type == VREG)
				vinitvmio(vp, ip->ino_data.size);
			break;
		}

		/*
		 * loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp)
				break;
			vput(vp);
		}
	}
	*vpp = vp;
	return(error);
}
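/*
 * NOTE: Illustrative sketch, not part of the original source.  The usual
 * calling pattern (e.g. from a VFS vget-style path) acquires the in-memory
 * inode first and then attaches a vnode, dropping the extra inode ref once
 * the vnode holds its own:
 *
 *	ip = hammer_get_inode(&trans, NULL, ino, asof, localization,
 *			      0, &error);
 *	if (error == 0) {
 *		error = hammer_get_vnode(ip, &vp);
 *		hammer_rel_inode(ip, 0);
 *	}
 */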
/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
			    int (*callback)(hammer_inode_t ip, void *data),
			    void *data)
{
	hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
				   hammer_inode_info_cmp_all_history,
				   callback, iinfo);
}
/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These
 * functions do not attach or detach the related vnode (use
 * hammer_get_vnode() for that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 u_int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	ip->sync_trunc_off = ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);

	/*
	 * Locate the on-disk inode.
	 */
retry:
	hammer_init_cursor(trans, &cursor, (dip ? &dip->cache[0] : NULL), NULL);
	cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;
	cursor.asof = iinfo.obj_asof;
	cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
		       HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	if (*errorp == 0) {
		ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
		ip->ino_data = cursor.data->inode;

		/*
		 * cache[0] tries to cache the location of the object inode.
		 * The assumption is that it is near the directory inode.
		 *
		 * cache[1] tries to cache the location of the object data.
		 * The assumption is that it is near the directory data.
		 */
		hammer_cache_node(&ip->cache[0], cursor.node);
		if (dip && dip->cache[1].node)
			hammer_cache_node(&ip->cache[1], dip->cache[1].node);

		/*
		 * The file should not contain any data past the file size
		 * stored in the inode.  Setting sync_trunc_off to the
		 * file size instead of max reduces B-Tree lookup overheads
		 * on append by allowing the flusher to avoid checking for
		 * record overwrites.
		 */
		ip->sync_trunc_off = ip->ino_data.size;
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 */
	if (*errorp == 0) {
		hammer_ref(&ip->lock);
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_uncache_node(&ip->cache[0]);
			hammer_uncache_node(&ip->cache[1]);
			KKASSERT(ip->lock.refs == 1);
			--hammer_count_inodes;
			--hmp->count_inodes;
			kfree(ip, M_HAMMER);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}
		hmp->rsv_databufs -= ip->rsv_databufs;
		ip->rsv_databufs = 0;			       /* sanity */

		--hammer_count_inodes;
		--hmp->count_inodes;
		kfree(ip, M_HAMMER);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);
	return(ip);
}
/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.
 *
 * The inode is created in-memory.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred, hammer_inode_t dip,
		    int pseudofs, struct hammer_inode **ipp)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;
	uid_t xuid;
	u_int32_t localization;
	int error;

	hmp = trans->hmp;

	/*
	 * Assign the localization domain.  If dip is NULL we are creating
	 * a pseudo-fs and must locate an unused localization domain.
	 */
	if (pseudofs) {
		for (localization = HAMMER_DEF_LOCALIZATION;
		     localization < HAMMER_LOCALIZE_PSEUDOFS_MASK;
		     localization += HAMMER_LOCALIZE_PSEUDOFS_INC) {
			ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
					      hmp->asof, localization,
					      0, &error);
			if (error == ENOENT)
				break;
			if (ip)
				hammer_rel_inode(ip, 0);
		}
	} else {
		localization = dip->obj_localization;
	}
	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;

	/*
	 * Allocate a new object id.  If creating a new pseudo-fs the
	 * obj_id is set to the root object id (HAMMER_OBJID_ROOT).
	 */
	if (pseudofs)
		ip->obj_id = HAMMER_OBJID_ROOT;
	else
		ip->obj_id = hammer_alloc_objid(hmp, dip);
	ip->obj_localization = localization;

	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flush_state = HAMMER_FST_IDLE;
	ip->flags = HAMMER_INODE_DDIRTY |
		    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;

	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);

	ip->ino_data.atime = trans->time;
	ip->ino_data.mtime = trans->time;
	ip->ino_data.size = 0;
	ip->ino_data.nlinks = 0;
	/*
	 * A nohistory designator on the parent directory is inherited by
	 * the child.  We will do this even for pseudo-fs creation... the
	 * sysad can turn it off.
	 */
	ip->ino_data.uflags = dip->ino_data.uflags &
			      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
	ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_leaf.base.localization = ip->obj_localization +
					 HAMMER_LOCALIZE_INODE;
	ip->ino_leaf.base.obj_id = ip->obj_id;
	ip->ino_leaf.base.key = 0;
	ip->ino_leaf.base.create_tid = 0;
	ip->ino_leaf.base.delete_tid = 0;
	ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->time;
	/*
	 * Setup the ".." pointer.  This only needs to be done for directories
	 * but we do it for all objects as a recovery aid.
	 *
	 * The parent_obj_localization field only applies to pseudo-fs roots.
	 */
	ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
	    ip->obj_id == HAMMER_OBJID_ROOT) {
		ip->ino_data.ext.obj.parent_obj_localization =
						dip->obj_localization;
	}
	switch(ip->ino_leaf.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}
	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	xuid = hammer_to_unix_xid(&dip->ino_data.uid);
	xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
				     &vap->va_mode);
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
	else
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
	else
		ip->ino_data.gid = dip->ino_data.gid;
	hammer_ref(&ip->lock);
	if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		hammer_unref(&ip->lock);
		panic("hammer_create_inode: duplicate obj_id %llx",
		      ip->obj_id);
	}
	*ipp = ip;
	return(0);
}
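/*
 * NOTE: Illustrative sketch, not part of the original source; the helper
 * name hammer_ip_add_directory() is assumed from the rest of the HAMMER
 * code.  A typical create-style VOP builds the in-memory inode and then
 * adds the directory entry that gives it namespace visibility:
 *
 *	error = hammer_create_inode(&trans, vap, ap->a_cred, dip, 0, &nip);
 *	if (error == 0)
 *		error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
 */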
/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_record_t record;
	int error;

retry:
	error = 0;

	/*
	 * If the inode has a presence on-disk then locate it and mark
	 * it deleted, setting DELONDISK.
	 *
	 * The record may or may not be physically deleted, depending on
	 * the retention policy.
	 */
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_normalize_cursor(cursor);
		cursor->key_beg.localization = ip->obj_localization +
					       HAMMER_LOCALIZE_INODE;
		cursor->key_beg.obj_id = ip->obj_id;
		cursor->key_beg.key = 0;
		cursor->key_beg.create_tid = 0;
		cursor->key_beg.delete_tid = 0;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor->key_beg.obj_type = 0;
		cursor->asof = ip->obj_asof;
		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
		cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
		cursor->flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(cursor);
		if (hammer_debug_inode)
			kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
		if (error) {
			kprintf("error %d\n", error);
			Debugger("hammer_update_inode");
		}

		if (error == 0) {
			error = hammer_ip_delete_record(cursor, ip,
							trans->tid);
			if (hammer_debug_inode)
				kprintf(" error %d\n", error);
			if (error && error != EDEADLK) {
				kprintf("error %d\n", error);
				Debugger("hammer_update_inode2");
			}
			if (error == 0)
				ip->flags |= HAMMER_INODE_DELONDISK;
			if (cursor->node)
				hammer_cache_node(&ip->cache[0], cursor->node);
		}
		if (error == EDEADLK) {
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("IPDED %p %d\n", ip, error);
			if (error == 0)
				goto retry;
		}
	}
	/*
	 * Ok, write out the initial record or a new record (after deleting
	 * the old one), unless the DELETED flag is set.  This routine will
	 * clear DELONDISK if it writes out a record.
	 *
	 * Update our inode statistics if this is the first application of
	 * the inode on-disk.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		/*
		 * Generate a record and write it to the media
		 */
		record = hammer_alloc_mem_record(ip, 0);
		record->type = HAMMER_MEM_RECORD_INODE;
		record->flush_state = HAMMER_FST_FLUSH;
		record->leaf = ip->sync_ino_leaf;
		record->leaf.base.create_tid = trans->tid;
		record->leaf.data_len = sizeof(ip->sync_ino_data);
		record->leaf.create_ts = trans->time32;
		record->data = (void *)&ip->sync_ino_data;
		record->flags |= HAMMER_RECF_INTERLOCK_BE;
		for (;;) {
			error = hammer_ip_sync_record_cursor(cursor, record);
			if (hammer_debug_inode)
				kprintf("GENREC %p rec %08x %d\n",
					ip, record->flags, error);
			if (error != EDEADLK)
				break;
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("GENREC reinit %d\n", error);
			if (error)
				break;
		}
		if (error) {
			kprintf("error %d\n", error);
			Debugger("hammer_update_inode3");
		}

		/*
		 * The record isn't managed by the inode's record tree,
		 * destroy it whether we succeed or fail.
		 */
		record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
		record->flags |= HAMMER_RECF_DELETED_FE;
		record->flush_state = HAMMER_FST_IDLE;
		hammer_rel_mem_record(record);

		if (error == 0) {
			if (hammer_debug_inode)
				kprintf("CLEANDELOND %p %08x\n",
					ip, ip->flags);
			ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
					    HAMMER_INODE_ATIME |
					    HAMMER_INODE_MTIME);
			ip->flags &= ~HAMMER_INODE_DELONDISK;

			/*
			 * Root volume count of inodes
			 */
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
				ip->flags |= HAMMER_INODE_ONDISK;
				if (hammer_debug_inode)
					kprintf("NOWONDISK %p\n", ip);
			}
		}
	}

	/*
	 * If the inode has been destroyed, clean out any left-over flags
	 * that may have been set by the frontend.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
				    HAMMER_INODE_ATIME |
				    HAMMER_INODE_MTIME);
	}
	return(error);
}
/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	int error;

retry:
	error = 0;
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
	    HAMMER_INODE_ONDISK) {
		return(0);
	}

	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_INODE;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.key = 0;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor->key_beg.obj_type = 0;
	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_GET_LEAF;
	cursor->flags |= HAMMER_CURSOR_GET_DATA;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(cursor);
	if (error) {
		kprintf("error %d\n", error);
		Debugger("hammer_update_itimes1");
	}
	if (error == 0) {
		hammer_cache_node(&ip->cache[0], cursor->node);
		if (ip->sync_flags & HAMMER_INODE_MTIME) {
			/*
			 * Updating MTIME requires an UNDO.  Just cover
			 * both atime and mtime.
			 */
			hammer_modify_buffer(trans, cursor->data_buffer,
				     HAMMER_ITIMES_BASE(&cursor->data->inode),
				     HAMMER_ITIMES_BYTES);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			cursor->data->inode.mtime = ip->sync_ino_data.mtime;
			hammer_modify_buffer_done(cursor->data_buffer);
		} else if (ip->sync_flags & HAMMER_INODE_ATIME) {
			/*
			 * Updating atime only can be done in-place with
			 * no UNDO.
			 */
			hammer_modify_buffer(trans, cursor->data_buffer,
					     NULL, 0);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			hammer_modify_buffer_done(cursor->data_buffer);
		}
		ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor,
					   &ip->cache[0], ip);
		if (error == 0)
			goto retry;
	}
	return(error);
}
/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
	hammer_mount_t hmp = ip->hmp;

	/*
	 * Handle disposition when dropping the last ref.
	 */
	for (;;) {
		if (ip->lock.refs == 1) {
			/*
			 * Determine whether on-disk action is needed for
			 * the inode's final disposition.
			 */
			KKASSERT(ip->vp == NULL);
			hammer_inode_unloadable_check(ip, 0);
			if (ip->flags & HAMMER_INODE_MODMASK) {
				if (hmp->rsv_inodes > desiredvnodes) {
					hammer_flush_inode(ip,
							   HAMMER_FLUSH_SIGNAL);
				} else {
					hammer_flush_inode(ip, 0);
				}
			} else if (ip->lock.refs == 1) {
				hammer_unload_inode(ip);
				break;
			}
		} else {
			if (flush)
				hammer_flush_inode(ip, 0);

			/*
			 * The inode still has multiple refs, try to drop
			 * one ref.
			 */
			KKASSERT(ip->lock.refs >= 1);
			if (ip->lock.refs > 1) {
				hammer_unref(&ip->lock);
				break;
			}
		}
	}
}
/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * This can only be called in the context of the flusher.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
	hammer_mount_t hmp = ip->hmp;

	KASSERT(ip->lock.refs == 1,
		("hammer_unload_inode: %d refs\n", ip->lock.refs));
	KKASSERT(ip->vp == NULL);
	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
	KKASSERT(ip->cursor_ip_refs == 0);
	KKASSERT(ip->lock.lockcount == 0);
	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

	KKASSERT(RB_EMPTY(&ip->rec_tree));
	KKASSERT(TAILQ_EMPTY(&ip->target_list));

	RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

	hammer_uncache_node(&ip->cache[0]);
	hammer_uncache_node(&ip->cache[1]);
	if (ip->objid_cache)
		hammer_clear_objid(ip);
	--hammer_count_inodes;
	--hmp->count_inodes;

	hammer_inode_wakereclaims(ip);
	kfree(ip, M_HAMMER);

	return(0);
}
/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
	hammer_mount_t hmp = ip->hmp;

	if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
		ip->flags |= HAMMER_INODE_RO;
	else
		ip->flags &= ~HAMMER_INODE_RO;
	return(0);
}
/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
	KKASSERT((ip->flags & HAMMER_INODE_RO) == 0 ||
		 (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
			   HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
			   HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
	if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
		ip->flags |= HAMMER_INODE_RSV_INODES;
		++ip->hmp->rsv_inodes;
	}

	ip->flags |= flags;
}
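/*
 * NOTE: Illustrative sketch, not part of the original source.  A namespace
 * VOP that updates inode metadata dirties the in-memory copy and then
 * flags the inode so the flusher writes out a new inode record, e.g.:
 *
 *	ip->ino_data.mode = cur_mode;
 *	hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
 */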
/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * If the HAMMER_FLUSH_SYNCHRONOUS flag is specified we will attempt to
 * flush the inode synchronously using the caller's context.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
	int good;

	/*
	 * Trivial 'nothing to flush' case.  If the inode is in a SETUP
	 * state we have to put it back into an IDLE state so we can
	 * drop the extra ref.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
		if (ip->flush_state == HAMMER_FST_SETUP) {
			ip->flush_state = HAMMER_FST_IDLE;
			hammer_rel_inode(ip, 0);
		}
		return;
	}

	/*
	 * Our flush action will depend on the current state.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * We have no dependencies and can flush immediately.  Some
		 * of our children may not be flushable so we have to re-test
		 * with that additional knowledge.
		 */
		hammer_flush_inode_core(ip, flags);
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Recurse upwards through dependencies via target_list
		 * and start their flusher actions going if possible.
		 *
		 * 'good' is our connectivity.  -1 means we have none and
		 * can't flush, 0 means there weren't any dependencies, and
		 * 1 means we have good connectivity.
		 */
		good = hammer_setup_parent_inodes(ip);

		/*
		 * We can continue if good >= 0.  Determine how many records
		 * under our inode can be flushed (and mark them).
		 */
		if (good >= 0) {
			hammer_flush_inode_core(ip, flags);
		} else {
			ip->flags |= HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp);
			}
		}
		break;
	default:
		/*
		 * We are already flushing, flag the inode to reflush
		 * if needed after it completes its current flush.
		 */
		if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
			ip->flags |= HAMMER_INODE_REFLUSH;
		if (flags & HAMMER_FLUSH_SIGNAL) {
			ip->flags |= HAMMER_INODE_RESIGNAL;
			hammer_flusher_async(ip->hmp);
		}
		break;
	}
}
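/*
 * NOTE: Summary sketch, not part of the original source.  The flush state
 * transitions driven by the switch above are:
 *
 *	IDLE  -> FLUSH		flushed immediately via
 *				hammer_flush_inode_core()
 *	SETUP -> FLUSH		if upward connectivity resolves (good >= 0)
 *	SETUP -> SETUP		+ REFLUSH if connectivity cannot be resolved
 *	FLUSH -> FLUSH		+ REFLUSH, re-flushed after the current
 *				flush group completes
 */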
/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 *     so for now do not ref/deref the structures.  Note that if we use the
 *     ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip)
{
	hammer_record_t depend;
#if 0
	hammer_record_t next;
	hammer_inode_t	pip;
#endif
	int good;
	int r;

	good = 0;
	TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
		r = hammer_setup_parent_inodes_helper(depend);
		KKASSERT(depend->target_ip == ip);
		if (r < 0 && good == 0)
			good = -1;
		if (r > 0)
			good = 1;
	}
	return(good);

#if 0
	/*
	 * Alternate ref/rel version, disabled (see the XXX above).
	 */
	good = 0;
	next = TAILQ_FIRST(&ip->target_list);
	if (next) {
		hammer_ref(&next->lock);
		hammer_ref(&next->ip->lock);
	}
	while ((depend = next) != NULL) {
		if (depend->target_ip == NULL) {
			pip = depend->ip;
			hammer_rel_mem_record(depend);
			hammer_rel_inode(pip, 0);
			break;
		}
		KKASSERT(depend->target_ip == ip);
		next = TAILQ_NEXT(depend, target_entry);
		if (next) {
			hammer_ref(&next->lock);
			hammer_ref(&next->ip->lock);
		}
		r = hammer_setup_parent_inodes_helper(depend);
		if (r < 0 && good == 0)
			good = -1;
		if (r > 0)
			good = 1;
		pip = depend->ip;
		hammer_rel_mem_record(depend);
		hammer_rel_inode(pip, 0);
	}
	return(good);
#endif
}
/*
 * This helper function takes a record representing the dependency between
 * the parent inode and child inode.
 *
 * record->ip		= parent inode
 * record->target_ip	= child inode
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity.
 *
 * Return 0 if the record is not relevant.
 *
 * Return -1 if we can't resolve the dependency and there is no connectivity.
 */
static int
hammer_setup_parent_inodes_helper(hammer_record_t record)
{
	hammer_mount_t hmp;
	hammer_inode_t pip;
	int good;

	KKASSERT(record->flush_state != HAMMER_FST_IDLE);
	pip = record->ip;
	hmp = pip->hmp;

	/*
	 * If the record is already flushing, is it in our flush group?
	 *
	 * If it is in our flush group but it is a general record or a
	 * delete-on-disk, it does not improve our connectivity (return 0),
	 * and if the target inode is not trying to destroy itself we can't
	 * allow the operation yet anyway (the second return -1).
	 */
	if (record->flush_state == HAMMER_FST_FLUSH) {
		if (record->flush_group != hmp->flusher.next) {
			pip->flags |= HAMMER_INODE_REFLUSH;
			return(-1);
		}
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);
		/* GENERAL or DEL */
		return(0);
	}

	/*
	 * It must be a setup record.  Try to resolve the setup dependencies
	 * by recursing upwards so we can place ip on the flush list.
	 */
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

	good = hammer_setup_parent_inodes(pip);

	/*
	 * We can't flush ip because it has no connectivity (XXX also check
	 * nlinks for pre-existing connectivity!).  Flag it so any resolution
	 * recurses back down.
	 */
	if (good < 0) {
		pip->flags |= HAMMER_INODE_REFLUSH;
		return(good);
	}

	/*
	 * We are go, place the parent inode in a flushing state so we can
	 * place its record in a flushing state.  Note that the parent
	 * may already be flushing.  The record must be in the same flush
	 * group as the parent.
	 */
	if (pip->flush_state != HAMMER_FST_FLUSH)
		hammer_flush_inode_core(pip, HAMMER_FLUSH_RECURSION);
	KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

	if (record->type == HAMMER_MEM_RECORD_DEL &&
	    (record->target_ip->flags &
	     (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
		/*
		 * Regardless of flushing state we cannot sync this path if
		 * the record represents a delete-on-disk but the target
		 * inode is not ready to sync its own deletion.
		 *
		 * XXX need to count effective nlinks to determine whether
		 * the flush is ok, otherwise removing a hardlink will
		 * just leave the DEL record to rot.
		 */
		record->target_ip->flags |= HAMMER_INODE_REFLUSH;
		return(-1);
	} else if (pip->flush_group == pip->hmp->flusher.next) {
		/*
		 * This is the record we wanted to synchronize.  If the
		 * record went into a flush state while we blocked it
		 * had better be in the correct flush group.
		 */
		if (record->flush_state != HAMMER_FST_FLUSH) {
			record->flush_state = HAMMER_FST_FLUSH;
			record->flush_group = pip->flush_group;
			hammer_ref(&record->lock);
		} else {
			KKASSERT(record->flush_group == pip->flush_group);
		}
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);

		/*
		 * A general or delete-on-disk record does not contribute
		 * to our visibility.  We can still flush it, however.
		 */
		return(0);
	} else {
		/*
		 * We couldn't resolve the dependencies, request that the
		 * inode be flushed when the dependencies can be resolved.
		 */
		pip->flags |= HAMMER_INODE_REFLUSH;
		return(-1);
	}
}
/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, int flags)
{
	int go_count;

	/*
	 * Set flush state and prevent the flusher from cycling into
	 * the next flush group.  Do not place the ip on the list yet.
	 * Inodes not in the idle state get an extra reference.
	 */
	KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
	if (ip->flush_state == HAMMER_FST_IDLE)
		hammer_ref(&ip->lock);
	ip->flush_state = HAMMER_FST_FLUSH;
	ip->flush_group = ip->hmp->flusher.next;
	++ip->hmp->flusher.group_lock;
	++ip->hmp->count_iqueued;
	++hammer_count_iqueued;

	/*
	 * We need to be able to vfsync/truncate from the backend.
	 */
	KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
	if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
		ip->flags |= HAMMER_INODE_VHELD;
		vref(ip->vp);
	}

	/*
	 * Figure out how many in-memory records we can actually flush
	 * (not including inode meta-data, buffers, etc).
	 */
	if (flags & HAMMER_FLUSH_RECURSION) {
		go_count = 1;
	} else {
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_setup_child_callback, NULL);
	}

	/*
	 * This is a more involved test that includes go_count.  If we
	 * can't flush, flag the inode and return.  If go_count is 0 we
	 * are unable to flush any records in our rec_tree and must
	 * ignore the XDIRTY flag.
	 */
	if (go_count == 0) {
		if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
			ip->flags |= HAMMER_INODE_REFLUSH;

			--ip->hmp->count_iqueued;
			--hammer_count_iqueued;

			ip->flush_state = HAMMER_FST_SETUP;
			if (ip->flags & HAMMER_INODE_VHELD) {
				ip->flags &= ~HAMMER_INODE_VHELD;
				vrele(ip->vp);
			}
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp);
			}
			if (--ip->hmp->flusher.group_lock == 0)
				wakeup(&ip->hmp->flusher.group_lock);
			return;
		}
	}

	/*
	 * Snapshot the state of the inode for the backend flusher.
	 *
	 * The truncation must be retained in the frontend until after
	 * we've actually performed the record deletion.
	 *
	 * We continue to retain sync_trunc_off even when all truncations
	 * have been resolved as an optimization to determine if we can
	 * skip the B-Tree lookup for overwrite deletions.
	 *
	 * NOTE: The DELETING flag is a mod flag, but it is also sticky,
	 * and stays in ip->flags.  Once set, it stays set until the
	 * inode is destroyed.
	 */
	ip->sync_flags = (ip->flags & HAMMER_INODE_MODMASK);
	if (ip->sync_flags & HAMMER_INODE_TRUNCATED)
		ip->sync_trunc_off = ip->trunc_off;
	ip->sync_ino_leaf = ip->ino_leaf;
	ip->sync_ino_data = ip->ino_data;
	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	ip->flags &= ~HAMMER_INODE_MODMASK;
#ifdef DEBUG_TRUNCATE
	if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
		kprintf("truncateS %016llx\n", ip->sync_trunc_off);
#endif

	/*
	 * The flusher list inherits our inode and reference.
	 */
	TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
	if (--ip->hmp->flusher.group_lock == 0)
		wakeup(&ip->hmp->flusher.group_lock);

	if (flags & HAMMER_FLUSH_SIGNAL) {
		hammer_flusher_async(ip->hmp);
	}
}
/*
 * Callback for scan of ip->rec_tree.  Try to include each record in our
 * flush.  ip->flush_group has been set but the inode has not yet been
 * moved into a flushing state.
 *
 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
 * both inodes.
 *
 * We return 1 for any record placed or found in FST_FLUSH, which prevents
 * the caller from shortcutting the flush.
 */
static int
hammer_setup_child_callback(hammer_record_t rec, void *data)
{
	hammer_inode_t target_ip;
	hammer_inode_t ip;
	int r;

	/*
	 * Deleted records are ignored.  Note that the flush detects deleted
	 * front-end records at multiple points to deal with races.  This is
	 * just the first line of defense.  The only time DELETED_FE cannot
	 * be set is when HAMMER_RECF_INTERLOCK_BE is set.
	 *
	 * Don't get confused between record deletion and, say, directory
	 * entry deletion.  The deletion of a directory entry that is on
	 * the media has nothing to do with the record deletion flags.
	 */
	if (rec->flags & (HAMMER_RECF_DELETED_FE|HAMMER_RECF_DELETED_BE))
		return(0);

	/*
	 * If the record is in an idle state it has no dependencies and
	 * can be flushed.
	 */
	ip = rec->ip;
	r = 0;

	switch(rec->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * Record has no setup dependency, we can flush it.
		 */
		KKASSERT(rec->target_ip == NULL);
		rec->flush_state = HAMMER_FST_FLUSH;
		rec->flush_group = ip->flush_group;
		hammer_ref(&rec->lock);
		r = 1;
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Record has a setup dependency.  Try to include the
		 * target ip in the flush.
		 *
		 * We have to be careful here, if we do not do the right
		 * thing we can lose track of dirty inodes and the system
		 * will lockup trying to allocate buffers.
		 */
		target_ip = rec->target_ip;
		KKASSERT(target_ip != NULL);
		KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
		if (target_ip->flush_state == HAMMER_FST_FLUSH) {
			/*
			 * If the target IP is already flushing in our group
			 * we are golden, otherwise make sure the target
			 * reflushes.
			 */
			if (target_ip->flush_group == ip->flush_group) {
				rec->flush_state = HAMMER_FST_FLUSH;
				rec->flush_group = ip->flush_group;
				hammer_ref(&rec->lock);
				r = 1;
			} else {
				target_ip->flags |= HAMMER_INODE_REFLUSH;
			}
		} else if (rec->type == HAMMER_MEM_RECORD_ADD) {
			/*
			 * If the target IP is not flushing we can force
			 * it to flush, even if it is unable to write out
			 * any of its own records we have at least one in
			 * hand that we CAN deal with.
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = ip->flush_group;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		} else {
			/*
			 * General or delete-on-disk record.
			 *
			 * XXX this needs help.  If a delete-on-disk we could
			 * disconnect the target.  If the target has its own
			 * dependencies they really need to be flushed.
			 *
			 * XXX
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = ip->flush_group;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * Record already associated with a flush group.  It had
		 * better be ours.
		 */
		KKASSERT(rec->flush_group == ip->flush_group);
		r = 1;
		break;
	}
	return(r);
}
/*
 * Wait for a previously queued flush to complete.  Not only do we need to
 * wait for the inode to sync out, we also may have to run the flusher again
 * to get it past the UNDO position pertaining to the flush so a crash does
 * not 'undo' our flush.
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
	hammer_mount_t hmp = ip->hmp;
	int sync_group;
	int waitcount;

	sync_group = ip->flush_group;
	waitcount = (ip->flags & HAMMER_INODE_REFLUSH) ? 2 : 1;

	if (ip->flush_state == HAMMER_FST_SETUP) {
		hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
	}
	/* XXX can we make this != FST_IDLE ? check SETUP depends */
	while (ip->flush_state == HAMMER_FST_FLUSH &&
	       (ip->flush_group - sync_group) < waitcount) {
		ip->flags |= HAMMER_INODE_FLUSHW;
		tsleep(&ip->flags, 0, "hmrwin", 0);
	}
	while (hmp->flusher.done - sync_group < waitcount) {
		hammer_flusher_sync(hmp);
	}
}
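/*
 * NOTE: Illustrative sketch, not part of the original source.  A typical
 * fsync-style frontend path queues the inode to the flusher and then waits
 * for the flush group to retire:
 *
 *	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
 *	hammer_wait_inode(ip);
 */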
/*
 * Called by the backend code when a flush has been completed.
 * The inode has already been removed from the flush list.
 *
 * A pipelined flush can occur, in which case we must re-enter the
 * inode on the list and re-copy its fields.
 */
void
hammer_flush_inode_done(hammer_inode_t ip)
{
	hammer_mount_t hmp;
	int dorel;

	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

	hmp = ip->hmp;

	/*
	 * Merge left-over flags back into the frontend and fix the state.
	 */
	ip->flags |= ip->sync_flags;

	/*
	 * The backend may have adjusted nlinks, so if the adjusted nlinks
	 * does not match the frontend set the frontend's RDIRTY flag again.
	 */
	if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
		ip->flags |= HAMMER_INODE_DDIRTY;

	/*
	 * Fix up the dirty buffer status.  IO completions will also
	 * try to clean up rsv_databufs.
	 */
	if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
		ip->flags |= HAMMER_INODE_BUFS;
	} else {
		hmp->rsv_databufs -= ip->rsv_databufs;
		ip->rsv_databufs = 0;
	}

	/*
	 * Re-set the XDIRTY flag if some of the inode's in-memory records
	 * could not be flushed.
	 */
	KKASSERT((RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
		 (!RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) != 0));

	/*
	 * Do not lose track of inodes which no longer have vnode
	 * associations, otherwise they may never get flushed again.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
		ip->flags |= HAMMER_INODE_REFLUSH;

	/*
	 * Adjust flush_state.  The target state (idle or setup) shouldn't
	 * be terribly important since we will reflush if we really need
	 * to do anything. XXX
	 */
	if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
		ip->flush_state = HAMMER_FST_IDLE;
		dorel = 1;
	} else {
		ip->flush_state = HAMMER_FST_SETUP;
		dorel = 0;
	}

	--hmp->count_iqueued;
	--hammer_count_iqueued;

	/*
	 * Clean up the vnode ref
	 */
	if (ip->flags & HAMMER_INODE_VHELD) {
		ip->flags &= ~HAMMER_INODE_VHELD;
		vrele(ip->vp);
	}

	/*
	 * If the frontend made more changes and requested another flush,
	 * then try to get it running.
	 */
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
	}

	/*
	 * If the inode is now clean drop the space reservation.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    (ip->flags & HAMMER_INODE_RSV_INODES)) {
		ip->flags &= ~HAMMER_INODE_RSV_INODES;
		--hmp->rsv_inodes;
	}

	/*
	 * Finally, if the frontend is waiting for a flush to complete,
	 * wake it up.
	 */
	if (ip->flush_state != HAMMER_FST_FLUSH) {
		if (ip->flags & HAMMER_INODE_FLUSHW) {
			ip->flags &= ~HAMMER_INODE_FLUSHW;
			wakeup(&ip->flags);
		}
	}
	if (dorel)
		hammer_rel_inode(ip, 0);
}
/*
 * Called from hammer_sync_inode() to synchronize in-memory records
 * to the media.
 */
static int
hammer_sync_record_callback(hammer_record_t record, void *data)
{
	hammer_cursor_t cursor = data;
	hammer_transaction_t trans = cursor->trans;
	int error;

	/*
	 * Skip records that do not belong to the current flush.
	 */
	++hammer_stats_record_iterations;
	if (record->flush_state != HAMMER_FST_FLUSH)
		return(0);

	if (record->flush_group != record->ip->flush_group) {
		kprintf("sync_record %p ip %p bad flush group %d %d\n",
			record, record->ip, record->flush_group,
			record->ip->flush_group);
		return(0);
	}
	KKASSERT(record->flush_group == record->ip->flush_group);

	/*
	 * Interlock the record using the BE flag.  Once BE is set the
	 * frontend cannot change the state of FE.
	 *
	 * NOTE: If FE is set prior to us setting BE we still sync the
	 * record out, but the flush completion code converts it to
	 * a delete-on-disk record instead of destroying it.
	 */
	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
	record->flags |= HAMMER_RECF_INTERLOCK_BE;

	/*
	 * The backend may have already disposed of the record.
	 */
	if (record->flags & HAMMER_RECF_DELETED_BE) {
		error = 0;
		goto done;
	}

	/*
	 * If the whole inode is being deleted all on-disk records will
	 * be deleted very soon, we can't sync any new records to disk
	 * because they will be deleted in the same transaction they were
	 * created in (delete_tid == create_tid), which will assert.
	 *
	 * XXX There may be a case with RECORD_ADD with DELETED_FE set
	 * that we currently panic on.
	 */
	if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
		switch(record->type) {
		case HAMMER_MEM_RECORD_DATA:
			/*
			 * We don't have to do anything, if the record was
			 * committed the space will have been accounted for
			 * anyway.
			 */
			/* fall through */
		case HAMMER_MEM_RECORD_GENERAL:
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
			error = 0;
			goto done;
		case HAMMER_MEM_RECORD_ADD:
			panic("hammer_sync_record_callback: illegal add "
			      "during inode deletion record %p", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_INODE:
			panic("hammer_sync_record_callback: attempt to "
			      "sync inode record %p?", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_DEL:
			/*
			 * Follow through and issue the on-disk deletion
			 */
			break;
		}
	}

	/*
	 * If DELETED_FE is set special handling is needed for directory
	 * entries.  Dependent pieces related to the directory entry may
	 * have already been synced to disk.  If this occurs we have to
	 * sync the directory entry and then change the in-memory record
	 * from an ADD to a DELETE to cover the fact that it's been
	 * deleted by the frontend.
	 *
	 * A directory delete covering record (MEM_RECORD_DEL) can never
	 * be deleted by the frontend.
	 *
	 * Any other record type (aka DATA) can be deleted by the frontend.
	 * XXX At the moment the flusher must skip it because there may
	 * be another data record in the flush group for the same block,
	 * meaning that some frontend data changes can leak into the backend's
	 * synchronization point.
	 */
	if (record->flags & HAMMER_RECF_DELETED_FE) {
		if (record->type == HAMMER_MEM_RECORD_ADD) {
			record->flags |= HAMMER_RECF_CONVERT_DELETE;
		} else {
			KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
			record->flags |= HAMMER_RECF_DELETED_BE;
			error = 0;
			goto done;
		}
	}

	/*
	 * Assign the create_tid for new records.  Deletions already
	 * have the record's entire key properly set up.
	 */
	if (record->type != HAMMER_MEM_RECORD_DEL)
		record->leaf.base.create_tid = trans->tid;
	record->leaf.create_ts = trans->time32;
	for (;;) {
		error = hammer_ip_sync_record_cursor(cursor, record);
		if (error != EDEADLK)
			break;
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
					   record->ip);
		if (error)
			break;
	}
	record->flags &= ~HAMMER_RECF_CONVERT_DELETE;

	if (error) {
		error = -error;
		if (error != -ENOSPC) {
			kprintf("hammer_sync_record_callback: sync failed rec "
				"%p, error %d\n", record, error);
			Debugger("sync failed rec");
		}
	}
done:
	hammer_flush_record_done(record, error);
	return(error);
}
/*
 * XXX error handling
 */
int
hammer_sync_inode(hammer_inode_t ip)
{
	struct hammer_transaction trans;
	struct hammer_cursor cursor;
	hammer_node_t tmp_node;
	hammer_record_t depend;
	hammer_record_t next;
	int error, tmp_error;
	u_int64_t nlinks;

	if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
		return(0);

	hammer_start_transaction_fls(&trans, ip->hmp);
	error = hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
	if (error)
		goto done;

	/*
	 * Any directory records referencing this inode which are not in
	 * our current flush group must adjust our nlink count for the
	 * purposes of synchronization to disk.
	 *
	 * Records which are in our flush group can be unlinked from our
	 * inode now, potentially allowing the inode to be physically
	 * deleted.
	 *
	 * This cannot block.
	 */
	nlinks = ip->ino_data.nlinks;
	next = TAILQ_FIRST(&ip->target_list);
	while ((depend = next) != NULL) {
		next = TAILQ_NEXT(depend, target_entry);
		if (depend->flush_state == HAMMER_FST_FLUSH &&
		    depend->flush_group == ip->hmp->flusher.act) {
			/*
			 * If this is an ADD that was deleted by the frontend
			 * the frontend nlinks count will have already been
			 * decremented, but the backend is going to sync its
			 * directory entry and must account for it.  The
			 * record will be converted to a delete-on-disk when
			 * it gets synced.
			 *
			 * If the ADD was not deleted by the frontend we
			 * can remove the dependency from our target_list.
			 */
			if (depend->flags & HAMMER_RECF_DELETED_FE) {
				++nlinks;
			} else {
				TAILQ_REMOVE(&ip->target_list, depend,
					     target_entry);
				depend->target_ip = NULL;
			}
		} else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
			/*
			 * Not part of our flush group
			 */
			KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
			switch(depend->type) {
			case HAMMER_MEM_RECORD_ADD:
				--nlinks;
				break;
			case HAMMER_MEM_RECORD_DEL:
				++nlinks;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Set dirty if we had to modify the link count.
	 */
	if (ip->sync_ino_data.nlinks != nlinks) {
		KKASSERT((int64_t)nlinks >= 0);
		ip->sync_ino_data.nlinks = nlinks;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
	}

	/*
	 * If there is a truncation queued destroy any data past the (aligned)
	 * truncation point.  Userland will have dealt with the buffer
	 * containing the truncation point for us.
	 *
	 * We don't flush pending frontend data buffers until after we've
	 * dealt with the truncation.
	 */
	if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
		/*
		 * Interlock trunc_off.  The VOP front-end may continue to
		 * make adjustments to it while we are blocked.
		 */
		off_t trunc_off;
		off_t aligned_trunc_off;
		int blkmask;

		trunc_off = ip->sync_trunc_off;
		blkmask = hammer_blocksize(trunc_off) - 1;
		aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;

		/*
		 * Delete any whole blocks on-media.  The front-end has
		 * already cleaned out any partial block and made it
		 * pending.  The front-end may have updated trunc_off
		 * while we were blocked so we only use sync_trunc_off.
		 */
		error = hammer_ip_delete_range(&cursor, ip,
						aligned_trunc_off,
						0x7FFFFFFFFFFFFFFFLL, 1);
		if (error)
			Debugger("hammer_ip_delete_range errored");

		/*
		 * Clear the truncation flag on the backend after we have
		 * completed the deletions.  Backend data is now good again
		 * (including new records we are about to sync, below).
		 *
		 * Leave sync_trunc_off intact.  As we write additional
		 * records the backend will update sync_trunc_off.  This
		 * tells the backend whether it can skip the overwrite
		 * test.  This should work properly even when the backend
		 * writes full blocks where the truncation point straddles
		 * the block because the comparison is against the base
		 * offset of the record.
		 */
		ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
		/* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
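		/*
		 * NOTE: Worked example (sketch), not part of the original
		 * source.  With a 16K block size blkmask = 0x3fff, so a
		 * truncation to offset 1000 yields aligned_trunc_off =
		 * (1000 + 0x3fff) & ~0x3fff = 16384.  Only whole blocks at
		 * and beyond 16384 are deleted on-media; the partial block
		 * below the truncation point was already handled by the
		 * frontend.
		 */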
	} else {
		error = 0;
	}

	/*
	 * Now sync related records.  These will typically be directory
	 * entries or delete-on-disk records.
	 *
	 * Not all records will be flushed, but clear XDIRTY anyway.  We
	 * will set it again in the frontend hammer_flush_inode_done()
	 * if records remain.
	 */
	if (error == 0) {
		tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				    hammer_sync_record_callback, &cursor);
		if (tmp_error < 0)
			tmp_error = -tmp_error;
		if (tmp_error)
			error = tmp_error;
	}
	hammer_cache_node(&ip->cache[1], cursor.node);

	/*
	 * Re-seek for inode update, assuming our cache hasn't been ripped
	 * out from under us.
	 */
	if (error == 0) {
		tmp_node = hammer_ref_node_safe(ip->hmp, &ip->cache[0], &error);
		if (tmp_node) {
			if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
				hammer_cursor_seek(&cursor, tmp_node, 0);
			hammer_rel_node(tmp_node);
		}
		error = 0;
	}

	/*
	 * If we are deleting the inode the frontend had better not have
	 * any active references on elements making up the inode.
	 */
	if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
	    RB_EMPTY(&ip->rec_tree) &&
	    (ip->sync_flags & HAMMER_INODE_DELETING) &&
	    (ip->flags & HAMMER_INODE_DELETED) == 0) {
		int count1 = 0;

		ip->flags |= HAMMER_INODE_DELETED;
		error = hammer_ip_delete_range_all(&cursor, ip, &count1);
		if (error == 0) {
			ip->sync_flags &= ~HAMMER_INODE_DELETING;
			ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
			KKASSERT(RB_EMPTY(&ip->rec_tree));

			/*
			 * Set delete_tid in both the frontend and backend
			 * copy of the inode record.  The DELETED flag handles
			 * this, do not set RDIRTY.
			 */
			ip->ino_leaf.base.delete_tid = trans.tid;
			ip->sync_ino_leaf.base.delete_tid = trans.tid;
			ip->ino_leaf.delete_ts = trans.time32;
			ip->sync_ino_leaf.delete_ts = trans.time32;

			/*
			 * Adjust the inode count in the volume header
			 */
			if (ip->flags & HAMMER_INODE_ONDISK) {
				hammer_modify_volume_field(&trans,
							   trans.rootvol,
							   vol0_stat_inodes);
				--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans.rootvol);
			}
		} else {
			ip->flags &= ~HAMMER_INODE_DELETED;
			Debugger("hammer_ip_delete_range_all errored");
		}
	}

	ip->sync_flags &= ~HAMMER_INODE_BUFS;

	if (error)
		Debugger("RB_SCAN errored");

	/*
	 * Now update the inode's on-disk inode-data and/or on-disk record.
	 * DELETED and ONDISK are managed only in ip->flags.
	 */
	switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
		/*
		 * If deleted and on-disk, don't set any additional flags.
		 * the delete flag takes care of things.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		break;
	case HAMMER_INODE_DELETED:
		/*
		 * Take care of the case where a deleted inode was never
		 * flushed to the disk in the first place.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		while (RB_ROOT(&ip->rec_tree)) {
			hammer_record_t record = RB_ROOT(&ip->rec_tree);
			hammer_ref(&record->lock);
			KKASSERT(record->lock.refs == 1);
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
			hammer_rel_mem_record(record);
		}
		break;
	case HAMMER_INODE_ONDISK:
		/*
		 * If already on-disk, do not set any additional flags.
		 */
		break;
	default:
		/*
		 * If not on-disk and not deleted, set DDIRTY to force
		 * an initial record to be written.
		 *
		 * Also set the create_tid in both the frontend and backend
		 * copy of the inode record.
		 */
		ip->ino_leaf.base.create_tid = trans.tid;
		ip->ino_leaf.create_ts = trans.time32;
		ip->sync_ino_leaf.base.create_tid = trans.tid;
		ip->sync_ino_leaf.create_ts = trans.time32;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
		break;
	}

	/*
	 * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
	 * is already on-disk the old record is marked as deleted.
	 *
	 * If DELETED is set hammer_update_inode() will delete the existing
	 * record without writing out a new one.
	 *
	 * If *ONLY* the ITIMES flag is set we can update the record in-place.
	 */
	if (ip->flags & HAMMER_INODE_DELETED) {
		error = hammer_update_inode(&cursor, ip);
	} else
	if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
	    (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
		error = hammer_update_itimes(&cursor, ip);
	} else
	if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ATIME |
			      HAMMER_INODE_MTIME)) {
		error = hammer_update_inode(&cursor, ip);
	}
	if (error)
		Debugger("hammer_update_itimes/inode errored");
done:
	/*
	 * Save the TID we used to sync the inode with to make sure we
	 * do not improperly reuse it.
	 */
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return(error);
}
/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
	struct vnode *vp;

	/*
	 * Set the DELETING flag when the link count drops to 0 and the
	 * OS no longer has any opens on the inode.
	 *
	 * The backend will clear DELETING (a mod flag) and set DELETED
	 * (a state flag) when it is actually able to perform the
	 * operation.
	 */
	if (ip->ino_data.nlinks == 0 &&
	    (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
		ip->flags |= HAMMER_INODE_DELETING;
		ip->flags |= HAMMER_INODE_TRUNCATED;
		ip->trunc_off = 0;
		vp = NULL;
		if (getvp) {
			if (hammer_get_vnode(ip, &vp) != 0)
				return;
		}

		if (ip->vp) {
			vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
			vnode_pager_setsize(ip->vp, 0);
		}
		if (getvp)
			vput(vp);
	}
}
/*
 * Re-test an inode when a dependency had gone away to see if we
 * can chain flush it.
 */
void
hammer_test_inode(hammer_inode_t ip)
{
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		hammer_ref(&ip->lock);
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
		hammer_rel_inode(ip, 0);
	}
}
/*
 * Clear the RECLAIM flag on an inode.  This occurs when the inode is
 * reassociated with a vp or just before it gets freed.
 *
 * Wakeup one thread blocked waiting on reclaims to complete.  Note that
 * the inode the thread is waiting on behalf of is a different inode than
 * the inode we are called with.  This is to create a pipeline.
 */
static void
hammer_inode_wakereclaims(hammer_inode_t ip)
{
	struct hammer_reclaim *reclaim;
	hammer_mount_t hmp = ip->hmp;

	if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
		return;

	--hammer_count_reclaiming;
	--hmp->inode_reclaims;
	ip->flags &= ~HAMMER_INODE_RECLAIM;

	if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
		TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
		reclaim->okydoky = 1;
		wakeup(reclaim);
	}
}
/*
 * Setup our reclaim pipeline.  We only let so many detached (and dirty)
 * inodes build up before we start blocking.
 *
 * When we block we don't care *which* inode has finished reclaiming,
 * as long as one does.  This is somewhat heuristical... we also put a
 * cap on how long we are willing to wait.
 */
void
hammer_inode_waitreclaims(hammer_mount_t hmp)
{
	struct hammer_reclaim reclaim;
	int delay;

	if (hmp->inode_reclaims > HAMMER_RECLAIM_WAIT) {
		reclaim.okydoky = 0;
		TAILQ_INSERT_TAIL(&hmp->reclaim_list,
				  &reclaim, entry);
	} else {
		reclaim.okydoky = 1;
	}

	if (reclaim.okydoky == 0) {
		delay = (hmp->inode_reclaims - HAMMER_RECLAIM_WAIT) * hz /
			HAMMER_RECLAIM_WAIT;
		tsleep(&reclaim, 0, "hmrrcm", delay + 1);
		if (reclaim.okydoky == 0)
			TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
	}
}