/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.103.2.6 2008/09/25 01:42:52 dillon Exp $
 */
#include "hammer.h"
#include <vm/vm_extern.h>
static int	hammer_unload_inode(struct hammer_inode *ip);
static void	hammer_free_inode(hammer_inode_t ip);
static void	hammer_flush_inode_core(hammer_inode_t ip,
					hammer_flush_group_t flg, int flags);
static int	hammer_setup_child_callback(hammer_record_t rec, void *data);
static int	hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
static int	hammer_setup_parent_inodes(hammer_inode_t ip,
					hammer_flush_group_t flg);
static int	hammer_setup_parent_inodes_helper(hammer_record_t record,
					hammer_flush_group_t flg);
static void	hammer_inode_wakereclaims(hammer_inode_t ip);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif
/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_localization < ip2->obj_localization)
		return(-1);
	if (ip1->obj_localization > ip2->obj_localization)
		return(1);
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}
/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_localization < ip->obj_localization)
		return(-1);
	if (info->obj_localization > ip->obj_localization)
		return(1);
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}
/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t info = data;

	if (ip->obj_localization > info->obj_localization)
		return(1);
	if (ip->obj_localization < info->obj_localization)
		return(-1);
	if (ip->obj_id > info->obj_id)
		return(1);
	if (ip->obj_id < info->obj_id)
		return(-1);
	return(0);
}
/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
	u_int32_t localization = *(u_int32_t *)data;

	if (ip->obj_localization > localization)
		return(1);
	if (ip->obj_localization < localization)
		return(-1);
	return(0);
}
/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
	if (p1->localization < p2->localization)
		return(-1);
	if (p1->localization > p2->localization)
		return(1);
	return(0);
}
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
	     hammer_pfs_rb_compare, u_int32_t, localization);
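
/*
 * Editor's note (illustrative, not original code): the RB_GENERATE*()
 * macros above emit the red-black tree primitives used throughout this
 * file, e.g. hammer_ino_rb_tree_RB_LOOKUP_INFO() and
 * hammer_ino_rb_tree_RB_SCAN() as called by hammer_get_inode() and
 * hammer_scan_inode_snapshots() below, each wired to the comparator
 * named in the corresponding macro invocation.
 */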
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(ap->a_vp);
		return(0);
	}

	/*
	 * If the inode no longer has visibility in the filesystem try to
	 * recycle it immediately, even if the inode is dirty.  Recycling
	 * it quickly allows the system to reclaim buffer cache and VM
	 * resources which can matter a lot in a heavily loaded system.
	 *
	 * This can deadlock in vfsync() if we aren't careful.
	 *
	 * Do not queue the inode to the flusher if we still have visibility,
	 * otherwise namespace calls such as chmod will unnecessarily generate
	 * multiple inode updates.
	 */
	hammer_inode_unloadable_check(ip, 0);
	if (ip->ino_data.nlinks == 0) {
		if (ip->flags & HAMMER_INODE_MODMASK)
			hammer_flush_inode(ip, 0);
		vrecycle(ap->a_vp);
	}
	return(0);
}
/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer_inode *ip;
	hammer_mount_t hmp;
	struct vnode *vp;

	vp = ap->a_vp;

	if ((ip = vp->v_data) != NULL) {
		hmp = ip->hmp;
		vp->v_data = NULL;
		ip->vp = NULL;

		if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
			++hammer_count_reclaiming;
			++hmp->inode_reclaims;
			ip->flags |= HAMMER_INODE_RECLAIM;
		}
		hammer_rel_inode(ip, 1);
	}
	return(0);
}
/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
	hammer_mount_t hmp;
	struct vnode *vp;
	int error = 0;
	u_int8_t obj_type;

	hmp = ip->hmp;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				hammer_unlock(&ip->lock);
				vp = *vpp;
				vp->v_type = VBAD;
				vx_put(vp);
				continue;
			}
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;

			obj_type = ip->ino_data.obj_type;
			vp->v_type = hammer_get_vnode_type(obj_type);

			hammer_inode_wakereclaims(ip);

			switch(ip->ino_data.obj_type) {
			case HAMMER_OBJTYPE_CDEV:
			case HAMMER_OBJTYPE_BDEV:
				vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
				addaliasu(vp, ip->ino_data.rmajor,
					  ip->ino_data.rminor);
				break;
			case HAMMER_OBJTYPE_FIFO:
				vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
				break;
			default:
				break;
			}

			/*
			 * Only mark as the root vnode if the ip is not
			 * historical, otherwise the VFS cache will get
			 * confused.  The other half of the special handling
			 * is in hammer_vop_nlookupdotdot().
			 *
			 * Pseudo-filesystem roots can be accessed via
			 * non-root filesystem paths and setting VROOT may
			 * confuse the namecache.  Set VPFSROOT instead.
			 */
			if (ip->obj_id == HAMMER_OBJID_ROOT &&
			    ip->obj_asof == hmp->asof) {
				if (ip->obj_localization == 0)
					vp->v_flag |= VROOT;
				else
					vp->v_flag |= VPFSROOT;
			}

			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			if (vp->v_type == VREG)
				vinitvmio(vp, ip->ino_data.size);
			break;
		}

		/*
		 * loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp)
				break;
			vput(vp);
		}
	}
	*vpp = ip->vp;
	return(error);
}
/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
			    int (*callback)(hammer_inode_t ip, void *data),
			    void *data)
{
	hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
				   hammer_inode_info_cmp_all_history,
				   callback, iinfo);
}
/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), M_HAMMER_INO, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Locate the on-disk inode.  If this is a PFS root we always
	 * access the current version of the root inode and (if it is not
	 * a master) always access information under it with a snapshot
	 * TID.
	 */
retry:
	hammer_init_cursor(trans, &cursor, (dip ? &dip->cache[0] : NULL), NULL);
	cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;

	cursor.asof = iinfo.obj_asof;
	cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
		       HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	if (*errorp == 0) {
		ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
		ip->ino_data = cursor.data->inode;

		/*
		 * cache[0] tries to cache the location of the object inode.
		 * The assumption is that it is near the directory inode.
		 *
		 * cache[1] tries to cache the location of the object data.
		 * The assumption is that it is near the directory data.
		 */
		hammer_cache_node(&ip->cache[0], cursor.node);
		if (dip && dip->cache[1].node)
			hammer_cache_node(&ip->cache[1], dip->cache[1].node);

		/*
		 * The file should not contain any data past the file size
		 * stored in the inode.  Setting save_trunc_off to the
		 * file size instead of max reduces B-Tree lookup overheads
		 * on append by allowing the flusher to avoid checking for
		 * record overwrites.
		 */
		ip->save_trunc_off = ip->ino_data.size;

		/*
		 * Locate and assign the pseudofs management structure to
		 * the inode.
		 */
		if (dip && dip->obj_localization == ip->obj_localization) {
			ip->pfsm = dip->pfsm;
			hammer_ref(&ip->pfsm->lock);
		} else {
			ip->pfsm = hammer_load_pseudofs(trans,
							ip->obj_localization,
							errorp);
			*errorp = 0;	/* ignore ENOENT */
		}
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}
		hammer_free_inode(ip);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);
	trans->flags |= HAMMER_TRANSF_NEWINODE;
	return(ip);
}
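
/*
 * Editor's note (illustrative, not original code): hammer_get_inode()
 * returns a referenced, unlocked inode.  Callers elsewhere in this file,
 * e.g. hammer_load_pseudofs() and hammer_mkroot_pseudofs(), pair the call
 * with hammer_rel_inode(ip, 0) once they are finished with the inode.
 */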
/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred, hammer_inode_t dip,
		    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;
	uid_t xuid;
	int error;

	hmp = trans->hmp;

	ip = kmalloc(sizeof(*ip), M_HAMMER_INO, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;

	if (pfsm) {
		KKASSERT(pfsm->localization != 0);
		ip->obj_id = HAMMER_OBJID_ROOT;
		ip->obj_localization = pfsm->localization;
	} else {
		KKASSERT(dip != NULL);
		ip->obj_id = hammer_alloc_objid(hmp, dip);
		ip->obj_localization = dip->obj_localization;
	}

	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flush_state = HAMMER_FST_IDLE;
	ip->flags = HAMMER_INODE_DDIRTY |
		    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;

	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	/* ip->save_trunc_off = 0; (already zero) */
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);

	ip->ino_data.atime = trans->time;
	ip->ino_data.mtime = trans->time;
	ip->ino_data.size = 0;
	ip->ino_data.nlinks = 0;

	/*
	 * A nohistory designator on the parent directory is inherited by
	 * the child.  We will do this even for pseudo-fs creation... the
	 * sysad can turn it off.
	 */
	if (dip) {
		ip->ino_data.uflags = dip->ino_data.uflags &
				      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
	}

	ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_leaf.base.localization = ip->obj_localization +
					 HAMMER_LOCALIZE_INODE;
	ip->ino_leaf.base.obj_id = ip->obj_id;
	ip->ino_leaf.base.key = 0;
	ip->ino_leaf.base.create_tid = 0;
	ip->ino_leaf.base.delete_tid = 0;
	ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->time;

	/*
	 * Setup the ".." pointer.  This only needs to be done for directories
	 * but we do it for all objects as a recovery aid.
	 */
	if (dip)
		ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;

	/*
	 * The parent_obj_localization field only applies to pseudo-fs roots.
	 * XXX this is no longer applicable, PFSs are no longer directly
	 * tied into the parent's directory structure.
	 */
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
	    ip->obj_id == HAMMER_OBJID_ROOT) {
		ip->ino_data.ext.obj.parent_obj_localization =
						dip->obj_localization;
	}

	switch(ip->ino_leaf.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	if (dip) {
		xuid = hammer_to_unix_xid(&dip->ino_data.uid);
		xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
					     xuid, cred, &vap->va_mode);
	} else {
		xuid = 0;
	}
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
	else
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
	else if (dip)
		ip->ino_data.gid = dip->ino_data.gid;

	hammer_ref(&ip->lock);

	if (pfsm) {
		ip->pfsm = pfsm;
		hammer_ref(&pfsm->lock);
		error = 0;
	} else if (dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
		error = 0;
	} else {
		ip->pfsm = hammer_load_pseudofs(trans,
						ip->obj_localization,
						&error);
		error = 0;	/* ignore ENOENT */
	}

	if (error) {
		hammer_free_inode(ip);
		ip = NULL;
	} else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
		/* not reached */
		hammer_free_inode(ip);
	}
	*ipp = ip;
	return(error);
}
/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
	KKASSERT(ip->lock.refs == 1);
	hammer_uncache_node(&ip->cache[0]);
	hammer_uncache_node(&ip->cache[1]);
	hammer_inode_wakereclaims(ip);
	if (ip->objid_cache)
		hammer_clear_objid(ip);
	--hammer_count_inodes;
	--ip->hmp->count_inodes;
	if (ip->pfsm) {
		hammer_rel_pseudofs(ip->hmp, ip->pfsm);
		ip->pfsm = NULL;
	}
	kfree(ip, M_HAMMER_INO);
	ip = NULL;
}
/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
		     u_int32_t localization, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_inode_t ip;
	hammer_pseudofs_inmem_t pfsm;
	struct hammer_cursor cursor;
	int bytes;

retry:
	pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
	if (pfsm) {
		hammer_ref(&pfsm->lock);
		*errorp = 0;
		return(pfsm);
	}

	/*
	 * PFS records are stored in the root inode (not the PFS root inode,
	 * but the real root).  Avoid an infinite recursion if loading
	 * the PFS for the real root.
	 */
	if (localization) {
		ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
				      HAMMER_MAX_TID,
				      HAMMER_DEF_LOCALIZATION, 0, errorp);
	} else {
		ip = NULL;
	}

	pfsm = kmalloc(sizeof(*pfsm), M_HAMMER, M_WAITOK | M_ZERO);
	pfsm->localization = localization;
	pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
	pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

	hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
	cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	if (ip)
		*errorp = hammer_ip_lookup(&cursor);
	else
		*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == 0) {
		*errorp = hammer_ip_resolve_data(&cursor);
		if (*errorp == 0) {
			if (cursor.data->pfsd.mirror_flags &
			    HAMMER_PFSD_DELETED) {
				*errorp = ENOENT;
			} else {
				bytes = cursor.leaf->data_len;
				if (bytes > sizeof(pfsm->pfsd))
					bytes = sizeof(pfsm->pfsd);
				bcopy(cursor.data, &pfsm->pfsd, bytes);
			}
		}
	}
	hammer_done_cursor(&cursor);

	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_ref(&pfsm->lock);
	if (ip)
		hammer_rel_inode(ip, 0);
	if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
		kfree(pfsm, M_HAMMER);
		goto retry;
	}
	return(pfsm);
}
/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
	struct hammer_cursor cursor;
	hammer_record_t record;
	hammer_inode_t ip;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = pfsm->localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	error = hammer_ip_lookup(&cursor);
	if (error == 0 && hammer_cursor_inmem(&cursor)) {
		record = cursor.iprec;
		if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
			KKASSERT(cursor.deadlk_rec == NULL);
			hammer_ref(&record->lock);
			cursor.deadlk_rec = record;
			error = EDEADLK;
		} else {
			record->flags |= HAMMER_RECF_DELETED_FE;
			error = 0;
		}
	}
	if (error == 0 || error == ENOENT) {
		record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
		record->type = HAMMER_MEM_RECORD_GENERAL;

		record->leaf.base.localization = ip->obj_localization +
						 HAMMER_LOCALIZE_MISC;
		record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
		record->leaf.base.key = pfsm->localization;
		record->leaf.data_len = sizeof(pfsm->pfsd);
		bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
		error = hammer_ip_add_record(trans, record);
	}
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	hammer_rel_inode(ip, 0);
	return(error);
}
/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
		       hammer_pseudofs_inmem_t pfsm)
{
	hammer_inode_t ip;
	struct vattr vap;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      pfsm->localization, 0, &error);
	if (ip == NULL) {
		vattr_null(&vap);
		vap.va_mode = 0755;
		vap.va_type = VDIR;
		error = hammer_create_inode(trans, &vap, cred, NULL, pfsm, &ip);
		if (error == 0) {
			++ip->ino_data.nlinks;
			hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (ip)
		hammer_rel_inode(ip, 0);
	return(error);
}
/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
	int res;

	hammer_ref(&ip->lock);
	if (ip->lock.refs == 2 && ip->vp)
		vclean_unlocked(ip->vp);
	if (ip->lock.refs == 1 && ip->vp == NULL)
		res = 0;
	else
		res = -1;	/* stop, someone is using the inode */
	hammer_rel_inode(ip, 0);
	return(res);
}
int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
	int res;
	int try;

	for (try = res = 0; try < 4; ++try) {
		res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
					   hammer_inode_pfs_cmp,
					   hammer_unload_pseudofs_callback,
					   &localization);
		if (res == 0 && try > 1)
			break;
		hammer_flusher_sync(trans->hmp);
	}
	if (res != 0)
		res = ENOTEMPTY;
	return(res);
}
/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
	hammer_unref(&pfsm->lock);
	if (pfsm->lock.refs == 0) {
		RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
		kfree(pfsm, M_HAMMER);
	}
}
/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_record_t record;
	int error;
	int redirty;

retry:
	error = 0;

	/*
	 * If the inode has a presence on-disk then locate it and mark
	 * it deleted, setting DELONDISK.
	 *
	 * The record may or may not be physically deleted, depending on
	 * the retention policy.
	 */
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_normalize_cursor(cursor);
		cursor->key_beg.localization = ip->obj_localization +
					       HAMMER_LOCALIZE_INODE;
		cursor->key_beg.obj_id = ip->obj_id;
		cursor->key_beg.key = 0;
		cursor->key_beg.create_tid = 0;
		cursor->key_beg.delete_tid = 0;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor->key_beg.obj_type = 0;
		cursor->asof = ip->obj_asof;
		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
		cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
		cursor->flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(cursor);
		if (hammer_debug_inode)
			kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
		if (error == 0) {
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
			if (hammer_debug_inode)
				kprintf(" error %d\n", error);
			if (error == 0) {
				ip->flags |= HAMMER_INODE_DELONDISK;
			}
			if (cursor->node)
				hammer_cache_node(&ip->cache[0], cursor->node);
		}
		if (error == EDEADLK) {
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("IPDED %p %d\n", ip, error);
			if (error == 0)
				goto retry;
		}
	}

	/*
	 * Ok, write out the initial record or a new record (after deleting
	 * the old one), unless the DELETED flag is set.  This routine will
	 * clear DELONDISK if it writes out a record.
	 *
	 * Update our inode statistics if this is the first application of
	 * the inode on-disk.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		/*
		 * Generate a record and write it to the media.  We clean-up
		 * the state before releasing so we do not have to set-up
		 * a flush synchronization object.
		 */
		record = hammer_alloc_mem_record(ip, 0);
		record->type = HAMMER_MEM_RECORD_INODE;
		record->flush_state = HAMMER_FST_FLUSH;
		record->leaf = ip->sync_ino_leaf;
		record->leaf.base.create_tid = trans->tid;
		record->leaf.data_len = sizeof(ip->sync_ino_data);
		record->leaf.create_ts = trans->time32;
		record->data = (void *)&ip->sync_ino_data;
		record->flags |= HAMMER_RECF_INTERLOCK_BE;

		/*
		 * If this flag is set we cannot sync the new file size
		 * because we haven't finished related truncations.  The
		 * inode will be flushed in another flush group to finish
		 * the job.
		 */
		if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
		    ip->sync_ino_data.size != ip->ino_data.size) {
			redirty = 1;
			ip->sync_ino_data.size = ip->ino_data.size;
		} else {
			redirty = 0;
		}

		for (;;) {
			error = hammer_ip_sync_record_cursor(cursor, record);
			if (hammer_debug_inode)
				kprintf("GENREC %p rec %08x %d\n",
					ip, record->flags, error);
			if (error != EDEADLK)
				break;
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("GENREC reinit %d\n", error);
			if (error)
				break;
		}

		/*
		 * The record isn't managed by the inode's record tree,
		 * destroy it whether we succeed or fail.
		 */
		record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
		record->flags |= HAMMER_RECF_DELETED_FE | HAMMER_RECF_COMMITTED;
		record->flush_state = HAMMER_FST_IDLE;
		hammer_rel_mem_record(record);

		if (error == 0) {
			if (hammer_debug_inode)
				kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
			ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
					    HAMMER_INODE_ATIME |
					    HAMMER_INODE_MTIME);
			ip->flags &= ~HAMMER_INODE_DELONDISK;
			if (redirty)
				ip->sync_flags |= HAMMER_INODE_DDIRTY;

			/*
			 * Root volume count of inodes
			 */
			hammer_sync_lock_sh(trans);
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
				ip->flags |= HAMMER_INODE_ONDISK;
				if (hammer_debug_inode)
					kprintf("NOWONDISK %p\n", ip);
			}
			hammer_sync_unlock(trans);
		}
	}

	/*
	 * If the inode has been destroyed, clean out any left-over flags
	 * that may have been set by the frontend.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
				    HAMMER_INODE_ATIME |
				    HAMMER_INODE_MTIME);
	}
	return(error);
}
/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	int error;

retry:
	error = 0;
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
	    HAMMER_INODE_ONDISK) {
		return(0);
	}

	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_INODE;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.key = 0;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor->key_beg.obj_type = 0;
	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_GET_LEAF;
	cursor->flags |= HAMMER_CURSOR_GET_DATA;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(cursor);
	if (error == 0) {
		hammer_cache_node(&ip->cache[0], cursor->node);
		if (ip->sync_flags & HAMMER_INODE_MTIME) {
			/*
			 * Updating MTIME requires an UNDO.  Just cover
			 * both atime and mtime.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
				     HAMMER_ITIMES_BASE(&cursor->data->inode),
				     HAMMER_ITIMES_BYTES);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			cursor->data->inode.mtime = ip->sync_ino_data.mtime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		} else if (ip->sync_flags & HAMMER_INODE_ATIME) {
			/*
			 * Updating atime only can be done in-place with
			 * no UNDO.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
					     NULL, 0);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		}
		ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor,
					   &ip->cache[0], ip);
		if (error == 0)
			goto retry;
	}
	return(error);
}
/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
	/*hammer_mount_t hmp = ip->hmp;*/

	/*
	 * Handle disposition when dropping the last ref.
	 */
	for (;;) {
		if (ip->lock.refs == 1) {
			/*
			 * Determine whether on-disk action is needed for
			 * the inode's final disposition.
			 */
			KKASSERT(ip->vp == NULL);
			hammer_inode_unloadable_check(ip, 0);
			if (ip->flags & HAMMER_INODE_MODMASK) {
				hammer_flush_inode(ip, 0);
			} else if (ip->lock.refs == 1) {
				hammer_unload_inode(ip);
				break;
			}
		} else {
			if (flush)
				hammer_flush_inode(ip, 0);

			/*
			 * The inode still has multiple refs, try to drop
			 * one ref.
			 */
			KKASSERT(ip->lock.refs >= 1);
			if (ip->lock.refs > 1) {
				hammer_unref(&ip->lock);
				break;
			}
		}
	}
}
/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * The inode must be completely clean.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
	hammer_mount_t hmp = ip->hmp;

	KASSERT(ip->lock.refs == 1,
		("hammer_unload_inode: %d refs\n", ip->lock.refs));
	KKASSERT(ip->vp == NULL);
	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
	KKASSERT(ip->cursor_ip_refs == 0);
	KKASSERT(ip->lock.lockcount == 0);
	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

	KKASSERT(RB_EMPTY(&ip->rec_tree));
	KKASSERT(TAILQ_EMPTY(&ip->target_list));

	RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

	hammer_free_inode(ip);
	return(0);
}
/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
{
	hammer_record_t rec;

	/*
	 * Get rid of the inodes in-memory records, regardless of their
	 * state, and clear the mod-mask.
	 */
	while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
		TAILQ_REMOVE(&ip->target_list, rec, target_entry);
		rec->target_ip = NULL;
		if (rec->flush_state == HAMMER_FST_SETUP)
			rec->flush_state = HAMMER_FST_IDLE;
	}
	while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
		if (rec->flush_state == HAMMER_FST_FLUSH)
			--rec->flush_group->refs;
		else
			hammer_ref(&rec->lock);
		KKASSERT(rec->lock.refs == 1);
		rec->flush_state = HAMMER_FST_IDLE;
		rec->flush_group = NULL;
		rec->flags |= HAMMER_RECF_DELETED_FE;
		rec->flags |= HAMMER_RECF_DELETED_BE;
		hammer_rel_mem_record(rec);
	}
	ip->flags &= ~HAMMER_INODE_MODMASK;
	ip->sync_flags &= ~HAMMER_INODE_MODMASK;
	KKASSERT(ip->vp == NULL);

	/*
	 * Remove the inode from any flush group, force it idle.  FLUSH
	 * and SETUP states have an inode ref.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_FLUSH:
		TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
		--ip->flush_group->refs;
		ip->flush_group = NULL;
		/* fall through */
	case HAMMER_FST_SETUP:
		hammer_unref(&ip->lock);
		ip->flush_state = HAMMER_FST_IDLE;
		/* fall through */
	case HAMMER_FST_IDLE:
		break;
	}

	/*
	 * There shouldn't be any associated vnode.  The unload needs at
	 * least one ref, if we do have a vp steal its ip ref.
	 */
	if (ip->vp) {
		kprintf("hammer_destroy_inode_callback: Unexpected "
			"vnode association ip %p vp %p\n", ip, ip->vp);
		ip->vp->v_data = NULL;
		ip->vp = NULL;
	} else {
		hammer_ref(&ip->lock);
	}
	hammer_unload_inode(ip);
	return(0);
}
/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
	hammer_mount_t hmp = ip->hmp;

	if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
		ip->flags |= HAMMER_INODE_RO;
	else
		ip->flags &= ~HAMMER_INODE_RO;
	return(0);
}
/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
	/*
	 * ronly of 0 or 2 does not trigger assertion.
	 * 2 is a special error state
	 */
	KKASSERT(ip->hmp->ronly != 1 ||
		  (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
			    HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
			    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
	if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
		ip->flags |= HAMMER_INODE_RSV_INODES;
		++ip->hmp->rsv_inodes;
	}

	ip->flags |= flags;
}
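
/*
 * Editor's note (illustrative, not original code): callers typically
 * modify ip->ino_data and then record the dependancy with this routine,
 * as hammer_mkroot_pseudofs() above does with ++ip->ino_data.nlinks
 * followed by hammer_modify_inode(ip, HAMMER_INODE_DDIRTY).
 */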
/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * Upon return if the inode could not be flushed due to a setup
 * dependancy, then it will be automatically flushed when the dependancy
 * is satisfied.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
	hammer_mount_t hmp;
	hammer_flush_group_t flg;
	int good;

	/*
	 * next_flush_group is the first flush group we can place the inode
	 * in.  It may be NULL.  If it becomes full we append a new flush
	 * group and make that the next_flush_group.
	 */
	hmp = ip->hmp;
	while ((flg = hmp->next_flush_group) != NULL) {
		KKASSERT(flg->running == 0);
		if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit)
			break;
		hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
		hammer_flusher_async(ip->hmp, flg);
	}
	if (flg == NULL) {
		flg = kmalloc(sizeof(*flg), M_HAMMER, M_WAITOK|M_ZERO);
		hmp->next_flush_group = flg;
		TAILQ_INIT(&flg->flush_list);
		TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
	}

	/*
	 * Trivial 'nothing to flush' case.  If the inode is in a SETUP
	 * state we have to put it back into an IDLE state so we can
	 * drop the extra ref.
	 *
	 * If we have a parent dependancy we must still fall through
	 * so we can run it.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
		if (ip->flush_state == HAMMER_FST_SETUP &&
		    TAILQ_EMPTY(&ip->target_list)) {
			ip->flush_state = HAMMER_FST_IDLE;
			hammer_rel_inode(ip, 0);
		}
		if (ip->flush_state == HAMMER_FST_IDLE)
			return;
	}

	/*
	 * Our flush action will depend on the current state.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * We have no dependancies and can flush immediately.  Some
		 * our children may not be flushable so we have to re-test
		 * with that additional knowledge.
		 */
		hammer_flush_inode_core(ip, flg, flags);
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Recurse upwards through dependancies via target_list
		 * and start their flusher actions going if possible.
		 *
		 * 'good' is our connectivity.  -1 means we have none and
		 * can't flush, 0 means there weren't any dependancies, and
		 * 1 means we have good connectivity.
		 */
		good = hammer_setup_parent_inodes(ip, flg);

		if (good >= 0) {
			/*
			 * We can continue if good >= 0.  Determine how
			 * many records under our inode can be flushed (and
			 * mark them).
			 */
			hammer_flush_inode_core(ip, flg, flags);
		} else {
			/*
			 * Parent has no connectivity, tell it to flush
			 * us as soon as it does.
			 *
			 * The REFLUSH flag is also needed to trigger
			 * dependancy wakeups.
			 */
			ip->flags |= HAMMER_INODE_CONN_DOWN |
				     HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * We are already flushing, flag the inode to reflush
		 * if needed after it completes its current flush.
		 *
		 * The REFLUSH flag is also needed to trigger
		 * dependancy wakeups.
		 */
		if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
			ip->flags |= HAMMER_INODE_REFLUSH;
		if (flags & HAMMER_FLUSH_SIGNAL) {
			ip->flags |= HAMMER_INODE_RESIGNAL;
			hammer_flusher_async(ip->hmp, flg);
		}
		break;
	}
}
/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 *     so for now do not ref/deref the structures.  Note that if we use the
 *     ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip, hammer_flush_group_t flg)
{
	hammer_record_t depend;
	int good;
	int r;

	good = 0;
	TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
		r = hammer_setup_parent_inodes_helper(depend, flg);
		KKASSERT(depend->target_ip == ip);
		if (r < 0 && good == 0)
			good = -1;
		if (r > 0)
			good = 1;
	}
	return(good);
}
/*
 * This helper function takes a record representing the dependancy between
 * the parent inode and child inode.
 *
 * record->ip		= parent inode
 * record->target_ip	= child inode
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependancy and there is no connectivity.
 */
static int
hammer_setup_parent_inodes_helper(hammer_record_t record,
				  hammer_flush_group_t flg)
{
	hammer_inode_t pip;
	int good;

	KKASSERT(record->flush_state != HAMMER_FST_IDLE);
	pip = record->ip;

	/*
	 * If the record is already flushing, is it in our flush group?
	 *
	 * If it is in our flush group but it is a general record or a
	 * delete-on-disk, it does not improve our connectivity (return 0),
	 * and if the target inode is not trying to destroy itself we can't
	 * allow the operation yet anyway (the second return -1).
	 */
	if (record->flush_state == HAMMER_FST_FLUSH) {
		/*
		 * If not in our flush group ask the parent to reflush
		 * us as soon as possible.
		 */
		if (record->flush_group != flg) {
			pip->flags |= HAMMER_INODE_REFLUSH;
			record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
			return(-1);
		}

		/*
		 * If in our flush group everything is already set up,
		 * just return whether the record will improve our
		 * visibility or not.
		 */
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);
		return(0);
	}

	/*
	 * It must be a setup record.  Try to resolve the setup dependancies
	 * by recursing upwards so we can place ip on the flush list.
	 */
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

	good = hammer_setup_parent_inodes(pip, flg);

	/*
	 * If good < 0 the parent has no connectivity and we cannot safely
	 * flush the directory entry, which also means we can't flush our
	 * ip.  Flag the parent and us for downward recursion once the
	 * parent's connectivity is resolved.
	 */
	if (good < 0) {
		/* pip->flags |= HAMMER_INODE_CONN_DOWN; set by recursion */
		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
		return(good);
	}

	/*
	 * We are go, place the parent inode in a flushing state so we can
	 * place its record in a flushing state.  Note that the parent
	 * may already be flushing.  The record must be in the same flush
	 * group as the parent.
	 */
	if (pip->flush_state != HAMMER_FST_FLUSH)
		hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
	KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

	if (record->type == HAMMER_MEM_RECORD_DEL &&
	    (record->target_ip->flags &
	     (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
		/*
		 * Regardless of flushing state we cannot sync this path if the
		 * record represents a delete-on-disk but the target inode
		 * is not ready to sync its own deletion.
		 *
		 * XXX need to count effective nlinks to determine whether
		 * the flush is ok, otherwise removing a hardlink will
		 * just leave the DEL record to rot.
		 */
		record->target_ip->flags |= HAMMER_INODE_REFLUSH;
		return(-1);
	} else if (pip->flush_group == flg) {
		/*
		 * Because we have not calculated nlinks yet we can just
		 * set records to the flush state if the parent is in
		 * the same flush group as we are.
		 */
		record->flush_state = HAMMER_FST_FLUSH;
		record->flush_group = flg;
		++record->flush_group->refs;
		hammer_ref(&record->lock);

		/*
		 * A general directory-add contributes to our visibility.
		 *
		 * Otherwise it is probably a directory-delete or
		 * delete-on-disk record and does not contribute to our
		 * visibility (but we can still flush it).
		 */
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);
		return(0);
	} else {
		/*
		 * If the parent is not in our flush group we cannot
		 * flush this record yet, there is no visibility.
		 * We tell the parent to reflush and mark ourselves
		 * so the parent knows it should flush us too.
		 */
		pip->flags |= HAMMER_INODE_REFLUSH;
		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
		return(-1);
	}
}
/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
{
	int go_count;

	/*
	 * Set flush state and prevent the flusher from cycling into
	 * the next flush group.  Do not place the ip on the list yet.
	 * Inodes not in the idle state get an extra reference.
	 */
	KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
	if (ip->flush_state == HAMMER_FST_IDLE)
		hammer_ref(&ip->lock);
	ip->flush_state = HAMMER_FST_FLUSH;
	ip->flush_group = flg;
	++ip->hmp->flusher.group_lock;
	++ip->hmp->count_iqueued;
	++hammer_count_iqueued;
	++flg->total_count;

	/*
	 * If the flush group reaches the autoflush limit we want to signal
	 * the flusher.  This is particularly important for remove()s.
	 */
	if (flg->total_count == hammer_autoflush)
		flags |= HAMMER_FLUSH_SIGNAL;

	/*
	 * We need to be able to vfsync/truncate from the backend.
	 */
	KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
	if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
		ip->flags |= HAMMER_INODE_VHELD;
		vref(ip->vp);
	}

	/*
	 * Figure out how many in-memory records we can actually flush
	 * (not including inode meta-data, buffers, etc).
	 */
	KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
	if (flags & HAMMER_FLUSH_RECURSION) {
		/*
		 * If this is an upwards recursion we do not want to
		 * recurse down again!
		 */
		go_count = 1;
	} else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
		/*
		 * No new records are added if we must complete a flush
		 * from a previous cycle, but we do have to move the records
		 * from the previous cycle to the current one.
		 */
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_syncgrp_child_callback, NULL);
	} else {
		/*
		 * Normal flush, scan records and bring them into the flush.
		 * Directory adds and deletes are usually skipped (they are
		 * grouped with the related inode rather then with the
		 * directory).
		 *
		 * go_count can be negative, which means the scan aborted
		 * due to the flush group being over-full and we should
		 * flush what we have.
		 */
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_setup_child_callback, NULL);
	}

	/*
	 * This is a more involved test that includes go_count.  If we
	 * can't flush, flag the inode and return.  If go_count is 0 we
	 * are unable to flush any records in our rec_tree and
	 * must ignore the XDIRTY flag.
	 */
	if (go_count == 0) {
		if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
			--ip->hmp->count_iqueued;
			--hammer_count_iqueued;

			--flg->total_count;
			ip->flush_state = HAMMER_FST_SETUP;
			ip->flush_group = NULL;
			if (ip->flags & HAMMER_INODE_VHELD) {
				ip->flags &= ~HAMMER_INODE_VHELD;
				vrele(ip->vp);
			}

			/*
			 * REFLUSH is needed to trigger dependancy wakeups
			 * when an inode is in SETUP.
			 */
			ip->flags |= HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
			if (--ip->hmp->flusher.group_lock == 0)
				wakeup(&ip->hmp->flusher.group_lock);
			return;
		}
	}

	/*
	 * Snapshot the state of the inode for the backend flusher.
	 *
	 * We continue to retain save_trunc_off even when all truncations
	 * have been resolved as an optimization to determine if we can
	 * skip the B-Tree lookup for overwrite deletions.
	 *
	 * NOTE: The DELETING flag is a mod flag, but it is also sticky,
	 * and stays in ip->flags.  Once set, it stays set until the
	 * inode is destroyed.
	 */
	if (ip->flags & HAMMER_INODE_TRUNCATED) {
		KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
		ip->sync_trunc_off = ip->trunc_off;
		ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
		ip->flags &= ~HAMMER_INODE_TRUNCATED;
		ip->sync_flags |= HAMMER_INODE_TRUNCATED;

		/*
		 * The save_trunc_off used to cache whether the B-Tree
		 * holds any records past that point is not used until
		 * after the truncation has succeeded, so we can safely
		 * set it now.
		 */
		if (ip->save_trunc_off > ip->sync_trunc_off)
			ip->save_trunc_off = ip->sync_trunc_off;
	}
	ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
			   ~HAMMER_INODE_TRUNCATED);
	ip->sync_ino_leaf = ip->ino_leaf;
	ip->sync_ino_data = ip->ino_data;
	ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
#ifdef DEBUG_TRUNCATE
	if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
		kprintf("truncateS %016llx\n", ip->sync_trunc_off);
#endif

	/*
	 * The flusher list inherits our inode and reference.
	 */
	KKASSERT(flg->running == 0);
	TAILQ_INSERT_TAIL(&flg->flush_list, ip, flush_entry);
	if (--ip->hmp->flusher.group_lock == 0)
		wakeup(&ip->hmp->flusher.group_lock);

	if (flags & HAMMER_FLUSH_SIGNAL) {
		hammer_flusher_async(ip->hmp, flg);
	}
}
/*
 * Callback for scan of ip->rec_tree.  Try to include each record in our
 * flush.  ip->flush_group has been set but the inode has not yet been
 * moved into a flushing state.
 *
 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
 * both inodes.
 *
 * We return 1 for any record placed or found in FST_FLUSH, which prevents
 * the caller from shortcutting the flush.
 */
static int
hammer_setup_child_callback(hammer_record_t rec, void *data)
{
	hammer_flush_group_t flg;
	hammer_inode_t target_ip;
	hammer_inode_t ip;
	int r;

	/*
	 * Deleted records are ignored.  Note that the flush detects deleted
	 * front-end records at multiple points to deal with races.  This is
	 * just the first line of defense.  The only time DELETED_FE cannot
	 * be set is when HAMMER_RECF_INTERLOCK_BE is set.
	 *
	 * Don't get confused between record deletion and, say, directory
	 * entry deletion.  The deletion of a directory entry that is on
	 * the media has nothing to do with the record deletion flags.
	 */
	if (rec->flags & (HAMMER_RECF_DELETED_FE|HAMMER_RECF_DELETED_BE)) {
		if (rec->flush_state == HAMMER_FST_FLUSH) {
			KKASSERT(rec->flush_group == rec->ip->flush_group);
			return(1);
		}
		return(0);
	}

	/*
	 * If the record is in an idle state it has no dependancies and
	 * can be flushed.
	 */
	ip = rec->ip;
	flg = ip->flush_group;
	r = 0;

	switch(rec->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * The record has no setup dependancy, we can flush it.
		 */
		KKASSERT(rec->target_ip == NULL);
		rec->flush_state = HAMMER_FST_FLUSH;
		rec->flush_group = flg;
		++rec->flush_group->refs;
		hammer_ref(&rec->lock);
		r = 1;
		break;
	case HAMMER_FST_SETUP:
		/*
		 * The record has a setup dependancy.  These are typically
		 * directory entry adds and deletes.  Such entries will be
		 * flushed when their inodes are flushed so we do not
		 * usually have to add them to the flush here.  However,
		 * if the target_ip has set HAMMER_INODE_CONN_DOWN then
		 * it is asking us to flush this record (and it).
		 */
		target_ip = rec->target_ip;
		KKASSERT(target_ip != NULL);
		KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);

		/*
		 * If the target IP is already flushing in our group
		 * we could associate the record, but target_ip has
		 * already synced ino_data to sync_ino_data and we
		 * would also have to adjust nlinks.   Plus there are
		 * ordering issues for adds and deletes.
		 *
		 * Reflush downward if this is an ADD, and upward if
		 * this is a DEL.
		 */
		if (target_ip->flush_state == HAMMER_FST_FLUSH) {
			if (rec->type == HAMMER_MEM_RECORD_ADD)
				ip->flags |= HAMMER_INODE_REFLUSH;
			else
				target_ip->flags |= HAMMER_INODE_REFLUSH;
			break;
		}

		/*
		 * Target IP is not yet flushing.  This can get complex
		 * because we have to be careful about the recursion.
		 *
		 * Directories create an issue for us in that if a flush
		 * of a directory is requested the expectation is to flush
		 * any pending directory entries, but this will cause the
		 * related inodes to recursively flush as well.  We can't
		 * really defer the operation so just get as many as we
		 * can.
		 */
		if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
		    (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
			/*
			 * We aren't reclaiming and the target ip was not
			 * previously prevented from flushing due to this
			 * record dependancy.  Do not flush this record.
			 */
			/*r = 0;*/
		} else if (flg->total_count + flg->refs >
			   ip->hmp->undo_rec_limit) {
			/*
			 * Our flush group is over-full and we risk blowing
			 * out the UNDO FIFO.  Stop the scan, flush what we
			 * have, then reflush the directory.
			 *
			 * The directory may be forced through multiple
			 * flush groups before it can be completely
			 * flushed.
			 */
			ip->flags |= HAMMER_INODE_RESIGNAL |
				     HAMMER_INODE_REFLUSH;
			r = -1;
		} else if (rec->type == HAMMER_MEM_RECORD_ADD) {
			/*
			 * If the target IP is not flushing we can force
			 * it to flush, even if it is unable to write out
			 * any of its own records we have at least one in
			 * hand that we CAN deal with.
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = flg;
			++rec->flush_group->refs;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip, flg,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		} else {
			/*
			 * General or delete-on-disk record.
			 *
			 * XXX this needs help.  If a delete-on-disk we could
			 * disconnect the target.  If the target has its own
			 * dependancies they really need to be flushed.
			 *
			 * XXX
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = flg;
			++rec->flush_group->refs;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip, flg,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * The flush_group should already match.
		 */
		KKASSERT(rec->flush_group == flg);
		r = 1;
		break;
	}
	return(r);
}
/*
 * This version just moves records already in a flush state to the new
 * flush group and that is it.
 */
static int
hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
{
	hammer_inode_t ip = rec->ip;

	switch(rec->flush_state) {
	case HAMMER_FST_FLUSH:
		KKASSERT(rec->flush_group == ip->flush_group);
		break;
	default:
		break;
	}
	return(1);
}
/*
 * Wait for a previously queued flush to complete.
 *
 * If a critical error occurred we don't try to wait.
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
	hammer_flush_group_t flg;

	flg = NULL;
	if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
		while (ip->flush_state != HAMMER_FST_IDLE &&
		       (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
			if (ip->flush_state == HAMMER_FST_SETUP)
				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
			if (ip->flush_state != HAMMER_FST_IDLE) {
				ip->flags |= HAMMER_INODE_FLUSHW;
				tsleep(&ip->flags, 0, "hmrwin", 0);
			}
		}
	}
}
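
/*
 * Editor's note (illustrative, not original code): the FLUSHW flag and
 * tsleep on &ip->flags above pair with hammer_flush_inode_done() below,
 * which clears HAMMER_INODE_FLUSHW and issues the matching wakeup on
 * &ip->flags once the backend finishes the flush.
 */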
/*
 * Called by the backend code when a flush has been completed.
 * The inode has already been removed from the flush list.
 *
 * A pipelined flush can occur, in which case we must re-enter the
 * inode on the list and re-copy its fields.
 */
void
hammer_flush_inode_done(hammer_inode_t ip, int error)
{
	hammer_mount_t hmp;
	int dorel;

	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

	hmp = ip->hmp;

	/*
	 * Merge left-over flags back into the frontend and fix the state.
	 * Incomplete truncations are retained by the backend.
	 */
	ip->error = error;
	ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
	ip->sync_flags &= HAMMER_INODE_TRUNCATED;

	/*
	 * The backend may have adjusted nlinks, so if the adjusted nlinks
	 * does not match the frontend set the frontend's RDIRTY flag again.
	 */
	if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
		ip->flags |= HAMMER_INODE_DDIRTY;

	/*
	 * Fix up the dirty buffer status.
	 */
	if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
		ip->flags |= HAMMER_INODE_BUFS;
	}

	/*
	 * Re-set the XDIRTY flag if some of the inode's in-memory records
	 * could not be flushed.
	 */
	KKASSERT((RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
		 (!RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) != 0));

	/*
	 * Do not lose track of inodes which no longer have vnode
	 * assocations, otherwise they may never get flushed again.
	 *
	 * The reflush flag can be set superfluously, causing extra pain
	 * for no reason.  If the inode is no longer modified it no longer
	 * needs to be flushed.
	 */
	if (ip->flags & HAMMER_INODE_MODMASK) {
		if (ip->vp == NULL)
			ip->flags |= HAMMER_INODE_REFLUSH;
	} else {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
	}

	/*
	 * Adjust the flush state.
	 */
	if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
		/*
		 * We were unable to flush out all our records, leave the
		 * inode in a flush state and in the current flush group.
		 * The flush group will be re-run.
		 *
		 * This occurs if the UNDO block gets too full or there is
		 * too much dirty meta-data and allows the flusher to
		 * finalize the UNDO block and then re-flush.
		 */
		ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
		dorel = 0;
	} else {
		/*
		 * Remove from the flush_group
		 */
		TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
		ip->flush_group = NULL;

		/*
		 * Clean up the vnode ref and tracking counts.
		 */
		if (ip->flags & HAMMER_INODE_VHELD) {
			ip->flags &= ~HAMMER_INODE_VHELD;
			vrele(ip->vp);
		}
		--hmp->count_iqueued;
		--hammer_count_iqueued;

		/*
		 * And adjust the state.
		 */
		if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
			ip->flush_state = HAMMER_FST_IDLE;
			dorel = 1;
		} else {
			ip->flush_state = HAMMER_FST_SETUP;
			dorel = 0;
		}

		/*
		 * If the frontend is waiting for a flush to complete,
		 * wake it up.
		 */
		if (ip->flags & HAMMER_INODE_FLUSHW) {
			ip->flags &= ~HAMMER_INODE_FLUSHW;
			wakeup(&ip->flags);
		}

		/*
		 * If the frontend made more changes and requested another
		 * flush, then try to get it running.
		 *
		 * Reflushes are aborted when the inode is errored out.
		 */
		if (ip->flags & HAMMER_INODE_REFLUSH) {
			ip->flags &= ~HAMMER_INODE_REFLUSH;
			if (ip->flags & HAMMER_INODE_RESIGNAL) {
				ip->flags &= ~HAMMER_INODE_RESIGNAL;
				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
			} else {
				hammer_flush_inode(ip, 0);
			}
		}
	}

	/*
	 * If we have no parent dependancies we can clear CONN_DOWN
	 */
	if (TAILQ_EMPTY(&ip->target_list))
		ip->flags &= ~HAMMER_INODE_CONN_DOWN;

	/*
	 * If the inode is now clean drop the space reservation.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    (ip->flags & HAMMER_INODE_RSV_INODES)) {
		ip->flags &= ~HAMMER_INODE_RSV_INODES;
		--hmp->rsv_inodes;
	}

	if (dorel)
		hammer_rel_inode(ip, 0);
}
/*
 * Called from hammer_sync_inode() to synchronize in-memory records
 * to the media.
 */
static int
hammer_sync_record_callback(hammer_record_t record, void *data)
{
	hammer_cursor_t cursor = data;
	hammer_transaction_t trans = cursor->trans;
	hammer_mount_t hmp = trans->hmp;
	int error;

	/*
	 * Skip records that do not belong to the current flush.
	 */
	++hammer_stats_record_iterations;
	if (record->flush_state != HAMMER_FST_FLUSH)
		return(0);

	if (record->flush_group != record->ip->flush_group) {
		kprintf("sync_record %p ip %p bad flush group %p %p\n",
			record, record->ip, record->flush_group,
			record->ip->flush_group);
	}
	KKASSERT(record->flush_group == record->ip->flush_group);

	/*
	 * Interlock the record using the BE flag.  Once BE is set the
	 * frontend cannot change the state of FE.
	 *
	 * NOTE: If FE is set prior to us setting BE we still sync the
	 *	 record out, but the flush completion code converts it to
	 *	 a delete-on-disk record instead of destroying it.
	 */
	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
	record->flags |= HAMMER_RECF_INTERLOCK_BE;
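	/*
	 * Editor's note (added summary, derived from the code paths below):
	 *
	 *	DELETED_FE	DELETED_BE	backend action
	 *	----------	----------	------------------------------
	 *	    0		    0		sync the record normally
	 *	    1		    0		ADD: sync, then convert to a
	 *					delete-on-disk; other types are
	 *					simply marked DELETED_BE
	 *	    x		    1		already disposed of, skip
	 */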
	/*
	 * The backend may have already disposed of the record.
	 */
	if (record->flags & HAMMER_RECF_DELETED_BE) {
		error = 0;
		goto done;
	}

	/*
	 * If the whole inode is being deleted all on-disk records will
	 * be deleted very soon, we can't sync any new records to disk
	 * because they will be deleted in the same transaction they were
	 * created in (delete_tid == create_tid), which will assert.
	 *
	 * XXX There may be a case with RECORD_ADD with DELETED_FE set
	 * that we currently panic on.
	 */
	if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
		switch(record->type) {
		case HAMMER_MEM_RECORD_DATA:
			/*
			 * We don't have to do anything, if the record was
			 * committed the space will have been accounted for
			 * anyway.
			 */
			/* fall through */
		case HAMMER_MEM_RECORD_GENERAL:
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
			error = 0;
			goto done;
		case HAMMER_MEM_RECORD_ADD:
			panic("hammer_sync_record_callback: illegal add "
			      "during inode deletion record %p", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_INODE:
			panic("hammer_sync_record_callback: attempt to "
			      "sync inode record %p?", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_DEL:
			/*
			 * Follow through and issue the on-disk deletion.
			 */
			break;
		}
	}

	/*
	 * If DELETED_FE is set special handling is needed for directory
	 * entries.  Dependent pieces related to the directory entry may
	 * have already been synced to disk.  If this occurs we have to
	 * sync the directory entry and then change the in-memory record
	 * from an ADD to a DELETE to cover the fact that it's been
	 * deleted by the frontend.
	 *
	 * A directory delete covering record (MEM_RECORD_DEL) can never
	 * be deleted by the frontend.
	 *
	 * Any other record type (aka DATA) can be deleted by the frontend.
	 * XXX At the moment the flusher must skip it because there may
	 * be another data record in the flush group for the same block,
	 * meaning that some frontend data changes can leak into the backend's
	 * synchronization point.
	 */
	if (record->flags & HAMMER_RECF_DELETED_FE) {
		if (record->type == HAMMER_MEM_RECORD_ADD) {
			record->flags |= HAMMER_RECF_CONVERT_DELETE;
		} else {
			KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
			record->flags |= HAMMER_RECF_DELETED_BE;
			error = 0;
			goto done;
		}
	}

	/*
	 * Assign the create_tid for new records.  Deletions already
	 * have the record's entire key properly set up.
	 */
	if (record->type != HAMMER_MEM_RECORD_DEL)
		record->leaf.base.create_tid = trans->tid;
	record->leaf.create_ts = trans->time32;
	for (;;) {
		error = hammer_ip_sync_record_cursor(cursor, record);
		if (error != EDEADLK)
			break;
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
					   record->ip);
		if (error)
			break;
	}
	record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
done:
	hammer_flush_record_done(record, error);

	/*
	 * Do partial finalization if we have built up too many dirty
	 * buffers.  Otherwise a buffer cache deadlock can occur when
	 * doing things like creating tens of thousands of tiny files.
	 *
	 * We must release our cursor lock to avoid a 3-way deadlock
	 * due to the exclusive sync lock the finalizer must get.
	 */
	if (hammer_flusher_meta_limit(hmp)) {
		hammer_unlock_cursor(cursor, 0);
		hammer_flusher_finalize(trans, 0);
		hammer_lock_cursor(cursor, 0);
	}

	return(error);
}
/*
 * Backend function called by the flusher to sync an inode to media.
 */
int
hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	hammer_node_t tmp_node;
	hammer_record_t depend;
	hammer_record_t next;
	int error, tmp_error;
	u_int64_t nlinks;

	if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
		return(0);

	error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	if (error)
		return(error);
	/*
	 * Any directory records referencing this inode which are not in
	 * our current flush group must adjust our nlink count for the
	 * purposes of synchronization to disk.
	 *
	 * Records which are in our flush group can be unlinked from our
	 * inode now, potentially allowing the inode to be physically
	 * deleted.
	 *
	 * This cannot block.
	 */
	nlinks = ip->ino_data.nlinks;
	next = TAILQ_FIRST(&ip->target_list);
	while ((depend = next) != NULL) {
		next = TAILQ_NEXT(depend, target_entry);
		if (depend->flush_state == HAMMER_FST_FLUSH &&
		    depend->flush_group == ip->flush_group) {
			/*
			 * If this is an ADD that was deleted by the frontend
			 * the frontend nlinks count will have already been
			 * decremented, but the backend is going to sync its
			 * directory entry and must account for it.  The
			 * record will be converted to a delete-on-disk when
			 * it gets synced.
			 *
			 * If the ADD was not deleted by the frontend we
			 * can remove the dependency from our target_list.
			 */
			if (depend->flags & HAMMER_RECF_DELETED_FE) {
				++nlinks;
			} else {
				TAILQ_REMOVE(&ip->target_list, depend,
					     target_entry);
				depend->target_ip = NULL;
			}
		} else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
			/*
			 * Not part of our flush group.
			 */
			KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
			switch(depend->type) {
			case HAMMER_MEM_RECORD_ADD:
				--nlinks;
				break;
			case HAMMER_MEM_RECORD_DEL:
				++nlinks;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Set dirty if we had to modify the link count.
	 */
	if (ip->sync_ino_data.nlinks != nlinks) {
		KKASSERT((int64_t)nlinks >= 0);
		ip->sync_ino_data.nlinks = nlinks;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
	}
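	/*
	 * Editor's note (illustrative, not from the original source): if a
	 * rename created one directory ADD record for this inode in a later
	 * flush group and removed another name via a DEL record also in a
	 * later group, the loop above subtracts one and adds one back, so
	 * the nlinks value written by this sync reflects only the link
	 * state visible to the current flush group.
	 */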
	/*
	 * If there is a truncation queued destroy any data past the (aligned)
	 * truncation point.  Userland will have dealt with the buffer
	 * containing the truncation point for us.
	 *
	 * We don't flush pending frontend data buffers until after we've
	 * dealt with the truncation.
	 */
	if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
		/*
		 * Interlock trunc_off.  The VOP front-end may continue to
		 * make adjustments to it while we are blocked.
		 */
		off_t trunc_off;
		off_t aligned_trunc_off;
		int blkmask;

		trunc_off = ip->sync_trunc_off;
		blkmask = hammer_blocksize(trunc_off) - 1;
		aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
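		/*
		 * Editor's note (worked example, assuming a 16KB block size
		 * at this offset): truncating to offset 0x6200 gives
		 * blkmask = 0x3FFF, so
		 *
		 *	aligned_trunc_off = (0x6200 + 0x3FFF) & ~0x3FFF
		 *			  = 0x8000
		 *
		 * Whole blocks at and beyond 0x8000 are deleted on-media
		 * below; the partially truncated block starting at 0x4000
		 * was already handled by the frontend.
		 */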
		/*
		 * Delete any whole blocks on-media.  The front-end has
		 * already cleaned out any partial block and made it
		 * pending.  The front-end may have updated trunc_off
		 * while we were blocked so we only use sync_trunc_off.
		 *
		 * This operation can blow out the buffer cache, EWOULDBLOCK
		 * means we were unable to complete the deletion.  The
		 * deletion will update sync_trunc_off in that case.
		 */
		error = hammer_ip_delete_range(&cursor, ip,
					       aligned_trunc_off,
					       0x7FFFFFFFFFFFFFFFLL, 2);
		if (error == EWOULDBLOCK) {
			ip->flags |= HAMMER_INODE_WOULDBLOCK;
			error = 0;
			goto defer_buffer_flush;
		}

		/*
		 * Clear the truncation flag on the backend after we have
		 * completed the deletions.  Backend data is now good again
		 * (including new records we are about to sync, below).
		 *
		 * Leave sync_trunc_off intact.  As we write additional
		 * records the backend will update sync_trunc_off.  This
		 * tells the backend whether it can skip the overwrite
		 * test.  This should work properly even when the backend
		 * writes full blocks where the truncation point straddles
		 * the block because the comparison is against the base
		 * offset of the record.
		 */
		ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
		/* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
	}
	/*
	 * Now sync related records.  These will typically be directory
	 * entries, records tracking direct-writes, or delete-on-disk records.
	 */
	if (error == 0) {
		tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				    hammer_sync_record_callback, &cursor);
		if (tmp_error)
			error = tmp_error;
	}
	hammer_cache_node(&ip->cache[1], cursor.node);

	/*
	 * Re-seek for inode update, assuming our cache hasn't been ripped
	 * out from under us.
	 */
	if (error == 0) {
		tmp_node = hammer_ref_node_safe(ip->hmp, &ip->cache[0], &error);
		if (tmp_node) {
			hammer_cursor_downgrade(&cursor);
			hammer_lock_sh(&tmp_node->lock);
			if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
				hammer_cursor_seek(&cursor, tmp_node, 0);
			hammer_unlock(&tmp_node->lock);
			hammer_rel_node(tmp_node);
		}
		error = 0;
	}
	/*
	 * If we are deleting the inode the frontend had better not have
	 * any active references on elements making up the inode.
	 *
	 * The call to hammer_ip_delete_clean() cleans up auxiliary records
	 * but not DB or DATA records.  Those must have already been deleted
	 * by the normal truncation mechanic.
	 */
	if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
	    RB_EMPTY(&ip->rec_tree) &&
	    (ip->sync_flags & HAMMER_INODE_DELETING) &&
	    (ip->flags & HAMMER_INODE_DELETED) == 0) {
		int count1 = 0;

		error = hammer_ip_delete_clean(&cursor, ip, &count1);
		if (error == 0) {
			ip->flags |= HAMMER_INODE_DELETED;
			ip->sync_flags &= ~HAMMER_INODE_DELETING;
			ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
			KKASSERT(RB_EMPTY(&ip->rec_tree));

			/*
			 * Set delete_tid in both the frontend and backend
			 * copy of the inode record.  The DELETED flag handles
			 * this, do not set DDIRTY.
			 */
			ip->ino_leaf.base.delete_tid = trans->tid;
			ip->sync_ino_leaf.base.delete_tid = trans->tid;
			ip->ino_leaf.delete_ts = trans->time32;
			ip->sync_ino_leaf.delete_ts = trans->time32;

			/*
			 * Adjust the inode count in the volume header.
			 */
			hammer_sync_lock_sh(trans);
			if (ip->flags & HAMMER_INODE_ONDISK) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
			}
			hammer_sync_unlock(trans);
		}
	}
	ip->sync_flags &= ~HAMMER_INODE_BUFS;

defer_buffer_flush:
	/*
	 * Now update the inode's on-disk inode-data and/or on-disk record.
	 * DELETED and ONDISK are managed only in ip->flags.
	 *
	 * In the case of a deferred buffer flush we still update the on-disk
	 * inode to satisfy visibility requirements if there happen to be
	 * directory dependencies.
	 */
	switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
		/*
		 * If deleted and on-disk, don't set any additional flags.
		 * The delete flag takes care of things.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		break;
	case HAMMER_INODE_DELETED:
		/*
		 * Take care of the case where a deleted inode was never
		 * flushed to the disk in the first place.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		while (RB_ROOT(&ip->rec_tree)) {
			hammer_record_t record = RB_ROOT(&ip->rec_tree);
			hammer_ref(&record->lock);
			KKASSERT(record->lock.refs == 1);
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
			hammer_rel_mem_record(record);
		}
		break;
	case HAMMER_INODE_ONDISK:
		/*
		 * If already on-disk, do not set any additional flags.
		 */
		break;
	default:
		/*
		 * If not on-disk and not deleted, set DDIRTY to force
		 * an initial record to be written.
		 *
		 * Also set the create_tid in both the frontend and backend
		 * copy of the inode record.
		 */
		ip->ino_leaf.base.create_tid = trans->tid;
		ip->ino_leaf.create_ts = trans->time32;
		ip->sync_ino_leaf.base.create_tid = trans->tid;
		ip->sync_ino_leaf.create_ts = trans->time32;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
		break;
	}
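	/*
	 * Editor's note (added summary of the switch above):
	 *
	 *	DELETED	ONDISK	action
	 *	-------	------	-----------------------------------------
	 *	  yes	  yes	clear frontend-set sync flags; the delete
	 *			flag drives the update below
	 *	  yes	  no	inode was never written out; also discard
	 *			any remaining in-memory records
	 *	  no	  yes	nothing extra to do
	 *	  no	  no	first write: set create_tid/create_ts and
	 *			DDIRTY so an initial record is written
	 */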
	/*
	 * If DDIRTY is set, write out a new record.  If the inode
	 * is already on-disk the old record is marked as deleted.
	 *
	 * If DELETED is set hammer_update_inode() will delete the existing
	 * record without writing out a new one.
	 *
	 * If *ONLY* the ITIMES flags are set we can update the record
	 * in-place.
	 */
	if (ip->flags & HAMMER_INODE_DELETED) {
		error = hammer_update_inode(&cursor, ip);
	} else
	if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
	    (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
		error = hammer_update_itimes(&cursor, ip);
	} else
	if (ip->sync_flags & (HAMMER_INODE_DDIRTY |
			      HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
		error = hammer_update_inode(&cursor, ip);
	}
	if (error) {
		hammer_critical_error(ip->hmp, ip, error,
				      "while syncing inode");
	}
	hammer_done_cursor(&cursor);
	return(error);
}
/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
	struct vnode *vp;

	/*
	 * Set the DELETING flag when the link count drops to 0 and the
	 * OS no longer has any opens on the inode.
	 *
	 * The backend will clear DELETING (a mod flag) and set DELETED
	 * (a state flag) when it is actually able to perform the
	 * operation.
	 *
	 * Don't reflag the deletion if the flusher is currently syncing
	 * one that was already flagged.  A previously set DELETING flag
	 * may bounce around flags and sync_flags until the operation is
	 * completely done.
	 */
	if (ip->ino_data.nlinks == 0 &&
	    ((ip->flags | ip->sync_flags) &
	     (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
		ip->flags |= HAMMER_INODE_DELETING;
		ip->flags |= HAMMER_INODE_TRUNCATED;
		ip->trunc_off = 0;
		vp = NULL;
		if (getvp) {
			if (hammer_get_vnode(ip, &vp) != 0)
				return;
		}

		if (ip->vp) {
			vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
			vnode_pager_setsize(ip->vp, 0);
		}
		if (getvp) {
			vput(vp);
		}
	}
}
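/*
 * Editor's note: setting HAMMER_INODE_TRUNCATED with a zero truncation
 * offset above is what causes the backend (hammer_sync_inode) to destroy
 * all of the inode's on-media data before the inode record itself is
 * deleted, while the vtruncbuf()/vnode_pager_setsize() calls discard the
 * frontend's cached buffers and pages for the doomed inode.
 */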
/*
 * After potentially resolving a dependency the inode is tested
 * to determine whether it needs to be reflushed.
 */
void
hammer_test_inode(hammer_inode_t ip)
{
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		hammer_ref(&ip->lock);
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
		hammer_rel_inode(ip, 0);
	}
}
/*
 * Clear the RECLAIM flag on an inode.  This occurs when the inode is
 * reassociated with a vp or just before it gets freed.
 *
 * Wakeup one thread blocked waiting on reclaims to complete.  Note that
 * the inode the thread is waiting on behalf of is a different inode than
 * the inode we are called with.  This is to create a pipeline.
 */
static void
hammer_inode_wakereclaims(hammer_inode_t ip)
{
	struct hammer_reclaim *reclaim;
	hammer_mount_t hmp = ip->hmp;

	if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
		return;

	--hammer_count_reclaiming;
	--hmp->inode_reclaims;
	ip->flags &= ~HAMMER_INODE_RECLAIM;

	if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
		TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
		reclaim->okydoky = 1;
		wakeup(reclaim);
	}
}
/*
 * Setup our reclaim pipeline.  We only let so many detached (and dirty)
 * inodes build up before we start blocking.
 *
 * When we block we don't care *which* inode has finished reclaiming,
 * as long as one does.  This is somewhat heuristic... we also put a
 * cap on how long we are willing to wait.
 */
void
hammer_inode_waitreclaims(hammer_mount_t hmp)
{
	struct hammer_reclaim reclaim;
	int delay;

	if (hmp->inode_reclaims > HAMMER_RECLAIM_WAIT) {
		reclaim.okydoky = 0;
		TAILQ_INSERT_TAIL(&hmp->reclaim_list,
				  &reclaim, entry);
	} else {
		reclaim.okydoky = 1;
	}

	if (reclaim.okydoky == 0) {
		delay = (hmp->inode_reclaims - HAMMER_RECLAIM_WAIT) * hz /
			(HAMMER_RECLAIM_WAIT * 5);
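		/*
		 * Editor's note (illustrative arithmetic, assuming hz = 100
		 * and HAMMER_RECLAIM_WAIT = 4000, which may differ from the
		 * build's actual values): a backlog of 6000 reclaiming
		 * inodes gives
		 *
		 *	delay = (6000 - 4000) * 100 / (4000 * 5) = 10 ticks
		 *
		 * so the sleep below grows linearly with how far the
		 * backlog exceeds the threshold.
		 */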
		tsleep(&reclaim, 0, "hmrrcm", delay + 1);
		if (reclaim.okydoky == 0)
			TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
	}
}