/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org>
 * Copyright (c) 2011-2022 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/vnode.h>

#include "hammer2.h"
#define INODE_DEBUG	0

RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
             hammer2_tid_t, meta.inum);

int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
        if (ip1->meta.inum < ip2->meta.inum)
                return (-1);
        if (ip1->meta.inum > ip2->meta.inum)
                return (1);
        return (0);
}
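
/*
 * Illustrative sketch (not part of the original source): RB_GENERATE2()
 * above emits an RB_LOOKUP() keyed directly on meta.inum, which is what
 * hammer2_inode_lookup() below relies on.  Guarded out; the caller must
 * hold pmp->inum_spin.
 */
#if 0
static hammer2_inode_t *
example_find_inum(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
        /* spinlocked direct lookup by inode number, no comparator walk */
        return RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
}
#endif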
/*
 * Caller holds pmp->list_spin and the inode should be locked.  Merge ip
 * with the specified depend.
 *
 * If the ip is on SYNCQ it stays there and (void *)-1 is returned, indicating
 * that successive calls must ensure the ip is on a pass2 depend (or they are
 * all SYNCQ).  If the passed-in depend is not NULL and not (void *)-1 then
 * we can set pass2 on it and return.
 *
 * If the ip is not on SYNCQ it is merged with the passed-in depend, creating
 * a self-depend if necessary, and depend->pass2 is set according
 * to the PASS2 flag.  SIDEQ is set.
 */
static hammer2_depend_t *
hammer2_inode_setdepend_locked(hammer2_inode_t *ip, hammer2_depend_t *depend)
{
        hammer2_pfs_t *pmp = ip->pmp;
        hammer2_depend_t *dtmp;
        hammer2_inode_t *iptmp;

        /*
         * If ip is SYNCQ its entry is used for the syncq list and it will
         * no longer be associated with a dependency.  Merging this status
         * with a passed-in depend implies PASS2.
         */
        if (ip->flags & HAMMER2_INODE_SYNCQ) {
                if (depend == (void *)-1 ||
                    depend == NULL) {
                        return ((void *)-1);
                }
                if (depend->pass2 == 0) {
                        depend->pass2 = 1;
                        hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);
                }
                return depend;
        }

        /*
         * If ip is already SIDEQ, merge ip->depend into the passed-in depend.
         * If it is not, associate the ip with the passed-in depend, creating
         * a single-entry dependency using depend_static if necessary.
         *
         * NOTE: The use of ip->depend_static always requires that the
         *       specific ip containing the structure is part of that
         *       particular depend_static's dependency group.
         */
        if (ip->flags & HAMMER2_INODE_SIDEQ) {
                /*
                 * Merge ip->depend with the passed-in depend.  If the
                 * passed-in depend is not a special case, all ips associated
                 * with ip->depend (including the original ip) must be moved
                 * to the passed-in depend.
                 */
                if (depend == NULL) {
                        depend = ip->depend;
                } else if (depend == (void *)-1) {
                        depend = ip->depend;
                        depend->pass2 = 1;
                } else if (depend != ip->depend) {
                        int sanitychk = 0;

                        dtmp = ip->depend;
                        while ((iptmp = TAILQ_FIRST(&dtmp->sideq)) != NULL) {
                                if (iptmp == ip)
                                        sanitychk = 1;
                                TAILQ_REMOVE(&dtmp->sideq, iptmp, entry);
                                TAILQ_INSERT_TAIL(&depend->sideq, iptmp, entry);
                                iptmp->depend = depend;
                        }
                        KKASSERT(sanitychk == 1);
                        depend->count += dtmp->count;
                        depend->pass2 |= dtmp->pass2;
                        TAILQ_REMOVE(&pmp->depq, dtmp, entry);
                        dtmp->count = 0;
                        dtmp->pass2 = 0;
                }
        } else {
                /*
                 * Add ip to the sideq, creating a self-dependency if
                 * necessary.
                 */
                hammer2_inode_ref(ip); /* extra ref usually via hammer2_inode_modify() */
                atomic_set_int(&ip->flags, HAMMER2_INODE_SIDEQ);
                if (depend == NULL) {
                        depend = &ip->depend_static;
                        TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
                } else if (depend == (void *)-1) {
                        depend = &ip->depend_static;
                        depend->pass2 = 1;
                        TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
                } /* else add ip to passed-in depend */
                TAILQ_INSERT_TAIL(&depend->sideq, ip, entry);
                ip->depend = depend;
                ++depend->count;
                ++pmp->sideq_count;
        }

        if (ip->flags & HAMMER2_INODE_SYNCQ_PASS2)
                depend->pass2 = 1;
        if (depend->pass2)
                hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

        return depend;
}
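
/*
 * Illustrative sketch (not part of the original source): callers thread the
 * returned depend through successive calls so multiple inodes collapse into
 * one dependency group, exactly as hammer2_inode_depend() does below.  The
 * (void *)-1 return propagates the "already on SYNCQ, force pass2" state.
 * ip1..ip3 are hypothetical locked inodes on the same pmp.
 */
#if 0
        hammer2_spin_ex(&pmp->list_spin);
        depend = hammer2_inode_setdepend_locked(ip1, NULL);
        depend = hammer2_inode_setdepend_locked(ip2, depend);
        depend = hammer2_inode_setdepend_locked(ip3, depend);
        hammer2_spin_unex(&pmp->list_spin);
#endif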
/*
 * Put a solo inode on the SIDEQ (meaning that it's dirty).  This can also
 * occur from inode_lock4() and inode_depend().
 *
 * Caller must pass-in a locked inode.
 */
void
hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
{
        hammer2_pfs_t *pmp = ip->pmp;

        /*
         * Optimize case to avoid pmp spinlock.
         */
        if ((ip->flags & (HAMMER2_INODE_SYNCQ | HAMMER2_INODE_SIDEQ)) == 0) {
                hammer2_spin_ex(&pmp->list_spin);
                hammer2_inode_setdepend_locked(ip, NULL);
                hammer2_spin_unex(&pmp->list_spin);
        }
}
/*
 * Lock an inode, with SYNCQ semantics.
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.
 *	  shared locks are not subject to SYNCQ semantics, exclusive locks
 *	  are.
 *
 *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *	  Most front-end inode locks do.
 *
 *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *	  the inode data be resolved.  This is used by the syncthr because
 *	  it can run on an unresolved/out-of-sync cluster, and also by the
 *	  vnode reclamation code to avoid unnecessary I/O (particularly when
 *	  disposing of hundreds of thousands of cached vnodes).
 *
 * This function, along with lock4, has SYNCQ semantics.  If the inode being
 * locked is on the SYNCQ, that is it has been staged by the syncer, we must
 * block until the operation is complete (even if we can lock the inode).  In
 * order to reduce the stall time, we re-order the inode to the front of the
 * pmp->syncq prior to blocking.  This reordering VERY significantly improves
 * performance.
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *       inode would otherwise confuse multiple lock holders of the inode.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
        hammer2_pfs_t *pmp;

        hammer2_inode_ref(ip);
        pmp = ip->pmp;

        /*
         * Inode structure mutex - Shared lock
         */
        if (how & HAMMER2_RESOLVE_SHARED) {
                hammer2_mtx_sh(&ip->lock);
                return;
        }

        /*
         * Inode structure mutex - Exclusive lock
         *
         * An exclusive lock (if not recursive) must wait for inodes on
         * SYNCQ to flush first, to ensure that meta-data dependencies such
         * as the nlink count and related directory entries are not split
         * across flushes.
         *
         * If the vnode is locked by the current thread it must be unlocked
         * across the tsleep() to avoid a deadlock.
         */
        hammer2_mtx_ex(&ip->lock);
        if (hammer2_mtx_refs(&ip->lock) > 1)
                return;
        while ((ip->flags & HAMMER2_INODE_SYNCQ) && pmp) {
                hammer2_spin_ex(&pmp->list_spin);
                if (ip->flags & HAMMER2_INODE_SYNCQ) {
                        tsleep_interlock(&ip->flags, 0);
                        atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
                        TAILQ_REMOVE(&pmp->syncq, ip, entry);
                        TAILQ_INSERT_HEAD(&pmp->syncq, ip, entry);
                        hammer2_spin_unex(&pmp->list_spin);
                        hammer2_mtx_unlock(&ip->lock);
                        tsleep(&ip->flags, PINTERLOCKED, "h2sync", 0);
                        hammer2_mtx_ex(&ip->lock);
                        continue;
                }
                hammer2_spin_unex(&pmp->list_spin);
                break;
        }
}
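
/*
 * Illustrative sketch (not part of the original source): the usual
 * front-end bracket around a meta-data update.  RESOLVE_ALWAYS is used
 * because the meta-data is needed; new_mode is hypothetical and error
 * handling is elided.
 */
#if 0
        hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
        hammer2_inode_modify(ip);               /* flags MODIFIED, queues SIDEQ */
        ip->meta.mode = new_mode;
        hammer2_inode_unlock(ip);
#endif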
/*
 * Exclusively lock up to four inodes, in order, with SYNCQ semantics.
 * ip1 and ip2 must not be NULL.  ip3 and ip4 may be NULL, but if ip3 is
 * NULL then ip4 must also be NULL.
 *
 * This creates a dependency between up to four inodes.
 */
void
hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
                    hammer2_inode_t *ip3, hammer2_inode_t *ip4)
{
        hammer2_inode_t *ips[4];
        hammer2_inode_t *iptmp;
        hammer2_inode_t *ipslp;
        hammer2_depend_t *depend;
        hammer2_pfs_t *pmp;
        size_t count;
        size_t i;

        pmp = ip1->pmp;			/* may be NULL */
        KKASSERT(pmp == ip2->pmp);

        ips[0] = ip1;
        ips[1] = ip2;
        if (ip3 == NULL) {
                count = 2;
        } else if (ip4 == NULL) {
                count = 3;
                ips[2] = ip3;
                KKASSERT(pmp == ip3->pmp);
        } else {
                count = 4;
                ips[2] = ip3;
                ips[3] = ip4;
                KKASSERT(pmp == ip3->pmp);
                KKASSERT(pmp == ip4->pmp);
        }

        for (i = 0; i < count; ++i)
                hammer2_inode_ref(ips[i]);

restart:
        /*
         * Lock the inodes in order
         */
        for (i = 0; i < count; ++i) {
                hammer2_mtx_ex(&ips[i]->lock);
        }

        /*
         * Associate dependencies, record the first inode found on SYNCQ
         * (operation is allowed to proceed for inodes on PASS2) for our
         * sleep operation, this inode is theoretically the last one sync'd
         * in the sequence.
         *
         * All inodes found on SYNCQ are moved to the head of the syncq
         * to reduce stalls.
         */
        hammer2_spin_ex(&pmp->list_spin);
        depend = NULL;
        ipslp = NULL;
        for (i = 0; i < count; ++i) {
                iptmp = ips[i];
                depend = hammer2_inode_setdepend_locked(iptmp, depend);
                if (iptmp->flags & HAMMER2_INODE_SYNCQ) {
                        TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
                        TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
                        if (ipslp == NULL)
                                ipslp = iptmp;
                }
        }
        hammer2_spin_unex(&pmp->list_spin);

        /*
         * Block and retry if any of the inodes are on SYNCQ.  It is
         * important that we allow the operation to proceed in the
         * PASS2 case, to avoid deadlocking against the vnode.
         */
        if (ipslp) {
                for (i = 0; i < count; ++i)
                        hammer2_mtx_unlock(&ips[i]->lock);
                tsleep(&ipslp->flags, 0, "h2sync", 2);
                goto restart;
        }
}
/*
 * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
 * we wake it up.
 */
void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
        if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) {
                atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
                hammer2_mtx_unlock(&ip->lock);
                wakeup(&ip->flags);
        } else {
                hammer2_mtx_unlock(&ip->lock);
        }
        hammer2_inode_drop(ip);
}
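
/*
 * Illustrative sketch (not part of the original source): a rename-style
 * operation locks all participating inodes in one shot and releases them
 * individually; there is no unlock4 counterpart.  fdip/tdip/ip are
 * hypothetical source directory, target directory, and target inode.
 */
#if 0
        hammer2_inode_lock4(fdip, tdip, ip, NULL);
        /* ... modify directory entries ... */
        hammer2_inode_unlock(ip);
        hammer2_inode_unlock(tdip);
        hammer2_inode_unlock(fdip);
#endif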
/*
 * If either ip1 or ip2 have been tapped by the syncer, make sure that both
 * are.  This ensures that dependencies (e.g. dirent-v-inode) are synced
 * together.  For dirent-v-inode depends, pass the dirent as ip1.
 *
 * If neither ip1 or ip2 have been tapped by the syncer, merge them into a
 * single dependency.  Dependencies are entered into pmp->depq.  This
 * effectively flags the inodes SIDEQ.
 *
 * Both ip1 and ip2 must be locked by the caller.  This also ensures
 * that we can't race the end of the syncer's queue run.
 */
void
hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
        hammer2_pfs_t *pmp;
        hammer2_depend_t *depend;

        pmp = ip1->pmp;
        hammer2_spin_ex(&pmp->list_spin);
        depend = hammer2_inode_setdepend_locked(ip1, NULL);
        depend = hammer2_inode_setdepend_locked(ip2, depend);
        hammer2_spin_unex(&pmp->list_spin);
}
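
/*
 * Illustrative sketch (not part of the original source): per the comment
 * above, a dirent-v-inode dependency passes the directory side as ip1 so
 * the directory entry and the target inode flush together.  dip/nip are
 * hypothetical and must both be locked.
 */
#if 0
        hammer2_inode_lock(dip, 0);
        hammer2_inode_lock(nip, 0);
        hammer2_inode_depend(dip, nip);		/* dip = dirent side */
#endif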
/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
        hammer2_chain_t *chain;
        hammer2_cluster_t *cluster;

        hammer2_spin_sh(&ip->cluster_spin);
        cluster = &ip->cluster;
        if (clindex >= cluster->nchains)
                chain = NULL;
        else
                chain = cluster->array[clindex].chain;
        if (chain) {
                hammer2_chain_ref(chain);
                hammer2_spin_unsh(&ip->cluster_spin);
                hammer2_chain_lock(chain, how);
        } else {
                hammer2_spin_unsh(&ip->cluster_spin);
        }
        return chain;
}
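
/*
 * Illustrative sketch (not part of the original source): walking every
 * backing chain of an inode.  The returned chain is referenced and locked;
 * the caller must release both.
 */
#if 0
        for (clindex = 0; clindex < ip->cluster.nchains; ++clindex) {
                chain = hammer2_inode_chain(ip, clindex,
                                            HAMMER2_RESOLVE_ALWAYS);
                if (chain) {
                        /* ... inspect chain ... */
                        hammer2_chain_unlock(chain);
                        hammer2_chain_drop(chain);
                }
        }
#endif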
hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
                               hammer2_chain_t **parentp, int how)
{
        hammer2_chain_t *chain;
        hammer2_chain_t *parent;

        for (;;) {
                hammer2_spin_sh(&ip->cluster_spin);
                if (clindex >= ip->cluster.nchains)
                        chain = NULL;
                else
                        chain = ip->cluster.array[clindex].chain;
                if (chain) {
                        hammer2_chain_ref(chain);
                        hammer2_spin_unsh(&ip->cluster_spin);
                        hammer2_chain_lock(chain, how);
                } else {
                        hammer2_spin_unsh(&ip->cluster_spin);
                }

                /*
                 * Get parent, lock order must be (parent, chain).
                 */
                parent = chain->parent;
                if (parent) {
                        hammer2_chain_ref(parent);
                        hammer2_chain_unlock(chain);
                        hammer2_chain_lock(parent, how);
                        hammer2_chain_lock(chain, how);
                }
                if (ip->cluster.array[clindex].chain == chain &&
                    chain->parent == parent) {
                        break;
                }

                /*
                 * Retry
                 */
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
                if (parent) {
                        hammer2_chain_unlock(parent);
                        hammer2_chain_drop(parent);
                }
        }
        *parentp = parent;

        return chain;
}
/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and lock will be released
 * on return.
 *
 * Restore a lock that was temporarily released.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
        return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
        hammer2_mtx_temp_restore(&ip->lock, ostate);
}
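
/*
 * Illustrative sketch (not part of the original source): the temp
 * release/restore pair brackets a blocking call that must not hold the
 * inode lock, exactly as hammer2_igetv() does around vget() below.
 */
#if 0
        hammer2_mtx_state_t ostate;

        ostate = hammer2_inode_lock_temp_release(ip);
        /* ... blocking operation that may deadlock against ip->lock ... */
        hammer2_inode_lock_temp_restore(ip, ostate);
#endif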
/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
        int wasexclusive;

        /* XXX pretends it wasn't exclusive, but shouldn't matter */
        //if (mtx_islocked_ex(&ip->lock)) {
        if (0) {
                wasexclusive = 1;
        } else {
                hammer2_mtx_unlock(&ip->lock);
                hammer2_mtx_ex(&ip->lock);
                wasexclusive = 0;
        }
        return wasexclusive;
}
/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
        if (wasexclusive == 0)
                hammer2_mtx_downgrade(&ip->lock);
}
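
/*
 * Illustrative sketch (not part of the original source): upgrade and
 * downgrade always travel as a pair so a shared caller gets its shared
 * state back, as hammer2_igetv() does below.
 */
#if 0
        wasexclusive = hammer2_inode_lock_upgrade(ip);
        /* ... exclusive-only work ... */
        hammer2_inode_lock_downgrade(ip, wasexclusive);
#endif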
/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
        hammer2_inode_t *ip;

        KKASSERT(pmp);
        if (pmp->spmp_hmp) {
                ip = NULL;
        } else {
                hammer2_spin_ex(&pmp->inum_spin);
                ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
                if (ip)
                        hammer2_inode_ref(ip);
                hammer2_spin_unex(&pmp->inum_spin);
        }
        return ip;
}
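
/*
 * Illustrative sketch (not part of the original source): the lookup
 * returns a referenced but unlocked inode; the caller owns one drop.
 */
#if 0
        ip = hammer2_inode_lookup(pmp, inum);
        if (ip) {
                /* ... use ip ... */
                hammer2_inode_drop(ip);
        }
#endif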
/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
        atomic_add_int(&ip->refs, 1);
        if (hammer2_debug & 0x80000) {
                kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
        }
}
/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
        hammer2_pfs_t *pmp;
        u_int refs;

        while (ip) {
                if (hammer2_debug & 0x80000) {
                        kprintf("INODE-1 %p (%d->%d)\n",
                                ip, ip->refs, ip->refs - 1);
                }
                refs = ip->refs;
                cpu_ccfence();
                if (refs == 1) {
                        /*
                         * Transition to zero, must interlock with
                         * the inode inumber lookup tree (if applicable).
                         * It should not be possible for anyone to race
                         * the transition to 0.
                         */
                        pmp = ip->pmp;
                        KKASSERT(pmp);
                        hammer2_spin_ex(&pmp->inum_spin);

                        if (atomic_cmpset_int(&ip->refs, 1, 0)) {
                                KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
                                if (ip->flags & HAMMER2_INODE_ONRBTREE) {
                                        atomic_clear_int(&ip->flags,
                                                     HAMMER2_INODE_ONRBTREE);
                                        RB_REMOVE(hammer2_inode_tree,
                                                  &pmp->inum_tree, ip);
                                }
                                hammer2_spin_unex(&pmp->inum_spin);

                                ip->pmp = NULL;

                                /*
                                 * Cleaning out ip->cluster isn't entirely
                                 * trivial.
                                 */
                                hammer2_inode_repoint(ip, NULL);

                                /*
                                 * VOP_RECLAIM is currently unused,
                                 * so directly free vnode before inode.
                                 */
                                if (ip->vp) {
                                        if (ip->vp->v_malloced)
                                                freevnode(ip->vp);
                                }
                                kfree_obj(ip, pmp->minode);
                                atomic_add_long(&pmp->inmem_inodes, -1);
                                ip = NULL;	/* will terminate loop */
                        } else {
                                hammer2_spin_unex(&ip->pmp->inum_spin);
                        }
                } else {
                        /*
                         * Non zero transition
                         */
                        if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
                                break;
                }
        }
}
/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * *errorp is set to a UNIX error, not a HAMMER2 error.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct m_vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
        hammer2_pfs_t *pmp;
        struct m_vnode *vp;

        pmp = ip->pmp;
        KKASSERT(pmp != NULL);
        *errorp = 0;

        for (;;) {
                /*
                 * Attempt to reuse an existing vnode assignment.  It is
                 * possible to race a reclaim so the vget() may fail.  The
                 * inode must be unlocked during the vget() to avoid a
                 * deadlock against a reclaim.
                 */
                int wasexclusive;

                vp = ip->vp;
                if (vp) {
                        /*
                         * Inode must be unlocked during the vget() to avoid
                         * possible deadlocks, but leave the ip ref intact.
                         *
                         * vnode is held to prevent destruction during the
                         * vget().  The vget() can still fail if we lost
                         * a reclaim race on the vnode.
                         */
                        hammer2_mtx_state_t ostate;

                        vhold(vp);
                        ostate = hammer2_inode_lock_temp_release(ip);
                        if (vget(vp, LK_EXCLUSIVE)) {
                                vdrop(vp);
                                hammer2_inode_lock_temp_restore(ip, ostate);
                                continue;
                        }
                        hammer2_inode_lock_temp_restore(ip, ostate);
                        vdrop(vp);
                        /* vp still locked and ref from vget */
                        if (ip->vp != vp) {
                                kprintf("hammer2: igetv race %p/%p\n",
                                        ip->vp, vp);
                                vput(vp);
                                continue;
                        }
                        *errorp = 0;
                        break;
                }

                /*
                 * No vnode exists, allocate a new vnode.  Beware of
                 * allocation races.  This function will return an
                 * exclusively locked and referenced vnode.
                 */
                *errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
                if (*errorp) {
                        kprintf("hammer2: igetv getnewvnode failed %d\n",
                                *errorp);
                        vp = NULL;
                        break;
                }

                /*
                 * Lock the inode and check for an allocation race.
                 */
                wasexclusive = hammer2_inode_lock_upgrade(ip);
                if (ip->vp != NULL) {
                        vp->v_type = VBAD;
                        vx_put(vp);
                        hammer2_inode_lock_downgrade(ip, wasexclusive);
                        continue;
                }

                switch (ip->meta.type) {
                case HAMMER2_OBJTYPE_DIRECTORY:
                        vp->v_type = VDIR;
                        break;
                case HAMMER2_OBJTYPE_REGFILE:
                        /*
                         * Regular file must use buffer cache I/O
                         * (VKVABIO cpu sync semantics supported)
                         */
                        vp->v_type = VREG;
                        vsetflags(vp, VKVABIO);
                        vinitvmio(vp, ip->meta.size,
                                  HAMMER2_LBUFSIZE,
                                  (int)ip->meta.size & HAMMER2_LBUFMASK);
                        break;
                case HAMMER2_OBJTYPE_SOFTLINK:
                        /*
                         * XXX for now we are using the generic file_read
                         * and file_write code so we need a buffer cache
                         * association.
                         *
                         * (VKVABIO cpu sync semantics supported)
                         */
                        vp->v_type = VLNK;
                        vsetflags(vp, VKVABIO);
                        vinitvmio(vp, ip->meta.size,
                                  HAMMER2_LBUFSIZE,
                                  (int)ip->meta.size & HAMMER2_LBUFMASK);
                        break;
                case HAMMER2_OBJTYPE_CDEV:
                        vp->v_type = VCHR;
                        /* fall through */
                case HAMMER2_OBJTYPE_BDEV:
                        //vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
                        if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
                                vp->v_type = VBLK;
                        break;
                case HAMMER2_OBJTYPE_FIFO:
                        //vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
                        vp->v_type = VFIFO;
                        break;
                case HAMMER2_OBJTYPE_SOCKET:
                        vp->v_type = VSOCK;
                        break;
                default:
                        panic("hammer2: unhandled objtype %d",
                              ip->meta.type);
                        break;
                }

                if (ip == pmp->iroot)
                        vsetflags(vp, VROOT);

                vp->v_data = ip;
                ip->vp = vp;
                hammer2_inode_ref(ip);	/* vp association */
                hammer2_inode_lock_downgrade(ip, wasexclusive);
                break;
        }

        /*
         * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
         */
        if (hammer2_debug & 0x0002) {
                kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
                        vp, vp->v_refcnt, vp->v_auxrefs);
        }
        return (vp);
}
/*
 * XXX this API needs a rewrite.  It needs to be split into a
 * hammer2_inode_alloc() and hammer2_inode_build() to allow us to get
 * rid of the inode/chain lock reversal fudge.
 *
 * Returns the inode associated with the passed-in cluster, allocating a new
 * hammer2_inode structure if necessary, then synchronizing it to the passed
 * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
 * is synchronized.  Otherwise the whole cluster is synchronized.  inum will
 * be extracted from the passed-in xop and the inum argument will be ignored.
 *
 * If xop is passed as NULL then a new hammer2_inode is allocated with the
 * specified inum, and returned.  For normal inodes, the inode will be
 * indexed in memory and if it already exists the existing ip will be
 * returned instead of allocating a new one.  The superroot and PFS inodes
 * are not indexed in memory.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
                  hammer2_tid_t inum, int idx)
{
        hammer2_inode_t *nip;
        const hammer2_inode_data_t *iptmp;
        const hammer2_inode_data_t *nipdata;

        KKASSERT(xop == NULL ||
                 hammer2_cluster_type(&xop->cluster) ==
                 HAMMER2_BREF_TYPE_INODE);
        KKASSERT(pmp);

        /*
         * Interlocked lookup/ref of the inode.  This code is only needed
         * when looking up inodes with nlinks != 0 (TODO: optimize out
         * otherwise and test for duplicates).
         *
         * Cluster can be NULL during the initial pfs allocation.
         */
        if (xop) {
                iptmp = &hammer2_xop_gdata(xop)->ipdata;
                inum = iptmp->meta.inum;
                hammer2_xop_pdata(xop);
        }
again:
        nip = hammer2_inode_lookup(pmp, inum);
        if (nip) {
                /*
                 * We may have to unhold the cluster to avoid a deadlock
                 * against vnlru (and possibly other XOPs).
                 */
                if (xop) {
                        if (hammer2_mtx_ex_try(&nip->lock) != 0) {
                                hammer2_cluster_unhold(&xop->cluster);
                                hammer2_mtx_ex(&nip->lock);
                                hammer2_cluster_rehold(&xop->cluster);
                        }
                } else {
                        hammer2_mtx_ex(&nip->lock);
                }

                /*
                 * Handle SMP race (not applicable to the super-root spmp
                 * which can't index inodes due to duplicative inode numbers).
                 */
                if (pmp->spmp_hmp == NULL &&
                    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
                        hammer2_mtx_unlock(&nip->lock);
                        hammer2_inode_drop(nip);
                        goto again;
                }
                if (xop) {
                        if (idx >= 0)
                                hammer2_inode_repoint_one(nip, &xop->cluster,
                                                          idx);
                        else
                                hammer2_inode_repoint(nip, &xop->cluster);
                }
                return nip;
        }

        /*
         * We couldn't find the inode number, create a new inode and try to
         * insert it, handle insertion races.
         */
        nip = kmalloc_obj(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
        hammer2_spin_init(&nip->cluster_spin, "h2clspin");
        atomic_add_long(&pmp->inmem_inodes, 1);

        /*
         * Initialize nip's cluster.  A cluster is provided for normal
         * inodes but typically not for the super-root or PFS inodes.
         */
        {
                hammer2_inode_t *nnip = nip;
                nip->ihash = (int)hammer2_icrc32(&nnip, sizeof(nnip));
        }

        nip->cluster.refs = 1;
        nip->cluster.pmp = pmp;
        nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
        if (xop) {
                nipdata = &hammer2_xop_gdata(xop)->ipdata;
                nip->meta = nipdata->meta;
                hammer2_xop_pdata(xop);
                hammer2_inode_repoint(nip, &xop->cluster);
        } else {
                nip->meta.inum = inum;	/* PFS inum is always 1 XXX */
                /* mtime will be updated when a cluster is available */
        }

        nip->pmp = pmp;

        /*
         * ref and lock on nip gives it state compatible to after a
         * hammer2_inode_lock() call.
         */
        nip->refs = 1;
        hammer2_mtx_init(&nip->lock, "h2inode");
        hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
        hammer2_mtx_ex(&nip->lock);
        TAILQ_INIT(&nip->depend_static.sideq);
        /* combination of thread lock and chain lock == inode lock */

        /*
         * Attempt to add the inode.  If it fails we raced another inode
         * get.  Undo all the work and try again.
         */
        if (pmp->spmp_hmp == NULL) {
                hammer2_spin_ex(&pmp->inum_spin);
                if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
                        hammer2_spin_unex(&pmp->inum_spin);
                        hammer2_mtx_unlock(&nip->lock);
                        hammer2_inode_drop(nip);
                        goto again;
                }
                atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
                hammer2_spin_unex(&pmp->inum_spin);
        }
        return nip;
}
/*
 * Create a PFS inode under the superroot.  This function will create the
 * inode, its media chains, and also insert it into the media.
 *
 * Caller must be in a flush transaction because we are inserting the inode
 * onto the media.
 */
hammer2_inode_t *
hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
                         const char *name, size_t name_len,
                         int *errorp)
{
        hammer2_xop_create_t *xop;
        hammer2_inode_t *pip;
        hammer2_inode_t *nip;
        int error;
        uint8_t pip_comp_algo;
        uint8_t pip_check_algo;
        hammer2_tid_t pip_inum;
        hammer2_key_t lhc;

        pip = spmp->iroot;
        nip = NULL;

        lhc = hammer2_dirhash(name, name_len);
        *errorp = 0;

        /*
         * Locate the inode or indirect block to create the new
         * entry in.  At the same time check for key collisions
         * and iterate until we don't get one.
         *
         * Lock the directory exclusively for now to guarantee that
         * we can find an unused lhc for the name.  Due to collisions,
         * two different creates can end up with the same lhc so we
         * cannot depend on the OS to prevent the collision.
         */
        hammer2_inode_lock(pip, 0);

        pip_comp_algo = pip->meta.comp_algo;
        pip_check_algo = pip->meta.check_algo;
        pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

        /*
         * Locate an unused key in the collision space.
         */
        {
                hammer2_xop_scanlhc_t *sxop;
                hammer2_key_t lhcbase;

                lhcbase = lhc;
                sxop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
                sxop->lhc = lhc;
                hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
                while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
                        if (lhc != sxop->head.cluster.focus->bref.key)
                                break;
                        ++lhc;
                }
                hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

                if (error) {
                        if (error != HAMMER2_ERROR_ENOENT) {
                                *errorp = error;
                                goto done2;
                        }
                        ++lhc;
                        error = 0;
                }
                if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
                        error = HAMMER2_ERROR_ENOSPC;
                        *errorp = error;
                        goto done2;
                }
        }

        /*
         * Create the inode with the lhc as the key.
         */
        xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
        xop->lhc = lhc;
        xop->flags = HAMMER2_INSERT_PFSROOT;
        bzero(&xop->meta, sizeof(xop->meta));

        xop->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
        xop->meta.inum = 1;
        xop->meta.iparent = pip_inum;

        /* Inherit parent's inode compression mode. */
        xop->meta.comp_algo = pip_comp_algo;
        xop->meta.check_algo = pip_check_algo;
        xop->meta.version = HAMMER2_INODE_VERSION_ONE;
        hammer2_update_time(&xop->meta.ctime);
        xop->meta.mtime = xop->meta.ctime;
        xop->meta.mode = 0755;
        xop->meta.nlinks = 1;

        /*
         * Regular files and softlinks allow a small amount of data to be
         * directly embedded in the inode.  This flag will be cleared if
         * the size is extended past the embedded limit.
         */
        if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
            xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
                xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
        }
        hammer2_xop_setname(&xop->head, name, name_len);
        xop->meta.name_len = name_len;
        xop->meta.name_key = lhc;
        KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

        hammer2_xop_start(&xop->head, &hammer2_inode_create_desc);

        error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
        kprintf("CREATE INODE %*.*s\n",
                (int)name_len, (int)name_len, name);
#endif

        if (error) {
                *errorp = error;
                goto done;
        }

        /*
         * Set up the new inode if not a hardlink pointer.
         *
         * NOTE: *_get() integrates chain's lock into the inode lock.
         *
         * NOTE: Only one new inode can currently be created per
         *       transaction.  If the need arises we can adjust
         *       hammer2_trans_init() to allow more.
         *
         * NOTE: nipdata will have chain's blockset data.
         */
        nip = hammer2_inode_get(pip->pmp, &xop->head, -1, -1);
        nip->comp_heuristic = 0;
done:
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
        hammer2_inode_unlock(pip);

        return (nip);
}
/*
 * Create a new, normal inode.  This function will create the inode,
 * the media chains, but will not insert the chains onto the media topology
 * (doing so would require a flush transaction and cause long stalls).
 *
 * Caller must be in a normal transaction.
 */
hammer2_inode_t *
hammer2_inode_create_normal(hammer2_inode_t *pip,
                            struct vattr *vap, struct ucred *cred,
                            hammer2_key_t inum, int *errorp)
{
        hammer2_xop_create_t *xop;
        hammer2_inode_t *dip;
        hammer2_inode_t *nip;
        int error;
        uid_t xuid;
        uuid_t pip_uid;
        uuid_t pip_gid;
        uint32_t pip_mode;
        uint8_t pip_comp_algo;
        uint8_t pip_check_algo;
        hammer2_tid_t pip_inum;

        dip = pip->pmp->iroot;
        KKASSERT(dip != NULL);

        *errorp = 0;

        /*hammer2_inode_lock(dip, 0);*/

        pip_uid = pip->meta.uid;
        pip_gid = pip->meta.gid;
        pip_mode = pip->meta.mode;
        pip_comp_algo = pip->meta.comp_algo;
        pip_check_algo = pip->meta.check_algo;
        pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

        /*
         * Create the in-memory hammer2_inode structure for the specified
         * inode.
         */
        nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
        nip->comp_heuristic = 0;
        KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
                 nip->cluster.nchains == 0);
        atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);

        /*
         * Setup the inode meta-data
         */
        nip->meta.type = hammer2_get_obj_type(vap->va_type);

        switch (nip->meta.type) {
        case HAMMER2_OBJTYPE_CDEV:
        case HAMMER2_OBJTYPE_BDEV:
                assert(0); /* XXX unsupported */
                nip->meta.rmajor = vap->va_rmajor;
                nip->meta.rminor = vap->va_rminor;
                break;
        default:
                break;
        }

        KKASSERT(nip->meta.inum == inum);
        nip->meta.iparent = pip_inum;

        /* Inherit parent's inode compression mode. */
        nip->meta.comp_algo = pip_comp_algo;
        nip->meta.check_algo = pip_check_algo;
        nip->meta.version = HAMMER2_INODE_VERSION_ONE;
        hammer2_update_time(&nip->meta.ctime);
        nip->meta.mtime = nip->meta.ctime;
        nip->meta.mode = vap->va_mode;
        nip->meta.nlinks = 1;

        xuid = hammer2_to_unix_xid(&pip_uid);
        xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
                                     xuid, cred,
                                     &vap->va_mode);
        if (vap->va_vaflags & VA_UID_UUID_VALID)
                nip->meta.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
        else
                hammer2_guid_to_uuid(&nip->meta.uid, xuid);

        if (vap->va_vaflags & VA_GID_UUID_VALID)
                nip->meta.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
        else
                nip->meta.gid = pip_gid;

        /*
         * Regular files and softlinks allow a small amount of data to be
         * directly embedded in the inode.  This flag will be cleared if
         * the size is extended past the embedded limit.
         */
        if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
            nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
                nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
        }

        /*
         * Create the inode using (inum) as the key.  Pass pip for
         * method inheritance.
         */
        xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
        xop->lhc = inum;
        xop->flags = 0;
        xop->meta = nip->meta;

        xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
        xop->meta.name_key = inum;
        nip->meta.name_len = xop->meta.name_len;
        nip->meta.name_key = xop->meta.name_key;
        hammer2_inode_modify(nip);

        /*
         * Create the inode media chains but leave them detached.  We are
         * not in a flush transaction so we can't mess with media topology
         * above normal inodes (i.e. the index of the inodes themselves).
         *
         * We've already set the INODE_CREATING flag.  The inode's media
         * chains will be inserted onto the media topology on the next
         * filesystem sync.
         */
        hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);

        error = hammer2_xop_collect(&xop->head, 0);
        if (error) {
                kprintf("create inode type %d error %d\n", nip->meta.type, error);
                hammer2_inode_unlock(nip);
                hammer2_inode_drop(nip);
                *errorp = error;
                nip = NULL;
                goto done;
        }

        /*
         * Associate the media chains created by the backend with the
         * frontend inode.
         */
        hammer2_inode_repoint(nip, &xop->head.cluster);
done:
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        /*hammer2_inode_unlock(dip);*/

        return (nip);
}
/*
 * Create a directory entry under dip with the specified name, inode number,
 * and OBJTYPE (type).
 *
 * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
 *
 * Caller must hold dip locked.
 */
int
hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
                      hammer2_key_t inum, uint8_t type)
{
        hammer2_xop_mkdirent_t *xop;
        hammer2_key_t lhc;
        int error;

        lhc = 0;
        error = 0;

        KKASSERT(name != NULL);
        lhc = hammer2_dirhash(name, name_len);

        /*
         * Locate the inode or indirect block to create the new
         * entry in.  At the same time check for key collisions
         * and iterate until we don't get one.
         *
         * Lock the directory exclusively for now to guarantee that
         * we can find an unused lhc for the name.  Due to collisions,
         * two different creates can end up with the same lhc so we
         * cannot depend on the OS to prevent the collision.
         */
        hammer2_inode_modify(dip);

        /*
         * If name specified, locate an unused key in the collision space.
         * Otherwise use the passed-in lhc directly.
         */
        {
                hammer2_xop_scanlhc_t *sxop;
                hammer2_key_t lhcbase;

                lhcbase = lhc;
                sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
                sxop->lhc = lhc;
                hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
                while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
                        if (lhc != sxop->head.cluster.focus->bref.key)
                                break;
                        ++lhc;
                }
                hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

                if (error) {
                        if (error != HAMMER2_ERROR_ENOENT)
                                goto done2;
                        ++lhc;
                        error = 0;
                }
                if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
                        error = HAMMER2_ERROR_ENOSPC;
                        goto done2;
                }
        }

        /*
         * Create the directory entry with the lhc as the key.
         */
        xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
        xop->lhc = lhc;
        bzero(&xop->dirent, sizeof(xop->dirent));
        xop->dirent.inum = inum;
        xop->dirent.type = type;
        xop->dirent.namlen = name_len;

        KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
        hammer2_xop_setname(&xop->head, name, name_len);

        hammer2_xop_start(&xop->head, &hammer2_inode_mkdirent_desc);

        error = hammer2_xop_collect(&xop->head, 0);

        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
        error = hammer2_error_to_errno(error);

        return error;
}
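
/*
 * Illustrative sketch (not part of the original source): a typical
 * file-creation sequence pairs hammer2_inode_create_normal() with a
 * directory entry and then ties the two into one flush dependency.
 * hammer2_get_dtype() is assumed from hammer2_subr.c; transaction setup
 * and error handling are elided.
 */
#if 0
        nip = hammer2_inode_create_normal(dip, vap, cred, inum, &error);
        if (error == 0) {
                error = hammer2_dirent_create(dip, name, name_len,
                                              nip->meta.inum,
                                              hammer2_get_dtype(nip->meta.type));
                hammer2_inode_depend(dip, nip);	/* dirent-v-inode */
        }
#endif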
/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * Caller must hold the inode exclusively locked; the cluster, if not NULL,
 * must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_cluster_t *cluster)
{
        hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
        hammer2_chain_t *ochain;
        hammer2_chain_t *nchain;
        int i;

        bzero(dropch, sizeof(dropch));

        /*
         * Replace chains in ip->cluster with chains from cluster and
         * adjust the focus if necessary.
         *
         * NOTE: nchain and/or ochain can be NULL due to gaps
         *       in the cluster arrays.
         */
        hammer2_spin_ex(&ip->cluster_spin);
        for (i = 0; cluster && i < cluster->nchains; ++i) {
                /*
                 * Do not replace elements which are the same.  Also handle
                 * element count discrepancies.
                 */
                nchain = cluster->array[i].chain;
                if (i < ip->cluster.nchains) {
                        ochain = ip->cluster.array[i].chain;
                        if (ochain == nchain)
                                continue;
                } else {
                        ochain = NULL;
                }

                /*
                 * Make adjustments.
                 */
                ip->cluster.array[i].chain = nchain;
                ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
                ip->cluster.array[i].flags |= cluster->array[i].flags &
                                              HAMMER2_CITEM_INVALID;
                if (nchain)
                        hammer2_chain_ref(nchain);
                dropch[i] = ochain;
        }

        /*
         * Release any left-over chains in ip->cluster.
         */
        while (i < ip->cluster.nchains) {
                nchain = ip->cluster.array[i].chain;
                if (nchain) {
                        ip->cluster.array[i].chain = NULL;
                        ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
                }
                dropch[i] = nchain;
                ++i;
        }

        /*
         * Fixup fields.  Note that the inode-embedded cluster is never
         * directly locked.
         */
        if (cluster) {
                ip->cluster.nchains = cluster->nchains;
                ip->cluster.focus = cluster->focus;
                ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
        } else {
                ip->cluster.nchains = 0;
                ip->cluster.focus = NULL;
                ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
        }

        hammer2_spin_unex(&ip->cluster_spin);

        /*
         * Cleanup outside of spinlock
         */
        while (--i >= 0) {
                if (dropch[i])
                        hammer2_chain_drop(dropch[i]);
        }
}
/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * the focus and requires the inode to be re-locked to clean up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
                          int idx)
{
        hammer2_chain_t *ochain;
        hammer2_chain_t *nchain;
        int i;

        hammer2_spin_ex(&ip->cluster_spin);
        KKASSERT(idx < cluster->nchains);
        if (idx < ip->cluster.nchains) {
                ochain = ip->cluster.array[idx].chain;
                nchain = cluster->array[idx].chain;
        } else {
                ochain = NULL;
                nchain = cluster->array[idx].chain;
                for (i = ip->cluster.nchains; i <= idx; ++i) {
                        bzero(&ip->cluster.array[i],
                              sizeof(ip->cluster.array[i]));
                        ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
                }
                ip->cluster.nchains = idx + 1;
        }
        if (ochain != nchain) {
                /*
                 * Make adjustments.
                 */
                ip->cluster.array[idx].chain = nchain;
                ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
                ip->cluster.array[idx].flags |= cluster->array[idx].flags &
                                                HAMMER2_CITEM_INVALID;
        }
        hammer2_spin_unex(&ip->cluster_spin);
        if (ochain != nchain) {
                if (nchain)
                        hammer2_chain_ref(nchain);
                if (ochain)
                        hammer2_chain_drop(ochain);
        }
}
hammer2_key_t
hammer2_inode_data_count(const hammer2_inode_t *ip)
{
        hammer2_chain_t *chain;
        hammer2_key_t count = 0;
        int i;

        for (i = 0; i < ip->cluster.nchains; ++i) {
                if ((chain = ip->cluster.array[i].chain) != NULL) {
                        if (count < chain->bref.embed.stats.data_count)
                                count = chain->bref.embed.stats.data_count;
                }
        }
        return count;
}

hammer2_key_t
hammer2_inode_inode_count(const hammer2_inode_t *ip)
{
        hammer2_chain_t *chain;
        hammer2_key_t count = 0;
        int i;

        for (i = 0; i < ip->cluster.nchains; ++i) {
                if ((chain = ip->cluster.array[i].chain) != NULL) {
                        if (count < chain->bref.embed.stats.inode_count)
                                count = chain->bref.embed.stats.inode_count;
                }
        }
        return count;
}
/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode is
 * left intact with nlinks == 0.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, struct m_vnode **vprecyclep)
{
        struct m_vnode *vp;

        /*
         * Decrement nlinks.  Catch a bad nlinks count here too (e.g. 0 or
         * negative), and just assume a transition to 0.
         */
        if ((int64_t)ip->meta.nlinks <= 1) {
                atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);

                /*
                 * Scrap the vnode as quickly as possible.  The vp association
                 * stays intact while we hold the inode locked.  However, vp
                 * can be ripped out from under us once the inode lock is
                 * released.
                 */
                vp = ip->vp;

                /*
                 * If no vp is associated there is no high-level state to
                 * deal with and we can scrap the inode immediately.
                 */
                if (vp == NULL) {
                        if ((ip->flags & HAMMER2_INODE_DELETING) == 0) {
                                atomic_set_int(&ip->flags,
                                               HAMMER2_INODE_DELETING);
                                hammer2_inode_delayed_sideq(ip);
                        }
                        return 0;
                }

                /*
                 * Because INODE_ISUNLINKED is set with the inode lock
                 * held, the vnode cannot be ripped up from under us.
                 * There may still be refs so knote anyone waiting for
                 * a delete notification.
                 *
                 * The vnode is not necessarily ref'd due to the unlinking
                 * itself, so we have to defer handling to the end of the
                 * VOP, which will then call hammer2_inode_vprecycle().
                 */
                if (vprecyclep) {
                        vhold(vp);
                        *vprecyclep = vp;
                }
        }

        /*
         * Adjust nlinks and retain the inode on the media for now
         */
        hammer2_inode_modify(ip);
        if ((int64_t)ip->meta.nlinks > 1)
                --ip->meta.nlinks;
        else
                ip->meta.nlinks = 0;

        return 0;
}
/*
 * Called at the end of a VOP that removes a file with a vnode that
 * we want to try to dispose of quickly due to a file deletion.  If
 * we don't do this, the vnode can hang around with 0 refs for a very
 * long time and prevent reclamation of the underlying file and inode
 * (inode remains on-media with nlinks == 0 until the vnode is recycled
 * due to random system activity or a umount).
 */
void
hammer2_inode_vprecycle(struct m_vnode *vp)
{
        if (vget(vp, LK_EXCLUSIVE) == 0) {
                hammer2_knote(vp, NOTE_DELETE);
                vdrop(vp);
                vput(vp);
        } else {
                vdrop(vp);
        }
}
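
/*
 * Illustrative sketch (not part of the original source): a remove-style VOP
 * runs the unlink finisher while the inode is locked, then recycles any
 * vnode handed back only after all locks are released.
 */
#if 0
        vprecycle = NULL;
        hammer2_inode_unlink_finisher(ip, &vprecycle);
        hammer2_inode_unlock(ip);
        if (vprecycle)
                hammer2_inode_vprecycle(vprecycle);
#endif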
/*
 * Mark an inode as being modified, meaning that the caller will modify
 * ip->meta.
 *
 * If a vnode is present we set the vnode dirty and the nominal filesystem
 * sync will also handle synchronizing the inode meta-data.  Unless NOSIDEQ
 * we must ensure that the inode is on pmp->sideq.
 *
 * NOTE: We must always queue the inode to the sideq.  This allows H2 to
 *       shortcut vsyncscan() and flush inodes and their related vnodes
 *       in two stages.  H2 still calls vfsync() for each vnode.
 *
 * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
 *       only modifying the in-memory inode.  A modify_tid is synchronized
 *       later when the inode gets flushed.
 *
 * NOTE: As an exception to the general rule, the inode MAY be locked
 *       shared for this particular call.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
        atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
        if (ip->vp)
                vsetisdirty(ip->vp);
        if (ip->pmp && (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0)
                hammer2_inode_delayed_sideq(ip);
}
/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.  This
 * does not flush the inode's chain or its sub-topology to media (higher
 * level layers are responsible for doing that).
 *
 * Called with a locked inode inside a normal transaction.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_sync(hammer2_inode_t *ip)
{
        int error;

        error = 0;
        if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
                hammer2_xop_fsync_t *xop;

                xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
                xop->clear_directdata = 0;
                if (ip->flags & HAMMER2_INODE_RESIZED) {
                        if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
                            ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
                                ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
                                xop->clear_directdata = 1;
                        }
                        xop->osize = ip->osize;
                } else {
                        xop->osize = ip->meta.size;	/* safety */
                }
                xop->ipflags = ip->flags;
                xop->meta = ip->meta;

                atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
                                             HAMMER2_INODE_MODIFIED);
                hammer2_xop_start(&xop->head, &hammer2_inode_chain_sync_desc);
                error = hammer2_xop_collect(&xop->head, 0);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                if (error == HAMMER2_ERROR_ENOENT)
                        error = 0;
                if (error) {
                        kprintf("hammer2: unable to fsync inode %p\n", ip);
                        /*
                        atomic_set_int(&ip->flags,
                                       xop->ipflags & (HAMMER2_INODE_RESIZED |
                                                       HAMMER2_INODE_MODIFIED));
                        */
                        /* XXX return error somehow? */
                }
        }
        return error;
}
/*
 * When an inode is flagged INODE_CREATING its chains have not actually
 * been inserted into the on-media tree yet.
 */
int
hammer2_inode_chain_ins(hammer2_inode_t *ip)
{
        int error;

        error = 0;
        if (ip->flags & HAMMER2_INODE_CREATING) {
                hammer2_xop_create_t *xop;

                atomic_clear_int(&ip->flags, HAMMER2_INODE_CREATING);
                xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
                xop->lhc = ip->meta.inum;
                xop->flags = 0;
                hammer2_xop_start(&xop->head, &hammer2_inode_create_ins_desc);
                error = hammer2_xop_collect(&xop->head, 0);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                if (error == HAMMER2_ERROR_ENOENT)
                        error = 0;
                if (error) {
                        kprintf("hammer2: backend unable to "
                                "insert inode %p %ld\n", ip, ip->meta.inum);
                        /* XXX return error somehow? */
                }
        }
        return error;
}
/*
 * When an inode is flagged INODE_DELETING it has been deleted (no directory
 * entry or open refs are left, though as an optimization H2 might leave
 * nlinks == 1 to avoid unnecessary block updates).  The backend flush then
 * needs to actually remove it from the topology.
 *
 * NOTE: backend flush must still sync and flush the deleted inode to clean
 *       out related chains.
 *
 * NOTE: We must clear not only INODE_DELETING, but also INODE_ISUNLINKED
 *       to prevent the vnode reclaim code from trying to delete it twice.
 */
int
hammer2_inode_chain_des(hammer2_inode_t *ip)
{
        int error;

        error = 0;
        if (ip->flags & HAMMER2_INODE_DELETING) {
                hammer2_xop_destroy_t *xop;

                atomic_clear_int(&ip->flags, HAMMER2_INODE_DELETING |
                                             HAMMER2_INODE_ISUNLINKED);
                xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
                hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
                error = hammer2_xop_collect(&xop->head, 0);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

                if (error == HAMMER2_ERROR_ENOENT)
                        error = 0;
                if (error) {
                        kprintf("hammer2: backend unable to "
                                "delete inode %p %ld\n", ip, ip->meta.inum);
                        /* XXX return error somehow? */
                }
        }
        return error;
}
/*
 * Flushes the inode's chain and its sub-topology to media.  Interlocks
 * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
 * function creating or modifying a chain under this inode will re-set the
 * flag.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags)
{
        hammer2_xop_fsync_t *xop;
        int error;

        atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
        xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | flags);
        hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
        error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        if (error == HAMMER2_ERROR_ENOENT)
                error = 0;

        return error;
}
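
/*
 * Illustrative sketch (not part of the original source): an explicit fsync
 * first synchronizes the frontend meta-data into the chain, then flushes
 * the chain topology.  HAMMER2_XOP_INODE_STOP is assumed from the wider
 * HAMMER2 code; error handling is simplified.
 */
#if 0
        hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
        error = hammer2_inode_chain_sync(ip);
        if (error == 0)
                error = hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP);
        hammer2_inode_unlock(ip);
#endif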
hammer2_key_t
hammer2_pfs_inode_count(hammer2_pfs_t *pmp)
{
        struct hammer2_inode *ip;
        hammer2_key_t count = 0;

        hammer2_spin_ex(&pmp->inum_spin);
        RB_FOREACH(ip, hammer2_inode_tree, &pmp->inum_tree)
                count++;
        hammer2_spin_unex(&pmp->inum_spin);

        return count;
}
int
vflush(struct mount *mp, int rootrefs, int flags)
{
        hammer2_pfs_t *pmp = MPTOPMP(mp);
        struct hammer2_inode *ip, *tmp;
        struct m_vnode *vp;
        hammer2_key_t count_before, count_after, count_delta;

        hammer2_spin_ex(&pmp->inum_spin);
        count_before = 0;
        RB_FOREACH(ip, hammer2_inode_tree, &pmp->inum_tree)
                count_before++;

        RB_FOREACH_SAFE(ip, hammer2_inode_tree, &pmp->inum_tree, tmp) {
                vp = ip->vp;
                assert(vp);
                if (!vp->v_vflushed) {
                        if (hammer2_debug & 0x80000000)
                                printf("%s: drop ip=%p inum=%ld refs=%d\n",
                                    __func__, ip, ip->meta.inum, ip->refs);
                        assert(ip->refs > 1);
                        hammer2_inode_drop(ip);
                        vp->v_vflushed = 1;
                }
        }

        count_after = 0;
        RB_FOREACH(ip, hammer2_inode_tree, &pmp->inum_tree)
                count_after++;
        hammer2_spin_unex(&pmp->inum_spin);

        printf("%s: total inode %ld -> %ld\n",
            __func__, count_before, count_after);

        assert(count_before >= count_after);
        count_delta = count_before - count_after;

        if (count_delta) {
                if (hammer2_debug & 0x80000000)
                        assert(0);
                printf("%s: %ld inode freed\n", __func__, count_delta);
        }

        return 0;
}