/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/vnode.h>

#include "hammer2.h"
RB_GENERATE2(hammer2_inode_tree, hammer2_inode, rbnode, hammer2_inode_cmp,
	     hammer2_tid_t, meta.inum);
int
hammer2_inode_cmp(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	if (ip1->meta.inum < ip2->meta.inum)
		return(-1);
	if (ip1->meta.inum > ip2->meta.inum)
		return(1);
	return(0);
}
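
/*
 * Illustrative sketch (an editorial addition, not part of the original
 * source): the RB_GENERATE2() invocation above emits red-black tree
 * functions keyed on meta.inum, which is what allows an in-memory inode
 * to be found by inode number:
 *
 *	hammer2_inode_t *ip;
 *
 *	hammer2_spin_ex(&pmp->inum_spin);
 *	ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
 *	if (ip)
 *		hammer2_inode_ref(ip);
 *	hammer2_spin_unex(&pmp->inum_spin);
 *
 * hammer2_inode_lookup() below wraps exactly this pattern.
 */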
/*
 * Caller holds pmp->list_spin and the inode should be locked.  Merge ip
 * with the specified depend.
 *
 * If the ip is on SYNCQ it stays there and (void *)-1 is returned, indicating
 * that successive calls must ensure the ip is on a pass2 depend (or they are
 * all SYNCQ).  If the passed-in depend is not NULL and not (void *)-1 then
 * we can set pass2 on it and return.
 *
 * If the ip is not on SYNCQ it is merged with the passed-in depend, creating
 * a self-depend if necessary, and depend->pass2 is set according
 * to the PASS2 flag.  SIDEQ is set.
 */
static hammer2_depend_t *
hammer2_inode_setdepend_locked(hammer2_inode_t *ip, hammer2_depend_t *depend)
{
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_depend_t *dtmp;
	hammer2_inode_t *iptmp;

	/*
	 * If ip is SYNCQ its entry is used for the syncq list and it will
	 * no longer be associated with a dependency.  Merging this status
	 * with a passed-in depend implies PASS2.
	 */
	if (ip->flags & HAMMER2_INODE_SYNCQ) {
		if (depend == (void *)-1 ||
		    depend == NULL) {
			return ((void *)-1);
		}
		depend->pass2 = 1;
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

		return depend;
	}
	/*
	 * If ip is already SIDEQ, merge ip->depend into the passed-in depend.
	 * If it is not, associate the ip with the passed-in depend, creating
	 * a single-entry dependency using depend_static if necessary.
	 *
	 * NOTE: The use of ip->depend_static always requires that the
	 *	 specific ip containing the structure is part of that
	 *	 particular depend_static's dependency group.
	 */
	if (ip->flags & HAMMER2_INODE_SIDEQ) {
		/*
		 * Merge ip->depend with the passed-in depend.  If the
		 * passed-in depend is not a special case, all ips associated
		 * with ip->depend (including the original ip) must be moved
		 * to the passed-in depend.
		 */
		if (depend == NULL) {
			depend = ip->depend;
		} else if (depend == (void *)-1) {
			depend = ip->depend;
			depend->pass2 = 1;
		} else if (depend != ip->depend) {
			int sanitychk = 0;

			dtmp = ip->depend;
			while ((iptmp = TAILQ_FIRST(&dtmp->sideq)) != NULL) {
				if (iptmp == ip)
					sanitychk = 1;
				TAILQ_REMOVE(&dtmp->sideq, iptmp, entry);
				TAILQ_INSERT_TAIL(&depend->sideq, iptmp,
						  entry);
				iptmp->depend = depend;
			}
			KKASSERT(sanitychk == 1);
			depend->count += dtmp->count;
			depend->pass2 |= dtmp->pass2;
			TAILQ_REMOVE(&pmp->depq, dtmp, entry);
			dtmp->count = 0;
			dtmp->pass2 = 0;
		}
	} else {
		/*
		 * Add ip to the sideq, creating a self-dependency if
		 * necessary.
		 */
		hammer2_inode_ref(ip);
		atomic_set_int(&ip->flags, HAMMER2_INODE_SIDEQ);
		if (depend == NULL) {
			depend = &ip->depend_static;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} else if (depend == (void *)-1) {
			depend = &ip->depend_static;
			depend->pass2 = 1;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} /* else add ip to passed-in depend */
		TAILQ_INSERT_TAIL(&depend->sideq, ip, entry);
		ip->depend = depend;
		++depend->count;
	}
	if (ip->flags & HAMMER2_INODE_SYNCQ_PASS2)
		depend->pass2 = 1;
	if (depend->pass2)
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

	return depend;
}
/*
 * Put a solo inode on the SIDEQ (meaning that it is dirty).  This can also
 * occur from inode_lock4() and inode_depend().
 *
 * Caller must pass-in a locked inode.
 */
void
hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp = ip->pmp;

	/*
	 * Optimize case to avoid pmp spinlock.
	 */
	if ((ip->flags & (HAMMER2_INODE_SYNCQ | HAMMER2_INODE_SIDEQ)) == 0) {
		hammer2_spin_ex(&pmp->list_spin);
		hammer2_inode_setdepend_locked(ip, NULL);
		hammer2_spin_unex(&pmp->list_spin);
	}
}
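
/*
 * Illustrative sketch (editorial addition): callers that dirty in-memory
 * inode state typically flag the inode and then queue it, e.g.:
 *
 *	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
 *	hammer2_inode_delayed_sideq(ip);
 *
 * hammer2_inode_modify() near the end of this file follows this pattern.
 */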
/*
 * Lock an inode, with SYNCQ semantics.
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.
 *	  Shared locks are not subject to SYNCQ semantics, exclusive locks
 *	  are.
 *
 *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *	  Most front-end inode locks do.
 *
 *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *	  the inode data be resolved.  This is used by the syncthr because
 *	  it can run on an unresolved/out-of-sync cluster, and also by the
 *	  vnode reclamation code to avoid unnecessary I/O (particularly when
 *	  disposing of hundreds of thousands of cached vnodes).
 *
 * This function, along with lock4, has SYNCQ semantics.  If the inode being
 * locked is on the SYNCQ, that is, it has been staged by the syncer, we must
 * block until the operation is complete (even if we can lock the inode).  In
 * order to reduce the stall time, we re-order the inode to the front of the
 * pmp->syncq prior to blocking.  This reordering VERY significantly improves
 * performance.
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *	 inode would otherwise confuse multiple lock holders of the inode.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
	hammer2_pfs_t *pmp;

	hammer2_inode_ref(ip);
	pmp = ip->pmp;

	/*
	 * Inode structure mutex - Shared lock
	 */
	if (how & HAMMER2_RESOLVE_SHARED) {
		hammer2_mtx_sh(&ip->lock);
		return;
	}

	/*
	 * Inode structure mutex - Exclusive lock
	 *
	 * An exclusive lock (if not recursive) must wait for inodes on
	 * SYNCQ to flush first, to ensure that meta-data dependencies such
	 * as the nlink count and related directory entries are not split
	 * across flushes.
	 *
	 * If the vnode is locked by the current thread it must be unlocked
	 * across the tsleep() to avoid a deadlock.
	 */
	hammer2_mtx_ex(&ip->lock);
	if (hammer2_mtx_refs(&ip->lock) > 1)
		return;
	while ((ip->flags & HAMMER2_INODE_SYNCQ) && pmp) {
		hammer2_spin_ex(&pmp->list_spin);
		if (ip->flags & HAMMER2_INODE_SYNCQ) {
			tsleep_interlock(&ip->flags, 0);
			atomic_set_int(&ip->flags,
				       HAMMER2_INODE_SYNCQ_WAKEUP);
			TAILQ_REMOVE(&pmp->syncq, ip, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, ip, entry);
			hammer2_spin_unex(&pmp->list_spin);
			hammer2_mtx_unlock(&ip->lock);
			tsleep(&ip->flags, PINTERLOCKED, "h2sync", 0);
			hammer2_mtx_ex(&ip->lock);
			continue;
		}
		hammer2_spin_unex(&pmp->list_spin);
		break;
	}
}
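
/*
 * Illustrative sketch (editorial addition): a typical front-end VOP
 * resolves the inode meta-data while holding the lock and releases it
 * when done:
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
 *	... operate on ip->meta and ip->cluster ...
 *	hammer2_inode_unlock(ip);
 *
 * The syncthr and the vnode reclamation path would pass
 * HAMMER2_RESOLVE_NEVER instead, as described in the comment above.
 */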
/*
 * Exclusively lock up to four inodes, in order, with SYNCQ semantics.
 * ip1 and ip2 must not be NULL.  ip3 and ip4 may be NULL, but if ip3 is
 * NULL then ip4 must also be NULL.
 *
 * This creates a dependency between up to four inodes.
 */
void
hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
		    hammer2_inode_t *ip3, hammer2_inode_t *ip4)
{
	hammer2_inode_t *ips[4];
	hammer2_inode_t *iptmp;
	hammer2_inode_t *ipslp;
	hammer2_depend_t *depend;
	hammer2_pfs_t *pmp;
	int count;
	int i;

	pmp = ip1->pmp;			/* may be NULL */
	KKASSERT(pmp == ip2->pmp);

	ips[0] = ip1;
	ips[1] = ip2;
	if (ip3 == NULL) {
		count = 2;
	} else if (ip4 == NULL) {
		count = 3;
		ips[2] = ip3;
		KKASSERT(pmp == ip3->pmp);
	} else {
		count = 4;
		ips[2] = ip3;
		ips[3] = ip4;
		KKASSERT(pmp == ip3->pmp);
		KKASSERT(pmp == ip4->pmp);
	}

	for (i = 0; i < count; ++i)
		hammer2_inode_ref(ips[i]);

restart:
	/*
	 * Lock the inodes in order
	 */
	for (i = 0; i < count; ++i) {
		hammer2_mtx_ex(&ips[i]->lock);
	}

	/*
	 * Associate dependencies, record the first inode found on SYNCQ
	 * (operation is allowed to proceed for inodes on PASS2) for our
	 * sleep operation, this inode is theoretically the last one sync'd
	 * in the sequence.
	 *
	 * All inodes found on SYNCQ are moved to the head of the syncq
	 * to reduce stalls.
	 */
	hammer2_spin_ex(&pmp->list_spin);
	depend = NULL;
	ipslp = NULL;
	for (i = 0; i < count; ++i) {
		iptmp = ips[i];
		depend = hammer2_inode_setdepend_locked(iptmp, depend);
		if (iptmp->flags & HAMMER2_INODE_SYNCQ) {
			TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
			if (ipslp == NULL)
				ipslp = iptmp;
		}
	}
	hammer2_spin_unex(&pmp->list_spin);

	/*
	 * Block and retry if any of the inodes are on SYNCQ.  It is
	 * important that we allow the operation to proceed in the
	 * PASS2 case, to avoid deadlocking against the vnode.
	 */
	if (ipslp) {
		for (i = 0; i < count; ++i)
			hammer2_mtx_unlock(&ips[i]->lock);
		tsleep(&ipslp->flags, 0, "h2sync", 2);
		goto restart;
	}
}
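
/*
 * Illustrative sketch (editorial addition, names illustrative): a
 * rename-style operation involving two directories and up to two inodes
 * can lock all four together so they land in one dependency group:
 *
 *	hammer2_inode_lock4(fdip, tdip, ip, tip);
 *	... perform the namespace changes ...
 *	hammer2_inode_unlock(tip);
 *	hammer2_inode_unlock(ip);
 *	hammer2_inode_unlock(tdip);
 *	hammer2_inode_unlock(fdip);
 *
 * Pass NULL for ip3/ip4 when fewer inodes are involved (if ip3 is NULL,
 * ip4 must also be NULL).
 */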
/*
 * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
 * we wake them up.
 */
void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
	if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) {
		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
		hammer2_mtx_unlock(&ip->lock);
		wakeup(&ip->flags);
	} else {
		hammer2_mtx_unlock(&ip->lock);
	}
	hammer2_inode_drop(ip);
}
/*
 * If either ip1 or ip2 have been tapped by the syncer, make sure that both
 * are.  This ensures that dependencies (e.g. dirent-v-inode) are synced
 * together.  For dirent-v-inode depends, pass the dirent as ip1.
 *
 * If neither ip1 nor ip2 have been tapped by the syncer, merge them into a
 * single dependency.  Dependencies are entered into pmp->depq.  This
 * effectively flags the inodes SIDEQ.
 *
 * Both ip1 and ip2 must be locked by the caller.  This also ensures
 * that we can't race the end of the syncer's queue run.
 */
void
hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	hammer2_pfs_t *pmp;
	hammer2_depend_t *depend;

	pmp = ip1->pmp;
	hammer2_spin_ex(&pmp->list_spin);
	depend = hammer2_inode_setdepend_locked(ip1, NULL);
	depend = hammer2_inode_setdepend_locked(ip2, depend);
	hammer2_spin_unex(&pmp->list_spin);
}
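
/*
 * Illustrative sketch (editorial addition, names illustrative): for a
 * dirent-v-inode dependency the directory inode holding the dirent is
 * passed as ip1, per the comment above:
 *
 *	hammer2_inode_depend(dip, ip);	-- dirent side as ip1
 *
 * Both dip and ip must already be locked by the caller.
 */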
/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
	hammer2_chain_t *chain;
	hammer2_cluster_t *cluster;

	hammer2_spin_sh(&ip->cluster_spin);
	cluster = &ip->cluster;
	if (clindex >= cluster->nchains)
		chain = NULL;
	else
		chain = cluster->array[clindex].chain;
	if (chain) {
		hammer2_chain_ref(chain);
		hammer2_spin_unsh(&ip->cluster_spin);
		hammer2_chain_lock(chain, how);
	} else {
		hammer2_spin_unsh(&ip->cluster_spin);
	}
	return chain;
}
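
/*
 * Illustrative sketch (editorial addition): the chain returned above is
 * referenced and locked; the caller disposes of it with the usual chain
 * calls:
 *
 *	chain = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
 *	if (chain) {
 *		... inspect chain->bref, chain->data ...
 *		hammer2_chain_unlock(chain);
 *		hammer2_chain_drop(chain);
 *	}
 */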
hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
			       hammer2_chain_t **parentp, int how)
{
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;

	for (;;) {
		hammer2_spin_sh(&ip->cluster_spin);
		if (clindex >= ip->cluster.nchains)
			chain = NULL;
		else
			chain = ip->cluster.array[clindex].chain;
		if (chain) {
			hammer2_chain_ref(chain);
			hammer2_spin_unsh(&ip->cluster_spin);
			hammer2_chain_lock(chain, how);
		} else {
			hammer2_spin_unsh(&ip->cluster_spin);
		}

		/*
		 * Get parent, lock order must be (parent, chain).
		 */
		parent = chain->parent;
		if (parent) {
			hammer2_chain_ref(parent);
			hammer2_chain_unlock(chain);
			hammer2_chain_lock(parent, how);
			hammer2_chain_lock(chain, how);
		}
		if (ip->cluster.array[clindex].chain == chain &&
		    chain->parent == parent) {
			break;
		}

		/*
		 * Retry
		 */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		if (parent) {
			hammer2_chain_unlock(parent);
			hammer2_chain_drop(parent);
		}
	}
	*parentp = parent;

	return chain;
}
/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and lock will be released
 * on return.
 *
 * Restore a lock that was temporarily released.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
	return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
	hammer2_mtx_temp_restore(&ip->lock, ostate);
}
/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
	int wasexclusive;

	if (mtx_islocked_ex(&ip->lock)) {
		wasexclusive = 1;
	} else {
		hammer2_mtx_unlock(&ip->lock);
		hammer2_mtx_ex(&ip->lock);
		wasexclusive = 0;
	}
	return wasexclusive;
}
/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
	if (wasexclusive == 0)
		hammer2_mtx_downgrade(&ip->lock);
}
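
/*
 * Illustrative sketch (editorial addition): upgrade/downgrade are meant
 * to bracket a code section that needs exclusive access while preserving
 * the caller's original lock state:
 *
 *	int wasexclusive;
 *
 *	wasexclusive = hammer2_inode_lock_upgrade(ip);
 *	... modify state requiring the exclusive lock ...
 *	hammer2_inode_lock_downgrade(ip, wasexclusive);
 *
 * hammer2_igetv() below uses exactly this pattern around vnode
 * allocation.
 */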
/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inode_t *ip;

	KKASSERT(pmp);
	if (pmp->spmp_hmp) {
		ip = NULL;
	} else {
		hammer2_spin_ex(&pmp->inum_spin);
		ip = RB_LOOKUP(hammer2_inode_tree, &pmp->inum_tree, inum);
		if (ip)
			hammer2_inode_ref(ip);
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return (ip);
}
/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
	atomic_add_int(&ip->refs, 1);
	if (hammer2_debug & 0x80000) {
		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
		print_backtrace(8);
	}
}
/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	u_int refs;

	while (ip) {
		if (hammer2_debug & 0x80000) {
			kprintf("INODE-1 %p (%d->%d)\n",
				ip, ip->refs, ip->refs - 1);
			print_backtrace(8);
		}
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			pmp = ip->pmp;
			hammer2_spin_ex(&pmp->inum_spin);

			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
					atomic_clear_int(&ip->flags,
						HAMMER2_INODE_ONRBTREE);
					RB_REMOVE(hammer2_inode_tree,
						&pmp->inum_tree, ip);
				}
				hammer2_spin_unex(&pmp->inum_spin);

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL);

				kfree_obj(ip, pmp->minode);
				atomic_add_long(&pmp->inmem_inodes, -1);
				ip = NULL;	/* will terminate loop */
			} else {
				hammer2_spin_unex(&ip->pmp->inum_spin);
			}
		} else {
			/*
			 * Non zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}
/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * *errorp is set to a UNIX error, not a HAMMER2 error.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	pmp = ip->pmp;
	KKASSERT(pmp != NULL);
	*errorp = 0;

	for (;;) {
		/*
		 * Attempt to reuse an existing vnode assignment.  It is
		 * possible to race a reclaim so the vget() may fail.  The
		 * inode must be unlocked during the vget() to avoid a
		 * deadlock against a reclaim.
		 */
		int wasexclusive;

		vp = ip->vp;
		if (vp) {
			/*
			 * Inode must be unlocked during the vget() to avoid
			 * possible deadlocks, but leave the ip ref intact.
			 *
			 * vnode is held to prevent destruction during the
			 * vget().  The vget() can still fail if we lost
			 * a reclaim race on the vnode.
			 */
			hammer2_mtx_state_t ostate;

			vhold(vp);
			ostate = hammer2_inode_lock_temp_release(ip);
			if (vget(vp, LK_EXCLUSIVE)) {
				vdrop(vp);
				hammer2_inode_lock_temp_restore(ip, ostate);
				continue;
			}
			hammer2_inode_lock_temp_restore(ip, ostate);
			vdrop(vp);
			/* vp still locked and ref from vget */
			if (ip->vp != vp) {
				kprintf("hammer2: igetv race %p/%p\n",
					ip->vp, vp);
				vput(vp);
				continue;
			}
			*errorp = 0;
			break;
		}

		/*
		 * No vnode exists, allocate a new vnode.  Beware of
		 * allocation races.  This function will return an
		 * exclusively locked and referenced vnode.
		 */
		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
		if (*errorp) {
			kprintf("hammer2: igetv getnewvnode failed %d\n",
				*errorp);
			vp = NULL;
			break;
		}

		/*
		 * Lock the inode and check for an allocation race.
		 */
		wasexclusive = hammer2_inode_lock_upgrade(ip);
		if (ip->vp != NULL) {
			vp->v_type = VBAD;
			vx_put(vp);
			hammer2_inode_lock_downgrade(ip, wasexclusive);
			continue;
		}

		switch (ip->meta.type) {
		case HAMMER2_OBJTYPE_DIRECTORY:
			vp->v_type = VDIR;
			break;
		case HAMMER2_OBJTYPE_REGFILE:
			/*
			 * Regular file must use buffer cache I/O
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VREG;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_SOFTLINK:
			/*
			 * XXX for now we are using the generic file_read
			 * and file_write code so we need a buffer cache
			 * association.
			 *
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VLNK;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_CDEV:
			vp->v_type = VCHR;
			/* fall through */
		case HAMMER2_OBJTYPE_BDEV:
			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
				vp->v_type = VBLK;
			addaliasu(vp, ip->meta.rmajor, ip->meta.rminor);
			break;
		case HAMMER2_OBJTYPE_FIFO:
			vp->v_type = VFIFO;
			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
			break;
		case HAMMER2_OBJTYPE_SOCKET:
			vp->v_type = VSOCK;
			break;
		default:
			panic("hammer2: unhandled objtype %d",
			      ip->meta.type);
			break;
		}

		if (ip == pmp->iroot)
			vsetflags(vp, VROOT);

		vp->v_data = ip;
		ip->vp = vp;
		hammer2_inode_ref(ip);		/* vp association */
		hammer2_inode_lock_downgrade(ip, wasexclusive);
		break;
	}

	/*
	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
	 */
	if (hammer2_debug & 0x0002) {
		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
			vp, vp->v_refcnt, vp->v_auxrefs);
	}
	return (vp);
}
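
/*
 * Illustrative sketch (editorial addition): a VOP that needs the vnode
 * for a locked inode typically does:
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
 *	vp = hammer2_igetv(ip, &error);
 *	hammer2_inode_unlock(ip);
 *
 * On success vp is returned exclusively locked and referenced; the
 * caller releases it with vput() when done.
 */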
/*
 * XXX this API needs a rewrite.  It needs to be split into a
 * hammer2_inode_alloc() and hammer2_inode_build() to allow us to get
 * rid of the inode/chain lock reversal fudge.
 *
 * Returns the inode associated with the passed-in cluster, allocating a new
 * hammer2_inode structure if necessary, then synchronizing it to the passed
 * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
 * is synchronized.  Otherwise the whole cluster is synchronized.  inum will
 * be extracted from the passed-in xop and the inum argument will be ignored.
 *
 * If xop is passed as NULL then a new hammer2_inode is allocated with the
 * specified inum, and returned.  For normal inodes, the inode will be
 * indexed in memory and if it already exists the existing ip will be
 * returned instead of allocating a new one.  The superroot and PFS inodes
 * are not indexed in memory.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
		  hammer2_tid_t inum, int idx)
{
	hammer2_inode_t *nip;
	const hammer2_inode_data_t *iptmp;
	const hammer2_inode_data_t *nipdata;

	KKASSERT(xop == NULL ||
		 hammer2_cluster_type(&xop->cluster) ==
		 HAMMER2_BREF_TYPE_INODE);
	KKASSERT(pmp);

	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 *
	 * Cluster can be NULL during the initial pfs allocation.
	 */
	if (xop) {
		iptmp = &hammer2_xop_gdata(xop)->ipdata;
		inum = iptmp->meta.inum;
		hammer2_xop_pdata(xop);
	}
again:
	nip = hammer2_inode_lookup(pmp, inum);
	if (nip) {
		/*
		 * We may have to unhold the cluster to avoid a deadlock
		 * against vnlru (and possibly other XOPs).
		 */
		if (xop) {
			if (hammer2_mtx_ex_try(&nip->lock) != 0) {
				hammer2_cluster_unhold(&xop->cluster);
				hammer2_mtx_ex(&nip->lock);
				hammer2_cluster_rehold(&xop->cluster);
			}
		} else {
			hammer2_mtx_ex(&nip->lock);
		}

		/*
		 * Handle SMP race (not applicable to the super-root spmp
		 * which can't index inodes due to duplicative inode numbers).
		 */
		if (pmp->spmp_hmp == NULL &&
		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		if (xop) {
			if (idx >= 0)
				hammer2_inode_repoint_one(nip, &xop->cluster,
							  idx);
			else
				hammer2_inode_repoint(nip, &xop->cluster);
		}
		return nip;
	}

	/*
	 * We couldn't find the inode number, create a new inode and try to
	 * insert it, handle insertion races.
	 */
	nip = kmalloc_obj(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
	hammer2_spin_init(&nip->cluster_spin, "h2clspin");
	atomic_add_long(&pmp->inmem_inodes, 1);

	/*
	 * Initialize nip's cluster.  A cluster is provided for normal
	 * inodes but typically not for the super-root or PFS inodes.
	 */
	{
		hammer2_inode_t *nnip = nip;
		nip->ihash = (int)hammer2_icrc32(&nnip, sizeof(nnip));
	}

	nip->cluster.refs = 1;
	nip->cluster.pmp = pmp;
	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
	if (xop) {
		nipdata = &hammer2_xop_gdata(xop)->ipdata;
		nip->meta = nipdata->meta;
		hammer2_xop_pdata(xop);
		hammer2_inode_repoint(nip, &xop->cluster);
	} else {
		nip->meta.inum = inum;		/* PFS inum is always 1 XXX */
		/* mtime will be updated when a cluster is available */
	}

	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock() call.
	 */
	nip->refs = 1;
	hammer2_mtx_init(&nip->lock, "h2inode");
	hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
	hammer2_mtx_ex(&nip->lock);
	TAILQ_INIT(&nip->depend_static.sideq);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp->spmp_hmp == NULL) {
		hammer2_spin_ex(&pmp->inum_spin);
		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
			hammer2_spin_unex(&pmp->inum_spin);
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return (nip);
}
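
/*
 * Illustrative sketch (editorial addition): per the comment above, the
 * inode returned by hammer2_inode_get() is locked and referenced and the
 * caller disposes of both:
 *
 *	nip = hammer2_inode_get(pmp, &xop->head, -1, -1);
 *	... use nip ...
 *	hammer2_inode_unlock(nip);
 *	hammer2_inode_drop(nip);
 */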
/*
 * Create a PFS inode under the superroot.  This function will create the
 * inode, its media chains, and also insert it into the media.
 *
 * Caller must be in a flush transaction because we are inserting the inode
 * onto the media.
 */
hammer2_inode_t *
hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
			 const char *name, size_t name_len,
			 int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *pip;
	hammer2_inode_t *nip;
	int error;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;
	hammer2_key_t lhc;

	pip = spmp->iroot;
	nip = NULL;

	lhc = hammer2_dirhash(name, name_len);
	*errorp = 0;

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_lock(pip, 0);

	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Locate an unused key in the collision space.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the inode with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	xop->flags = HAMMER2_INSERT_PFSROOT;
	bzero(&xop->meta, sizeof(xop->meta));

	xop->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
	xop->meta.inum = 1;
	xop->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	xop->meta.comp_algo = pip_comp_algo;
	xop->meta.check_algo = pip_check_algo;
	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&xop->meta.ctime);
	xop->meta.mtime = xop->meta.ctime;
	xop->meta.mode = 0755;
	xop->meta.nlinks = 1;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (xop->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    xop->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		xop->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}
	hammer2_xop_setname(&xop->head, name, name_len);
	xop->meta.name_len = name_len;
	xop->meta.name_key = lhc;
	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

	hammer2_xop_start(&xop->head, &hammer2_inode_create_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("CREATE INODE %*.*s\n",
		(int)name_len, (int)name_len, name);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Set up the new inode if not a hardlink pointer.
	 *
	 * NOTE: *_get() integrates chain's lock into the inode lock.
	 *
	 * NOTE: Only one new inode can currently be created per
	 *	 transaction.  If the need arises we can adjust
	 *	 hammer2_trans_init() to allow more.
	 *
	 * NOTE: nipdata will have chain's blockset data.
	 */
	nip = hammer2_inode_get(pip->pmp, &xop->head, -1, -1);
	nip->comp_heuristic = 0;
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	hammer2_inode_unlock(pip);

	return (nip);
}
/*
 * Create a new, normal inode.  This function will create the inode,
 * the media chains, but will not insert the chains onto the media topology
 * (doing so would require a flush transaction and cause long stalls).
 *
 * Caller must be in a normal transaction.
 */
hammer2_inode_t *
hammer2_inode_create_normal(hammer2_inode_t *pip,
			    struct vattr *vap, struct ucred *cred,
			    hammer2_key_t inum, int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	int error;
	uid_t xuid;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;

	dip = pip->pmp->iroot;
	KKASSERT(dip != NULL);

	*errorp = 0;

	/*hammer2_inode_lock(dip, 0);*/

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Create the in-memory hammer2_inode structure for the specified
	 * inode.
	 */
	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
	nip->comp_heuristic = 0;
	KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
		 nip->cluster.nchains == 0);
	atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);

	/*
	 * Setup the inode meta-data
	 */
	nip->meta.type = hammer2_get_obj_type(vap->va_type);

	switch (nip->meta.type) {
	case HAMMER2_OBJTYPE_CDEV:
	case HAMMER2_OBJTYPE_BDEV:
		nip->meta.rmajor = vap->va_rmajor;
		nip->meta.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	KKASSERT(nip->meta.inum == inum);
	nip->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	nip->meta.comp_algo = pip_comp_algo;
	nip->meta.check_algo = pip_check_algo;
	nip->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&nip->meta.ctime);
	nip->meta.mtime = nip->meta.ctime;
	nip->meta.mode = vap->va_mode;
	nip->meta.nlinks = 1;

	xuid = hammer2_to_unix_xid(&pip_uid);
	xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
				     xuid, cred,
				     &vap->va_mode);
	if (vap->va_vaflags & VA_UID_UUID_VALID)
		nip->meta.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
	else
		hammer2_guid_to_uuid(&nip->meta.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		nip->meta.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
	else
		nip->meta.gid = pip_gid;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}

	/*
	 * Create the inode using (inum) as the key.  Pass pip for
	 * method inheritance.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = inum;
	xop->flags = 0;
	xop->meta = nip->meta;
	KKASSERT(vap);

	xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
	xop->meta.name_key = inum;
	nip->meta.name_len = xop->meta.name_len;
	nip->meta.name_key = xop->meta.name_key;
	hammer2_inode_modify(nip);

	/*
	 * Create the inode media chains but leave them detached.  We are
	 * not in a flush transaction so we can't mess with media topology
	 * above normal inodes (i.e. the index of the inodes themselves).
	 *
	 * We've already set the INODE_CREATING flag.  The inode's media
	 * chains will be inserted onto the media topology on the next
	 * filesystem sync.
	 */
	hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("create inode type %d error %d\n", nip->meta.type, error);
#endif

	if (error) {
		*errorp = error;
		atomic_clear_int(&nip->flags, HAMMER2_INODE_CREATING);
		hammer2_inode_unlock(nip);
		hammer2_inode_drop(nip);
		nip = NULL;
		goto done;
	}

	/*
	 * Associate the media chains created by the backend with the
	 * frontend inode.
	 */
	hammer2_inode_repoint(nip, &xop->head.cluster);
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	/*hammer2_inode_unlock(dip);*/

	return (nip);
}
/*
 * Create a directory entry under dip with the specified name, inode number,
 * and OBJTYPE (type).
 *
 * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
 *
 * Caller must hold dip locked.
 */
int
hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
		      hammer2_key_t inum, uint8_t type)
{
	hammer2_xop_mkdirent_t *xop;
	hammer2_key_t lhc;
	int error;

	lhc = 0;
	error = 0;

	KKASSERT(name != NULL);
	lhc = hammer2_dirhash(name, name_len);

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_modify(dip);

	/*
	 * If name specified, locate an unused key in the collision space.
	 * Otherwise use the passed-in lhc directly.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the directory entry with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	bzero(&xop->dirent, sizeof(xop->dirent));
	xop->dirent.inum = inum;
	xop->dirent.type = type;
	xop->dirent.namlen = name_len;

	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
	hammer2_xop_setname(&xop->head, name, name_len);

	hammer2_xop_start(&xop->head, &hammer2_inode_mkdirent_desc);

	error = hammer2_xop_collect(&xop->head, 0);

	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	error = hammer2_error_to_errno(error);

	return error;
}
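
/*
 * Illustrative sketch (editorial addition, names illustrative): a create
 * VOP typically pairs the two helpers above, allocating the inode first
 * and then entering its directory entry:
 *
 *	nip = hammer2_inode_create_normal(dip, vap, cred, inum, &error);
 *	if (error == 0)
 *		error = hammer2_dirent_create(dip, name, name_len,
 *					      nip->meta.inum, nip->meta.type);
 *
 * Note that hammer2_dirent_create() returns a UNIX errno code while
 * create_normal reports a HAMMER2 error through *errorp.
 */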
/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * Caller must hold the inode exclusively locked; the cluster, if not NULL,
 * must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_cluster_t *cluster)
{
	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	bzero(dropch, sizeof(dropch));

	/*
	 * Replace chains in ip->cluster with chains from cluster and
	 * adjust the focus if necessary.
	 *
	 * NOTE: nchain and/or ochain can be NULL due to gaps
	 *	 in the cluster arrays.
	 */
	hammer2_spin_ex(&ip->cluster_spin);
	for (i = 0; cluster && i < cluster->nchains; ++i) {
		/*
		 * Do not replace elements which are the same.  Also handle
		 * element count discrepancies.
		 */
		nchain = cluster->array[i].chain;
		if (i < ip->cluster.nchains) {
			ochain = ip->cluster.array[i].chain;
			if (ochain == nchain)
				continue;
		} else {
			ochain = NULL;
		}

		/*
		 * Make adjustments
		 */
		ip->cluster.array[i].chain = nchain;
		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[i].flags |= cluster->array[i].flags &
					      HAMMER2_CITEM_INVALID;
		if (nchain)
			hammer2_chain_ref(nchain);
		dropch[i] = ochain;
	}

	/*
	 * Release any left-over chains in ip->cluster.
	 */
	while (i < ip->cluster.nchains) {
		nchain = ip->cluster.array[i].chain;
		if (nchain) {
			ip->cluster.array[i].chain = NULL;
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		dropch[i] = nchain;
		++i;
	}

	/*
	 * Fixup fields.  Note that the inode-embedded cluster is never
	 * directly locked.
	 */
	if (cluster) {
		ip->cluster.nchains = cluster->nchains;
		ip->cluster.focus = cluster->focus;
		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
	} else {
		ip->cluster.nchains = 0;
		ip->cluster.focus = NULL;
		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
	}

	hammer2_spin_unex(&ip->cluster_spin);

	/*
	 * Cleanup outside of spinlock
	 */
	while (--i >= 0) {
		if (dropch[i])
			hammer2_chain_drop(dropch[i]);
	}
}
/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
			  int idx)
{
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	hammer2_spin_ex(&ip->cluster_spin);
	KKASSERT(idx < cluster->nchains);
	if (idx < ip->cluster.nchains) {
		ochain = ip->cluster.array[idx].chain;
		nchain = cluster->array[idx].chain;
	} else {
		ochain = NULL;
		nchain = cluster->array[idx].chain;
		for (i = ip->cluster.nchains; i <= idx; ++i) {
			bzero(&ip->cluster.array[i],
			      sizeof(ip->cluster.array[i]));
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		ip->cluster.nchains = idx + 1;
	}
	if (ochain != nchain) {
		/*
		 * Make adjustments.
		 */
		ip->cluster.array[idx].chain = nchain;
		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
						HAMMER2_CITEM_INVALID;
	}
	hammer2_spin_unex(&ip->cluster_spin);
	if (ochain != nchain) {
		if (nchain)
			hammer2_chain_ref(nchain);
		if (ochain)
			hammer2_chain_drop(ochain);
	}
}
hammer2_key_t
hammer2_inode_data_count(const hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;
	hammer2_key_t count = 0;
	int i;

	for (i = 0; i < ip->cluster.nchains; ++i) {
		if ((chain = ip->cluster.array[i].chain) != NULL) {
			if (count < chain->bref.embed.stats.data_count)
				count = chain->bref.embed.stats.data_count;
		}
	}
	return count;
}
hammer2_key_t
hammer2_inode_inode_count(const hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;
	hammer2_key_t count = 0;
	int i;

	for (i = 0; i < ip->cluster.nchains; ++i) {
		if ((chain = ip->cluster.array[i].chain) != NULL) {
			if (count < chain->bref.embed.stats.inode_count)
				count = chain->bref.embed.stats.inode_count;
		}
	}
	return count;
}
/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode is
 * left intact with nlinks == 0;
 */
void
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, struct vnode **vprecyclep)
{
	struct vnode *vp;

	/*
	 * Decrement nlinks.  Catch a bad nlinks count here too (e.g. 0 or
	 * negative), and just assume a transition to 0.
	 */
	if ((int64_t)ip->meta.nlinks <= 1) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);

		/*
		 * Scrap the vnode as quickly as possible.  The vp association
		 * stays intact while we hold the inode locked.  However, vp
		 * can be NULL here.
		 */
		vp = ip->vp;
		cpu_ccfence();

		/*
		 * If no vp is associated there is no high-level state to
		 * deal with and we can scrap the inode immediately.
		 */
		if (vp == NULL) {
			if ((ip->flags & HAMMER2_INODE_DELETING) == 0) {
				atomic_set_int(&ip->flags,
					       HAMMER2_INODE_DELETING);
				hammer2_inode_delayed_sideq(ip);
			}
			return;
		}

		/*
		 * Because INODE_ISUNLINKED is set with the inode lock
		 * held, the vnode cannot be ripped up from under us.
		 * There may still be refs so knote anyone waiting for
		 * a delete notification.
		 *
		 * The vnode is not necessarily ref'd due to the unlinking
		 * itself, so we have to defer handling to the end of the
		 * VOP, which will then call hammer2_inode_vprecycle().
		 */
		if (vprecyclep) {
			vhold(vp);
			*vprecyclep = vp;
		}
	}

	/*
	 * Adjust nlinks and retain the inode on the media for now
	 */
	hammer2_inode_modify(ip);
	if ((int64_t)ip->meta.nlinks > 1)
		--ip->meta.nlinks;
	else
		ip->meta.nlinks = 0;
}
/*
 * Called at the end of a VOP that removes a file with a vnode that
 * we want to try to dispose of quickly due to a file deletion.  If
 * we don't do this, the vnode can hang around with 0 refs for a very
 * long time and prevent reclamation of the underlying file and inode
 * (inode remains on-media with nlinks == 0 until the vnode is recycled
 * due to random system activity or a umount).
 */
void
hammer2_inode_vprecycle(struct vnode *vp)
{
	if (vget(vp, LK_EXCLUSIVE) == 0) {
		vfinalize(vp);
		hammer2_knote(vp, NOTE_DELETE);
		vdrop(vp);
		vput(vp);
	} else {
		vdrop(vp);
	}
}
/*
 * Mark an inode as being modified, meaning that the caller will modify
 * ip->meta.
 *
 * If a vnode is present we set the vnode dirty and the nominal filesystem
 * sync will also handle synchronizing the inode meta-data.  Unless NOSIDEQ
 * is set we must ensure that the inode is on pmp->sideq.
 *
 * NOTE: We must always queue the inode to the sideq.  This allows H2 to
 *	 shortcut vsyncscan() and flush inodes and their related vnodes
 *	 in two stages.  H2 still calls vfsync() for each vnode.
 *
 * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
 *	 only modifying the in-memory inode.  A modify_tid is synchronized
 *	 later when the inode gets flushed.
 *
 * NOTE: As an exception to the general rule, the inode MAY be locked
 *	 shared for this particular call.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	if (ip->vp)
		vsetisdirty(ip->vp);
	if (ip->pmp && (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0)
		hammer2_inode_delayed_sideq(ip);
}
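
/*
 * Illustrative sketch (editorial addition): any code path that changes
 * ip->meta calls hammer2_inode_modify() first while holding the inode
 * lock:
 *
 *	hammer2_inode_modify(ip);
 *	ip->meta.mtime = mtime;
 *
 * The MODIFIED flag set here is what hammer2_inode_chain_sync() below
 * keys on to push the frontend meta-data into the chains.
 */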
/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.  This
 * does not flush the inode's chain or its sub-topology to media (higher
 * level layers are responsible for doing that).
 *
 * Called with a locked inode inside a normal transaction.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_sync(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
		hammer2_xop_fsync_t *xop;

		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->clear_directdata = 0;
		if (ip->flags & HAMMER2_INODE_RESIZED) {
			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
				ip->meta.op_flags &=
				    ~HAMMER2_OPFLAG_DIRECTDATA;
				xop->clear_directdata = 1;
			}
			xop->osize = ip->osize;
		} else {
			xop->osize = ip->meta.size;	/* safety */
		}
		xop->ipflags = ip->flags;
		xop->meta = ip->meta;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
					     HAMMER2_INODE_MODIFIED);
		hammer2_xop_start(&xop->head, &hammer2_inode_chain_sync_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: unable to fsync inode %p\n", ip);
			/*
			atomic_set_int(&ip->flags,
				       xop->ipflags & (HAMMER2_INODE_RESIZED |
						       HAMMER2_INODE_MODIFIED));
			*/
			/* XXX return error somehow? */
		}
	}
	return error;
}
/*
 * When an inode is flagged INODE_CREATING its chains have not actually
 * been inserted into the on-media tree yet.
 */
int
hammer2_inode_chain_ins(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_CREATING) {
		hammer2_xop_create_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_CREATING);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->lhc = ip->meta.inum;
		xop->flags = 0;
		hammer2_xop_start(&xop->head, &hammer2_inode_create_ins_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"insert inode %p %ld\n",
				ip, (long)ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}
/*
 * When an inode is flagged INODE_DELETING it has been deleted (no directory
 * entry or open refs are left, though as an optimization H2 might leave
 * nlinks == 1 to avoid unnecessary block updates).  The backend flush then
 * needs to actually remove it from the topology.
 *
 * NOTE: backend flush must still sync and flush the deleted inode to clean
 *	 out related chains.
 *
 * NOTE: We must clear not only INODE_DELETING, but also INODE_ISUNLINKED
 *	 to prevent the vnode reclaim code from trying to delete it twice.
 */
int
hammer2_inode_chain_des(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_DELETING) {
		hammer2_xop_destroy_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_DELETING |
					     HAMMER2_INODE_ISUNLINKED);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"delete inode %p %ld\n",
				ip, (long)ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}
/*
 * Flushes the inode's chain and its sub-topology to media.  Interlocks
 * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
 * function creating or modifying a chain under this inode will re-set the
 * flag.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_flush_t *xop;
	int error;

	atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | flags);
	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == HAMMER2_ERROR_ENOENT)
		error = 0;

	return error;
}