/* sys/vfs/hammer2/hammer2_inode.c (dragonfly.git) */
/*
 * Copyright (c) 2011-2023 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/uuid.h>
#include <sys/vnode.h>

#include "hammer2.h"

#define INODE_DEBUG	0

/*
 * Initialize inum hash in fresh structure
 */
void
hammer2_inum_hash_init(hammer2_pfs_t *pmp)
{
	hammer2_inum_hash_t *hash;
	int i;

	for (i = 0; i < HAMMER2_INUMHASH_SIZE; ++i) {
		hash = &pmp->inumhash[i];
		hammer2_spin_init(&hash->spin, "h2inum");
	}
}

/*
 * Caller holds pmp->list_spin and the inode should be locked.  Merge ip
 * with the specified depend.
 *
 * If the ip is on SYNCQ it stays there and (void *)-1 is returned, indicating
 * that successive calls must ensure the ip is on a pass2 depend (or they are
 * all SYNCQ).  If the passed-in depend is not NULL and not (void *)-1 then
 * we can set pass2 on it and return.
 *
 * If the ip is not on SYNCQ it is merged with the passed-in depend, creating
 * a self-depend if necessary, and depend->pass2 is set according
 * to the PASS2 flag.  SIDEQ is set.
 */
static __noinline
hammer2_depend_t *
hammer2_inode_setdepend_locked(hammer2_inode_t *ip, hammer2_depend_t *depend)
{
	hammer2_pfs_t *pmp = ip->pmp;
	hammer2_depend_t *dtmp;
	hammer2_inode_t *iptmp;

	/*
	 * If ip is SYNCQ its entry is used for the syncq list and it will
	 * no longer be associated with a dependency.  Merging this status
	 * with a passed-in depend implies PASS2.
	 */
	if (ip->flags & HAMMER2_INODE_SYNCQ) {
		if (depend == (void *)-1 ||
		    depend == NULL) {
			return ((void *)-1);
		}
		depend->pass2 = 1;
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

		return depend;
	}

	/*
	 * If ip is already SIDEQ, merge ip->depend into the passed-in depend.
	 * If it is not, associate the ip with the passed-in depend, creating
	 * a single-entry dependency using depend_static if necessary.
	 *
	 * NOTE: The use of ip->depend_static always requires that the
	 *	 specific ip containing the structure is part of that
	 *	 particular depend_static's dependency group.
	 */
	if (ip->flags & HAMMER2_INODE_SIDEQ) {
		/*
		 * Merge ip->depend with the passed-in depend.  If the
		 * passed-in depend is not a special case, all ips associated
		 * with ip->depend (including the original ip) must be moved
		 * to the passed-in depend.
		 */
		if (depend == NULL) {
			depend = ip->depend;
		} else if (depend == (void *)-1) {
			depend = ip->depend;
			depend->pass2 = 1;
		} else if (depend != ip->depend) {
#ifdef INVARIANTS
			int sanitychk = 0;
#endif
			dtmp = ip->depend;
			while ((iptmp = TAILQ_FIRST(&dtmp->sideq)) != NULL) {
#ifdef INVARIANTS
				if (iptmp == ip)
					sanitychk = 1;
#endif
				TAILQ_REMOVE(&dtmp->sideq, iptmp, entry);
				TAILQ_INSERT_TAIL(&depend->sideq, iptmp, entry);
				iptmp->depend = depend;
			}
			KKASSERT(sanitychk == 1);
			depend->count += dtmp->count;
			depend->pass2 |= dtmp->pass2;
			TAILQ_REMOVE(&pmp->depq, dtmp, entry);
			dtmp->count = 0;
			dtmp->pass2 = 0;
		}
	} else {
		/*
		 * Add ip to the sideq, creating a self-dependency if
		 * necessary.
		 */
		hammer2_inode_ref(ip);
		atomic_set_int(&ip->flags, HAMMER2_INODE_SIDEQ);
		if (depend == NULL) {
			depend = &ip->depend_static;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} else if (depend == (void *)-1) {
			depend = &ip->depend_static;
			depend->pass2 = 1;
			TAILQ_INSERT_TAIL(&pmp->depq, depend, entry);
		} /* else add ip to passed-in depend */
		TAILQ_INSERT_TAIL(&depend->sideq, ip, entry);
		ip->depend = depend;
		++depend->count;
		++pmp->sideq_count;
	}

	if (ip->flags & HAMMER2_INODE_SYNCQ_PASS2)
		depend->pass2 = 1;
	if (depend->pass2)
		hammer2_trans_setflags(pmp, HAMMER2_TRANS_RESCAN);

	return depend;
}

/*
 * Put a solo inode on the SIDEQ (meaning that it's dirty).  This can also
 * occur from inode_lock4() and inode_depend().
 *
 * Caller must pass-in a locked inode.
 */
void
hammer2_inode_delayed_sideq(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp = ip->pmp;

	/*
	 * Optimize case to avoid pmp spinlock.
	 */
	if ((ip->flags & (HAMMER2_INODE_SYNCQ | HAMMER2_INODE_SIDEQ)) == 0) {
		hammer2_spin_ex(&pmp->list_spin);
		hammer2_inode_setdepend_locked(ip, NULL);
		hammer2_spin_unex(&pmp->list_spin);
	}
}

/*
 * Lock an inode, with SYNCQ semantics.
 *
 * HAMMER2 offers shared and exclusive locks on inodes.  Pass a mask of
 * flags for options:
 *
 *	- pass HAMMER2_RESOLVE_SHARED if a shared lock is desired.
 *	  Shared locks are not subject to SYNCQ semantics, exclusive locks
 *	  are.
 *
 *	- pass HAMMER2_RESOLVE_ALWAYS if you need the inode's meta-data.
 *	  Most front-end inode locks do.
 *
 *	- pass HAMMER2_RESOLVE_NEVER if you do not want to require that
 *	  the inode data be resolved.  This is used by the syncthr because
 *	  it can run on an unresolved/out-of-sync cluster, and also by the
 *	  vnode reclamation code to avoid unnecessary I/O (particularly when
 *	  disposing of hundreds of thousands of cached vnodes).
 *
 * This function, along with lock4, has SYNCQ semantics.  If the inode being
 * locked is on the SYNCQ, that is, it has been staged by the syncer, we must
 * block until the operation is complete (even if we can lock the inode).  In
 * order to reduce the stall time, we re-order the inode to the front of the
 * pmp->syncq prior to blocking.  This reordering VERY significantly improves
 * performance.
 *
 * The inode locking function locks the inode itself, resolves any stale
 * chains in the inode's cluster, and allocates a fresh copy of the
 * cluster with 1 ref and all the underlying chains locked.
 *
 * ip->cluster will be stable while the inode is locked.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *       inode would otherwise confuse multiple lock holders of the inode.
 */
void
hammer2_inode_lock(hammer2_inode_t *ip, int how)
{
	hammer2_pfs_t *pmp;

	hammer2_inode_ref(ip);
	pmp = ip->pmp;

	/*
	 * Inode structure mutex - Shared lock
	 */
	if (how & HAMMER2_RESOLVE_SHARED) {
		hammer2_mtx_sh(&ip->lock);
		return;
	}

	/*
	 * Inode structure mutex - Exclusive lock
	 *
	 * An exclusive lock (if not recursive) must wait for inodes on
	 * SYNCQ to flush first, to ensure that meta-data dependencies such
	 * as the nlink count and related directory entries are not split
	 * across flushes.
	 *
	 * If the vnode is locked by the current thread it must be unlocked
	 * across the tsleep() to avoid a deadlock.
	 */
	hammer2_mtx_ex(&ip->lock);
	if (hammer2_mtx_refs(&ip->lock) > 1)
		return;
	while ((ip->flags & HAMMER2_INODE_SYNCQ) && pmp) {
		hammer2_spin_ex(&pmp->list_spin);
		if (ip->flags & HAMMER2_INODE_SYNCQ) {
			tsleep_interlock(&ip->flags, 0);
			atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
			TAILQ_REMOVE(&pmp->syncq, ip, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, ip, entry);
			hammer2_spin_unex(&pmp->list_spin);
			hammer2_mtx_unlock(&ip->lock);
			tsleep(&ip->flags, PINTERLOCKED, "h2sync", 0);
			hammer2_mtx_ex(&ip->lock);
			continue;
		}
		hammer2_spin_unex(&pmp->list_spin);
		break;
	}
}

/*
 * Exclusively lock up to four inodes, in order, with SYNCQ semantics.
 * ip1 and ip2 must not be NULL.  ip3 and ip4 may be NULL, but if ip3 is
 * NULL then ip4 must also be NULL.
 *
 * This creates a dependency between up to four inodes.
 */
void
hammer2_inode_lock4(hammer2_inode_t *ip1, hammer2_inode_t *ip2,
		    hammer2_inode_t *ip3, hammer2_inode_t *ip4)
{
	hammer2_inode_t *ips[4];
	hammer2_inode_t *iptmp;
	hammer2_inode_t *ipslp;
	hammer2_depend_t *depend;
	hammer2_pfs_t *pmp;
	size_t count;
	size_t i;

	pmp = ip1->pmp;			/* may be NULL */
	KKASSERT(pmp == ip2->pmp);

	ips[0] = ip1;
	ips[1] = ip2;
	if (ip3 == NULL) {
		count = 2;
	} else if (ip4 == NULL) {
		count = 3;
		ips[2] = ip3;
		KKASSERT(pmp == ip3->pmp);
	} else {
		count = 4;
		ips[2] = ip3;
		ips[3] = ip4;
		KKASSERT(pmp == ip3->pmp);
		KKASSERT(pmp == ip4->pmp);
	}

	for (i = 0; i < count; ++i)
		hammer2_inode_ref(ips[i]);

restart:
	/*
	 * Lock the inodes in order
	 */
	for (i = 0; i < count; ++i) {
		hammer2_mtx_ex(&ips[i]->lock);
	}

	/*
	 * Associate dependencies, record the first inode found on SYNCQ
	 * (operation is allowed to proceed for inodes on PASS2) for our
	 * sleep operation; this inode is theoretically the last one sync'd
	 * in the sequence.
	 *
	 * All inodes found on SYNCQ are moved to the head of the syncq
	 * to reduce stalls.
	 */
	hammer2_spin_ex(&pmp->list_spin);
	depend = NULL;
	ipslp = NULL;
	for (i = 0; i < count; ++i) {
		iptmp = ips[i];
		depend = hammer2_inode_setdepend_locked(iptmp, depend);
		if (iptmp->flags & HAMMER2_INODE_SYNCQ) {
			TAILQ_REMOVE(&pmp->syncq, iptmp, entry);
			TAILQ_INSERT_HEAD(&pmp->syncq, iptmp, entry);
			if (ipslp == NULL)
				ipslp = iptmp;
		}
	}
	hammer2_spin_unex(&pmp->list_spin);

	/*
	 * Block and retry if any of the inodes are on SYNCQ.  It is
	 * important that we allow the operation to proceed in the
	 * PASS2 case, to avoid deadlocking against the vnode.
	 */
	if (ipslp) {
		for (i = 0; i < count; ++i)
			hammer2_mtx_unlock(&ips[i]->lock);
		tsleep(&ipslp->flags, 0, "h2sync", 2);
		goto restart;
	}
}

/*
 * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
 * we wake them up.
 */
void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
	if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) {
		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
		hammer2_mtx_unlock(&ip->lock);
		wakeup(&ip->flags);
	} else {
		hammer2_mtx_unlock(&ip->lock);
	}
	hammer2_inode_drop(ip);
}

/*
 * If either ip1 or ip2 have been tapped by the syncer, make sure that both
 * are.  This ensures that dependencies (e.g. dirent-v-inode) are synced
 * together.  For dirent-v-inode depends, pass the dirent as ip1.
 *
 * If neither ip1 or ip2 have been tapped by the syncer, merge them into a
 * single dependency.  Dependencies are entered into pmp->depq.  This
 * effectively flags the inodes SIDEQ.
 *
 * Both ip1 and ip2 must be locked by the caller.  This also ensures
 * that we can't race the end of the syncer's queue run.
 */
void
hammer2_inode_depend(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	hammer2_pfs_t *pmp;
	hammer2_depend_t *depend;

	pmp = ip1->pmp;
	hammer2_spin_ex(&pmp->list_spin);
	depend = hammer2_inode_setdepend_locked(ip1, NULL);
	depend = hammer2_inode_setdepend_locked(ip2, depend);
	hammer2_spin_unex(&pmp->list_spin);
}

/*
 * Select a chain out of an inode's cluster and lock it.
 *
 * The inode does not have to be locked.
 */
hammer2_chain_t *
hammer2_inode_chain(hammer2_inode_t *ip, int clindex, int how)
{
	hammer2_chain_t *chain;
	hammer2_cluster_t *cluster;

	hammer2_spin_sh(&ip->cluster_spin);
	cluster = &ip->cluster;
	if (clindex >= cluster->nchains)
		chain = NULL;
	else
		chain = cluster->array[clindex].chain;
	if (chain) {
		hammer2_chain_ref(chain);
		hammer2_spin_unsh(&ip->cluster_spin);
		hammer2_chain_lock(chain, how);
	} else {
		hammer2_spin_unsh(&ip->cluster_spin);
	}
	return chain;
}
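
/*
 * Select a chain out of an inode's cluster, lock it, and also lock and
 * return its parent via *parentp (NULL if the chain has no parent).
 * Locks are acquired in (parent, chain) order and the lookup is retried
 * if the cluster element or the chain's parent changes underneath us.
 */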
hammer2_chain_t *
hammer2_inode_chain_and_parent(hammer2_inode_t *ip, int clindex,
			       hammer2_chain_t **parentp, int how)
{
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;

	for (;;) {
		hammer2_spin_sh(&ip->cluster_spin);
		if (clindex >= ip->cluster.nchains)
			chain = NULL;
		else
			chain = ip->cluster.array[clindex].chain;
		if (chain) {
			hammer2_chain_ref(chain);
			hammer2_spin_unsh(&ip->cluster_spin);
			hammer2_chain_lock(chain, how);
		} else {
			hammer2_spin_unsh(&ip->cluster_spin);
		}

		/*
		 * Get parent, lock order must be (parent, chain).
		 */
		parent = chain->parent;
		if (parent) {
			hammer2_chain_ref(parent);
			hammer2_chain_unlock(chain);
			hammer2_chain_lock(parent, how);
			hammer2_chain_lock(chain, how);
		}
		if (ip->cluster.array[clindex].chain == chain &&
		    chain->parent == parent) {
			break;
		}

		/*
		 * Retry
		 */
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
		if (parent) {
			hammer2_chain_unlock(parent);
			hammer2_chain_drop(parent);
		}
	}
	*parentp = parent;

	return chain;
}

/*
 * Temporarily release a lock held shared or exclusive.  Caller must
 * hold the lock shared or exclusive on call and lock will be released
 * on return.
 *
 * Restore a lock that was temporarily released.
 */
hammer2_mtx_state_t
hammer2_inode_lock_temp_release(hammer2_inode_t *ip)
{
	return hammer2_mtx_temp_release(&ip->lock);
}

void
hammer2_inode_lock_temp_restore(hammer2_inode_t *ip, hammer2_mtx_state_t ostate)
{
	hammer2_mtx_temp_restore(&ip->lock, ostate);
}

/*
 * Upgrade a shared inode lock to exclusive and return.  If the inode lock
 * is already held exclusively this is a NOP.
 *
 * The caller MUST hold the inode lock either shared or exclusive on call
 * and will own the lock exclusively on return.
 *
 * Returns non-zero if the lock was already exclusive prior to the upgrade.
 */
int
hammer2_inode_lock_upgrade(hammer2_inode_t *ip)
{
	int wasexclusive;

	if (mtx_islocked_ex(&ip->lock)) {
		wasexclusive = 1;
	} else {
		hammer2_mtx_unlock(&ip->lock);
		hammer2_mtx_ex(&ip->lock);
		wasexclusive = 0;
	}
	return wasexclusive;
}

/*
 * Downgrade an inode lock from exclusive to shared only if the inode
 * lock was previously shared.  If the inode lock was previously exclusive,
 * this is a NOP.
 */
void
hammer2_inode_lock_downgrade(hammer2_inode_t *ip, int wasexclusive)
{
	if (wasexclusive == 0)
		hammer2_mtx_downgrade(&ip->lock);
}
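
/*
 * Return the inum hash bucket (pmp->inumhash[]) for the given inode number.
 */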
static __inline hammer2_inum_hash_t *
inumhash(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	int hv;

	hv = (int)inum;
	return (&pmp->inumhash[hv & HAMMER2_INUMHASH_MASK]);
}

/*
 * Lookup an inode by inode number
 */
hammer2_inode_t *
hammer2_inode_lookup(hammer2_pfs_t *pmp, hammer2_tid_t inum)
{
	hammer2_inum_hash_t *hash;
	hammer2_inode_t *ip;

	KKASSERT(pmp);
	if (pmp->spmp_hmp) {
		ip = NULL;
	} else {
		hash = inumhash(pmp, inum);
		hammer2_spin_sh(&hash->spin);
		for (ip = hash->base; ip; ip = ip->next) {
			if (ip->meta.inum == inum) {
				hammer2_inode_ref(ip);
				break;
			}
		}
		hammer2_spin_unsh(&hash->spin);
	}
	return(ip);
}

/*
 * Adding a ref to an inode is only legal if the inode already has at least
 * one ref.
 *
 * (can be called with spinlock held)
 */
void
hammer2_inode_ref(hammer2_inode_t *ip)
{
	atomic_add_int(&ip->refs, 1);
	if (hammer2_debug & 0x80000) {
		kprintf("INODE+1 %p (%d->%d)\n", ip, ip->refs - 1, ip->refs);
		print_backtrace(8);
	}
}

/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	u_int refs;

	while (ip) {
		if (hammer2_debug & 0x80000) {
			kprintf("INODE-1 %p (%d->%d)\n",
				ip, ip->refs, ip->refs - 1);
			print_backtrace(8);
		}
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			hammer2_inum_hash_t *hash;
			hammer2_inode_t **xipp;

			pmp = ip->pmp;
			KKASSERT(pmp);
			hash = inumhash(pmp, ip->meta.inum);

			hammer2_spin_ex(&hash->spin);
			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONHASH) {
					xipp = &hash->base;
					while (*xipp != ip)
						xipp = &(*xipp)->next;
					*xipp = ip->next;
					ip->next = NULL;
					atomic_add_long(&pmp->inum_count, -1);
					atomic_clear_int(&ip->flags,
							 HAMMER2_INODE_ONHASH);
				}
				hammer2_spin_unex(&hash->spin);

				ip->pmp = NULL;

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL);

				kfree_obj(ip, pmp->minode);
				atomic_add_long(&pmp->inmem_inodes, -1);
				ip = NULL;	/* will terminate loop */
			} else {
				hammer2_spin_unex(&hash->spin);
			}
		} else {
			/*
			 * Non zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}

/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * *errorp is set to a UNIX error, not a HAMMER2 error.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	pmp = ip->pmp;
	KKASSERT(pmp != NULL);
	*errorp = 0;

	for (;;) {
		/*
		 * Attempt to reuse an existing vnode assignment.  It is
		 * possible to race a reclaim so the vget() may fail.  The
		 * inode must be unlocked during the vget() to avoid a
		 * deadlock against a reclaim.
		 */
		int wasexclusive;

		vp = ip->vp;
		if (vp) {
			/*
			 * Inode must be unlocked during the vget() to avoid
			 * possible deadlocks, but leave the ip ref intact.
			 *
			 * vnode is held to prevent destruction during the
			 * vget().  The vget() can still fail if we lost
			 * a reclaim race on the vnode.
			 */
			hammer2_mtx_state_t ostate;

			vhold(vp);
			ostate = hammer2_inode_lock_temp_release(ip);
			if (vget(vp, LK_EXCLUSIVE)) {
				vdrop(vp);
				hammer2_inode_lock_temp_restore(ip, ostate);
				continue;
			}
			hammer2_inode_lock_temp_restore(ip, ostate);
			vdrop(vp);
			/* vp still locked and ref from vget */
			if (ip->vp != vp) {
				kprintf("hammer2: igetv race %p/%p\n",
					ip->vp, vp);
				vput(vp);
				continue;
			}
			*errorp = 0;
			break;
		}

		/*
		 * No vnode exists, allocate a new vnode.  Beware of
		 * allocation races.  This function will return an
		 * exclusively locked and referenced vnode.
		 */
		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
		if (*errorp) {
			kprintf("hammer2: igetv getnewvnode failed %d\n",
				*errorp);
			vp = NULL;
			break;
		}

		/*
		 * Lock the inode and check for an allocation race.
		 */
		wasexclusive = hammer2_inode_lock_upgrade(ip);
		if (ip->vp != NULL) {
			vp->v_type = VBAD;
			vx_put(vp);
			hammer2_inode_lock_downgrade(ip, wasexclusive);
			continue;
		}

		switch (ip->meta.type) {
		case HAMMER2_OBJTYPE_DIRECTORY:
			vp->v_type = VDIR;
			break;
		case HAMMER2_OBJTYPE_REGFILE:
			/*
			 * Regular file must use buffer cache I/O
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VREG;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_SOFTLINK:
			/*
			 * XXX for now we are using the generic file_read
			 * and file_write code so we need a buffer cache
			 * association.
			 *
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VLNK;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_CDEV:
			vp->v_type = VCHR;
			/* fall through */
		case HAMMER2_OBJTYPE_BDEV:
			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
				vp->v_type = VBLK;
			addaliasu(vp,
				  ip->meta.rmajor,
				  ip->meta.rminor);
			break;
		case HAMMER2_OBJTYPE_FIFO:
			vp->v_type = VFIFO;
			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
			break;
		case HAMMER2_OBJTYPE_SOCKET:
			vp->v_type = VSOCK;
			break;
		default:
			panic("hammer2: unhandled objtype %d",
			      ip->meta.type);
			break;
		}

		if (ip == pmp->iroot)
			vsetflags(vp, VROOT);

		vp->v_data = ip;
		ip->vp = vp;
		hammer2_inode_ref(ip);		/* vp association */
		hammer2_inode_lock_downgrade(ip, wasexclusive);
		vx_downgrade(vp);
		break;
	}

	/*
	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
	 */
	if (hammer2_debug & 0x0002) {
		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
			vp, vp->v_refcnt, vp->v_auxrefs);
	}
	return (vp);
}

/*
 * XXX this API needs a rewrite.  It needs to be split into a
 * hammer2_inode_alloc() and hammer2_inode_build() to allow us to get
 * rid of the inode/chain lock reversal fudge.
 *
 * Returns the inode associated with the passed-in cluster, allocating a new
 * hammer2_inode structure if necessary, then synchronizing it to the passed
 * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
 * is synchronized.  Otherwise the whole cluster is synchronized.  inum will
 * be extracted from the passed-in xop and the inum argument will be ignored.
 *
 * If xop is passed as NULL then a new hammer2_inode is allocated with the
 * specified inum, and returned.  For normal inodes, the inode will be
 * indexed in memory and if it already exists the existing ip will be
 * returned instead of allocating a new one.  The superroot and PFS inodes
 * are not indexed in memory.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
		  hammer2_tid_t inum, int idx)
{
	hammer2_inode_t *nip;
	const hammer2_inode_data_t *iptmp;
	const hammer2_inode_data_t *nipdata;

	KKASSERT(xop == NULL ||
		 hammer2_cluster_type(&xop->cluster) ==
		 HAMMER2_BREF_TYPE_INODE);
	KKASSERT(pmp);

	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 *
	 * Cluster can be NULL during the initial pfs allocation.
	 */
	if (xop) {
		iptmp = &hammer2_xop_gdata(xop)->ipdata;
		inum = iptmp->meta.inum;
		hammer2_xop_pdata(xop);
	}
again:
	nip = hammer2_inode_lookup(pmp, inum);
	if (nip) {
		/*
		 * We may have to unhold the cluster to avoid a deadlock
		 * against vnlru (and possibly other XOPs).
		 */
		if (xop) {
			if (hammer2_mtx_ex_try(&nip->lock) != 0) {
				hammer2_cluster_unhold(&xop->cluster);
				hammer2_mtx_ex(&nip->lock);
				hammer2_cluster_rehold(&xop->cluster);
			}
		} else {
			hammer2_mtx_ex(&nip->lock);
		}

		/*
		 * Handle SMP race (not applicable to the super-root spmp
		 * which can't index inodes due to duplicative inode numbers).
		 */
		if (pmp->spmp_hmp == NULL &&
		    (nip->flags & HAMMER2_INODE_ONHASH) == 0) {
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		if (xop) {
			if (idx >= 0)
				hammer2_inode_repoint_one(nip, &xop->cluster,
							  idx);
			else
				hammer2_inode_repoint(nip, &xop->cluster);
		}
		return nip;
	}

	/*
	 * We couldn't find the inode number, create a new inode and try to
	 * insert it, handle insertion races.
	 */
	nip = kmalloc_obj(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
	hammer2_spin_init(&nip->cluster_spin, "h2clspin");
	atomic_add_long(&pmp->inmem_inodes, 1);

	/*
	 * Initialize nip's cluster.  A cluster is provided for normal
	 * inodes but typically not for the super-root or PFS inodes.
	 */
	{
		hammer2_inode_t *nnip = nip;
		nip->ihash = (int)hammer2_icrc32(&nnip, sizeof(nnip));
	}

	nip->cluster.refs = 1;
	nip->cluster.pmp = pmp;
	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
	if (xop) {
		nipdata = &hammer2_xop_gdata(xop)->ipdata;
		nip->meta = nipdata->meta;
		hammer2_xop_pdata(xop);
		hammer2_inode_repoint(nip, &xop->cluster);
	} else {
		nip->meta.inum = inum;		/* PFS inum is always 1 XXX */
		/* mtime will be updated when a cluster is available */
	}

	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock() call.
	 */
	nip->refs = 1;
	hammer2_mtx_init(&nip->lock, "h2inode");
	hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
	hammer2_mtx_ex(&nip->lock);
	TAILQ_INIT(&nip->depend_static.sideq);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp->spmp_hmp == NULL) {
		hammer2_inum_hash_t *hash;
		hammer2_inode_t *xip;
		hammer2_inode_t **xipp;

		hash = inumhash(pmp, nip->meta.inum);
		hammer2_spin_ex(&hash->spin);
		for (xipp = &hash->base;
		     (xip = *xipp) != NULL;
		     xipp = &xip->next)
		{
			if (xip->meta.inum == nip->meta.inum) {
				hammer2_spin_unex(&hash->spin);
				hammer2_mtx_unlock(&nip->lock);
				hammer2_inode_drop(nip);
				goto again;
			}
		}
		nip->next = NULL;
		*xipp = nip;
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONHASH);
		atomic_add_long(&pmp->inum_count, 1);
		hammer2_spin_unex(&hash->spin);
	}
	return (nip);
}

/*
 * Create a PFS inode under the superroot.  This function will create the
 * inode, its media chains, and also insert it into the media.
 *
 * Caller must be in a flush transaction because we are inserting the inode
 * onto the media.
 */
hammer2_inode_t *
hammer2_inode_create_pfs(hammer2_pfs_t *spmp,
			 const char *name, size_t name_len,
			 int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *pip;
	hammer2_inode_t *nip;
	int error;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;
	hammer2_key_t lhc;

	pip = spmp->iroot;
	nip = NULL;

	lhc = hammer2_dirhash(name, name_len);
	*errorp = 0;

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_lock(pip, 0);

	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Locate an unused key in the collision space.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the inode with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	xop->flags = HAMMER2_INSERT_PFSROOT;
	bzero(&xop->meta, sizeof(xop->meta));

	xop->meta.type = HAMMER2_OBJTYPE_DIRECTORY;
	xop->meta.inum = 1;
	xop->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	xop->meta.comp_algo = pip_comp_algo;
	xop->meta.check_algo = pip_check_algo;
	xop->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&xop->meta.ctime);
	xop->meta.mtime = xop->meta.ctime;
	xop->meta.mode = 0755;
	xop->meta.nlinks = 1;

	hammer2_xop_setname(&xop->head, name, name_len);
	xop->meta.name_len = name_len;
	xop->meta.name_key = lhc;
	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);

	hammer2_xop_start(&xop->head, &hammer2_inode_create_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("CREATE INODE %*.*s\n",
		(int)name_len, (int)name_len, name);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Set up the new inode if not a hardlink pointer.
	 *
	 * NOTE: *_get() integrates chain's lock into the inode lock.
	 *
	 * NOTE: Only one new inode can currently be created per
	 *	 transaction.  If the need arises we can adjust
	 *	 hammer2_trans_init() to allow more.
	 *
	 * NOTE: nipdata will have chain's blockset data.
	 */
	nip = hammer2_inode_get(pip->pmp, &xop->head, -1, -1);
	nip->comp_heuristic = 0;
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	hammer2_inode_unlock(pip);

	return (nip);
}

/*
 * Create a new, normal inode.  This function will create the inode,
 * the media chains, but will not insert the chains onto the media topology
 * (doing so would require a flush transaction and cause long stalls).
 *
 * Caller must be in a normal transaction.
 */
hammer2_inode_t *
hammer2_inode_create_normal(hammer2_inode_t *pip,
			    struct vattr *vap, struct ucred *cred,
			    hammer2_key_t inum, int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	int error;
	uid_t xuid;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;

	dip = pip->pmp->iroot;
	KKASSERT(dip != NULL);

	*errorp = 0;

	/*hammer2_inode_lock(dip, 0);*/

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Create the in-memory hammer2_inode structure for the specified
	 * inode.
	 */
	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
	nip->comp_heuristic = 0;
	KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
		 nip->cluster.nchains == 0);
	atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);

	/*
	 * Setup the inode meta-data
	 */
	nip->meta.type = hammer2_get_obj_type(vap->va_type);

	switch (nip->meta.type) {
	case HAMMER2_OBJTYPE_CDEV:
	case HAMMER2_OBJTYPE_BDEV:
		nip->meta.rmajor = vap->va_rmajor;
		nip->meta.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	KKASSERT(nip->meta.inum == inum);
	nip->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	nip->meta.comp_algo = pip_comp_algo;
	nip->meta.check_algo = pip_check_algo;
	nip->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&nip->meta.ctime);
	nip->meta.mtime = nip->meta.ctime;
	nip->meta.mode = vap->va_mode;
	nip->meta.nlinks = 1;

	xuid = hammer2_to_unix_xid(&pip_uid);
	xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
				     xuid, cred,
				     &vap->va_mode);
	if (vap->va_vaflags & VA_UID_UUID_VALID)
		nip->meta.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
	else
		hammer2_guid_to_uuid(&nip->meta.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		nip->meta.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
	else
		nip->meta.gid = pip_gid;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}

	/*
	 * Create the inode using (inum) as the key.  Pass pip for
	 * method inheritance.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = inum;
	xop->flags = 0;
	xop->meta = nip->meta;

	xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
	xop->meta.name_key = inum;
	nip->meta.name_len = xop->meta.name_len;
	nip->meta.name_key = xop->meta.name_key;
	hammer2_inode_modify(nip);

	/*
	 * Create the inode media chains but leave them detached.  We are
	 * not in a flush transaction so we can't mess with media topology
	 * above normal inodes (i.e. the index of the inodes themselves).
	 *
	 * We've already set the INODE_CREATING flag.  The inode's media
	 * chains will be inserted onto the media topology on the next
	 * filesystem sync.
	 */
	hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("create inode type %d error %d\n", nip->meta.type, error);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Associate the media chains created by the backend with the
	 * frontend inode.
	 */
	hammer2_inode_repoint(nip, &xop->head.cluster);
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	/*hammer2_inode_unlock(dip);*/

	return (nip);
}

/*
 * Create a directory entry under dip with the specified name, inode number,
 * and OBJTYPE (type).
 *
 * This returns a UNIX errno code, not a HAMMER2_ERROR_* code.
 *
 * Caller must hold dip locked.
 */
int
hammer2_dirent_create(hammer2_inode_t *dip, const char *name, size_t name_len,
		      hammer2_key_t inum, uint8_t type)
{
	hammer2_xop_mkdirent_t *xop;
	hammer2_key_t lhc;
	int error;

	lhc = 0;
	error = 0;

	KKASSERT(name != NULL);
	lhc = hammer2_dirhash(name, name_len);

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  At the same time check for key collisions
	 * and iterate until we don't get one.
	 *
	 * Lock the directory exclusively for now to guarantee that
	 * we can find an unused lhc for the name.  Due to collisions,
	 * two different creates can end up with the same lhc so we
	 * cannot depend on the OS to prevent the collision.
	 */
	hammer2_inode_modify(dip);

	/*
	 * If name specified, locate an unused key in the collision space.
	 * Otherwise use the passed-in lhc directly.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_key_t lhcbase;

		lhcbase = lhc;
		sxop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = lhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (lhc != sxop->head.cluster.focus->bref.key)
				break;
			++lhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != HAMMER2_ERROR_ENOENT)
				goto done2;
			++lhc;
			error = 0;
		}
		if ((lhcbase ^ lhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = HAMMER2_ERROR_ENOSPC;
			goto done2;
		}
	}

	/*
	 * Create the directory entry with the lhc as the key.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	xop->lhc = lhc;
	bzero(&xop->dirent, sizeof(xop->dirent));
	xop->dirent.inum = inum;
	xop->dirent.type = type;
	xop->dirent.namlen = name_len;

	KKASSERT(name_len < HAMMER2_INODE_MAXNAME);
	hammer2_xop_setname(&xop->head, name, name_len);

	hammer2_xop_start(&xop->head, &hammer2_inode_mkdirent_desc);

	error = hammer2_xop_collect(&xop->head, 0);

	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
done2:
	error = hammer2_error_to_errno(error);

	return error;
}

/*
 * Repoint ip->cluster's chains to cluster's chains and fixup the default
 * focus.  All items, valid or invalid, are repointed.  hammer2_xop_start()
 * filters out invalid or non-matching elements.
 *
 * Caller must hold the inode exclusively locked; the cluster, if not NULL,
 * must also be locked.
 *
 * Cluster may be NULL to clean out any chains in ip->cluster.
 */
void
hammer2_inode_repoint(hammer2_inode_t *ip, hammer2_cluster_t *cluster)
{
	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	bzero(dropch, sizeof(dropch));

	/*
	 * Drop any cached (typically data) chains related to this inode
	 */
	hammer2_spin_ex(&ip->cluster_spin);
	for (i = 0; i < ip->ccache_nchains; ++i) {
		dropch[i] = ip->ccache[i].chain;
		ip->ccache[i].flags = 0;
		ip->ccache[i].chain = NULL;
	}
	ip->ccache_nchains = 0;
	hammer2_spin_unex(&ip->cluster_spin);

	while (--i >= 0) {
		if (dropch[i]) {
			hammer2_chain_drop(dropch[i]);
			dropch[i] = NULL;
		}
	}

	/*
	 * Replace chains in ip->cluster with chains from cluster and
	 * adjust the focus if necessary.
	 *
	 * NOTE: nchain and/or ochain can be NULL due to gaps
	 *	 in the cluster arrays.
	 */
	hammer2_spin_ex(&ip->cluster_spin);
	for (i = 0; cluster && i < cluster->nchains; ++i) {
		/*
		 * Do not replace elements which are the same.  Also handle
		 * element count discrepancies.
		 */
		nchain = cluster->array[i].chain;
		if (i < ip->cluster.nchains) {
			ochain = ip->cluster.array[i].chain;
			if (ochain == nchain)
				continue;
		} else {
			ochain = NULL;
		}

		/*
		 * Make adjustments
		 */
		ip->cluster.array[i].chain = nchain;
		ip->cluster.array[i].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[i].flags |= cluster->array[i].flags &
					      HAMMER2_CITEM_INVALID;
		if (nchain)
			hammer2_chain_ref(nchain);
		dropch[i] = ochain;
	}

	/*
	 * Release any left-over chains in ip->cluster.
	 */
	while (i < ip->cluster.nchains) {
		nchain = ip->cluster.array[i].chain;
		if (nchain) {
			ip->cluster.array[i].chain = NULL;
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		dropch[i] = nchain;
		++i;
	}

	/*
	 * Fixup fields.  Note that the inode-embedded cluster is never
	 * directly locked.
	 */
	if (cluster) {
		ip->cluster.nchains = cluster->nchains;
		ip->cluster.focus = cluster->focus;
		ip->cluster.flags = cluster->flags & ~HAMMER2_CLUSTER_LOCKED;
	} else {
		ip->cluster.nchains = 0;
		ip->cluster.focus = NULL;
		ip->cluster.flags &= ~HAMMER2_CLUSTER_ZFLAGS;
	}

	hammer2_spin_unex(&ip->cluster_spin);

	/*
	 * Cleanup outside of spinlock
	 */
	while (--i >= 0) {
		if (dropch[i])
			hammer2_chain_drop(dropch[i]);
	}
}

/*
 * Repoint a single element from the cluster to the ip.  Used by the
 * synchronization threads to piecemeal update inodes.  Does not change
 * focus and requires inode to be re-locked to clean-up flags (XXX).
 */
void
hammer2_inode_repoint_one(hammer2_inode_t *ip, hammer2_cluster_t *cluster,
			  int idx)
{
	hammer2_chain_t *dropch[HAMMER2_MAXCLUSTER];
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	int i;

	/*
	 * Drop any cached (typically data) chains related to this inode
	 */
	hammer2_spin_ex(&ip->cluster_spin);
	for (i = 0; i < ip->ccache_nchains; ++i) {
		dropch[i] = ip->ccache[i].chain;
		ip->ccache[i].chain = NULL;
	}
	ip->ccache_nchains = 0;
	hammer2_spin_unex(&ip->cluster_spin);

	while (--i >= 0) {
		if (dropch[i])
			hammer2_chain_drop(dropch[i]);
	}

	/*
	 * Replace inode chain at index
	 */
	hammer2_spin_ex(&ip->cluster_spin);
	KKASSERT(idx < cluster->nchains);
	if (idx < ip->cluster.nchains) {
		ochain = ip->cluster.array[idx].chain;
		nchain = cluster->array[idx].chain;
	} else {
		ochain = NULL;
		nchain = cluster->array[idx].chain;
		for (i = ip->cluster.nchains; i <= idx; ++i) {
			bzero(&ip->cluster.array[i],
			      sizeof(ip->cluster.array[i]));
			ip->cluster.array[i].flags |= HAMMER2_CITEM_INVALID;
		}
		ip->cluster.nchains = idx + 1;
	}
	if (ochain != nchain) {
		/*
		 * Make adjustments.
		 */
		ip->cluster.array[idx].chain = nchain;
		ip->cluster.array[idx].flags &= ~HAMMER2_CITEM_INVALID;
		ip->cluster.array[idx].flags |= cluster->array[idx].flags &
						HAMMER2_CITEM_INVALID;
	}
	hammer2_spin_unex(&ip->cluster_spin);
	if (ochain != nchain) {
		if (nchain)
			hammer2_chain_ref(nchain);
		if (ochain)
			hammer2_chain_drop(ochain);
	}
}
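
/*
 * Return the largest data_count (or, for hammer2_inode_inode_count(),
 * inode_count) statistic recorded among the chains in the inode's cluster.
 */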
hammer2_key_t
hammer2_inode_data_count(const hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;
	hammer2_key_t count = 0;
	int i;

	for (i = 0; i < ip->cluster.nchains; ++i) {
		if ((chain = ip->cluster.array[i].chain) != NULL) {
			if (count < chain->bref.embed.stats.data_count)
				count = chain->bref.embed.stats.data_count;
		}
	}
	return count;
}

hammer2_key_t
hammer2_inode_inode_count(const hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;
	hammer2_key_t count = 0;
	int i;

	for (i = 0; i < ip->cluster.nchains; ++i) {
		if ((chain = ip->cluster.array[i].chain) != NULL) {
			if (count < chain->bref.embed.stats.inode_count)
				count = chain->bref.embed.stats.inode_count;
		}
	}
	return count;
}

/*
 * Called with a locked inode to finish unlinking an inode after xop_unlink
 * had been run.  This function is responsible for decrementing nlinks.
 *
 * We don't bother decrementing nlinks if the file is not open and this was
 * the last link.
 *
 * If the inode is a hardlink target its chain has not yet been deleted,
 * otherwise its chain has been deleted.
 *
 * If isopen then any prior deletion was not permanent and the inode is
 * left intact with nlinks == 0.
 */
int
hammer2_inode_unlink_finisher(hammer2_inode_t *ip, struct vnode **vprecyclep)
{
	struct vnode *vp;

	/*
	 * Decrement nlinks.  Catch a bad nlinks count here too (e.g. 0 or
	 * negative), and just assume a transition to 0.
	 */
	if ((int64_t)ip->meta.nlinks <= 1) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_ISUNLINKED);

		/*
		 * Scrap the vnode as quickly as possible.  The vp association
		 * stays intact while we hold the inode locked.  However, vp
		 * can be NULL here.
		 */
		vp = ip->vp;
		cpu_ccfence();

		/*
		 * If no vp is associated there is no high-level state to
		 * deal with and we can scrap the inode immediately.
		 */
		if (vp == NULL) {
			if ((ip->flags & HAMMER2_INODE_DELETING) == 0) {
				atomic_set_int(&ip->flags,
					       HAMMER2_INODE_DELETING);
				hammer2_inode_delayed_sideq(ip);
			}
			return 0;
		}

		/*
		 * Because INODE_ISUNLINKED is set with the inode lock
		 * held, the vnode cannot be ripped up from under us.
		 * There may still be refs so knote anyone waiting for
		 * a delete notification.
		 *
		 * The vnode is not necessarily ref'd due to the unlinking
		 * itself, so we have to defer handling to the end of the
		 * VOP, which will then call hammer2_inode_vprecycle().
		 */
		if (vprecyclep) {
			vhold(vp);
			*vprecyclep = vp;
		}
	}

	/*
	 * Adjust nlinks and retain the inode on the media for now
	 */
	hammer2_inode_modify(ip);
	if ((int64_t)ip->meta.nlinks > 1)
		--ip->meta.nlinks;
	else
		ip->meta.nlinks = 0;

	return 0;
}

/*
 * Called at the end of a VOP that removes a file with a vnode that
 * we want to try to dispose of quickly due to a file deletion.  If
 * we don't do this, the vnode can hang around with 0 refs for a very
 * long time and prevent reclamation of the underlying file and inode
 * (inode remains on-media with nlinks == 0 until the vnode is recycled
 * due to random system activity or a umount).
 */
void
hammer2_inode_vprecycle(struct vnode *vp)
{
	if (vget(vp, LK_EXCLUSIVE) == 0) {
		vfinalize(vp);
		hammer2_knote(vp, NOTE_DELETE);
		vdrop(vp);
		vput(vp);
	} else {
		vdrop(vp);
	}
}

/*
 * Mark an inode as being modified, meaning that the caller will modify
 * ip->meta.
 *
 * If a vnode is present we set the vnode dirty and the nominal filesystem
 * sync will also handle synchronizing the inode meta-data.  Unless NOSIDEQ
 * we must ensure that the inode is on pmp->sideq.
 *
 * NOTE: We must always queue the inode to the sideq.  This allows H2 to
 *	 shortcut vsyncscan() and flush inodes and their related vnodes
 *	 in two stages.  H2 still calls vfsync() for each vnode.
 *
 * NOTE: No mtid (modify_tid) is passed into this routine.  The caller is
 *	 only modifying the in-memory inode.  A modify_tid is synchronized
 *	 later when the inode gets flushed.
 *
 * NOTE: As an exception to the general rule, the inode MAY be locked
 *	 shared for this particular call.
 */
void
hammer2_inode_modify(hammer2_inode_t *ip)
{
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	if (ip->vp)
		vsetisdirty(ip->vp);
	if (ip->pmp && (ip->flags & HAMMER2_INODE_NOSIDEQ) == 0)
		hammer2_inode_delayed_sideq(ip);
}

/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.  This
 * does not flush the inode's chain or its sub-topology to media (higher
 * level layers are responsible for doing that).
 *
 * Called with a locked inode inside a normal transaction.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_sync(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
		hammer2_xop_fsync_t *xop;

		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->clear_directdata = 0;
		if (ip->flags & HAMMER2_INODE_RESIZED) {
			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
				xop->clear_directdata = 1;
			}
			xop->osize = ip->osize;
		} else {
			xop->osize = ip->meta.size;	/* safety */
		}
		xop->ipflags = ip->flags;
		xop->meta = ip->meta;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
					     HAMMER2_INODE_MODIFIED);
		hammer2_xop_start(&xop->head, &hammer2_inode_chain_sync_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: unable to fsync inode %p\n", ip);

			atomic_set_int(&ip->flags,
				       xop->ipflags & (HAMMER2_INODE_RESIZED |
						       HAMMER2_INODE_MODIFIED));

			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * When an inode is flagged INODE_CREATING its chains have not actually
 * been inserted into the on-media tree yet.
 */
int
hammer2_inode_chain_ins(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_CREATING) {
		hammer2_xop_create_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_CREATING);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->lhc = ip->meta.inum;
		xop->flags = 0;
		hammer2_xop_start(&xop->head, &hammer2_inode_create_ins_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"insert inode %p %ld\n", ip, (long)ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * When an inode is flagged INODE_DELETING it has been deleted (no directory
 * entry or open refs are left, though as an optimization H2 might leave
 * nlinks == 1 to avoid unnecessary block updates).  The backend flush then
 * needs to actually remove it from the topology.
 *
 * NOTE: backend flush must still sync and flush the deleted inode to clean
 *	 out related chains.
 *
 * NOTE: We must clear not only INODE_DELETING, but also INODE_ISUNLINKED
 *	 to prevent the vnode reclaim code from trying to delete it twice.
 */
int
hammer2_inode_chain_des(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_DELETING) {
		hammer2_xop_destroy_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_DELETING |
					     HAMMER2_INODE_ISUNLINKED);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"delete inode %p %ld\n", ip, (long)ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}

/*
 * Flushes the inode's chain and its sub-topology to media.  Interlocks
 * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
 * function creating or modifying a chain under this inode will re-set the
 * flag.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_flush_t *xop;
	int error;

	atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | flags);
	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == HAMMER2_ERROR_ENOENT)
		error = 0;

	return error;
}