hammer2 - freemap part 4, misc fixes
sys/vfs/hammer2/hammer2_chain.c
1 /*
2 * Copyright (c) 2011-2013 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
17 * distribution.
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
36 * This subsystem implements most of the core support functions for
37 * the hammer2_chain and hammer2_chain_core structures.
39 * Chains represent the filesystem media topology in-memory. Any given
40 * chain can represent an inode, indirect block, data, or other types
41 * of blocks.
43 * This module provides APIs for direct and indirect block searches,
44 * iterations, recursions, creation, deletion, replication, and snapshot
45 * views (used by the flush and snapshot code).
47 * Generally speaking any modification made to a chain must propagate all
48 * the way back to the volume header, issuing copy-on-write updates to the
49 * blockref tables all the way up. Any chain except the volume header itself
50 * can be flushed to disk at any time, in any order. None of it matters
51 * until we get to the point where we want to synchronize the volume header
52 * (see the flush code).
 * The chain structure supports snapshot views in time, which are primarily
 * used until the related data and meta-data is flushed to allow the
 * filesystem to make snapshots without requiring it to first flush,
 * and to allow the filesystem to flush and modify itself concurrently
 * with minimal or no stalls.
60 #include <sys/cdefs.h>
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/types.h>
64 #include <sys/lock.h>
65 #include <sys/kern_syscall.h>
66 #include <sys/uuid.h>
68 #include "hammer2.h"
70 static int hammer2_indirect_optimize; /* XXX SYSCTL */
72 static hammer2_chain_t *hammer2_chain_create_indirect(
73 hammer2_trans_t *trans, hammer2_chain_t *parent,
74 hammer2_key_t key, int keybits, int for_type, int *errorp);
75 static void adjreadcounter(hammer2_blockref_t *bref, size_t bytes);
78 * We use a red-black tree to guarantee safe lookups under shared locks.
80 * Chains can be overloaded onto the same index, creating a different
81 * view of a blockref table based on a transaction id. The RBTREE
82 * deconflicts the view by sub-sorting on delete_tid.
84 * NOTE: Any 'current' chain which is not yet deleted will have a
85 * delete_tid of HAMMER2_MAX_TID (0xFFF....FFFLLU).
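 *
 * Illustrative example (assumption, not part of the original file): if
 * index 5 holds both a chain deleted at tid 1000 and the current chain
 * (delete_tid == HAMMER2_MAX_TID), the comparator below sorts the deleted
 * chain first and the live chain last, allowing both views to coexist in
 * the same RBTREE.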
87 RB_GENERATE(hammer2_chain_tree, hammer2_chain, rbnode, hammer2_chain_cmp);
89 int
90 hammer2_chain_cmp(hammer2_chain_t *chain1, hammer2_chain_t *chain2)
92 if (chain1->index < chain2->index)
93 return(-1);
94 if (chain1->index > chain2->index)
95 return(1);
96 if (chain1->delete_tid < chain2->delete_tid)
97 return(-1);
98 if (chain1->delete_tid > chain2->delete_tid)
99 return(1);
100 return(0);
103 static __inline
105 hammer2_isclusterable(hammer2_chain_t *chain)
107 if (hammer2_cluster_enable) {
108 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
109 chain->bref.type == HAMMER2_BREF_TYPE_INODE ||
110 chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
111 return(1);
114 return(0);
118 * Recursively set the SUBMODIFIED flag up to the root starting at chain's
119 * parent. SUBMODIFIED is not set in chain itself.
121 * This function only operates on current-time transactions and is not
122 * used during flushes. Instead, the flush code manages the flag itself.
124 void
125 hammer2_chain_setsubmod(hammer2_trans_t *trans, hammer2_chain_t *chain)
127 hammer2_chain_core_t *above;
129 if (trans->flags & HAMMER2_TRANS_ISFLUSH)
130 return;
131 while ((above = chain->above) != NULL) {
132 spin_lock(&above->cst.spin);
133 chain = above->first_parent;
134 while (hammer2_chain_refactor_test(chain, 1))
135 chain = chain->next_parent;
136 atomic_set_int(&chain->flags, HAMMER2_CHAIN_SUBMODIFIED);
137 spin_unlock(&above->cst.spin);
142 * Allocate a new disconnected chain element representing the specified
143 * bref. chain->refs is set to 1 and the passed bref is copied to
144 * chain->bref. chain->bytes is derived from the bref.
146 * chain->core is NOT allocated and the media data and bp pointers are left
147 * NULL. The caller must call chain_core_alloc() to allocate or associate
148 * a core with the chain.
150 * NOTE: Returns a referenced but unlocked (because there is no core) chain.
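/*
 * Illustrative sketch (assumption, not part of the original file): a caller
 * typically pairs the allocation with a core before using the chain:
 *
 *	nchain = hammer2_chain_alloc(hmp, trans, bref);
 *	hammer2_chain_core_alloc(nchain, NULL);	(allocate a fresh core)
 *	...
 *	hammer2_chain_drop(nchain);		(release the initial ref)
 */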
152 hammer2_chain_t *
153 hammer2_chain_alloc(hammer2_mount_t *hmp, hammer2_trans_t *trans,
154 hammer2_blockref_t *bref)
156 hammer2_chain_t *chain;
157 u_int bytes = 1U << (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
160 * Construct the appropriate system structure.
162 switch(bref->type) {
163 case HAMMER2_BREF_TYPE_INODE:
164 case HAMMER2_BREF_TYPE_INDIRECT:
165 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
166 case HAMMER2_BREF_TYPE_DATA:
167 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
168 chain = kmalloc(sizeof(*chain), hmp->mchain, M_WAITOK | M_ZERO);
169 break;
170 case HAMMER2_BREF_TYPE_VOLUME:
171 case HAMMER2_BREF_TYPE_FREEMAP:
172 chain = NULL;
173 panic("hammer2_chain_alloc volume type illegal for op");
174 default:
175 chain = NULL;
176 panic("hammer2_chain_alloc: unrecognized blockref type: %d",
177 bref->type);
180 chain->hmp = hmp;
181 chain->bref = *bref;
182 chain->index = -1; /* not yet assigned */
183 chain->bytes = bytes;
184 chain->refs = 1;
185 chain->flags = HAMMER2_CHAIN_ALLOCATED;
186 chain->delete_tid = HAMMER2_MAX_TID;
187 if (trans)
188 chain->modify_tid = trans->sync_tid;
190 return (chain);
194 * Associate an existing core with the chain or allocate a new core.
196 * The core is not locked. No additional refs on the chain are made.
198 void
199 hammer2_chain_core_alloc(hammer2_chain_t *chain, hammer2_chain_core_t *core)
201 hammer2_chain_t **scanp;
203 KKASSERT(chain->core == NULL);
204 KKASSERT(chain->next_parent == NULL);
206 if (core == NULL) {
207 core = kmalloc(sizeof(*core), chain->hmp->mchain,
208 M_WAITOK | M_ZERO);
209 RB_INIT(&core->rbtree);
210 core->sharecnt = 1;
211 chain->core = core;
212 ccms_cst_init(&core->cst, chain);
213 core->first_parent = chain;
214 } else {
215 atomic_add_int(&core->sharecnt, 1);
216 chain->core = core;
217 spin_lock(&core->cst.spin);
218 if (core->first_parent == NULL) {
219 core->first_parent = chain;
220 } else {
221 scanp = &core->first_parent;
222 while (*scanp)
223 scanp = &(*scanp)->next_parent;
224 *scanp = chain;
225 hammer2_chain_ref(chain); /* next_parent link */
227 spin_unlock(&core->cst.spin);
232 * Add a reference to a chain element, preventing its destruction.
234 void
235 hammer2_chain_ref(hammer2_chain_t *chain)
237 atomic_add_int(&chain->refs, 1);
 * Drop the caller's reference to the chain.  When the ref count drops to
 * zero this function will disassociate the chain from its parent and
 * deallocate it, then recursively drop the parent using the implied ref
 * from the chain's chain->parent.
 *
 * WARNING! Just because we are able to deallocate a chain doesn't mean
 *	    that chain->core->rbtree is empty.  There can still be a sharecnt
 *	    on chain->core and RBTREE entries that refer to different parents.
250 static hammer2_chain_t *hammer2_chain_lastdrop(hammer2_chain_t *chain);
252 void
253 hammer2_chain_drop(hammer2_chain_t *chain)
255 u_int refs;
256 u_int need = 0;
258 #if 1
259 if (chain->flags & HAMMER2_CHAIN_MOVED)
260 ++need;
261 if (chain->flags & HAMMER2_CHAIN_MODIFIED)
262 ++need;
263 KKASSERT(chain->refs > need);
264 #endif
266 while (chain) {
267 refs = chain->refs;
268 cpu_ccfence();
269 KKASSERT(refs > 0);
271 if (refs == 1) {
272 chain = hammer2_chain_lastdrop(chain);
273 } else {
274 if (atomic_cmpset_int(&chain->refs, refs, refs - 1))
275 break;
276 /* retry the same chain */
 * Safe handling of the 1->0 transition on chain.  Returns a chain for
 * recursive drop or NULL, possibly returning the same chain if the atomic
 * op fails.
 *
 * The cst spinlock is allowed to nest child-to-parent (not parent-to-child).
288 static
289 hammer2_chain_t *
290 hammer2_chain_lastdrop(hammer2_chain_t *chain)
292 hammer2_mount_t *hmp;
293 hammer2_chain_core_t *above;
294 hammer2_chain_core_t *core;
295 hammer2_chain_t *rdrop1;
296 hammer2_chain_t *rdrop2;
299 * Spinlock the core and check to see if it is empty. If it is
300 * not empty we leave chain intact with refs == 0.
302 if ((core = chain->core) != NULL) {
303 spin_lock(&core->cst.spin);
304 if (RB_ROOT(&core->rbtree)) {
305 if (atomic_cmpset_int(&chain->refs, 1, 0)) {
306 /* 1->0 transition successful */
307 spin_unlock(&core->cst.spin);
308 return(NULL);
309 } else {
310 /* 1->0 transition failed, retry */
311 spin_unlock(&core->cst.spin);
312 return(chain);
317 hmp = chain->hmp;
318 rdrop1 = NULL;
319 rdrop2 = NULL;
322 * Spinlock the parent and try to drop the last ref. On success
323 * remove chain from its parent.
325 if ((above = chain->above) != NULL) {
326 spin_lock(&above->cst.spin);
327 if (!atomic_cmpset_int(&chain->refs, 1, 0)) {
328 /* 1->0 transition failed */
329 spin_unlock(&above->cst.spin);
330 if (core)
331 spin_unlock(&core->cst.spin);
332 return(chain);
333 /* stop */
337 * 1->0 transition successful
339 KKASSERT(chain->flags & HAMMER2_CHAIN_ONRBTREE);
340 RB_REMOVE(hammer2_chain_tree, &above->rbtree, chain);
341 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
342 chain->above = NULL;
345 * Calculate a chain to return for a recursive drop.
347 * XXX this needs help, we have a potential deep-recursion
348 * problem which we try to address but sometimes we wind up
349 * with two elements that have to be dropped.
351 * If the chain has an associated core with refs at 0
352 * the chain must be the first in the core's linked list
353 * by definition, and we will recursively drop the ref
354 * implied by the chain->next_parent field.
356 * Otherwise if the rbtree containing chain is empty we try
357 * to recursively drop our parent (only the first one could
358 * possibly have refs == 0 since the rest are linked via
359 * next_parent).
361 * Otherwise we try to recursively drop a sibling.
363 if (chain->next_parent) {
364 KKASSERT(core != NULL);
365 rdrop1 = chain->next_parent;
367 if (RB_EMPTY(&above->rbtree)) {
368 rdrop2 = above->first_parent;
369 if (rdrop2 == NULL || rdrop2->refs ||
370 atomic_cmpset_int(&rdrop2->refs, 0, 1) == 0) {
371 rdrop2 = NULL;
373 } else {
374 rdrop2 = RB_ROOT(&above->rbtree);
375 if (atomic_cmpset_int(&rdrop2->refs, 0, 1) == 0)
376 rdrop2 = NULL;
378 spin_unlock(&above->cst.spin);
379 above = NULL; /* safety */
380 } else {
381 if (chain->next_parent) {
382 KKASSERT(core != NULL);
383 rdrop1 = chain->next_parent;
388 * We still have the core spinlock (if core is non-NULL). The
389 * above spinlock is gone.
391 if (core) {
392 KKASSERT(core->first_parent == chain);
393 if (chain->next_parent) {
394 /* parent should already be set */
395 KKASSERT(rdrop1 == chain->next_parent);
397 core->first_parent = chain->next_parent;
398 chain->next_parent = NULL;
399 chain->core = NULL;
401 if (atomic_fetchadd_int(&core->sharecnt, -1) == 1) {
403 * On the 1->0 transition of core we can destroy
404 * it.
406 spin_unlock(&core->cst.spin);
407 KKASSERT(core->cst.count == 0);
408 KKASSERT(core->cst.upgrade == 0);
409 kfree(core, hmp->mchain);
410 } else {
411 spin_unlock(&core->cst.spin);
413 core = NULL; /* safety */
417 * All spin locks are gone, finish freeing stuff.
419 KKASSERT((chain->flags & (HAMMER2_CHAIN_MOVED |
420 HAMMER2_CHAIN_MODIFIED)) == 0);
422 switch(chain->bref.type) {
423 case HAMMER2_BREF_TYPE_VOLUME:
424 case HAMMER2_BREF_TYPE_FREEMAP:
425 chain->data = NULL;
426 break;
427 case HAMMER2_BREF_TYPE_INODE:
428 if (chain->data) {
429 kfree(chain->data, hmp->minode);
430 chain->data = NULL;
432 break;
433 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
434 if (chain->data) {
435 kfree(chain->data, hmp->mchain);
436 chain->data = NULL;
438 break;
439 default:
440 KKASSERT(chain->data == NULL);
441 break;
444 KKASSERT(chain->bp == NULL);
445 chain->hmp = NULL;
447 if (chain->flags & HAMMER2_CHAIN_ALLOCATED) {
448 chain->flags &= ~HAMMER2_CHAIN_ALLOCATED;
449 kfree(chain, hmp->mchain);
451 if (rdrop1 && rdrop2) {
452 hammer2_chain_drop(rdrop1);
453 return(rdrop2);
454 } else if (rdrop1)
455 return(rdrop1);
456 else
457 return(rdrop2);
461 * Ref and lock a chain element, acquiring its data with I/O if necessary,
462 * and specify how you would like the data to be resolved.
464 * Returns 0 on success or an error code if the data could not be acquired.
465 * The chain element is locked on return regardless of whether an error
466 * occurred or not.
468 * The lock is allowed to recurse, multiple locking ops will aggregate
469 * the requested resolve types. Once data is assigned it will not be
470 * removed until the last unlock.
472 * HAMMER2_RESOLVE_NEVER - Do not resolve the data element.
473 * (typically used to avoid device/logical buffer
474 * aliasing for data)
476 * HAMMER2_RESOLVE_MAYBE - Do not resolve data elements for chains in
477 * the INITIAL-create state (indirect blocks only).
479 * Do not resolve data elements for DATA chains.
480 * (typically used to avoid device/logical buffer
481 * aliasing for data)
483 * HAMMER2_RESOLVE_ALWAYS- Always resolve the data element.
485 * HAMMER2_RESOLVE_SHARED- (flag) The chain is locked shared, otherwise
486 * it will be locked exclusive.
488 * NOTE: Embedded elements (volume header, inodes) are always resolved
489 * regardless.
491 * NOTE: Specifying HAMMER2_RESOLVE_ALWAYS on a newly-created non-embedded
492 * element will instantiate and zero its buffer, and flush it on
493 * release.
495 * NOTE: (data) elements are normally locked RESOLVE_NEVER or RESOLVE_MAYBE
496 * so as not to instantiate a device buffer, which could alias against
497 * a logical file buffer. However, if ALWAYS is specified the
498 * device buffer will be instantiated anyway.
500 * WARNING! If data must be fetched a shared lock will temporarily be
501 * upgraded to exclusive. However, a deadlock can occur if
502 * the caller owns more than one shared lock.
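/*
 * Illustrative usage sketch (assumption, not part of the original file):
 *
 *	error = hammer2_chain_lock(chain, HAMMER2_RESOLVE_MAYBE);
 *	if (error == 0) {
 *		... chain->data is valid for resolved types ...
 *	}
 *	hammer2_chain_unlock(chain);	(required even on error, see above)
 */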
505 hammer2_chain_lock(hammer2_chain_t *chain, int how)
507 hammer2_mount_t *hmp;
508 hammer2_chain_core_t *core;
509 hammer2_blockref_t *bref;
510 hammer2_off_t pbase;
511 hammer2_off_t pmask;
512 hammer2_off_t peof;
513 ccms_state_t ostate;
514 size_t boff;
515 size_t psize;
516 int error;
517 char *bdata;
520 * Ref and lock the element. Recursive locks are allowed.
522 if ((how & HAMMER2_RESOLVE_NOREF) == 0)
523 hammer2_chain_ref(chain);
524 atomic_add_int(&chain->lockcnt, 1);
526 hmp = chain->hmp;
527 KKASSERT(hmp != NULL);
530 * Get the appropriate lock.
532 core = chain->core;
533 if (how & HAMMER2_RESOLVE_SHARED)
534 ccms_thread_lock(&core->cst, CCMS_STATE_SHARED);
535 else
536 ccms_thread_lock(&core->cst, CCMS_STATE_EXCLUSIVE);
539 * If we already have a valid data pointer no further action is
540 * necessary.
542 if (chain->data)
543 return (0);
546 * Do we have to resolve the data?
548 switch(how & HAMMER2_RESOLVE_MASK) {
549 case HAMMER2_RESOLVE_NEVER:
550 return(0);
551 case HAMMER2_RESOLVE_MAYBE:
552 if (chain->flags & HAMMER2_CHAIN_INITIAL)
553 return(0);
554 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
555 return(0);
556 #if 0
557 if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE)
558 return(0);
559 #endif
560 if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF)
561 return(0);
562 /* fall through */
563 case HAMMER2_RESOLVE_ALWAYS:
564 break;
568 * Upgrade to an exclusive lock so we can safely manipulate the
569 * buffer cache. If another thread got to it before us we
570 * can just return.
572 ostate = ccms_thread_lock_upgrade(&core->cst);
573 if (chain->data) {
574 ccms_thread_lock_downgrade(&core->cst, ostate);
575 return (0);
579 * We must resolve to a device buffer, either by issuing I/O or
580 * by creating a zero-fill element. We do not mark the buffer
581 * dirty when creating a zero-fill element (the hammer2_chain_modify()
582 * API must still be used to do that).
584 * The device buffer is variable-sized in powers of 2 down
585 * to HAMMER2_MIN_ALLOC (typically 1K). A 64K physical storage
586 * chunk always contains buffers of the same size. (XXX)
588 * The minimum physical IO size may be larger than the variable
589 * block size.
591 bref = &chain->bref;
593 psize = hammer2_devblksize(chain->bytes);
594 pmask = (hammer2_off_t)psize - 1;
595 pbase = bref->data_off & ~pmask;
596 boff = bref->data_off & (HAMMER2_OFF_MASK & pmask);
597 KKASSERT(pbase != 0);
598 peof = (pbase + HAMMER2_SEGMASK64) & ~HAMMER2_SEGMASK64;
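
	/*
	 * Worked example (illustrative, assuming the low bits of data_off
	 * encode the size radix as in hammer2_chain_alloc() above): a 4KB
	 * block at media offset 0x12345000 has data_off 0x1234500c; with
	 * psize == 16KB this gives pmask == 0x3fff, pbase == 0x12344000 and
	 * boff == 0x1000, i.e. the data starts 4KB into the 16KB device
	 * buffer.
	 */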
601 * The getblk() optimization can only be used on newly created
602 * elements if the physical block size matches the request.
604 if ((chain->flags & HAMMER2_CHAIN_INITIAL) &&
605 chain->bytes == psize) {
606 chain->bp = getblk(hmp->devvp, pbase, psize, 0, 0);
607 error = 0;
608 } else if (hammer2_isclusterable(chain)) {
609 error = cluster_read(hmp->devvp, peof, pbase, psize,
610 psize, HAMMER2_PBUFSIZE*4,
611 &chain->bp);
612 adjreadcounter(&chain->bref, chain->bytes);
613 } else {
614 error = bread(hmp->devvp, pbase, psize, &chain->bp);
615 adjreadcounter(&chain->bref, chain->bytes);
618 if (error) {
619 kprintf("hammer2_chain_get: I/O error %016jx: %d\n",
620 (intmax_t)pbase, error);
621 bqrelse(chain->bp);
622 chain->bp = NULL;
623 ccms_thread_lock_downgrade(&core->cst, ostate);
624 return (error);
628 * Zero the data area if the chain is in the INITIAL-create state.
629 * Mark the buffer for bdwrite(). This clears the INITIAL state
630 * but does not mark the chain modified.
632 bdata = (char *)chain->bp->b_data + boff;
633 if (chain->flags & HAMMER2_CHAIN_INITIAL) {
634 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
635 bzero(bdata, chain->bytes);
636 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
640 * Setup the data pointer, either pointing it to an embedded data
641 * structure and copying the data from the buffer, or pointing it
642 * into the buffer.
644 * The buffer is not retained when copying to an embedded data
645 * structure in order to avoid potential deadlocks or recursions
646 * on the same physical buffer.
648 switch (bref->type) {
649 case HAMMER2_BREF_TYPE_VOLUME:
650 case HAMMER2_BREF_TYPE_FREEMAP:
652 * Copy data from bp to embedded buffer
654 panic("hammer2_chain_lock: called on unresolved volume header");
655 #if 0
656 /* NOT YET */
657 KKASSERT(pbase == 0);
658 KKASSERT(chain->bytes == HAMMER2_PBUFSIZE);
659 bcopy(bdata, &hmp->voldata, chain->bytes);
660 chain->data = (void *)&hmp->voldata;
661 bqrelse(chain->bp);
662 chain->bp = NULL;
663 #endif
664 break;
665 case HAMMER2_BREF_TYPE_INODE:
667 * Copy data from bp to embedded buffer, do not retain the
668 * device buffer.
670 KKASSERT(chain->bytes == sizeof(chain->data->ipdata));
671 atomic_set_int(&chain->flags, HAMMER2_CHAIN_EMBEDDED);
672 chain->data = kmalloc(sizeof(chain->data->ipdata),
673 hmp->minode, M_WAITOK | M_ZERO);
674 bcopy(bdata, &chain->data->ipdata, chain->bytes);
675 bqrelse(chain->bp);
676 chain->bp = NULL;
677 break;
678 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
679 KKASSERT(chain->bytes == sizeof(chain->data->bmdata));
680 atomic_set_int(&chain->flags, HAMMER2_CHAIN_EMBEDDED);
681 chain->data = kmalloc(sizeof(chain->data->bmdata),
682 hmp->mchain, M_WAITOK | M_ZERO);
683 bcopy(bdata, &chain->data->bmdata, chain->bytes);
684 bqrelse(chain->bp);
685 chain->bp = NULL;
686 break;
687 case HAMMER2_BREF_TYPE_INDIRECT:
688 case HAMMER2_BREF_TYPE_DATA:
689 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
690 default:
692 * Point data at the device buffer and leave bp intact.
694 chain->data = (void *)bdata;
695 break;
699 * Make sure the bp is not specifically owned by this thread before
700 * restoring to a possibly shared lock, so another hammer2 thread
701 * can release it.
703 if (chain->bp)
704 BUF_KERNPROC(chain->bp);
705 ccms_thread_lock_downgrade(&core->cst, ostate);
706 return (0);
710 * Unlock and deref a chain element.
712 * On the last lock release any non-embedded data (chain->bp) will be
713 * retired.
715 void
716 hammer2_chain_unlock(hammer2_chain_t *chain)
718 hammer2_chain_core_t *core = chain->core;
719 ccms_state_t ostate;
720 long *counterp;
721 u_int lockcnt;
724 * The core->cst lock can be shared across several chains so we
725 * need to track the per-chain lockcnt separately.
727 * If multiple locks are present (or being attempted) on this
728 * particular chain we can just unlock, drop refs, and return.
730 * Otherwise fall-through on the 1->0 transition.
732 for (;;) {
733 lockcnt = chain->lockcnt;
734 KKASSERT(lockcnt > 0);
735 cpu_ccfence();
736 if (lockcnt > 1) {
737 if (atomic_cmpset_int(&chain->lockcnt,
738 lockcnt, lockcnt - 1)) {
739 ccms_thread_unlock(&core->cst);
740 hammer2_chain_drop(chain);
741 return;
743 } else {
744 if (atomic_cmpset_int(&chain->lockcnt, 1, 0))
745 break;
747 /* retry */
751 * On the 1->0 transition we upgrade the core lock (if necessary)
752 * to exclusive for terminal processing. If after upgrading we find
753 * that lockcnt is non-zero, another thread is racing us and will
 * handle the unload for us later on, so just clean up and return,
 * leaving the data/bp intact.
757 * Otherwise if lockcnt is still 0 it is possible for it to become
758 * non-zero and race, but since we hold the core->cst lock
759 * exclusively all that will happen is that the chain will be
760 * reloaded after we unload it.
762 ostate = ccms_thread_lock_upgrade(&core->cst);
763 if (chain->lockcnt) {
764 ccms_thread_unlock_upgraded(&core->cst, ostate);
765 hammer2_chain_drop(chain);
766 return;
770 * Shortcut the case if the data is embedded or not resolved.
772 * Do NOT NULL out chain->data (e.g. inode data), it might be
773 * dirty.
775 * The DIRTYBP flag is non-applicable in this situation and can
776 * be cleared to keep the flags state clean.
778 if (chain->bp == NULL) {
779 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
780 ccms_thread_unlock_upgraded(&core->cst, ostate);
781 hammer2_chain_drop(chain);
782 return;
786 * Statistics
788 if ((chain->flags & HAMMER2_CHAIN_DIRTYBP) == 0) {
790 } else if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
791 switch(chain->bref.type) {
792 case HAMMER2_BREF_TYPE_DATA:
793 counterp = &hammer2_ioa_file_write;
794 break;
795 case HAMMER2_BREF_TYPE_INODE:
796 counterp = &hammer2_ioa_meta_write;
797 break;
798 case HAMMER2_BREF_TYPE_INDIRECT:
799 counterp = &hammer2_ioa_indr_write;
800 break;
801 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
802 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
803 counterp = &hammer2_ioa_fmap_write;
804 break;
805 default:
806 counterp = &hammer2_ioa_volu_write;
807 break;
809 *counterp += chain->bytes;
810 } else {
811 switch(chain->bref.type) {
812 case HAMMER2_BREF_TYPE_DATA:
813 counterp = &hammer2_iod_file_write;
814 break;
815 case HAMMER2_BREF_TYPE_INODE:
816 counterp = &hammer2_iod_meta_write;
817 break;
818 case HAMMER2_BREF_TYPE_INDIRECT:
819 counterp = &hammer2_iod_indr_write;
820 break;
821 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
822 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
823 counterp = &hammer2_iod_fmap_write;
824 break;
825 default:
826 counterp = &hammer2_iod_volu_write;
827 break;
829 *counterp += chain->bytes;
833 * Clean out the bp.
835 * If a device buffer was used for data be sure to destroy the
836 * buffer when we are done to avoid aliases (XXX what about the
837 * underlying VM pages?).
 * NOTE: Freemap leaves use reserved blocks and thus no aliasing
 *	 is possible.
842 if (chain->bref.type == HAMMER2_BREF_TYPE_DATA)
843 chain->bp->b_flags |= B_RELBUF;
 * The DIRTYBP flag tracks whether we have to bdwrite() the buffer
 * or not.  The flag will get re-set when chain_modify() is called,
 * even if MODIFIED is already set, allowing the OS to retire the
 * buffer independently of a hammer2 flush.
851 chain->data = NULL;
852 if (chain->flags & HAMMER2_CHAIN_DIRTYBP) {
853 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
854 if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
855 atomic_clear_int(&chain->flags,
856 HAMMER2_CHAIN_IOFLUSH);
857 chain->bp->b_flags |= B_RELBUF;
858 cluster_awrite(chain->bp);
859 } else {
860 chain->bp->b_flags |= B_CLUSTEROK;
861 bdwrite(chain->bp);
863 } else {
864 if (chain->flags & HAMMER2_CHAIN_IOFLUSH) {
865 atomic_clear_int(&chain->flags,
866 HAMMER2_CHAIN_IOFLUSH);
867 chain->bp->b_flags |= B_RELBUF;
868 brelse(chain->bp);
869 } else {
870 /* bp might still be dirty */
871 bqrelse(chain->bp);
874 chain->bp = NULL;
875 ccms_thread_unlock_upgraded(&core->cst, ostate);
876 hammer2_chain_drop(chain);
880 * Resize the chain's physical storage allocation in-place. This may
881 * replace the passed-in chain with a new chain.
883 * Chains can be resized smaller without reallocating the storage.
884 * Resizing larger will reallocate the storage.
886 * Must be passed an exclusively locked parent and chain, returns a new
887 * exclusively locked chain at the same index and unlocks the old chain.
888 * Flushes the buffer if necessary.
890 * This function is mostly used with DATA blocks locked RESOLVE_NEVER in order
891 * to avoid instantiating a device buffer that conflicts with the vnode
892 * data buffer. That is, the passed-in bp is a logical buffer, whereas
893 * any chain-oriented bp would be a device buffer.
895 * XXX flags currently ignored, uses chain->bp to detect data/no-data.
896 * XXX return error if cannot resize.
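 *
 * Illustrative note (assumption): nradix is log2 of the requested size,
 * so nradix == 10 asks for a 1KB allocation and nradix == 16 for 64KB
 * (nbytes = 1U << nradix below).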
898 void
899 hammer2_chain_resize(hammer2_trans_t *trans, hammer2_inode_t *ip,
900 struct buf *bp,
901 hammer2_chain_t *parent, hammer2_chain_t **chainp,
902 int nradix, int flags)
904 hammer2_mount_t *hmp = trans->hmp;
905 hammer2_chain_t *chain = *chainp;
906 hammer2_off_t pbase;
907 size_t obytes;
908 size_t nbytes;
909 size_t bbytes;
910 int boff;
913 * Only data and indirect blocks can be resized for now.
914 * (The volu root, inodes, and freemap elements use a fixed size).
916 KKASSERT(chain != &hmp->vchain);
917 KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA ||
918 chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT);
921 * Nothing to do if the element is already the proper size
923 obytes = chain->bytes;
924 nbytes = 1U << nradix;
925 if (obytes == nbytes)
926 return;
929 * Delete the old chain and duplicate it at the same (parent, index),
930 * returning a new chain. This allows the old chain to still be
931 * used by the flush code. Duplication occurs in-place.
933 * The parent does not have to be locked for the delete/duplicate call,
934 * but is in this particular code path.
936 * NOTE: If we are not crossing a synchronization point the
937 * duplication code will simply reuse the existing chain
938 * structure.
940 hammer2_chain_delete_duplicate(trans, &chain, 0);
943 * Set MODIFIED and add a chain ref to prevent destruction. Both
 * modified flags share the same ref.  (Duplicated chains do not
 * start out MODIFIED unless the duplication code decided to reuse
 * the existing chain as-is.)
948 * If the chain is already marked MODIFIED then we can safely
949 * return the previous allocation to the pool without having to
950 * worry about snapshots. XXX check flush synchronization.
952 if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
953 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
954 hammer2_chain_ref(chain);
958 * Relocate the block, even if making it smaller (because different
959 * block sizes may be in different regions).
961 hammer2_freemap_alloc(trans, &chain->bref, nbytes);
962 chain->bytes = nbytes;
963 /*ip->delta_dcount += (ssize_t)(nbytes - obytes);*/ /* XXX atomic */
966 * The device buffer may be larger than the allocation size.
968 if ((bbytes = chain->bytes) < HAMMER2_MINIOSIZE)
969 bbytes = HAMMER2_MINIOSIZE;
970 pbase = chain->bref.data_off & ~(hammer2_off_t)(bbytes - 1);
971 boff = chain->bref.data_off & HAMMER2_OFF_MASK & (bbytes - 1);
974 * For now just support it on DATA chains (and not on indirect
975 * blocks).
977 KKASSERT(chain->bp == NULL);
980 * Make sure the chain is marked MOVED and SUBMOD is set in the
981 * parent(s) so the adjustments are picked up by flush.
983 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
984 hammer2_chain_ref(chain);
985 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
987 hammer2_chain_setsubmod(trans, chain);
988 *chainp = chain;
 * Set a chain modified, making it read-write and duplicating it if necessary.
 * This function will assign a new physical block to the chain if necessary.
995 * Duplication of already-modified chains is possible when the modification
996 * crosses a flush synchronization boundary.
998 * Non-data blocks - The chain should be locked to at least the RESOLVE_MAYBE
999 * level or the COW operation will not work.
1001 * Data blocks - The chain is usually locked RESOLVE_NEVER so as not to
1002 * run the data through the device buffers.
1004 * This function may return a different chain than was passed, in which case
1005 * the old chain will be unlocked and the new chain will be locked.
1007 * ip->chain may be adjusted by hammer2_chain_modify_ip().
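/*
 * Illustrative usage sketch (assumption; chain is the inode's chain,
 * exclusively locked by the caller):
 *
 *	hammer2_inode_data_t *ipdata;
 *
 *	ipdata = hammer2_chain_modify_ip(trans, ip, &chain, 0);
 *	... update the inode meta-data through ipdata; chain now points
 *	    at the writable (possibly duplicated) inode chain ...
 */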
1009 hammer2_inode_data_t *
1010 hammer2_chain_modify_ip(hammer2_trans_t *trans, hammer2_inode_t *ip,
1011 hammer2_chain_t **chainp, int flags)
1013 atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1014 hammer2_chain_modify(trans, chainp, flags);
1015 if (ip->chain != *chainp)
1016 hammer2_inode_repoint(ip, NULL, *chainp);
1017 return(&ip->chain->data->ipdata);
1020 void
1021 hammer2_chain_modify(hammer2_trans_t *trans, hammer2_chain_t **chainp,
1022 int flags)
1024 hammer2_mount_t *hmp = trans->hmp;
1025 hammer2_chain_t *chain;
1026 hammer2_off_t pbase;
1027 hammer2_off_t pmask;
1028 hammer2_off_t peof;
1029 hammer2_tid_t flush_tid;
1030 struct buf *nbp;
1031 int error;
1032 int wasinitial;
1033 size_t psize;
1034 size_t boff;
1035 void *bdata;
1038 * Data must be resolved if already assigned unless explicitly
1039 * flagged otherwise.
1041 chain = *chainp;
1042 if (chain->data == NULL && (flags & HAMMER2_MODIFY_OPTDATA) == 0 &&
1043 (chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX)) {
1044 hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
1045 hammer2_chain_unlock(chain);
1049 * data is not optional for freemap chains (we must always be sure
1050 * to copy the data on COW storage allocations).
1052 if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
1053 chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
1054 KKASSERT((chain->flags & HAMMER2_CHAIN_INITIAL) ||
1055 (flags & HAMMER2_MODIFY_OPTDATA) == 0);
1059 * If the chain is already marked MODIFIED we can usually just
1060 * return. However, if a modified chain is modified again in
1061 * a synchronization-point-crossing manner we have to issue a
1062 * delete/duplicate on the chain to avoid flush interference.
1064 if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
1066 * Which flush_tid do we need to check? If the chain is
1067 * related to the freemap we have to use the freemap flush
1068 * tid (free_flush_tid), otherwise we use the normal filesystem
1069 * flush tid (topo_flush_tid). The two flush domains are
1070 * almost completely independent of each other.
1072 if (chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
1073 chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
1074 flush_tid = hmp->topo_flush_tid; /* XXX */
1075 goto skipxx; /* XXX */
1076 } else {
1077 flush_tid = hmp->topo_flush_tid;
1081 * Main tests
1083 if (chain->modify_tid <= flush_tid &&
1084 trans->sync_tid > flush_tid) {
1086 * Modifications cross synchronization point,
1087 * requires delete-duplicate.
1089 KKASSERT((flags & HAMMER2_MODIFY_ASSERTNOCOPY) == 0);
1090 hammer2_chain_delete_duplicate(trans, chainp, 0);
1091 chain = *chainp;
1092 /* fall through using duplicate */
1094 skipxx: /* XXX */
1096 * Quick return path, set DIRTYBP to ensure that
1097 * the later retirement of bp will write it out.
1099 * quick return path also needs the modify_tid
1100 * logic.
1102 if (chain->bp)
1103 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
1104 if ((flags & HAMMER2_MODIFY_NO_MODIFY_TID) == 0)
1105 chain->bref.modify_tid = trans->sync_tid;
1106 chain->modify_tid = trans->sync_tid;
1107 return;
 * modify_tid is only updated for primary modifications, not for
 * propagated brefs.  mirror_tid will be updated regardless during
 * the flush; no need to set it here.
1115 if ((flags & HAMMER2_MODIFY_NO_MODIFY_TID) == 0)
1116 chain->bref.modify_tid = trans->sync_tid;
1119 * Set MODIFIED and add a chain ref to prevent destruction. Both
1120 * modified flags share the same ref.
1122 if ((chain->flags & HAMMER2_CHAIN_MODIFIED) == 0) {
1123 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
1124 hammer2_chain_ref(chain);
1128 * Adjust chain->modify_tid so the flusher knows when the
1129 * modification occurred.
1131 chain->modify_tid = trans->sync_tid;
1134 * The modification or re-modification requires an allocation and
1135 * possible COW.
1137 * We normally always allocate new storage here. If storage exists
1138 * and MODIFY_NOREALLOC is passed in, we do not allocate new storage.
1140 if (chain != &hmp->vchain &&
1141 chain != &hmp->fchain &&
1142 ((chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX) == 0 ||
1143 (flags & HAMMER2_MODIFY_NOREALLOC) == 0)
1145 hammer2_freemap_alloc(trans, &chain->bref, chain->bytes);
1146 /* XXX failed allocation */
1150 * Do not COW if OPTDATA is set. INITIAL flag remains unchanged.
1151 * (OPTDATA does not prevent [re]allocation of storage, only the
1152 * related copy-on-write op).
1154 if (flags & HAMMER2_MODIFY_OPTDATA)
1155 goto skip2;
1158 * Clearing the INITIAL flag (for indirect blocks) indicates that
1159 * we've processed the uninitialized storage allocation.
1161 * If this flag is already clear we are likely in a copy-on-write
1162 * situation but we have to be sure NOT to bzero the storage if
1163 * no data is present.
1165 if (chain->flags & HAMMER2_CHAIN_INITIAL) {
1166 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1167 wasinitial = 1;
1168 } else {
1169 wasinitial = 0;
1173 * We currently should never instantiate a device buffer for a
1174 * file data chain. (We definitely can for a freemap chain).
1176 KKASSERT(chain->bref.type != HAMMER2_BREF_TYPE_DATA);
1179 * Instantiate data buffer and possibly execute COW operation
1181 switch(chain->bref.type) {
1182 case HAMMER2_BREF_TYPE_VOLUME:
1183 case HAMMER2_BREF_TYPE_FREEMAP:
1184 case HAMMER2_BREF_TYPE_INODE:
1185 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
1187 * The data is embedded, no copy-on-write operation is
1188 * needed.
1190 KKASSERT(chain->bp == NULL);
1191 break;
1192 case HAMMER2_BREF_TYPE_DATA:
1193 case HAMMER2_BREF_TYPE_INDIRECT:
1194 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1196 * Perform the copy-on-write operation
1198 KKASSERT(chain != &hmp->vchain && chain != &hmp->fchain);
1200 psize = hammer2_devblksize(chain->bytes);
1201 pmask = (hammer2_off_t)psize - 1;
1202 pbase = chain->bref.data_off & ~pmask;
1203 boff = chain->bref.data_off & (HAMMER2_OFF_MASK & pmask);
1204 KKASSERT(pbase != 0);
1205 peof = (pbase + HAMMER2_SEGMASK64) & ~HAMMER2_SEGMASK64;
1208 * The getblk() optimization can only be used if the
1209 * chain element size matches the physical block size.
1211 if (chain->bp && chain->bp->b_loffset == pbase) {
1212 nbp = chain->bp;
1213 error = 0;
1214 } else if (chain->bytes == psize) {
1215 nbp = getblk(hmp->devvp, pbase, psize, 0, 0);
1216 error = 0;
1217 } else if (hammer2_isclusterable(chain)) {
1218 error = cluster_read(hmp->devvp, peof, pbase, psize,
1219 psize, HAMMER2_PBUFSIZE*4,
1220 &nbp);
1221 adjreadcounter(&chain->bref, chain->bytes);
1222 } else {
1223 error = bread(hmp->devvp, pbase, psize, &nbp);
1224 adjreadcounter(&chain->bref, chain->bytes);
1226 KKASSERT(error == 0);
1227 bdata = (char *)nbp->b_data + boff;
1230 * Copy or zero-fill on write depending on whether
1231 * chain->data exists or not. Retire the existing bp
1232 * based on the DIRTYBP flag. Set the DIRTYBP flag to
1233 * indicate that retirement of nbp should use bdwrite().
1235 if (chain->data) {
1236 KKASSERT(chain->bp != NULL);
1237 if (chain->data != bdata) {
1238 bcopy(chain->data, bdata, chain->bytes);
1240 } else if (wasinitial) {
1241 bzero(bdata, chain->bytes);
1242 } else {
1244 * We have a problem. We were asked to COW but
1245 * we don't have any data to COW with!
1247 panic("hammer2_chain_modify: having a COW %p\n",
1248 chain);
1250 if (chain->bp != nbp) {
1251 if (chain->bp) {
1252 if (chain->flags & HAMMER2_CHAIN_DIRTYBP) {
1253 chain->bp->b_flags |= B_CLUSTEROK;
1254 bdwrite(chain->bp);
1255 } else {
1256 chain->bp->b_flags |= B_RELBUF;
1257 brelse(chain->bp);
1260 chain->bp = nbp;
1261 BUF_KERNPROC(chain->bp);
1263 chain->data = bdata;
1264 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DIRTYBP);
1265 break;
1266 default:
1267 panic("hammer2_chain_modify: illegal non-embedded type %d",
1268 chain->bref.type);
1269 break;
1272 skip2:
1273 hammer2_chain_setsubmod(trans, chain);
1277 * Mark the volume as having been modified. This short-cut version
1278 * does not have to lock the volume's chain, which allows the ioctl
1279 * code to make adjustments to connections without deadlocking. XXX
1281 * No ref is made on vchain when flagging it MODIFIED.
1283 void
1284 hammer2_modify_volume(hammer2_mount_t *hmp)
1286 hammer2_voldata_lock(hmp);
1287 hammer2_voldata_unlock(hmp, 1);
1291 * Locate an in-memory chain. The parent must be locked. The in-memory
1292 * chain is returned with a reference and without a lock, or NULL
1293 * if not found.
1295 * This function returns the chain at the specified index with the highest
1296 * delete_tid. The caller must check whether the chain is flagged
1297 * CHAIN_DELETED or not. However, because chain iterations can be removed
1298 * from memory we must ALSO check that DELETED chains are not flushed. A
1299 * DELETED chain which has been flushed must be ignored (the caller must
1300 * check the parent's blockref array).
1302 * NOTE: If no chain is found the caller usually must check the on-media
1303 * array to determine if a blockref exists at the index.
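 *
 * Illustrative usage sketch (assumption, not part of the original file):
 *
 *	child = hammer2_chain_find(parent, i);
 *	if (child) {
 *		if ((child->flags & HAMMER2_CHAIN_DELETED) == 0)
 *			... child is the live chain at this index ...
 *		hammer2_chain_drop(child);	(release the lookup ref)
 *	}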
1305 struct hammer2_chain_find_info {
1306 hammer2_chain_t *best;
1307 hammer2_tid_t delete_tid;
1308 int index;
1311 static
1313 hammer2_chain_find_cmp(hammer2_chain_t *child, void *data)
1315 struct hammer2_chain_find_info *info = data;
1317 if (child->index < info->index)
1318 return(-1);
1319 if (child->index > info->index)
1320 return(1);
1321 return(0);
1324 static
1326 hammer2_chain_find_callback(hammer2_chain_t *child, void *data)
1328 struct hammer2_chain_find_info *info = data;
1330 if (info->delete_tid < child->delete_tid) {
1331 info->delete_tid = child->delete_tid;
1332 info->best = child;
1334 return(0);
1337 static
1338 hammer2_chain_t *
1339 hammer2_chain_find_locked(hammer2_chain_t *parent, int index)
1341 struct hammer2_chain_find_info info;
1342 hammer2_chain_t *child;
1344 info.index = index;
1345 info.delete_tid = 0;
1346 info.best = NULL;
1348 RB_SCAN(hammer2_chain_tree, &parent->core->rbtree,
1349 hammer2_chain_find_cmp, hammer2_chain_find_callback,
1350 &info);
1351 child = info.best;
1353 return (child);
1356 hammer2_chain_t *
1357 hammer2_chain_find(hammer2_chain_t *parent, int index)
1359 hammer2_chain_t *child;
1361 spin_lock(&parent->core->cst.spin);
1362 child = hammer2_chain_find_locked(parent, index);
1363 if (child)
1364 hammer2_chain_ref(child);
1365 spin_unlock(&parent->core->cst.spin);
1367 return (child);
1371 * Return a locked chain structure with all associated data acquired.
1372 * (if LOOKUP_NOLOCK is requested the returned chain is only referenced).
1374 * Caller must hold the parent locked shared or exclusive since we may
1375 * need the parent's bref array to find our block.
1377 * The returned child is locked as requested. If NOLOCK, the returned
1378 * child is still at least referenced.
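 *
 * Illustrative usage sketch (assumption): with flags == 0 the child comes
 * back locked and referenced, and a single unlock releases both:
 *
 *	chain = hammer2_chain_get(parent, i, 0);
 *	if (chain) {
 *		... use chain->bref / chain->data ...
 *		hammer2_chain_unlock(chain);
 *	}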
1380 hammer2_chain_t *
1381 hammer2_chain_get(hammer2_chain_t *parent, int index, int flags)
1383 hammer2_blockref_t *bref;
1384 hammer2_mount_t *hmp = parent->hmp;
1385 hammer2_chain_core_t *above = parent->core;
1386 hammer2_chain_t *chain;
1387 hammer2_chain_t dummy;
1388 int how;
 * Figure out how to lock.  MAYBE can be used to optimize
 * the initial-create state for indirect blocks.
1394 if (flags & (HAMMER2_LOOKUP_NODATA | HAMMER2_LOOKUP_NOLOCK))
1395 how = HAMMER2_RESOLVE_NEVER;
1396 else
1397 how = HAMMER2_RESOLVE_MAYBE;
1398 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
1399 how |= HAMMER2_RESOLVE_SHARED;
1401 retry:
1403 * First see if we have a (possibly modified) chain element cached
1404 * for this (parent, index). Acquire the data if necessary.
1406 * If chain->data is non-NULL the chain should already be marked
1407 * modified.
1409 dummy.flags = 0;
1410 dummy.index = index;
1411 dummy.delete_tid = HAMMER2_MAX_TID;
1412 spin_lock(&above->cst.spin);
1413 chain = RB_FIND(hammer2_chain_tree, &above->rbtree, &dummy);
1414 if (chain) {
1415 hammer2_chain_ref(chain);
1416 spin_unlock(&above->cst.spin);
1417 if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0)
1418 hammer2_chain_lock(chain, how | HAMMER2_RESOLVE_NOREF);
1419 return(chain);
1421 spin_unlock(&above->cst.spin);
1424 * The parent chain must not be in the INITIAL state.
1426 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1427 panic("hammer2_chain_get: Missing bref(1)");
1428 /* NOT REACHED */
1432 * No RBTREE entry found, lookup the bref and issue I/O (switch on
1433 * the parent's bref to determine where and how big the array is).
1435 switch(parent->bref.type) {
1436 case HAMMER2_BREF_TYPE_INODE:
1437 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
1438 bref = &parent->data->ipdata.u.blockset.blockref[index];
1439 break;
1440 case HAMMER2_BREF_TYPE_INDIRECT:
1441 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1442 KKASSERT(parent->data != NULL);
1443 KKASSERT(index >= 0 &&
1444 index < parent->bytes / sizeof(hammer2_blockref_t));
1445 bref = &parent->data->npdata[index];
1446 break;
1447 case HAMMER2_BREF_TYPE_VOLUME:
1448 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
1449 bref = &hmp->voldata.sroot_blockset.blockref[index];
1450 break;
1451 case HAMMER2_BREF_TYPE_FREEMAP:
1452 KKASSERT(index >= 0 && index < HAMMER2_SET_COUNT);
1453 bref = &hmp->voldata.freemap_blockset.blockref[index];
1454 break;
1455 default:
1456 bref = NULL;
1457 panic("hammer2_chain_get: unrecognized blockref type: %d",
1458 parent->bref.type);
1460 if (bref->type == 0) {
1461 panic("hammer2_chain_get: Missing bref(2)");
1462 /* NOT REACHED */
1466 * Allocate a chain structure representing the existing media
1467 * entry. Resulting chain has one ref and is not locked.
1469 * The locking operation we do later will issue I/O to read it.
1471 chain = hammer2_chain_alloc(hmp, NULL, bref);
1472 hammer2_chain_core_alloc(chain, NULL); /* ref'd chain returned */
1475 * Link the chain into its parent. A spinlock is required to safely
1476 * access the RBTREE, and it is possible to collide with another
1477 * hammer2_chain_get() operation because the caller might only hold
1478 * a shared lock on the parent.
1480 KKASSERT(parent->refs > 0);
1481 spin_lock(&above->cst.spin);
1482 chain->above = above;
1483 chain->index = index;
1484 if (RB_INSERT(hammer2_chain_tree, &above->rbtree, chain)) {
1485 chain->above = NULL;
1486 chain->index = -1;
1487 spin_unlock(&above->cst.spin);
1488 hammer2_chain_drop(chain);
1489 goto retry;
1491 atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
1492 spin_unlock(&above->cst.spin);
1495 * Our new chain is referenced but NOT locked. Lock the chain
1496 * below. The locking operation also resolves its data.
1498 * If NOLOCK is set the release will release the one-and-only lock.
1500 if ((flags & HAMMER2_LOOKUP_NOLOCK) == 0) {
		hammer2_chain_lock(chain, how);	/* recursive lock */
1502 hammer2_chain_drop(chain); /* excess ref */
1504 return (chain);
1508 * Lookup initialization/completion API
1510 hammer2_chain_t *
1511 hammer2_chain_lookup_init(hammer2_chain_t *parent, int flags)
1513 if (flags & HAMMER2_LOOKUP_SHARED) {
1514 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS |
1515 HAMMER2_RESOLVE_SHARED);
1516 } else {
1517 hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
1519 return (parent);
1522 void
1523 hammer2_chain_lookup_done(hammer2_chain_t *parent)
1525 if (parent)
1526 hammer2_chain_unlock(parent);
1529 static
1530 hammer2_chain_t *
1531 hammer2_chain_getparent(hammer2_chain_t **parentp, int how)
1533 hammer2_chain_t *oparent;
1534 hammer2_chain_t *nparent;
1535 hammer2_chain_core_t *above;
1537 oparent = *parentp;
1538 above = oparent->above;
1540 spin_lock(&above->cst.spin);
1541 nparent = above->first_parent;
1542 while (hammer2_chain_refactor_test(nparent, 1))
1543 nparent = nparent->next_parent;
1544 hammer2_chain_ref(nparent); /* protect nparent, use in lock */
1545 spin_unlock(&above->cst.spin);
1547 hammer2_chain_unlock(oparent);
1548 hammer2_chain_lock(nparent, how | HAMMER2_RESOLVE_NOREF);
1549 *parentp = nparent;
1551 return (nparent);
1555 * Locate any key between key_beg and key_end inclusive. (*parentp)
1556 * typically points to an inode but can also point to a related indirect
1557 * block and this function will recurse upwards and find the inode again.
1559 * WARNING! THIS DOES NOT RETURN KEYS IN LOGICAL KEY ORDER! ANY KEY
1560 * WITHIN THE RANGE CAN BE RETURNED. HOWEVER, AN ITERATION
1561 * WHICH PICKS UP WHERE WE LEFT OFF WILL CONTINUE THE SCAN
1562 * AND ALL IN-RANGE KEYS WILL EVENTUALLY BE RETURNED (NOT
1563 * NECESSARILY IN ORDER).
1565 * (*parentp) must be exclusively locked and referenced and can be an inode
1566 * or an existing indirect block within the inode.
1568 * On return (*parentp) will be modified to point at the deepest parent chain
1569 * element encountered during the search, as a helper for an insertion or
1570 * deletion. The new (*parentp) will be locked and referenced and the old
1571 * will be unlocked and dereferenced (no change if they are both the same).
1573 * The matching chain will be returned exclusively locked. If NOLOCK is
1574 * requested the chain will be returned only referenced.
1576 * NULL is returned if no match was found, but (*parentp) will still
1577 * potentially be adjusted.
1579 * This function will also recurse up the chain if the key is not within the
1580 * current parent's range. (*parentp) can never be set to NULL. An iteration
1581 * can simply allow (*parentp) to float inside the loop.
1583 * NOTE! chain->data is not always resolved. By default it will not be
1584 * resolved for BREF_TYPE_DATA, FREEMAP_NODE, or FREEMAP_LEAF. Use
1585 * HAMMER2_LOOKUP_ALWAYS to force resolution (but be careful w/
1586 * BREF_TYPE_DATA as the device buffer can alias the logical file
1587 * buffer).
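 *
 * Illustrative iteration sketch (assumption, built only from the helpers
 * defined in this file; flags and error handling omitted):
 *
 *	parent = hammer2_chain_lookup_init(ip->chain, 0);
 *	chain = hammer2_chain_lookup(&parent, key_beg, key_end, 0);
 *	while (chain) {
 *		... process chain ...
 *		chain = hammer2_chain_next(&parent, chain,
 *					   key_beg, key_end, 0);
 *	}
 *	hammer2_chain_lookup_done(parent);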
1589 hammer2_chain_t *
1590 hammer2_chain_lookup(hammer2_chain_t **parentp,
1591 hammer2_key_t key_beg, hammer2_key_t key_end,
1592 int flags)
1594 hammer2_mount_t *hmp;
1595 hammer2_chain_t *parent;
1596 hammer2_chain_t *chain;
1597 hammer2_chain_t *tmp;
1598 hammer2_blockref_t *base;
1599 hammer2_blockref_t *bref;
1600 hammer2_key_t scan_beg;
1601 hammer2_key_t scan_end;
1602 int count = 0;
1603 int i;
1604 int how_always = HAMMER2_RESOLVE_ALWAYS;
1605 int how_maybe = HAMMER2_RESOLVE_MAYBE;
1607 if (flags & HAMMER2_LOOKUP_ALWAYS)
1608 how_maybe = how_always;
1610 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK)) {
1611 how_maybe |= HAMMER2_RESOLVE_SHARED;
1612 how_always |= HAMMER2_RESOLVE_SHARED;
1616 * Recurse (*parentp) upward if necessary until the parent completely
1617 * encloses the key range or we hit the inode.
1619 parent = *parentp;
1620 hmp = parent->hmp;
1622 while (parent->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1623 parent->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1624 scan_beg = parent->bref.key;
1625 scan_end = scan_beg +
1626 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1627 if (key_beg >= scan_beg && key_end <= scan_end)
1628 break;
1629 parent = hammer2_chain_getparent(parentp, how_maybe);
1632 again:
1634 * Locate the blockref array. Currently we do a fully associative
1635 * search through the array.
1637 switch(parent->bref.type) {
1638 case HAMMER2_BREF_TYPE_INODE:
1640 * Special shortcut for embedded data returns the inode
1641 * itself. Callers must detect this condition and access
1642 * the embedded data (the strategy code does this for us).
1644 * This is only applicable to regular files and softlinks.
1646 if (parent->data->ipdata.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
1647 if (flags & HAMMER2_LOOKUP_NOLOCK)
1648 hammer2_chain_ref(parent);
1649 else
1650 hammer2_chain_lock(parent, how_always);
1651 return (parent);
1653 base = &parent->data->ipdata.u.blockset.blockref[0];
1654 count = HAMMER2_SET_COUNT;
1655 break;
1656 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1657 case HAMMER2_BREF_TYPE_INDIRECT:
1659 * Handle MATCHIND on the parent
1661 if (flags & HAMMER2_LOOKUP_MATCHIND) {
1662 scan_beg = parent->bref.key;
1663 scan_end = scan_beg +
1664 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1665 if (key_beg == scan_beg && key_end == scan_end) {
1666 chain = parent;
1667 hammer2_chain_lock(chain, how_maybe);
1668 goto done;
1672 * Optimize indirect blocks in the INITIAL state to avoid
1673 * I/O.
1675 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1676 base = NULL;
1677 } else {
1678 if (parent->data == NULL)
1679 panic("parent->data is NULL");
1680 base = &parent->data->npdata[0];
1682 count = parent->bytes / sizeof(hammer2_blockref_t);
1683 break;
1684 case HAMMER2_BREF_TYPE_VOLUME:
1685 base = &hmp->voldata.sroot_blockset.blockref[0];
1686 count = HAMMER2_SET_COUNT;
1687 break;
1688 case HAMMER2_BREF_TYPE_FREEMAP:
1689 base = &hmp->voldata.freemap_blockset.blockref[0];
1690 count = HAMMER2_SET_COUNT;
1691 break;
1692 default:
1693 panic("hammer2_chain_lookup: unrecognized blockref type: %d",
1694 parent->bref.type);
1695 base = NULL; /* safety */
1696 count = 0; /* safety */
1700 * If the element and key overlap we use the element.
1702 * NOTE! Deleted elements are effectively invisible. Deletions
1703 * proactively clear the parent bref to the deleted child
1704 * so we do not try to shadow here to avoid parent updates
1705 * (which would be difficult since multiple deleted elements
1706 * might represent different flush synchronization points).
1708 bref = NULL;
1709 scan_beg = 0; /* avoid compiler warning */
1710 scan_end = 0; /* avoid compiler warning */
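
	/*
	 * Illustrative example (assumption): a bref with key 0x10000 and
	 * keybits 16 covers the key range [0x10000, 0x1ffff]; the test in
	 * the loop below accepts it whenever the requested range overlaps,
	 * i.e. when key_beg <= 0x1ffff && key_end >= 0x10000.
	 */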
1712 for (i = 0; i < count; ++i) {
1713 tmp = hammer2_chain_find(parent, i);
1714 if (tmp) {
1715 if (tmp->flags & HAMMER2_CHAIN_DELETED) {
1716 hammer2_chain_drop(tmp);
1717 continue;
1719 bref = &tmp->bref;
1720 KKASSERT(bref->type != 0);
1721 } else if (base == NULL || base[i].type == 0) {
1722 continue;
1723 } else {
1724 bref = &base[i];
1726 scan_beg = bref->key;
1727 scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
1728 if (tmp)
1729 hammer2_chain_drop(tmp);
1730 if (key_beg <= scan_end && key_end >= scan_beg)
1731 break;
1733 if (i == count) {
1734 if (key_beg == key_end)
1735 return (NULL);
1736 return (hammer2_chain_next(parentp, NULL,
1737 key_beg, key_end, flags));
1741 * Acquire the new chain element. If the chain element is an
1742 * indirect block we must search recursively.
1744 * It is possible for the tmp chain above to be removed from
1745 * the RBTREE but the parent lock ensures it would not have been
1746 * destroyed from the media, so the chain_get() code will simply
1747 * reload it from the media in that case.
1749 chain = hammer2_chain_get(parent, i, flags);
1750 if (chain == NULL)
1751 return (NULL);
1754 * If the chain element is an indirect block it becomes the new
1755 * parent and we loop on it.
1757 * The parent always has to be locked with at least RESOLVE_MAYBE
1758 * so we can access its data. It might need a fixup if the caller
1759 * passed incompatible flags. Be careful not to cause a deadlock
1760 * as a data-load requires an exclusive lock.
1762 * If HAMMER2_LOOKUP_MATCHIND is set and the indirect block's key
1763 * range is within the requested key range we return the indirect
1764 * block and do NOT loop. This is usually only used to acquire
1765 * freemap nodes.
1767 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1768 chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1769 hammer2_chain_unlock(parent);
1770 *parentp = parent = chain;
1771 if (flags & HAMMER2_LOOKUP_NOLOCK) {
1772 hammer2_chain_lock(chain,
1773 how_maybe |
1774 HAMMER2_RESOLVE_NOREF);
1775 } else if ((flags & HAMMER2_LOOKUP_NODATA) &&
1776 chain->data == NULL) {
1777 hammer2_chain_ref(chain);
1778 hammer2_chain_unlock(chain);
1779 hammer2_chain_lock(chain,
1780 how_maybe |
1781 HAMMER2_RESOLVE_NOREF);
1783 goto again;
1785 done:
1787 * All done, return the chain
1789 return (chain);
1793 * After having issued a lookup we can iterate all matching keys.
 * If chain is non-NULL we continue the iteration from just after its index.
1797 * If chain is NULL we assume the parent was exhausted and continue the
1798 * iteration at the next parent.
1800 * parent must be locked on entry and remains locked throughout. chain's
1801 * lock status must match flags. Chain is always at least referenced.
1803 * WARNING! The MATCHIND flag does not apply to this function.
1805 hammer2_chain_t *
1806 hammer2_chain_next(hammer2_chain_t **parentp, hammer2_chain_t *chain,
1807 hammer2_key_t key_beg, hammer2_key_t key_end,
1808 int flags)
1810 hammer2_mount_t *hmp;
1811 hammer2_chain_t *parent;
1812 hammer2_chain_t *tmp;
1813 hammer2_blockref_t *base;
1814 hammer2_blockref_t *bref;
1815 hammer2_key_t scan_beg;
1816 hammer2_key_t scan_end;
1817 int i;
1818 int how_maybe = HAMMER2_RESOLVE_MAYBE;
1819 int count;
1821 if (flags & (HAMMER2_LOOKUP_SHARED | HAMMER2_LOOKUP_NOLOCK))
1822 how_maybe |= HAMMER2_RESOLVE_SHARED;
1824 parent = *parentp;
1825 hmp = parent->hmp;
1827 again:
1829 * Calculate the next index and recalculate the parent if necessary.
1831 if (chain) {
1833 * Continue iteration within current parent. If not NULL
1834 * the passed-in chain may or may not be locked, based on
1835 * the LOOKUP_NOLOCK flag (passed in as returned from lookup
1836 * or a prior next).
1838 i = chain->index + 1;
1839 if (flags & HAMMER2_LOOKUP_NOLOCK)
1840 hammer2_chain_drop(chain);
1841 else
1842 hammer2_chain_unlock(chain);
1845 * Any scan where the lookup returned degenerate data embedded
1846 * in the inode has an invalid index and must terminate.
1848 if (chain == parent)
1849 return(NULL);
1850 chain = NULL;
1851 } else if (parent->bref.type != HAMMER2_BREF_TYPE_INDIRECT &&
1852 parent->bref.type != HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1854 * We reached the end of the iteration.
1856 return (NULL);
1857 } else {
1859 * Continue iteration with next parent unless the current
1860 * parent covers the range.
1862 scan_beg = parent->bref.key;
1863 scan_end = scan_beg +
1864 ((hammer2_key_t)1 << parent->bref.keybits) - 1;
1865 if (key_beg >= scan_beg && key_end <= scan_end)
1866 return (NULL);
1868 i = parent->index + 1;
1869 parent = hammer2_chain_getparent(parentp, how_maybe);
1872 again2:
1874 * Locate the blockref array. Currently we do a fully associative
1875 * search through the array.
1877 switch(parent->bref.type) {
1878 case HAMMER2_BREF_TYPE_INODE:
1879 base = &parent->data->ipdata.u.blockset.blockref[0];
1880 count = HAMMER2_SET_COUNT;
1881 break;
1882 case HAMMER2_BREF_TYPE_INDIRECT:
1883 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
1884 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
1885 base = NULL;
1886 } else {
1887 KKASSERT(parent->data != NULL);
1888 base = &parent->data->npdata[0];
1890 count = parent->bytes / sizeof(hammer2_blockref_t);
1891 break;
1892 case HAMMER2_BREF_TYPE_VOLUME:
1893 base = &hmp->voldata.sroot_blockset.blockref[0];
1894 count = HAMMER2_SET_COUNT;
1895 break;
1896 case HAMMER2_BREF_TYPE_FREEMAP:
1897 base = &hmp->voldata.freemap_blockset.blockref[0];
1898 count = HAMMER2_SET_COUNT;
1899 break;
1900 default:
1901 panic("hammer2_chain_next: unrecognized blockref type: %d",
1902 parent->bref.type);
1903 base = NULL; /* safety */
1904 count = 0; /* safety */
1905 break;
1907 KKASSERT(i <= count);
1910 * Look for the key. If we are unable to find a match and an exact
1911 * match was requested we return NULL. If a range was requested we
1912 * run hammer2_chain_next() to iterate.
1914 * NOTE! Deleted elements are effectively invisible. Deletions
1915 * proactively clear the parent bref to the deleted child
1916 * so we do not try to shadow here to avoid parent updates
1917 * (which would be difficult since multiple deleted elements
1918 * might represent different flush synchronization points).
1920 bref = NULL;
1921 scan_beg = 0; /* avoid compiler warning */
1922 scan_end = 0; /* avoid compiler warning */
1924 while (i < count) {
1925 tmp = hammer2_chain_find(parent, i);
1926 if (tmp) {
1927 if (tmp->flags & HAMMER2_CHAIN_DELETED) {
1928 hammer2_chain_drop(tmp);
1929 ++i;
1930 continue;
1932 bref = &tmp->bref;
1933 } else if (base == NULL || base[i].type == 0) {
1934 ++i;
1935 continue;
1936 } else {
1937 bref = &base[i];
1939 scan_beg = bref->key;
1940 scan_end = scan_beg + ((hammer2_key_t)1 << bref->keybits) - 1;
1941 if (tmp)
1942 hammer2_chain_drop(tmp);
1943 if (key_beg <= scan_end && key_end >= scan_beg)
1944 break;
1945 ++i;
1949 * If we couldn't find a match recurse up a parent to continue the
1950 * search.
1952 if (i == count)
1953 goto again;
1956 * Acquire the new chain element. If the chain element is an
1957 * indirect block we must search recursively.
1959 chain = hammer2_chain_get(parent, i, flags);
1960 if (chain == NULL)
1961 return (NULL);
1964 * If the chain element is an indirect block it becomes the new
1965 * parent and we loop on it.
1967 * The parent always has to be locked with at least RESOLVE_MAYBE
1968 * so we can access its data. It might need a fixup if the caller
1969 * passed incompatible flags. Be careful not to cause a deadlock
1970 * as a data-load requires an exclusive lock.
1972 * If HAMMER2_LOOKUP_MATCHIND is set and the indirect block's key
1973 * range is within the requested key range we return the indirect
1974 * block and do NOT loop. This is usually only used to acquire
1975 * freemap nodes.
1977 if (chain->bref.type == HAMMER2_BREF_TYPE_INDIRECT ||
1978 chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_NODE) {
1979 if ((flags & HAMMER2_LOOKUP_MATCHIND) == 0 ||
1980 key_beg > scan_beg || key_end < scan_end) {
1981 hammer2_chain_unlock(parent);
1982 *parentp = parent = chain;
1983 chain = NULL;
1984 if (flags & HAMMER2_LOOKUP_NOLOCK) {
1985 hammer2_chain_lock(parent,
1986 how_maybe |
1987 HAMMER2_RESOLVE_NOREF);
1988 } else if ((flags & HAMMER2_LOOKUP_NODATA) &&
1989 parent->data == NULL) {
1990 hammer2_chain_ref(parent);
1991 hammer2_chain_unlock(parent);
1992 hammer2_chain_lock(parent,
1993 how_maybe |
1994 HAMMER2_RESOLVE_NOREF);
1996 i = 0;
1997 goto again2;
2002 * All done, return chain
2004 return (chain);
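/*
 * Editor's illustrative sketch (not part of the original source): the
 * typical consumer-side pattern for the lookup/next API described above.
 * Only the call signatures are taken from this file; the per-chain work
 * and the surrounding parent setup/teardown are hypothetical and omitted.
 */
#if 0
static void
example_scan_range(hammer2_chain_t **parentp,
		   hammer2_key_t key_beg, hammer2_key_t key_end)
{
	hammer2_chain_t *chain;

	chain = hammer2_chain_lookup(parentp, key_beg, key_end,
				     HAMMER2_LOOKUP_NODATA);
	while (chain) {
		/* ... examine chain->bref here ... */
		chain = hammer2_chain_next(parentp, chain,
					   key_beg, key_end,
					   HAMMER2_LOOKUP_NODATA);
	}
}
#endif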
2008 * Create and return a new hammer2 system memory structure of the specified
2009 * key, type and size and insert it under (*parentp). This is a full
2010 * insertion, based on the supplied key/keybits, and may involve creating
2011 * indirect blocks and moving other chains around via delete/duplicate.
2013 * (*parentp) must be exclusive locked and may be replaced on return
2014 * depending on how much work the function had to do.
2016 * (*chainp) usually starts out NULL and is set to the newly created chain
2017 * on return, but the caller may instead allocate a disconnected chain
2018 * and pass it in. (It is also possible for the caller to use
2019 * chain_duplicate() to create a disconnected chain, manipulate it, then
2020 * pass it into this function to insert it).
2022 * This function should NOT be used to insert INDIRECT blocks. It is
2023 * typically used to create/insert inodes and data blocks.
2025 * The caller must pass in an exclusively locked parent the new chain is to
2026 * be inserted under, and optionally pass in a disconnected, exclusively
2027 * locked chain to insert (else we create a new chain). The function will
2028 * adjust (*parentp) as necessary, create or connect the chain, and
2029 * return an exclusively locked chain in *chainp.
2032 hammer2_chain_create(hammer2_trans_t *trans, hammer2_chain_t **parentp,
2033 hammer2_chain_t **chainp,
2034 hammer2_key_t key, int keybits, int type, size_t bytes)
2036 hammer2_mount_t *hmp;
2037 hammer2_chain_t *chain;
2038 hammer2_chain_t *child;
2039 hammer2_chain_t *parent = *parentp;
2040 hammer2_chain_core_t *above;
2041 hammer2_blockref_t dummy;
2042 hammer2_blockref_t *base;
2043 int allocated = 0;
2044 int error = 0;
2045 int count;
2046 int i;
2048 above = parent->core;
2049 KKASSERT(ccms_thread_lock_owned(&above->cst));
2050 hmp = parent->hmp;
2051 chain = *chainp;
2053 if (chain == NULL) {
2055 * First allocate media space and construct the dummy bref,
2056 * then allocate the in-memory chain structure. Set the
2057 * INITIAL flag for fresh chains.
2059 bzero(&dummy, sizeof(dummy));
2060 dummy.type = type;
2061 dummy.key = key;
2062 dummy.keybits = keybits;
2063 dummy.data_off = hammer2_getradix(bytes);
2064 dummy.methods = parent->bref.methods;
2065 chain = hammer2_chain_alloc(hmp, trans, &dummy);
2066 hammer2_chain_core_alloc(chain, NULL);
2068 atomic_set_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
2071 * Lock the chain manually, chain_lock will load the chain
2072 * which we do NOT want to do. (note: chain->refs is set
2073 * to 1 by chain_alloc() for us, but lockcnt is not).
2075 chain->lockcnt = 1;
2076 ccms_thread_lock(&chain->core->cst, CCMS_STATE_EXCLUSIVE);
2077 allocated = 1;
2080 * We do NOT set INITIAL here (yet). INITIAL is only
2081 * used for indirect blocks.
2083 * Recalculate bytes to reflect the actual media block
2084 * allocation.
2086 bytes = (hammer2_off_t)1 <<
2087 (int)(chain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
2088 chain->bytes = bytes;
2090 switch(type) {
2091 case HAMMER2_BREF_TYPE_VOLUME:
2092 case HAMMER2_BREF_TYPE_FREEMAP:
2093 panic("hammer2_chain_create: called with volume type");
2094 break;
2095 case HAMMER2_BREF_TYPE_INODE:
2096 KKASSERT(bytes == HAMMER2_INODE_BYTES);
2097 atomic_set_int(&chain->flags, HAMMER2_CHAIN_EMBEDDED);
2098 chain->data = kmalloc(sizeof(chain->data->ipdata),
2099 hmp->minode, M_WAITOK | M_ZERO);
2100 break;
2101 case HAMMER2_BREF_TYPE_INDIRECT:
2102 panic("hammer2_chain_create: cannot be used to "
2103 "create indirect block");
2104 break;
2105 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2106 panic("hammer2_chain_create: cannot be used to "
2107 "create freemap root or node");
2108 break;
2109 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2110 KKASSERT(bytes == sizeof(chain->data->bmdata));
2111 atomic_set_int(&chain->flags, HAMMER2_CHAIN_EMBEDDED);
2112 chain->data = kmalloc(sizeof(chain->data->bmdata),
2113 hmp->mchain, M_WAITOK | M_ZERO);
2114 break;
2115 case HAMMER2_BREF_TYPE_DATA:
2116 default:
2117 /* leave chain->data NULL */
2118 KKASSERT(chain->data == NULL);
2119 break;
2121 } else {
2123 * Potentially update the existing chain's key/keybits.
2125 * Do NOT mess with the current state of the INITIAL flag.
2127 chain->bref.key = key;
2128 chain->bref.keybits = keybits;
2129 KKASSERT(chain->above == NULL);
2132 again:
2133 above = parent->core;
2136 * Locate a free blockref in the parent's array
2138 switch(parent->bref.type) {
2139 case HAMMER2_BREF_TYPE_INODE:
2140 KKASSERT((parent->data->ipdata.op_flags &
2141 HAMMER2_OPFLAG_DIRECTDATA) == 0);
2142 KKASSERT(parent->data != NULL);
2143 base = &parent->data->ipdata.u.blockset.blockref[0];
2144 count = HAMMER2_SET_COUNT;
2145 break;
2146 case HAMMER2_BREF_TYPE_INDIRECT:
2147 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2148 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2149 base = NULL;
2150 } else {
2151 KKASSERT(parent->data != NULL);
2152 base = &parent->data->npdata[0];
2154 count = parent->bytes / sizeof(hammer2_blockref_t);
2155 break;
2156 case HAMMER2_BREF_TYPE_VOLUME:
2157 KKASSERT(parent->data != NULL);
2158 base = &hmp->voldata.sroot_blockset.blockref[0];
2159 count = HAMMER2_SET_COUNT;
2160 break;
2161 case HAMMER2_BREF_TYPE_FREEMAP:
2162 KKASSERT(parent->data != NULL);
2163 base = &hmp->voldata.freemap_blockset.blockref[0];
2164 count = HAMMER2_SET_COUNT;
2165 break;
2166 default:
2167 panic("hammer2_chain_create: unrecognized blockref type: %d",
2168 parent->bref.type);
2169 count = 0;
2170 break;
2174 * Scan for an unallocated bref, also skipping any slots occupied
2175 * by in-memory chain elements that may not yet have been updated
2176 * in the parent's bref array.
2178 * We don't have to hold the spinlock to save an empty slot as
2179 * new slots can only transition from empty if the parent is
2180 * locked exclusively.
2182 spin_lock(&above->cst.spin);
2183 for (i = 0; i < count; ++i) {
2184 child = hammer2_chain_find_locked(parent, i);
2185 if (child) {
2186 if (child->flags & HAMMER2_CHAIN_DELETED)
2187 break;
2188 continue;
2190 if (base == NULL)
2191 break;
2192 if (base[i].type == 0)
2193 break;
2195 spin_unlock(&above->cst.spin);
2198 * If no free blockref could be found we must create an indirect
2199 * block and move a number of blockrefs into it. With the parent
2200 * locked we can safely lock each child in order to move it without
2201 * causing a deadlock.
2203 * This may return the new indirect block or the old parent depending
2204 * on where the key falls. NULL is returned on error.
2206 if (i == count) {
2207 hammer2_chain_t *nparent;
2209 nparent = hammer2_chain_create_indirect(trans, parent,
2210 key, keybits,
2211 type, &error);
2212 if (nparent == NULL) {
2213 if (allocated)
2214 hammer2_chain_drop(chain);
2215 chain = NULL;
2216 goto done;
2218 if (parent != nparent) {
2219 hammer2_chain_unlock(parent);
2220 parent = *parentp = nparent;
2222 goto again;
2226 * Link the chain into its parent. Later on we will have to set
2227 * the MOVED bit in situations where we don't mark the new chain
2228 * as being modified.
2230 if (chain->above != NULL)
2231 panic("hammer2: hammer2_chain_create: chain already connected");
2232 KKASSERT(chain->above == NULL);
2233 KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0);
2235 chain->above = above;
2236 chain->index = i;
2237 spin_lock(&above->cst.spin);
2238 if (RB_INSERT(hammer2_chain_tree, &above->rbtree, chain))
2239 panic("hammer2_chain_create: collision");
2240 atomic_set_int(&chain->flags, HAMMER2_CHAIN_ONRBTREE);
2241 spin_unlock(&above->cst.spin);
2243 if (allocated) {
2245 * Mark the newly created chain modified.
2247 * Device buffers are not instantiated for DATA elements
2248 * as these are handled by logical buffers.
2250 * Indirect and freemap node indirect blocks are handled
2251 * by hammer2_chain_create_indirect() and not by this
2252 * function.
2254 * Data for all other bref types is expected to be
2255 * instantiated (INODE, LEAF).
2257 switch(chain->bref.type) {
2258 case HAMMER2_BREF_TYPE_DATA:
2259 hammer2_chain_modify(trans, &chain,
2260 HAMMER2_MODIFY_OPTDATA |
2261 HAMMER2_MODIFY_ASSERTNOCOPY);
2262 break;
2263 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2264 case HAMMER2_BREF_TYPE_INODE:
2265 hammer2_chain_modify(trans, &chain,
2266 HAMMER2_MODIFY_ASSERTNOCOPY);
2267 break;
2268 default:
2270 * Remaining types are not supported by this function.
2271 * In particular, INDIRECT and LEAF_NODE types are
2272 * handled by create_indirect().
2274 panic("hammer2_chain_create: bad type: %d",
2275 chain->bref.type);
2276 /* NOT REACHED */
2277 break;
2279 } else {
2281 * When reconnecting a chain we must set MOVED and call setsubmod()
2282 * so the flush recognizes that it must update the bref in
2283 * the parent.
2285 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2286 hammer2_chain_ref(chain);
2287 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
2289 hammer2_chain_setsubmod(trans, chain);
2292 done:
2293 *chainp = chain;
2295 return (error);
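/*
 * Editor's note: a self-contained userland sketch of the radix encoding
 * relied upon above, where the low bits of a blockref's data_off store
 * log2 of the allocation size (hammer2_getradix() computes the radix from
 * a byte count in the real code).  EXAMPLE_RADIX_MASK and all other names
 * below are hypothetical.
 */
#if 0
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_RADIX_MASK	0x1FULL		/* assumed radix field width */

static uint64_t
example_bytes_from_data_off(uint64_t data_off)
{
	/* decode the allocation size from the low bits */
	return ((uint64_t)1 << (int)(data_off & EXAMPLE_RADIX_MASK));
}

static int
example_getradix(size_t bytes)
{
	/* smallest radix whose power-of-2 covers the request */
	int radix = 0;

	while (((size_t)1 << radix) < bytes)
		++radix;
	return (radix);
}

static void
example_radix_roundtrip(void)
{
	uint64_t data_off = 0x100000ULL | example_getradix(16384);

	assert(example_bytes_from_data_off(data_off) == 16384);
}
#endif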
2299 * Replace (*chainp) with a duplicate. The original *chainp is unlocked
2300 * and the replacement will be returned locked. Both the original and the
2301 * new chain will share the same RBTREE (have the same chain->core), with
2302 * the new chain becoming the 'current' chain (meaning it is the first in
2303 * the linked list at core->chain_first).
2305 * If (parent, i) then the new duplicated chain is inserted under the parent
2306 * at the specified index (the parent must not have a ref at that index).
2308 * If (NULL, -1) then the new duplicated chain is not inserted anywhere,
2309 * similar to if it had just been chain_alloc()'d (suitable for passing into
2310 * hammer2_chain_create() after this function returns).
2312 * NOTE! Duplication is used in order to retain the original topology to
2313 * support flush synchronization points. Both the original and the
2314 * new chain will have the same transaction id and thus the operation
2315 * appears atomic with regard to media flushes.
2317 static void hammer2_chain_dup_fixup(hammer2_chain_t *ochain,
2318 hammer2_chain_t *nchain);
2320 void
2321 hammer2_chain_duplicate(hammer2_trans_t *trans, hammer2_chain_t *parent, int i,
2322 hammer2_chain_t **chainp, hammer2_blockref_t *bref)
2324 hammer2_mount_t *hmp = trans->hmp;
2325 hammer2_blockref_t *base;
2326 hammer2_chain_t *ochain;
2327 hammer2_chain_t *nchain;
2328 hammer2_chain_t *scan;
2329 hammer2_chain_core_t *above;
2330 size_t bytes;
2331 int count;
2332 int oflags;
2333 void *odata;
2336 * First create a duplicate of the chain structure, associating
2337 * it with the same core, making it the same size, pointing it
2338 * to the same bref (the same media block).
2340 ochain = *chainp;
2341 if (bref == NULL)
2342 bref = &ochain->bref;
2343 nchain = hammer2_chain_alloc(hmp, trans, bref);
2344 hammer2_chain_core_alloc(nchain, ochain->core);
2345 bytes = (hammer2_off_t)1 <<
2346 (int)(bref->data_off & HAMMER2_OFF_MASK_RADIX);
2347 nchain->bytes = bytes;
2348 nchain->modify_tid = ochain->modify_tid;
2350 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_NEVER);
2351 hammer2_chain_dup_fixup(ochain, nchain);
2354 * If parent is not NULL, insert into the parent at the requested
2355 * index. The newly duplicated chain must be marked MOVED and
2356 * SUBMODIFIED set in its parent(s).
2358 * Having both chains locked is extremely important for atomicity.
2360 if (parent) {
2362 * Locate a free blockref in the parent's array
2364 above = parent->core;
2365 KKASSERT(ccms_thread_lock_owned(&above->cst));
2367 switch(parent->bref.type) {
2368 case HAMMER2_BREF_TYPE_INODE:
2369 KKASSERT((parent->data->ipdata.op_flags &
2370 HAMMER2_OPFLAG_DIRECTDATA) == 0);
2371 KKASSERT(parent->data != NULL);
2372 base = &parent->data->ipdata.u.blockset.blockref[0];
2373 count = HAMMER2_SET_COUNT;
2374 break;
2375 case HAMMER2_BREF_TYPE_INDIRECT:
2376 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2377 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2378 base = NULL;
2379 } else {
2380 KKASSERT(parent->data != NULL);
2381 base = &parent->data->npdata[0];
2383 count = parent->bytes / sizeof(hammer2_blockref_t);
2384 break;
2385 case HAMMER2_BREF_TYPE_VOLUME:
2386 KKASSERT(parent->data != NULL);
2387 base = &hmp->voldata.sroot_blockset.blockref[0];
2388 count = HAMMER2_SET_COUNT;
2389 break;
2390 case HAMMER2_BREF_TYPE_FREEMAP:
2391 KKASSERT(parent->data != NULL);
2392 base = &hmp->voldata.freemap_blockset.blockref[0];
2393 count = HAMMER2_SET_COUNT;
2394 break;
2395 default:
2396 panic("hammer2_chain_create: unrecognized "
2397 "blockref type: %d",
2398 parent->bref.type);
2399 count = 0;
2400 break;
2402 KKASSERT(i >= 0 && i < count);
2404 KKASSERT((nchain->flags & HAMMER2_CHAIN_DELETED) == 0);
2405 KKASSERT(parent->refs > 0);
2407 spin_lock(&above->cst.spin);
2408 nchain->above = above;
2409 nchain->index = i;
2410 scan = hammer2_chain_find_locked(parent, i);
2411 KKASSERT(base == NULL || base[i].type == 0 ||
2412 scan == NULL ||
2413 (scan->flags & HAMMER2_CHAIN_DELETED));
2414 if (RB_INSERT(hammer2_chain_tree, &above->rbtree,
2415 nchain)) {
2416 panic("hammer2_chain_duplicate: collision");
2418 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_ONRBTREE);
2419 spin_unlock(&above->cst.spin);
2421 if ((nchain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2422 hammer2_chain_ref(nchain);
2423 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MOVED);
2425 hammer2_chain_setsubmod(trans, nchain);
2429 * We have to unlock ochain to flush any dirty data, asserting the
2430 * case (data == NULL) to catch any extra locks that might have been
2431 * present, then transfer state to nchain.
2433 oflags = ochain->flags;
2434 odata = ochain->data;
2435 hammer2_chain_unlock(ochain);
2436 KKASSERT((ochain->flags & HAMMER2_CHAIN_EMBEDDED) ||
2437 ochain->data == NULL);
2439 if (oflags & HAMMER2_CHAIN_INITIAL)
2440 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_INITIAL);
2443 * WARNING! We should never resolve DATA to device buffers
2444 * (XXX allow it if the caller did?), and since
2445 * we currently do not have the logical buffer cache
2446 * buffer in-hand to fix its cached physical offset
2447 * we also force the modify code to not COW it. XXX
2449 if (oflags & HAMMER2_CHAIN_MODIFIED) {
2450 if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2451 hammer2_chain_modify(trans, &nchain,
2452 HAMMER2_MODIFY_OPTDATA |
2453 HAMMER2_MODIFY_NOREALLOC |
2454 HAMMER2_MODIFY_ASSERTNOCOPY);
2455 } else if (oflags & HAMMER2_CHAIN_INITIAL) {
2456 hammer2_chain_modify(trans, &nchain,
2457 HAMMER2_MODIFY_OPTDATA |
2458 HAMMER2_MODIFY_ASSERTNOCOPY);
2459 } else {
2460 hammer2_chain_modify(trans, &nchain,
2461 HAMMER2_MODIFY_ASSERTNOCOPY);
2463 hammer2_chain_drop(nchain);
2464 } else {
2465 if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2466 hammer2_chain_drop(nchain);
2467 } else if (oflags & HAMMER2_CHAIN_INITIAL) {
2468 hammer2_chain_drop(nchain);
2469 } else {
2470 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS |
2471 HAMMER2_RESOLVE_NOREF);
2472 hammer2_chain_unlock(nchain);
2475 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_SUBMODIFIED);
2476 *chainp = nchain;
2479 #if 0
2481 * When the chain is in the INITIAL state we must still
2482 * ensure that a block has been assigned so MOVED processing
2483 * works as expected.
2485 KKASSERT (nchain->bref.type != HAMMER2_BREF_TYPE_DATA);
2486 hammer2_chain_modify(trans, &nchain,
2487 HAMMER2_MODIFY_OPTDATA |
2488 HAMMER2_MODIFY_ASSERTNOCOPY);
2491 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_MAYBE |
2492 HAMMER2_RESOLVE_NOREF); /* eat excess ref */
2493 hammer2_chain_unlock(nchain);
2494 #endif
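/*
 * Editor's illustrative sketch: the (NULL, -1) form documented above is
 * typically paired with hammer2_chain_create() to re-insert the duplicate
 * somewhere else (the snapshot code later in this file does exactly this).
 * The transaction, parent, and key arguments are hypothetical; locking of
 * the incoming chain and error handling are assumed/omitted.
 */
#if 0
static void
example_dup_and_reinsert(hammer2_trans_t *trans, hammer2_chain_t **parentp,
			 hammer2_chain_t *chain, hammer2_key_t lhc)
{
	/* chain is assumed to be exclusively locked by the caller */
	hammer2_chain_duplicate(trans, NULL, -1, &chain, NULL);
	hammer2_chain_create(trans, parentp, &chain, lhc, 0,
			     HAMMER2_BREF_TYPE_INODE,
			     HAMMER2_INODE_BYTES);
	hammer2_chain_unlock(chain);	/* release the re-inserted chain */
}
#endif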
2497 * Special in-place delete-duplicate sequence which does not require a
2498 * locked parent. (*chainp) is marked DELETED and atomically replaced
2499 * with a duplicate. Atomicity is at the very-fine spin-lock level in
2500 * order to ensure that lookups do not race us.
2502 void
2503 hammer2_chain_delete_duplicate(hammer2_trans_t *trans, hammer2_chain_t **chainp,
2504 int flags)
2506 hammer2_mount_t *hmp = trans->hmp;
2507 hammer2_chain_t *ochain;
2508 hammer2_chain_t *nchain;
2509 hammer2_chain_core_t *above;
2510 size_t bytes;
2511 int oflags;
2512 void *odata;
2515 * First create a duplicate of the chain structure
2517 ochain = *chainp;
2518 nchain = hammer2_chain_alloc(hmp, trans, &ochain->bref); /* 1 ref */
2519 if (flags & HAMMER2_DELDUP_RECORE)
2520 hammer2_chain_core_alloc(nchain, NULL);
2521 else
2522 hammer2_chain_core_alloc(nchain, ochain->core);
2523 above = ochain->above;
2525 bytes = (hammer2_off_t)1 <<
2526 (int)(ochain->bref.data_off & HAMMER2_OFF_MASK_RADIX);
2527 nchain->bytes = bytes;
2528 nchain->modify_tid = ochain->modify_tid;
2531 * Lock nchain and insert into ochain's core hierarchy, marking
2532 * ochain DELETED at the same time. Having both chains locked
2533 * is extremely important for atomicy.
2535 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_NEVER);
2536 hammer2_chain_dup_fixup(ochain, nchain);
2537 /* extra ref still present from original allocation */
2539 nchain->index = ochain->index;
2541 spin_lock(&above->cst.spin);
2542 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_ONRBTREE);
2543 ochain->delete_tid = trans->sync_tid;
2544 nchain->above = above;
2545 atomic_set_int(&ochain->flags, HAMMER2_CHAIN_DELETED);
2546 if ((ochain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2547 hammer2_chain_ref(ochain);
2548 atomic_set_int(&ochain->flags, HAMMER2_CHAIN_MOVED);
2550 if (RB_INSERT(hammer2_chain_tree, &above->rbtree, nchain)) {
2551 panic("hammer2_chain_delete_duplicate: collision");
2553 spin_unlock(&above->cst.spin);
2556 * We have to unlock ochain to flush any dirty data, asserting the
2557 * case (data == NULL) to catch any extra locks that might have been
2558 * present, then transfer state to nchain.
2560 oflags = ochain->flags;
2561 odata = ochain->data;
2562 hammer2_chain_unlock(ochain); /* replacing ochain */
2563 KKASSERT(ochain->bref.type == HAMMER2_BREF_TYPE_INODE ||
2564 ochain->data == NULL);
2566 if (oflags & HAMMER2_CHAIN_INITIAL)
2567 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_INITIAL);
2570 * WARNING! We should never resolve DATA to device buffers
2571 * (XXX allow it if the caller did?), and since
2572 * we currently do not have the logical buffer cache
2573 * buffer in-hand to fix its cached physical offset
2574 * we also force the modify code to not COW it. XXX
2576 if (oflags & HAMMER2_CHAIN_MODIFIED) {
2577 if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2578 hammer2_chain_modify(trans, &nchain,
2579 HAMMER2_MODIFY_OPTDATA |
2580 HAMMER2_MODIFY_NOREALLOC |
2581 HAMMER2_MODIFY_ASSERTNOCOPY);
2582 } else if (oflags & HAMMER2_CHAIN_INITIAL) {
2583 hammer2_chain_modify(trans, &nchain,
2584 HAMMER2_MODIFY_OPTDATA |
2585 HAMMER2_MODIFY_ASSERTNOCOPY);
2586 } else {
2587 hammer2_chain_modify(trans, &nchain,
2588 HAMMER2_MODIFY_ASSERTNOCOPY);
2590 hammer2_chain_drop(nchain);
2591 } else {
2592 if (nchain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2593 hammer2_chain_drop(nchain);
2594 } else if (oflags & HAMMER2_CHAIN_INITIAL) {
2595 hammer2_chain_drop(nchain);
2596 } else {
2597 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS |
2598 HAMMER2_RESOLVE_NOREF);
2599 hammer2_chain_unlock(nchain);
2604 * Unconditionally set the MOVED and SUBMODIFIED bit to force
2605 * update of parent bref and indirect blockrefs during flush.
2607 if ((nchain->flags & HAMMER2_CHAIN_MOVED) == 0) {
2608 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_MOVED);
2609 hammer2_chain_ref(nchain);
2611 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_SUBMODIFIED);
2612 hammer2_chain_setsubmod(trans, nchain);
2613 *chainp = nchain;
2617 * Helper function to fixup inodes. The caller procedure stack may hold
2618 * multiple locks on ochain if it represents an inode, preventing our
2619 * unlock from retiring its state to the buffer cache.
2621 * In this situation any attempt to access the buffer cache could result
2622 * either in stale data or a deadlock. Work around the problem by copying
2623 * the embedded data directly.
2625 static
2626 void
2627 hammer2_chain_dup_fixup(hammer2_chain_t *ochain, hammer2_chain_t *nchain)
2629 if (ochain->data == NULL)
2630 return;
2631 switch(ochain->bref.type) {
2632 case HAMMER2_BREF_TYPE_INODE:
2633 KKASSERT(nchain->data == NULL);
2634 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_EMBEDDED);
2635 nchain->data = kmalloc(sizeof(nchain->data->ipdata),
2636 ochain->hmp->minode, M_WAITOK | M_ZERO);
2637 nchain->data->ipdata = ochain->data->ipdata;
2638 break;
2639 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
2640 KKASSERT(nchain->data == NULL);
2641 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_EMBEDDED);
2642 nchain->data = kmalloc(sizeof(nchain->data->bmdata),
2643 ochain->hmp->mchain, M_WAITOK | M_ZERO);
2644 bcopy(ochain->data->bmdata,
2645 nchain->data->bmdata,
2646 sizeof(nchain->data->bmdata));
2647 break;
2648 default:
2649 break;
2654 * Create a snapshot of the specified {parent, chain} with the specified
2655 * label.
2657 * (a) We create a duplicate connected to the super-root as the specified
2658 * label.
2660 * (b) We issue a restricted flush using the current transaction on the
2661 * duplicate.
2663 * (c) We disconnect and reallocate the duplicate's core.
2666 hammer2_chain_snapshot(hammer2_trans_t *trans, hammer2_inode_t *ip,
2667 hammer2_ioc_pfs_t *pfs)
2669 hammer2_mount_t *hmp = trans->hmp;
2670 hammer2_chain_t *chain;
2671 hammer2_chain_t *nchain;
2672 hammer2_chain_t *parent;
2673 hammer2_inode_data_t *ipdata;
2674 size_t name_len = strlen(pfs->name);
2675 hammer2_key_t lhc = hammer2_dirhash(pfs->name, name_len);
2676 int error;
2679 * Create disconnected duplicate
2681 KKASSERT((trans->flags & HAMMER2_TRANS_RESTRICTED) == 0);
2682 nchain = ip->chain;
2683 hammer2_chain_lock(nchain, HAMMER2_RESOLVE_MAYBE);
2684 hammer2_chain_duplicate(trans, NULL, -1, &nchain, NULL);
2685 atomic_set_int(&nchain->flags, HAMMER2_CHAIN_RECYCLE |
2686 HAMMER2_CHAIN_SNAPSHOT);
2689 * Create named entry in the super-root.
2691 parent = hammer2_chain_lookup_init(hmp->schain, 0);
2692 error = 0;
2693 while (error == 0) {
2694 chain = hammer2_chain_lookup(&parent, lhc, lhc, 0);
2695 if (chain == NULL)
2696 break;
2697 if ((lhc & HAMMER2_DIRHASH_LOMASK) == HAMMER2_DIRHASH_LOMASK)
2698 error = ENOSPC;
2699 hammer2_chain_unlock(chain);
2700 chain = NULL;
2701 ++lhc;
2703 hammer2_chain_create(trans, &parent, &nchain, lhc, 0,
2704 HAMMER2_BREF_TYPE_INODE,
2705 HAMMER2_INODE_BYTES);
2706 hammer2_chain_modify(trans, &nchain, HAMMER2_MODIFY_ASSERTNOCOPY);
2707 hammer2_chain_lookup_done(parent);
2708 parent = NULL; /* safety */
2711 * Name fixup
2713 ipdata = &nchain->data->ipdata;
2714 ipdata->name_key = lhc;
2715 ipdata->name_len = name_len;
2716 ksnprintf(ipdata->filename, sizeof(ipdata->filename), "%s", pfs->name);
2719 * Set PFS type, generate a unique filesystem id, and generate
2720 * a cluster id. Use the same clid when snapshotting a PFS root,
2721 * which theoretically allows the snapshot to be used as part of
2722 * the same cluster (perhaps as a cache).
2724 ipdata->pfs_type = HAMMER2_PFSTYPE_SNAPSHOT;
2725 kern_uuidgen(&ipdata->pfs_fsid, 1);
2726 if (ip->chain == ip->pmp->rchain)
2727 ipdata->pfs_clid = ip->chain->data->ipdata.pfs_clid;
2728 else
2729 kern_uuidgen(&ipdata->pfs_clid, 1);
2732 * Issue a restricted flush of the snapshot. This is a synchronous
2733 * operation.
2735 trans->flags |= HAMMER2_TRANS_RESTRICTED;
2736 kprintf("SNAPSHOTA\n");
2737 tsleep(trans, 0, "snapslp", hz*4);
2738 kprintf("SNAPSHOTB\n");
2739 hammer2_chain_flush(trans, nchain);
2740 trans->flags &= ~HAMMER2_TRANS_RESTRICTED;
2742 #if 0
2744 * Remove the link because nchain is a snapshot and snapshots don't
2745 * follow CHAIN_DELETED semantics?
2747 chain = ip->chain;
2750 KKASSERT(chain->duplink == nchain);
2751 KKASSERT(chain->core == nchain->core);
2752 KKASSERT(nchain->refs >= 2);
2753 chain->duplink = nchain->duplink;
2754 atomic_clear_int(&nchain->flags, HAMMER2_CHAIN_DUPTARGET);
2755 hammer2_chain_drop(nchain);
2756 #endif
2758 kprintf("snapshot %s nchain->refs %d nchain->flags %08x\n",
2759 pfs->name, nchain->refs, nchain->flags);
2760 hammer2_chain_unlock(nchain);
2762 return (error);
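/*
 * Editor's illustrative sketch: the directory-key collision handling used
 * by the snapshot code above, reduced to userland.  A name hashes to a
 * base key and collisions are resolved by probing successive keys within
 * the low bits reserved for collisions (HAMMER2_DIRHASH_LOMASK in the real
 * code).  EXAMPLE_LOMASK and key_in_use() are hypothetical.
 */
#if 0
#include <errno.h>
#include <stdint.h>

#define EXAMPLE_LOMASK	0x3FFULL	/* assumed collision space */

extern int key_in_use(uint64_t key);	/* hypothetical occupancy test */

static int
example_find_free_key(uint64_t lhc, uint64_t *keyp)
{
	while (key_in_use(lhc)) {
		if ((lhc & EXAMPLE_LOMASK) == EXAMPLE_LOMASK)
			return (ENOSPC);	/* collision space exhausted */
		++lhc;
	}
	*keyp = lhc;
	return (0);
}
#endif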
2766 * Create an indirect block that covers one or more of the elements in the
2767 * current parent. Either returns the existing parent with no locking or
2768 * ref changes or returns the new indirect block locked and referenced
2769 * and leaving the original parent lock/ref intact as well.
2771 * If an error occurs, NULL is returned and *errorp is set to the error.
2773 * The returned chain depends on where the specified key falls.
2775 * The key/keybits for the indirect mode only needs to follow four rules:
2777 * (1) That all elements underneath it fit within its key space and
2779 * (2) That all elements outside it are outside its key space.
2781 * (3) When creating the new indirect block any elements in the current
2782 * parent that fit within the new indirect block's keyspace must be
2783 * moved into the new indirect block.
2785 * (4) The keyspace chosen for the inserted indirect block CAN cover a wider
2786 * keyspace than the current parent, but lookup/iteration rules will
2787 * ensure (and must ensure) that rule (2) for all parents leading up
2788 * to the nearest inode or the root volume header is adhered to. This
2789 * is accomplished by always recursing through matching keyspaces in
2790 * the hammer2_chain_lookup() and hammer2_chain_next() API.
2792 * The current implementation calculates the worst-case keyspace by
2793 * iterating the current parent and then dividing it into two halves, choosing
2794 * whichever half has the most elements (not necessarily the half containing
2795 * the requested key).
2797 * We can also opt to use the half with the least number of elements. This
2798 * causes lower-numbered keys (aka logical file offsets) to recurse through
2799 * fewer indirect blocks and higher-numbered keys to recurse through more.
2800 * This also has the risk of not moving enough elements to the new indirect
2801 * block and being forced to create several indirect blocks before the element
2802 * can be inserted.
2804 * Must be called with an exclusively locked parent.
2806 static int hammer2_chain_indkey_freemap(hammer2_chain_t *parent,
2807 hammer2_key_t *keyp, int keybits,
2808 hammer2_blockref_t *base, int count);
2809 static int hammer2_chain_indkey_normal(hammer2_chain_t *parent,
2810 hammer2_key_t *keyp, int keybits,
2811 hammer2_blockref_t *base, int count);
2812 static
2813 hammer2_chain_t *
2814 hammer2_chain_create_indirect(hammer2_trans_t *trans, hammer2_chain_t *parent,
2815 hammer2_key_t create_key, int create_bits,
2816 int for_type, int *errorp)
2818 hammer2_mount_t *hmp = trans->hmp;
2819 hammer2_chain_core_t *above;
2820 hammer2_chain_core_t *icore;
2821 hammer2_blockref_t *base;
2822 hammer2_blockref_t *bref;
2823 hammer2_chain_t *chain;
2824 hammer2_chain_t *child;
2825 hammer2_chain_t *ichain;
2826 hammer2_chain_t dummy;
2827 hammer2_key_t key = create_key;
2828 int keybits = create_bits;
2829 int count;
2830 int nbytes;
2831 int i;
2834 * Calculate the base blockref pointer or NULL if the chain
2835 * is known to be empty. We need to calculate the array count
2836 * for RB lookups either way.
2838 KKASSERT(ccms_thread_lock_owned(&parent->core->cst));
2839 *errorp = 0;
2840 above = parent->core;
2842 /*hammer2_chain_modify(trans, &parent, HAMMER2_MODIFY_OPTDATA);*/
2843 if (parent->flags & HAMMER2_CHAIN_INITIAL) {
2844 base = NULL;
2846 switch(parent->bref.type) {
2847 case HAMMER2_BREF_TYPE_INODE:
2848 count = HAMMER2_SET_COUNT;
2849 break;
2850 case HAMMER2_BREF_TYPE_INDIRECT:
2851 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2852 count = parent->bytes / sizeof(hammer2_blockref_t);
2853 break;
2854 case HAMMER2_BREF_TYPE_VOLUME:
2855 count = HAMMER2_SET_COUNT;
2856 break;
2857 case HAMMER2_BREF_TYPE_FREEMAP:
2858 count = HAMMER2_SET_COUNT;
2859 break;
2860 default:
2861 panic("hammer2_chain_create_indirect: "
2862 "unrecognized blockref type: %d",
2863 parent->bref.type);
2864 count = 0;
2865 break;
2867 } else {
2868 switch(parent->bref.type) {
2869 case HAMMER2_BREF_TYPE_INODE:
2870 base = &parent->data->ipdata.u.blockset.blockref[0];
2871 count = HAMMER2_SET_COUNT;
2872 break;
2873 case HAMMER2_BREF_TYPE_INDIRECT:
2874 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
2875 base = &parent->data->npdata[0];
2876 count = parent->bytes / sizeof(hammer2_blockref_t);
2877 break;
2878 case HAMMER2_BREF_TYPE_VOLUME:
2879 base = &hmp->voldata.sroot_blockset.blockref[0];
2880 count = HAMMER2_SET_COUNT;
2881 break;
2882 case HAMMER2_BREF_TYPE_FREEMAP:
2883 base = &hmp->voldata.freemap_blockset.blockref[0];
2884 count = HAMMER2_SET_COUNT;
2885 break;
2886 default:
2887 panic("hammer2_chain_create_indirect: "
2888 "unrecognized blockref type: %d",
2889 parent->bref.type);
2890 count = 0;
2891 break;
2896 * dummy used in later chain allocation (no longer used for lookups).
2898 bzero(&dummy, sizeof(dummy));
2899 dummy.delete_tid = HAMMER2_MAX_TID;
2902 * When creating an indirect block for a freemap node or leaf
2903 * the key/keybits must be fitted to static radix levels because
2904 * particular radix levels use particular reserved blocks in the
2905 * related zone.
2907 * This routine calculates the key/radix of the indirect block
2908 * we need to create, and whether it is on the high-side or the
2909 * low-side.
2911 if (for_type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
2912 for_type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
2913 keybits = hammer2_chain_indkey_freemap(parent, &key, keybits,
2914 base, count);
2915 } else {
2916 keybits = hammer2_chain_indkey_normal(parent, &key, keybits,
2917 base, count);
2921 * Normalize the key for the radix being represented, keeping the
2922 * high bits and throwing away the low bits.
2924 key &= ~(((hammer2_key_t)1 << keybits) - 1);
2927 * How big should our new indirect block be? It has to be at least
2928 * as large as its parent.
2930 if (parent->bref.type == HAMMER2_BREF_TYPE_INODE)
2931 nbytes = HAMMER2_IND_BYTES_MIN;
2932 else
2933 nbytes = HAMMER2_IND_BYTES_MAX;
2934 if (nbytes < count * sizeof(hammer2_blockref_t))
2935 nbytes = count * sizeof(hammer2_blockref_t);
2938 * Ok, create our new indirect block
2940 if (for_type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
2941 for_type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
2942 dummy.bref.type = HAMMER2_BREF_TYPE_FREEMAP_NODE;
2943 } else {
2944 dummy.bref.type = HAMMER2_BREF_TYPE_INDIRECT;
2946 dummy.bref.key = key;
2947 dummy.bref.keybits = keybits;
2948 dummy.bref.data_off = hammer2_getradix(nbytes);
2949 dummy.bref.methods = parent->bref.methods;
2951 ichain = hammer2_chain_alloc(hmp, trans, &dummy.bref);
2952 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_INITIAL);
2953 hammer2_chain_core_alloc(ichain, NULL);
2954 icore = ichain->core;
2955 hammer2_chain_lock(ichain, HAMMER2_RESOLVE_MAYBE);
2956 hammer2_chain_drop(ichain); /* excess ref from alloc */
2959 * We have to mark it modified to allocate its block, but use
2960 * OPTDATA to allow it to remain in the INITIAL state. Otherwise
2961 * it won't be acted upon by the flush code.
2963 * XXX leave the node unmodified, depend on the SUBMODIFIED
2964 * flush to assign and modify parent blocks.
2966 hammer2_chain_modify(trans, &ichain, HAMMER2_MODIFY_OPTDATA);
2969 * Iterate the original parent and move the matching brefs into
2970 * the new indirect block.
2972 * At the same time locate an empty slot (or what will become an
2973 * empty slot) and assign the new indirect block to that slot.
2975 * XXX handle flushes.
2977 spin_lock(&above->cst.spin);
2978 for (i = 0; i < count; ++i) {
2980 * For keying purposes access the bref from the media or
2981 * from our in-memory cache. In cases where the in-memory
2982 * cache overrides the media the keyrefs will be the same
2983 * anyway so we can avoid checking the cache when the media
2984 * has a key.
2986 child = hammer2_chain_find_locked(parent, i);
2987 if (child) {
2988 if (child->flags & HAMMER2_CHAIN_DELETED) {
2989 if (ichain->index < 0)
2990 ichain->index = i;
2991 continue;
2993 bref = &child->bref;
2994 } else if (base && base[i].type) {
2995 bref = &base[i];
2996 } else {
2997 if (ichain->index < 0)
2998 ichain->index = i;
2999 continue;
3003 * Skip keys that are not within the key/radix of the new
3004 * indirect block. They stay in the parent.
3006 if ((~(((hammer2_key_t)1 << keybits) - 1) &
3007 (key ^ bref->key)) != 0) {
3008 continue;
3012 * This element is being moved from the parent, its slot
3013 * is available for our new indirect block.
3015 if (ichain->index < 0)
3016 ichain->index = i;
3019 * Load the new indirect block by acquiring or allocating
3020 * the related chain entries, then move them to the new
3021 * parent (ichain) by deleting them from their old location
3022 * and inserting a duplicate of the chain and any modified
3023 * sub-chain in the new location.
3025 * We must set MOVED in the chain being duplicated and
3026 * SUBMODIFIED in the parent(s) so the flush code knows
3027 * what is going on. The latter is done after the loop.
3029 * WARNING! above->cst.spin must be held when parent is
3030 * modified, even though we own the full blown lock,
3031 * to deal with setsubmod and rename races.
3032 * (XXX remove this req).
3034 spin_unlock(&above->cst.spin);
3035 chain = hammer2_chain_get(parent, i, HAMMER2_LOOKUP_NODATA);
3036 hammer2_chain_delete(trans, chain);
3037 hammer2_chain_duplicate(trans, ichain, i, &chain, NULL);
3038 hammer2_chain_unlock(chain);
3039 KKASSERT(parent->refs > 0);
3040 chain = NULL;
3041 spin_lock(&above->cst.spin);
3043 spin_unlock(&above->cst.spin);
3046 * Insert the new indirect block into the parent now that we've
3047 * cleared out some entries in the parent. We calculated a good
3048 * insertion index in the loop above (ichain->index).
3050 * We don't have to set MOVED here because we mark ichain modified
3051 * down below (so the normal modified -> flush -> set-moved sequence
3052 * applies).
3054 * The insertion shouldn't race as this is a completely new block
3055 * and the parent is locked.
3057 if (ichain->index < 0)
3058 kprintf("indirect parent %p count %d key %016jx/%d\n",
3059 parent, count, (intmax_t)key, keybits);
3060 KKASSERT(ichain->index >= 0);
3061 KKASSERT((ichain->flags & HAMMER2_CHAIN_ONRBTREE) == 0);
3062 spin_lock(&above->cst.spin);
3063 if (RB_INSERT(hammer2_chain_tree, &above->rbtree, ichain))
3064 panic("hammer2_chain_create_indirect: ichain insertion");
3065 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_ONRBTREE);
3066 ichain->above = above;
3067 spin_unlock(&above->cst.spin);
3070 * Mark the new indirect block modified after insertion, which
3071 * will propagate up through parent all the way to the root and
3072 * also allocate the physical block in ichain for our caller,
3073 * and assign ichain->data to a pre-zero'd space (because there
3074 * is no prior data to copy into it).
3076 * We have to set SUBMODIFIED in ichain's flags manually so the
3077 * flusher knows it has to recurse through it to get to all of
3078 * our moved blocks, then call setsubmod() to set the bit
3079 * recursively.
3081 /*hammer2_chain_modify(trans, &ichain, HAMMER2_MODIFY_OPTDATA);*/
3082 atomic_set_int(&ichain->flags, HAMMER2_CHAIN_SUBMODIFIED);
3083 hammer2_chain_setsubmod(trans, ichain);
3086 * Figure out what to return.
3088 if (~(((hammer2_key_t)1 << keybits) - 1) &
3089 (create_key ^ key)) {
3091 * Key being created is outside the key range,
3092 * return the original parent.
3094 hammer2_chain_unlock(ichain);
3095 } else {
3097 * Otherwise it is in the range, return the new parent.
3098 * (leave both the new and old parent locked).
3100 parent = ichain;
3103 return(parent);
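/*
 * Editor's note: a self-contained userland sketch of the keyspace masking
 * performed above when deciding which elements move into the new indirect
 * block and whether the requested key falls inside it.  Assumes
 * keybits < 64; all names below are hypothetical.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static uint64_t
example_keyspace_base(uint64_t key, int keybits)
{
	/* keep the high bits, throw away the low bits */
	return (key & ~(((uint64_t)1 << keybits) - 1));
}

static int
example_keyspace_contains(uint64_t base, int keybits, uint64_t key)
{
	/* a high-bit difference means the key lies outside the keyspace */
	return ((~(((uint64_t)1 << keybits) - 1) & (base ^ key)) == 0);
}

static void
example_keyspace(void)
{
	uint64_t base = example_keyspace_base(0x12345, 10);	/* 0x12000 */

	assert(base == 0x12000);
	assert(example_keyspace_contains(base, 10, 0x123FF));
	assert(!example_keyspace_contains(base, 10, 0x12400));
}
#endif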
3107 * Calculate the keybits and highside/lowside of the freemap node the
3108 * caller is creating.
3110 * This routine will specify the next higher-level freemap key/radix
3111 * representing the lowest-ordered set. By doing so, eventually all
3112 * low-ordered sets will be moved one level down.
3114 * We have to be careful here because the freemap reserves a limited
3115 * number of blocks for a limited number of levels. So we can't just
3116 * push indiscriminately.
3119 hammer2_chain_indkey_freemap(hammer2_chain_t *parent, hammer2_key_t *keyp,
3120 int keybits, hammer2_blockref_t *base, int count)
3122 hammer2_chain_core_t *above;
3123 hammer2_chain_t *child;
3124 hammer2_blockref_t *bref;
3125 hammer2_key_t key;
3126 int locount;
3127 int hicount;
3128 int i;
3130 key = *keyp;
3131 above = parent->core;
3132 locount = 0;
3133 hicount = 0;
3134 keybits = 64;
3137 * Calculate the range of keys in the array being careful to skip
3138 * slots which are overridden with a deletion.
3140 spin_lock(&above->cst.spin);
3141 for (i = 0; i < count; ++i) {
3142 child = hammer2_chain_find_locked(parent, i);
3143 if (child) {
3144 if (child->flags & HAMMER2_CHAIN_DELETED)
3145 continue;
3146 bref = &child->bref;
3147 } else if (base && base[i].type) {
3148 bref = &base[i];
3149 } else {
3150 continue;
3153 if (keybits > bref->keybits) {
3154 key = bref->key;
3155 keybits = bref->keybits;
3156 } else if (keybits == bref->keybits && bref->key < key) {
3157 key = bref->key;
3160 spin_unlock(&above->cst.spin);
3163 * Return the keybits for a higher-level FREEMAP_NODE covering
3164 * this node.
3166 switch(keybits) {
3167 case HAMMER2_FREEMAP_LEVEL0_RADIX:
3168 keybits = HAMMER2_FREEMAP_LEVEL1_RADIX;
3169 break;
3170 case HAMMER2_FREEMAP_LEVEL1_RADIX:
3171 keybits = HAMMER2_FREEMAP_LEVEL2_RADIX;
3172 break;
3173 case HAMMER2_FREEMAP_LEVEL2_RADIX:
3174 keybits = HAMMER2_FREEMAP_LEVEL3_RADIX;
3175 break;
3176 case HAMMER2_FREEMAP_LEVEL3_RADIX:
3177 keybits = HAMMER2_FREEMAP_LEVEL4_RADIX;
3178 break;
3179 case HAMMER2_FREEMAP_LEVEL4_RADIX:
3180 panic("hammer2_chain_indkey_freemap: level too high");
3181 break;
3182 default:
3183 panic("hammer2_chain_indkey_freemap: bad radix");
3184 break;
3186 *keyp = key;
3188 return (keybits);
3192 * Calculate the keybits and highside/lowside of the indirect block the
3193 * caller is creating.
3195 static int
3196 hammer2_chain_indkey_normal(hammer2_chain_t *parent, hammer2_key_t *keyp,
3197 int keybits, hammer2_blockref_t *base, int count)
3199 hammer2_chain_core_t *above;
3200 hammer2_chain_t *child;
3201 hammer2_blockref_t *bref;
3202 hammer2_key_t key;
3203 int nkeybits;
3204 int locount;
3205 int hicount;
3206 int i;
3208 key = *keyp;
3209 above = parent->core;
3210 locount = 0;
3211 hicount = 0;
3214 * Calculate the range of keys in the array being careful to skip
3215 * slots which are overridden with a deletion. Once the scan
3216 * completes we will cut the key range in half and shift half the
3217 * range into the new indirect block.
3219 spin_lock(&above->cst.spin);
3220 for (i = 0; i < count; ++i) {
3221 child = hammer2_chain_find_locked(parent, i);
3222 if (child) {
3223 if (child->flags & HAMMER2_CHAIN_DELETED)
3224 continue;
3225 bref = &child->bref;
3226 } else if (base && base[i].type) {
3227 bref = &base[i];
3228 } else {
3229 continue;
3233 * Expand our calculated key range (key, keybits) to fit
3234 * the scanned key. nkeybits represents the full range
3235 * that we will later cut in half (two halves @ nkeybits - 1).
3237 nkeybits = keybits;
3238 if (nkeybits < bref->keybits) {
3239 if (bref->keybits > 64) {
3240 kprintf("bad bref index %d chain %p bref %p\n",
3241 i, child, bref);
3242 Debugger("fubar");
3244 nkeybits = bref->keybits;
3246 while (nkeybits < 64 &&
3247 (~(((hammer2_key_t)1 << nkeybits) - 1) &
3248 (key ^ bref->key)) != 0) {
3249 ++nkeybits;
3253 * If the new key range is larger we have to determine
3254 * which side of the new key range the existing keys fall
3255 * under by checking the high bit, then collapsing the
3256 * locount into the hicount or vice versa.
3258 if (keybits != nkeybits) {
3259 if (((hammer2_key_t)1 << (nkeybits - 1)) & key) {
3260 hicount += locount;
3261 locount = 0;
3262 } else {
3263 locount += hicount;
3264 hicount = 0;
3266 keybits = nkeybits;
3270 * The newly scanned key will be in the lower half or the
3271 * higher half of the (new) key range.
3273 if (((hammer2_key_t)1 << (nkeybits - 1)) & bref->key)
3274 ++hicount;
3275 else
3276 ++locount;
3278 spin_unlock(&above->cst.spin);
3279 bref = NULL; /* now invalid (safety) */
3282 * Adjust keybits to represent half of the full range calculated
3283 * above (radix 63 max)
3285 --keybits;
3288 * Select whichever half contains the most elements. Theoretically
3289 * we can select either side as long as it contains at least one
3290 * element (in order to ensure that a free slot is present to hold
3291 * the indirect block).
3293 if (hammer2_indirect_optimize) {
3295 * Insert the node for the side with the fewest keys; this will arrange
3296 * the first few blocks of a large file or the first few
3297 * inodes in a directory with fewer indirect blocks when
3298 * created linearly.
3300 if (hicount < locount && hicount != 0)
3301 key |= (hammer2_key_t)1 << keybits;
3302 else
3303 key &= ~(hammer2_key_t)1 << keybits;
3304 } else {
3306 * Insert the node for the side with the most keys, best for heavily
3307 * fragmented files.
3309 if (hicount > locount)
3310 key |= (hammer2_key_t)1 << keybits;
3311 else
3312 key &= ~(hammer2_key_t)1 << keybits;
3314 *keyp = key;
3316 return (keybits);
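/*
 * Editor's illustrative sketch: the final half-selection step of the key
 * calculation above, reduced to userland.  Once a covering radix is known,
 * the keyspace is cut in half at (keybits - 1) and a half is chosen by
 * testing that bit in each element (here, always the more heavily
 * populated half).  All names below are hypothetical.
 */
#if 0
#include <stdint.h>

static int
example_choose_half(const uint64_t *keys, int nkeys,
		    uint64_t *keyp, int keybits)
{
	uint64_t half = (uint64_t)1 << (keybits - 1);
	int hicount = 0;
	int locount = 0;
	int i;

	for (i = 0; i < nkeys; ++i) {
		if (keys[i] & half)
			++hicount;
		else
			++locount;
	}
	if (hicount > locount)
		*keyp |= half;		/* represent the high half */
	else
		*keyp &= ~half;		/* represent the low half */
	return (keybits - 1);
}
#endif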
3320 * Sets CHAIN_DELETED and CHAIN_MOVED in the chain being deleted and
3321 * set chain->delete_tid.
3323 * This function does NOT generate a modification to the parent. It
3324 * would be nearly impossible to figure out which parent to modify anyway.
3325 * Such modifications are handled by the flush code and are properly merged
3326 * using the flush synchronization point.
3328 * The find/get code will properly overload the RBTREE check on top of
3329 * the bref check to detect deleted entries.
3331 * This function is NOT recursive. Any entity already pushed into the
3332 * chain (such as an inode) may still need visibility into its contents,
3333 * as well as the ability to read and modify the contents. For example,
3334 * for an unlinked file which is still open.
3336 * NOTE: This function does NOT set chain->modify_tid, allowing future
3337 * code to distinguish between live and deleted chains by testing
3338 * sync_tid.
3340 * NOTE: Deletions normally do not occur in the middle of a duplication
3341 * chain but we use a trick for hardlink migration that refactors
3342 * the originating inode without deleting it, so we make no assumptions
3343 * here.
3345 void
3346 hammer2_chain_delete(hammer2_trans_t *trans, hammer2_chain_t *chain)
3348 KKASSERT(ccms_thread_lock_owned(&chain->core->cst));
3351 * Nothing to do if already marked.
3353 if (chain->flags & HAMMER2_CHAIN_DELETED)
3354 return;
3357 * We must set MOVED along with DELETED for the flush code to
3358 * recognize the operation and properly disconnect the chain
3359 * in-memory.
3361 * The setting of DELETED causes finds, lookups, and _next iterations
3362 * to no longer recognize the chain. RB_SCAN()s will still have
3363 * visibility (needed for flush serialization points).
3365 * We need the spinlock on the core whose RBTREE contains the chain
3366 * to protect against races.
3368 spin_lock(&chain->above->cst.spin);
3369 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DELETED);
3370 if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
3371 hammer2_chain_ref(chain);
3372 atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
3374 chain->delete_tid = trans->sync_tid;
3375 spin_unlock(&chain->above->cst.spin);
3376 hammer2_chain_setsubmod(trans, chain);
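/*
 * Editor's note: the MOVED handling above follows a convention used
 * throughout this file: the first transition to MOVED takes an extra
 * reference on the chain, paired with the later clearing of the flag.
 * A hypothetical helper expressing the idiom:
 */
#if 0
static void
example_set_moved(hammer2_chain_t *chain)
{
	if ((chain->flags & HAMMER2_CHAIN_MOVED) == 0) {
		hammer2_chain_ref(chain);	/* ref held by MOVED */
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_MOVED);
	}
}
#endif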
3379 void
3380 hammer2_chain_wait(hammer2_chain_t *chain)
3382 tsleep(chain, 0, "chnflw", 1);
3385 static
3386 void
3387 adjreadcounter(hammer2_blockref_t *bref, size_t bytes)
3389 long *counterp;
3391 switch(bref->type) {
3392 case HAMMER2_BREF_TYPE_DATA:
3393 counterp = &hammer2_iod_file_read;
3394 break;
3395 case HAMMER2_BREF_TYPE_INODE:
3396 counterp = &hammer2_iod_meta_read;
3397 break;
3398 case HAMMER2_BREF_TYPE_INDIRECT:
3399 counterp = &hammer2_iod_indr_read;
3400 break;
3401 case HAMMER2_BREF_TYPE_FREEMAP_NODE:
3402 case HAMMER2_BREF_TYPE_FREEMAP_LEAF:
3403 counterp = &hammer2_iod_fmap_read;
3404 break;
3405 default:
3406 counterp = &hammer2_iod_volu_read;
3407 break;
3409 *counterp += bytes;