HAMMER 59A/Many: Mirroring related work (and one bug fix).
[dragonfly.git] / sys/vfs/hammer/hammer_btree.c
blob b097493075c44a687ebfc8fe42a086610d366132
1 /*
2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
34 * $DragonFly: src/sys/vfs/hammer/hammer_btree.c,v 1.58 2008/06/26 04:06:22 dillon Exp $
38 * HAMMER B-Tree index
40 * HAMMER implements a modified B+Tree. In documentation this will
41 * simply be referred to as the HAMMER B-Tree. Basically a HAMMER B-Tree
42 * looks like a B+Tree (a B-Tree which stores its records only at the leaves
43 * of the tree), but adds two additional boundary elements which describe
44 * the left-most and right-most element a node is able to represent. In
45 * other words, we have boundary elements at the two ends of a B-Tree node
46 * instead of sub-tree pointers.
48 * A B-Tree internal node looks like this:
50 * B N N N N N N B <-- boundary and internal elements
51 * S S S S S S S <-- subtree pointers
53 * A B-Tree leaf node basically looks like this:
55 * L L L L L L L L <-- leaf elements
57 * The radix for an internal node is 1 less than a leaf but we get a
58 * number of significant benefits for our troubles.
60 * The big benefit to using a B-Tree containing boundary information
61 * is that it is possible to cache pointers into the middle of the tree
62 * and not have to start searches, insertions, OR deletions at the root
63 * node. In particular, searches are able to progress in a definitive
64 * direction from any point in the tree without revisiting nodes. This
65 * greatly improves the efficiency of many operations, most especially
66 * record appends.
68 * B-Trees also make the stacking of trees fairly straightforward.
70 * INSERTIONS: A search performed with the intention of doing
71 * an insert will guarantee that the terminal leaf node is not full by
72 * splitting full nodes. Splits occur top-down during the dive down the
73 * B-Tree.
75 * DELETIONS: A deletion makes no attempt to proactively balance the
76 * tree and will recursively remove nodes that become empty. If a
77 * deadlock occurs a deletion may not be able to remove an empty leaf.
78 * Deletions never allow internal nodes to become empty (that would blow
79 * up the boundaries).
81 #include "hammer.h"
82 #include <sys/buf.h>
83 #include <sys/buf2.h>
85 static int btree_search(hammer_cursor_t cursor, int flags);
86 static int btree_split_internal(hammer_cursor_t cursor);
87 static int btree_split_leaf(hammer_cursor_t cursor);
88 static int btree_remove(hammer_cursor_t cursor);
89 static int btree_node_is_full(hammer_node_ondisk_t node);
90 static void hammer_make_separator(hammer_base_elm_t key1,
91 hammer_base_elm_t key2, hammer_base_elm_t dest);
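/*
 * Hypothetical helper (illustration only, compiled out; not part of the
 * original file): it restates the boundary scheme described in the header
 * comment, namely that subtree i of an internal node is bracketed by
 * elms[i].base (inclusive) and elms[i+1].base (exclusive), using the same
 * hammer_btree_cmp() comparisons the search code performs below.
 */
#if 0
static __inline int
example_key_in_subtree(hammer_node_ondisk_t node, int i, hammer_base_elm_t key)
{
	return (hammer_btree_cmp(key, &node->elms[i].base) >= 0 &&
		hammer_btree_cmp(key, &node->elms[i + 1].base) < 0);
}
#endif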
94 * Iterate records after a search. The cursor is iterated forwards past
95 * the current record until a record matching the key-range requirements
96 * is found. ENOENT is returned if the iteration goes past the ending
97 * key.
99 * The iteration is inclusive of key_beg and can be inclusive or exclusive
100 * of key_end depending on whether HAMMER_CURSOR_END_INCLUSIVE is set.
102 * When doing an as-of search (cursor->asof != 0), key_beg.create_tid
103 * may be modified by B-Tree functions.
105 * cursor->key_beg may or may not be modified by this function during
106 * the iteration. XXX future - in case of an inverted lock we may have
107 * to reinitiate the lookup and set key_beg to properly pick up where we
108 * left off.
110 * NOTE! EDEADLK *CANNOT* be returned by this procedure.
113 hammer_btree_iterate(hammer_cursor_t cursor)
115 hammer_node_ondisk_t node;
116 hammer_btree_elm_t elm;
117 int error;
118 int r;
119 int s;
122 * Skip past the current record
124 node = cursor->node->ondisk;
125 if (node == NULL)
126 return(ENOENT);
127 if (cursor->index < node->count &&
128 (cursor->flags & HAMMER_CURSOR_ATEDISK)) {
129 ++cursor->index;
133 * Loop until an element is found or we are done.
135 for (;;) {
137 * We iterate up the tree and then index over one element
138 * while we are at the last element in the current node.
140 * If we are at the root of the filesystem, cursor_up
141 * returns ENOENT.
143 * XXX this could be optimized by storing the information in
144 * the parent reference.
146 * XXX we can lose the node lock temporarily, this could mess
147 * up our scan.
149 ++hammer_stats_btree_iterations;
150 if (cursor->index == node->count) {
151 if (hammer_debug_btree) {
152 kprintf("BRACKETU %016llx[%d] -> %016llx[%d] (td=%p)\n",
153 cursor->node->node_offset,
154 cursor->index,
155 (cursor->parent ? cursor->parent->node_offset : -1),
156 cursor->parent_index,
157 curthread);
159 KKASSERT(cursor->parent == NULL || cursor->parent->ondisk->elms[cursor->parent_index].internal.subtree_offset == cursor->node->node_offset);
160 error = hammer_cursor_up(cursor);
161 if (error)
162 break;
163 /* reload stale pointer */
164 node = cursor->node->ondisk;
165 KKASSERT(cursor->index != node->count);
168 * If we are reblocking we want to return internal
169 * nodes.
171 if (cursor->flags & HAMMER_CURSOR_REBLOCKING) {
172 cursor->flags |= HAMMER_CURSOR_ATEDISK;
173 return(0);
175 ++cursor->index;
176 continue;
180 * Check internal or leaf element. Determine if the record
181 * at the cursor has gone beyond the end of our range.
183 * We recurse down through internal nodes.
185 if (node->type == HAMMER_BTREE_TYPE_INTERNAL) {
186 elm = &node->elms[cursor->index];
188 r = hammer_btree_cmp(&cursor->key_end, &elm[0].base);
189 s = hammer_btree_cmp(&cursor->key_beg, &elm[1].base);
190 if (hammer_debug_btree) {
191 kprintf("BRACKETL %016llx[%d] %016llx %02x %016llx lo=%02x %d (td=%p)\n",
192 cursor->node->node_offset,
193 cursor->index,
194 elm[0].internal.base.obj_id,
195 elm[0].internal.base.rec_type,
196 elm[0].internal.base.key,
197 elm[0].internal.base.localization,
199 curthread
201 kprintf("BRACKETR %016llx[%d] %016llx %02x %016llx lo=%02x %d\n",
202 cursor->node->node_offset,
203 cursor->index + 1,
204 elm[1].internal.base.obj_id,
205 elm[1].internal.base.rec_type,
206 elm[1].internal.base.key,
207 elm[1].internal.base.localization,
212 if (r < 0) {
213 error = ENOENT;
214 break;
216 if (r == 0 && (cursor->flags &
217 HAMMER_CURSOR_END_INCLUSIVE) == 0) {
218 error = ENOENT;
219 break;
221 KKASSERT(s <= 0);
224 * Better not be zero
226 KKASSERT(elm->internal.subtree_offset != 0);
229 * If running the mirror filter see if we can skip
230 * the entire sub-tree.
232 if (cursor->flags & HAMMER_CURSOR_MIRROR_FILTERED) {
233 if (elm->internal.mirror_tid <
234 cursor->mirror_tid) {
235 ++cursor->index;
236 continue;
240 error = hammer_cursor_down(cursor);
241 if (error)
242 break;
243 KKASSERT(cursor->index == 0);
244 /* reload stale pointer */
245 node = cursor->node->ondisk;
246 continue;
247 } else {
248 elm = &node->elms[cursor->index];
249 r = hammer_btree_cmp(&cursor->key_end, &elm->base);
250 if (hammer_debug_btree) {
251 kprintf("ELEMENT %016llx:%d %c %016llx %02x %016llx lo=%02x %d\n",
252 cursor->node->node_offset,
253 cursor->index,
254 (elm[0].leaf.base.btype ?
255 elm[0].leaf.base.btype : '?'),
256 elm[0].leaf.base.obj_id,
257 elm[0].leaf.base.rec_type,
258 elm[0].leaf.base.key,
259 elm[0].leaf.base.localization,
263 if (r < 0) {
264 error = ENOENT;
265 break;
269 * We support both end-inclusive and
270 * end-exclusive searches.
272 if (r == 0 &&
273 (cursor->flags & HAMMER_CURSOR_END_INCLUSIVE) == 0) {
274 error = ENOENT;
275 break;
278 switch(elm->leaf.base.btype) {
279 case HAMMER_BTREE_TYPE_RECORD:
280 if ((cursor->flags & HAMMER_CURSOR_ASOF) &&
281 hammer_btree_chkts(cursor->asof, &elm->base)) {
282 ++cursor->index;
283 continue;
285 break;
286 default:
287 error = EINVAL;
288 break;
290 if (error)
291 break;
294 * node pointer invalid after loop
298 * Return entry
300 if (hammer_debug_btree) {
301 int i = cursor->index;
302 hammer_btree_elm_t elm = &cursor->node->ondisk->elms[i];
303 kprintf("ITERATE %p:%d %016llx %02x %016llx lo=%02x\n",
304 cursor->node, i,
305 elm->internal.base.obj_id,
306 elm->internal.base.rec_type,
307 elm->internal.base.key,
308 elm->internal.base.localization
311 return(0);
313 return(error);
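/*
 * Hypothetical usage sketch (illustration only, compiled out; not part of
 * the original file): a ranged forward scan typically positions the cursor
 * with hammer_btree_first() and then pulls successive elements with
 * hammer_btree_iterate() until ENOENT, setting ATEDISK so each call skips
 * the element just consumed.  key_beg/key_end are assumed to have been
 * initialized by the caller.
 */
#if 0
static int
example_scan_forward(hammer_cursor_t cursor)
{
	int error;

	error = hammer_btree_first(cursor);
	while (error == 0) {
		/* process the element at (cursor->node, cursor->index) */
		cursor->flags |= HAMMER_CURSOR_ATEDISK;
		error = hammer_btree_iterate(cursor);
	}
	return ((error == ENOENT) ? 0 : error);
}
#endif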
317 * Iterate in the reverse direction. This is used by the pruning code to
318 * avoid overlapping records.
321 hammer_btree_iterate_reverse(hammer_cursor_t cursor)
323 hammer_node_ondisk_t node;
324 hammer_btree_elm_t elm;
325 int error;
326 int r;
327 int s;
330 * Skip past the current record. For various reasons the cursor
331 * may end up set to -1 or set to point at the end of the current
332 * node. These cases must be addressed.
334 node = cursor->node->ondisk;
335 if (node == NULL)
336 return(ENOENT);
337 if (cursor->index != -1 &&
338 (cursor->flags & HAMMER_CURSOR_ATEDISK)) {
339 --cursor->index;
341 if (cursor->index == cursor->node->ondisk->count)
342 --cursor->index;
345 * Loop until an element is found or we are done.
347 for (;;) {
349 * We iterate up the tree and then index over one element
350 * while we are at the last element in the current node.
352 if (cursor->index == -1) {
353 error = hammer_cursor_up(cursor);
354 if (error) {
355 cursor->index = 0; /* sanity */
356 break;
358 /* reload stale pointer */
359 node = cursor->node->ondisk;
360 KKASSERT(cursor->index != node->count);
361 --cursor->index;
362 continue;
366 * Check internal or leaf element. Determine if the record
367 * at the cursor has gone beyond the end of our range.
369 * We recurse down through internal nodes.
371 KKASSERT(cursor->index != node->count);
372 if (node->type == HAMMER_BTREE_TYPE_INTERNAL) {
373 elm = &node->elms[cursor->index];
374 r = hammer_btree_cmp(&cursor->key_end, &elm[0].base);
375 s = hammer_btree_cmp(&cursor->key_beg, &elm[1].base);
376 if (hammer_debug_btree) {
377 kprintf("BRACKETL %016llx[%d] %016llx %02x %016llx lo=%02x %d\n",
378 cursor->node->node_offset,
379 cursor->index,
380 elm[0].internal.base.obj_id,
381 elm[0].internal.base.rec_type,
382 elm[0].internal.base.key,
383 elm[0].internal.base.localization,
386 kprintf("BRACKETR %016llx[%d] %016llx %02x %016llx lo=%02x %d\n",
387 cursor->node->node_offset,
388 cursor->index + 1,
389 elm[1].internal.base.obj_id,
390 elm[1].internal.base.rec_type,
391 elm[1].internal.base.key,
392 elm[1].internal.base.localization,
397 if (s >= 0) {
398 error = ENOENT;
399 break;
401 KKASSERT(r >= 0);
404 * Better not be zero
406 KKASSERT(elm->internal.subtree_offset != 0);
408 error = hammer_cursor_down(cursor);
409 if (error)
410 break;
411 KKASSERT(cursor->index == 0);
412 /* reload stale pointer */
413 node = cursor->node->ondisk;
415 /* this can assign -1 if the leaf was empty */
416 cursor->index = node->count - 1;
417 continue;
418 } else {
419 elm = &node->elms[cursor->index];
420 s = hammer_btree_cmp(&cursor->key_beg, &elm->base);
421 if (hammer_debug_btree) {
422 kprintf("ELEMENT %016llx:%d %c %016llx %02x %016llx lo=%02x %d\n",
423 cursor->node->node_offset,
424 cursor->index,
425 (elm[0].leaf.base.btype ?
426 elm[0].leaf.base.btype : '?'),
427 elm[0].leaf.base.obj_id,
428 elm[0].leaf.base.rec_type,
429 elm[0].leaf.base.key,
430 elm[0].leaf.base.localization,
434 if (s > 0) {
435 error = ENOENT;
436 break;
439 switch(elm->leaf.base.btype) {
440 case HAMMER_BTREE_TYPE_RECORD:
441 if ((cursor->flags & HAMMER_CURSOR_ASOF) &&
442 hammer_btree_chkts(cursor->asof, &elm->base)) {
443 --cursor->index;
444 continue;
446 break;
447 default:
448 error = EINVAL;
449 break;
451 if (error)
452 break;
455 * node pointer invalid after loop
459 * Return entry
461 if (hammer_debug_btree) {
462 int i = cursor->index;
463 hammer_btree_elm_t elm = &cursor->node->ondisk->elms[i];
464 kprintf("ITERATE %p:%d %016llx %02x %016llx lo=%02x\n",
465 cursor->node, i,
466 elm->internal.base.obj_id,
467 elm->internal.base.rec_type,
468 elm->internal.base.key,
469 elm->internal.base.localization
472 return(0);
474 return(error);
478 * Lookup cursor->key_beg. 0 is returned on success, ENOENT if the entry
479 * could not be found, EDEADLK if inserting and a retry is needed, and a
480 * fatal error otherwise. When retrying, the caller must terminate the
481 * cursor and reinitialize it. EDEADLK cannot be returned if not inserting.
483 * The cursor is suitably positioned for a deletion on success, and suitably
484 * positioned for an insertion on ENOENT if HAMMER_CURSOR_INSERT was
485 * specified.
487 * The cursor may begin anywhere; the search will traverse the tree in
488 * either direction to locate the requested element.
490 * Most of the logic implementing historical searches is handled here. We
491 * do an initial lookup with create_tid set to the asof TID. Due to the
492 * way records are laid out, a backwards iteration may be required if
493 * ENOENT is returned to locate the historical record. Here's the
494 * problem:
496 * create_tid: 10 15 20
497 * LEAF1 LEAF2
498 * records: (11) (18)
500 * Let's say we want to do a lookup AS-OF timestamp 17. We will traverse
501 * LEAF2 but the only record in LEAF2 has a create_tid of 18, which is
502 * not visible and thus causes ENOENT to be returned. We really need
503 * to check record 11 in LEAF1. If it also fails then the search fails
504 * (e.g. it might represent the range 11-16 and thus still not match our
505 * AS-OF timestamp of 17). Note that LEAF1 could be empty, requiring
506 * further iterations.
508 * If this case occurs btree_search() will set HAMMER_CURSOR_CREATE_CHECK
509 * and the cursor->create_check TID if an iteration might be needed.
510 * In the above example create_check would be set to 14.
513 hammer_btree_lookup(hammer_cursor_t cursor)
515 int error;
517 ++hammer_stats_btree_lookups;
518 if (cursor->flags & HAMMER_CURSOR_ASOF) {
519 KKASSERT((cursor->flags & HAMMER_CURSOR_INSERT) == 0);
520 cursor->key_beg.create_tid = cursor->asof;
521 for (;;) {
522 cursor->flags &= ~HAMMER_CURSOR_CREATE_CHECK;
523 error = btree_search(cursor, 0);
524 if (error != ENOENT ||
525 (cursor->flags & HAMMER_CURSOR_CREATE_CHECK) == 0) {
527 * Stop if no error.
528 * Stop if error other then ENOENT.
529 * Stop if ENOENT and not special case.
531 break;
533 if (hammer_debug_btree) {
534 kprintf("CREATE_CHECK %016llx\n",
535 cursor->create_check);
537 cursor->key_beg.create_tid = cursor->create_check;
538 /* loop */
540 } else {
541 error = btree_search(cursor, 0);
543 if (error == 0)
544 error = hammer_btree_extract(cursor, cursor->flags);
545 return(error);
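/*
 * Hypothetical caller sketch (illustration only, compiled out; not part of
 * the original file): an as-of (historical) lookup sets HAMMER_CURSOR_ASOF
 * and cursor->asof before calling hammer_btree_lookup(), which then runs
 * the create_check retry loop above.  ASOF and INSERT are mutually
 * exclusive, as asserted above.
 */
#if 0
static int
example_asof_lookup(hammer_cursor_t cursor, hammer_tid_t asof_tid)
{
	KKASSERT((cursor->flags & HAMMER_CURSOR_INSERT) == 0);
	cursor->asof = asof_tid;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	return (hammer_btree_lookup(cursor));
}
#endif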
549 * Execute the logic required to start an iteration. The first record
550 * located within the specified range is returned and iteration control
551 * flags are adjusted for successive hammer_btree_iterate() calls.
554 hammer_btree_first(hammer_cursor_t cursor)
556 int error;
558 error = hammer_btree_lookup(cursor);
559 if (error == ENOENT) {
560 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
561 error = hammer_btree_iterate(cursor);
563 cursor->flags |= HAMMER_CURSOR_ATEDISK;
564 return(error);
568 * Similarly but for an iteration in the reverse direction.
570 * Set ATEDISK when iterating backwards to skip the current entry,
571 * which after an ENOENT lookup will be pointing beyond our end point.
574 hammer_btree_last(hammer_cursor_t cursor)
576 struct hammer_base_elm save;
577 int error;
579 save = cursor->key_beg;
580 cursor->key_beg = cursor->key_end;
581 error = hammer_btree_lookup(cursor);
582 cursor->key_beg = save;
583 if (error == ENOENT ||
584 (cursor->flags & HAMMER_CURSOR_END_INCLUSIVE) == 0) {
585 cursor->flags |= HAMMER_CURSOR_ATEDISK;
586 error = hammer_btree_iterate_reverse(cursor);
588 cursor->flags |= HAMMER_CURSOR_ATEDISK;
589 return(error);
593 * Extract the record and/or data associated with the cursor's current
594 * position. Any prior record or data stored in the cursor is replaced.
595 * The cursor must be positioned at a leaf node.
597 * NOTE: All extractions occur at the leaf of the B-Tree.
600 hammer_btree_extract(hammer_cursor_t cursor, int flags)
602 hammer_mount_t hmp;
603 hammer_node_ondisk_t node;
604 hammer_btree_elm_t elm;
605 hammer_off_t data_off;
606 int32_t data_len;
607 int error;
610 * The case where the data reference resolves to the same buffer
611 * as the record reference must be handled.
613 node = cursor->node->ondisk;
614 elm = &node->elms[cursor->index];
615 cursor->data = NULL;
616 hmp = cursor->node->hmp;
619 * There is nothing to extract for an internal element.
621 if (node->type == HAMMER_BTREE_TYPE_INTERNAL)
622 return(EINVAL);
625 * Only record types have data.
627 KKASSERT(node->type == HAMMER_BTREE_TYPE_LEAF);
628 cursor->leaf = &elm->leaf;
630 if ((flags & HAMMER_CURSOR_GET_DATA) == 0)
631 return(0);
632 if (elm->leaf.base.btype != HAMMER_BTREE_TYPE_RECORD)
633 return(0);
634 data_off = elm->leaf.data_offset;
635 data_len = elm->leaf.data_len;
636 if (data_off == 0)
637 return(0);
640 * Load the data
642 KKASSERT(data_len >= 0 && data_len <= HAMMER_XBUFSIZE);
643 cursor->data = hammer_bread_ext(hmp, data_off, data_len,
644 &error, &cursor->data_buffer);
645 if (hammer_crc_test_leaf(cursor->data, &elm->leaf) == 0)
646 Debugger("CRC FAILED: DATA");
647 return(error);
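/*
 * Hypothetical caller sketch (illustration only, compiled out; not part of
 * the original file): after a successful lookup or iteration, passing
 * HAMMER_CURSOR_GET_DATA resolves cursor->data in addition to cursor->leaf.
 * cursor->data may remain NULL when the element carries no data reference.
 */
#if 0
	error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA);
	if (error == 0 && cursor->data != NULL) {
		/* cursor->leaf->data_len bytes are available at cursor->data */
	}
#endif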
652 * Insert a leaf element into the B-Tree at the current cursor position.
653 * The cursor is positioned such that the elements at and beyond the cursor
654 * are shifted to make room for the new record.
656 * The caller must call hammer_btree_lookup() with the HAMMER_CURSOR_INSERT
657 * flag set and that call must return ENOENT before this function can be
658 * called.
660 * The caller may depend on the cursor's exclusive lock after return to
661 * interlock frontend visibility (see HAMMER_RECF_CONVERT_DELETE).
663 * ENOSPC is returned if there is no room to insert a new record.
666 hammer_btree_insert(hammer_cursor_t cursor, hammer_btree_leaf_elm_t elm)
668 hammer_node_ondisk_t node;
669 int i;
670 int error;
672 if ((error = hammer_cursor_upgrade_node(cursor)) != 0)
673 return(error);
674 ++hammer_stats_btree_inserts;
677 * Insert the element at the leaf node and update the count in the
678 * parent. It is possible for parent to be NULL, indicating that
679 * the filesystem's ROOT B-Tree node is itself a leaf. The root
680 * inode can never be deleted so the leaf should
681 * never be empty.
683 * Remember that the right-hand boundary is not included in the
684 * count.
686 hammer_modify_node_all(cursor->trans, cursor->node);
687 node = cursor->node->ondisk;
688 i = cursor->index;
689 KKASSERT(elm->base.btype != 0);
690 KKASSERT(node->type == HAMMER_BTREE_TYPE_LEAF);
691 KKASSERT(node->count < HAMMER_BTREE_LEAF_ELMS);
692 if (i != node->count) {
693 bcopy(&node->elms[i], &node->elms[i+1],
694 (node->count - i) * sizeof(*elm));
696 node->elms[i].leaf = *elm;
697 ++node->count;
700 * Update the leaf node's aggregate mirror_tid for mirroring
701 * support.
703 if (node->mirror_tid < elm->base.delete_tid)
704 node->mirror_tid = elm->base.delete_tid;
705 if (node->mirror_tid < elm->base.create_tid)
706 node->mirror_tid = elm->base.create_tid;
707 hammer_modify_node_done(cursor->node);
710 * What we really want to do is propagate mirror_tid all the way
711 * up the parent chain to the B-Tree root. That would be
712 * ultra-expensive, though.
714 if (cursor->parent &&
715 (cursor->trans->hmp->hflags & (HMNT_MASTERID|HMNT_SLAVE))) {
716 hammer_btree_mirror_propagate(cursor->trans, cursor->parent,
717 cursor->parent_index,
718 node->mirror_tid);
722 * Debugging sanity checks.
724 KKASSERT(hammer_btree_cmp(cursor->left_bound, &elm->base) <= 0);
725 KKASSERT(hammer_btree_cmp(cursor->right_bound, &elm->base) > 0);
726 if (i) {
727 KKASSERT(hammer_btree_cmp(&node->elms[i-1].leaf.base, &elm->base) < 0);
729 if (i != node->count - 1)
730 KKASSERT(hammer_btree_cmp(&node->elms[i+1].leaf.base, &elm->base) > 0);
732 return(0);
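/*
 * Hypothetical caller sequence (illustration only, compiled out; not part
 * of the original file), per the contract stated above: the lookup must be
 * performed with HAMMER_CURSOR_INSERT set and must return ENOENT before the
 * new leaf element is inserted.  new_leaf_elm is a hypothetical
 * hammer_btree_leaf_elm prepared by the caller.
 */
#if 0
	cursor->flags |= HAMMER_CURSOR_INSERT;
	error = hammer_btree_lookup(cursor);
	if (error == ENOENT)
		error = hammer_btree_insert(cursor, &new_leaf_elm);
#endif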
736 * Delete a record from the B-Tree at the current cursor position.
737 * The cursor is positioned such that the current element is the one
738 * to be deleted.
740 * On return the cursor will be positioned after the deleted element and
741 * MAY point to an internal node. It will be suitable for the continuation
742 * of an iteration but not for an insertion or deletion.
744 * Deletions will attempt to partially rebalance the B-Tree in an upward
745 * direction, but will terminate rather than deadlock. Empty internal nodes
746 * are never allowed by a deletion, but a deadlock may end up giving us an
747 * empty leaf. The pruner will clean up and rebalance the tree.
749 * This function can return EDEADLK, requiring the caller to retry the
750 * operation after clearing the deadlock.
753 hammer_btree_delete(hammer_cursor_t cursor)
755 hammer_node_ondisk_t ondisk;
756 hammer_node_t node;
757 hammer_node_t parent;
758 int error;
759 int i;
761 if ((error = hammer_cursor_upgrade(cursor)) != 0)
762 return(error);
763 ++hammer_stats_btree_deletes;
766 * Delete the element from the leaf node.
768 * Remember that leaf nodes do not have boundaries.
770 node = cursor->node;
771 ondisk = node->ondisk;
772 i = cursor->index;
774 KKASSERT(ondisk->type == HAMMER_BTREE_TYPE_LEAF);
775 KKASSERT(i >= 0 && i < ondisk->count);
776 hammer_modify_node_all(cursor->trans, node);
777 if (i + 1 != ondisk->count) {
778 bcopy(&ondisk->elms[i+1], &ondisk->elms[i],
779 (ondisk->count - i - 1) * sizeof(ondisk->elms[0]));
781 --ondisk->count;
782 hammer_modify_node_done(node);
785 * Validate local parent
787 if (ondisk->parent) {
788 parent = cursor->parent;
790 KKASSERT(parent != NULL);
791 KKASSERT(parent->node_offset == ondisk->parent);
795 * If the leaf becomes empty it must be detached from the parent,
796 * potentially recursing through to the filesystem root.
798 * This may reposition the cursor at one of the parents of the
799 * current node.
801 * Ignore deadlock errors; that simply means that btree_remove
802 * was unable to recurse and had to leave us with an empty leaf.
804 KKASSERT(cursor->index <= ondisk->count);
805 if (ondisk->count == 0) {
806 error = btree_remove(cursor);
807 if (error == EDEADLK)
808 error = 0;
809 } else {
810 error = 0;
812 KKASSERT(cursor->parent == NULL ||
813 cursor->parent_index < cursor->parent->ondisk->count);
814 return(error);
818 * PRIMARY B-TREE SEARCH SUPPORT PROCEDURE
820 * Search the filesystem B-Tree for cursor->key_beg, return the matching node.
822 * The search can begin ANYWHERE in the B-Tree. As a first step the search
823 * iterates up the tree as necessary to properly position itself prior to
824 * actually doing the search.
826 * INSERTIONS: The search will split full nodes and leaves on its way down
827 * and guarantee that the leaf it ends up on is not full. If we run out
828 * of space the search continues to the leaf (to position the cursor for
829 * the spike), but ENOSPC is returned.
831 * The search is only guaranteed to end up on a leaf if an error code of 0
832 * is returned, or if inserting and an error code of ENOENT is returned.
833 * Otherwise it can stop at an internal node. On success a search returns
834 * a leaf node.
836 * COMPLEXITY WARNING! This is the core B-Tree search code for the entire
837 * filesystem, and it is not simple code. Please note the following facts:
839 * - Internal node recursions have a boundary on the left AND right. The
840 * right boundary is non-inclusive. The create_tid is a generic part
841 * of the key for internal nodes.
843 * - Leaf nodes contain terminal elements only now.
845 * - Filesystem lookups typically set HAMMER_CURSOR_ASOF, indicating a
846 * historical search. ASOF and INSERT are mutually exclusive. When
847 * doing an as-of lookup btree_search() checks for a right-edge boundary
848 * case. If while recursing down the left-edge differs from the key
849 * by ONLY its create_tid, HAMMER_CURSOR_CREATE_CHECK is set along
850 * with cursor->create_check. This is used by btree_lookup() to iterate.
851 * The iteration runs backwards because as-of searches can wind up going
852 * down the wrong branch of the B-Tree.
854 static
856 btree_search(hammer_cursor_t cursor, int flags)
858 hammer_node_ondisk_t node;
859 hammer_btree_elm_t elm;
860 int error;
861 int enospc = 0;
862 int i;
863 int r;
864 int s;
866 flags |= cursor->flags;
867 ++hammer_stats_btree_searches;
869 if (hammer_debug_btree) {
870 kprintf("SEARCH %016llx[%d] %016llx %02x key=%016llx cre=%016llx lo=%02x (td = %p)\n",
871 cursor->node->node_offset,
872 cursor->index,
873 cursor->key_beg.obj_id,
874 cursor->key_beg.rec_type,
875 cursor->key_beg.key,
876 cursor->key_beg.create_tid,
877 cursor->key_beg.localization,
878 curthread
880 if (cursor->parent)
881 kprintf("SEARCHP %016llx[%d] (%016llx/%016llx %016llx/%016llx) (%p/%p %p/%p)\n",
882 cursor->parent->node_offset, cursor->parent_index,
883 cursor->left_bound->obj_id,
884 cursor->parent->ondisk->elms[cursor->parent_index].internal.base.obj_id,
885 cursor->right_bound->obj_id,
886 cursor->parent->ondisk->elms[cursor->parent_index+1].internal.base.obj_id,
887 cursor->left_bound,
888 &cursor->parent->ondisk->elms[cursor->parent_index],
889 cursor->right_bound,
890 &cursor->parent->ondisk->elms[cursor->parent_index+1]
895 * Move our cursor up the tree until we find a node whose range covers
896 * the key we are trying to locate.
898 * The left bound is inclusive, the right bound is non-inclusive.
899 * It is ok to cursor up too far.
901 for (;;) {
902 r = hammer_btree_cmp(&cursor->key_beg, cursor->left_bound);
903 s = hammer_btree_cmp(&cursor->key_beg, cursor->right_bound);
904 if (r >= 0 && s < 0)
905 break;
906 KKASSERT(cursor->parent);
907 ++hammer_stats_btree_iterations;
908 error = hammer_cursor_up(cursor);
909 if (error)
910 goto done;
914 * The delete-checks below are based on node, not parent. Set the
915 * initial delete-check based on the parent.
917 if (r == 1) {
918 KKASSERT(cursor->left_bound->create_tid != 1);
919 cursor->create_check = cursor->left_bound->create_tid - 1;
920 cursor->flags |= HAMMER_CURSOR_CREATE_CHECK;
924 * We better have ended up with a node somewhere.
926 KKASSERT(cursor->node != NULL);
929 * If we are inserting we can't start at a full node if the parent
930 * is also full (because there is no way to split the node), so
931 * continue running up the tree until the requirement is satisfied
932 * or we hit the root of the filesystem.
934 * (If inserting we aren't doing an as-of search so we don't have
935 * to worry about create_check).
937 while ((flags & HAMMER_CURSOR_INSERT) && enospc == 0) {
938 if (cursor->node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
939 if (btree_node_is_full(cursor->node->ondisk) == 0)
940 break;
941 } else {
942 if (btree_node_is_full(cursor->node->ondisk) == 0)
943 break;
945 if (cursor->node->ondisk->parent == 0 ||
946 cursor->parent->ondisk->count != HAMMER_BTREE_INT_ELMS) {
947 break;
949 ++hammer_stats_btree_iterations;
950 error = hammer_cursor_up(cursor);
951 /* node may have become stale */
952 if (error)
953 goto done;
957 * Push down through internal nodes to locate the requested key.
959 node = cursor->node->ondisk;
960 while (node->type == HAMMER_BTREE_TYPE_INTERNAL) {
962 * Scan the node to find the subtree index to push down into.
963 * We go one-past, then back-up.
965 * We must proactively remove deleted elements which may
966 * have been left over from a deadlocked btree_remove().
968 * The left and right boundaries are included in the loop
969 * in order to detect edge cases.
971 * If the separator only differs by create_tid (r == 1)
972 * and we are doing an as-of search, we may end up going
973 * down a branch to the left of the one containing the
974 * desired key. This requires numerous special cases.
976 ++hammer_stats_btree_iterations;
977 if (hammer_debug_btree) {
978 kprintf("SEARCH-I %016llx count=%d\n",
979 cursor->node->node_offset,
980 node->count);
984 * Try to shortcut the search before dropping into the
985 * linear loop. Locate the first element where r <= 1.
987 i = hammer_btree_search_node(&cursor->key_beg, node);
988 while (i <= node->count) {
989 ++hammer_stats_btree_elements;
990 elm = &node->elms[i];
991 r = hammer_btree_cmp(&cursor->key_beg, &elm->base);
992 if (hammer_debug_btree > 2) {
993 kprintf(" IELM %p %d r=%d\n",
994 &node->elms[i], i, r);
996 if (r < 0)
997 break;
998 if (r == 1) {
999 KKASSERT(elm->base.create_tid != 1);
1000 cursor->create_check = elm->base.create_tid - 1;
1001 cursor->flags |= HAMMER_CURSOR_CREATE_CHECK;
1003 ++i;
1005 if (hammer_debug_btree) {
1006 kprintf("SEARCH-I preI=%d/%d r=%d\n",
1007 i, node->count, r);
1011 * These cases occur when the parent's idea of the boundary
1012 * is wider than the child's idea of the boundary, and
1013 * require special handling. If not inserting we can
1014 * terminate the search early for these cases but the
1015 * child's boundaries cannot be unconditionally modified.
1017 if (i == 0) {
1019 * If i == 0 the search terminated to the LEFT of the
1020 * left_boundary but to the RIGHT of the parent's left
1021 * boundary.
1023 u_int8_t save;
1025 elm = &node->elms[0];
1028 * If we aren't inserting we can stop here.
1030 if ((flags & (HAMMER_CURSOR_INSERT |
1031 HAMMER_CURSOR_PRUNING)) == 0) {
1032 cursor->index = 0;
1033 return(ENOENT);
1037 * Correct a left-hand boundary mismatch.
1039 * We can only do this if we can upgrade the lock,
1040 * and synchronized as a background cursor (i.e.
1041 * inserting or pruning).
1043 * WARNING: We can only do this if inserting, i.e.
1044 * we are running on the backend.
1046 if ((error = hammer_cursor_upgrade(cursor)) != 0)
1047 return(error);
1048 KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
1049 hammer_modify_node_field(cursor->trans, cursor->node,
1050 elms[0]);
1051 save = node->elms[0].base.btype;
1052 node->elms[0].base = *cursor->left_bound;
1053 node->elms[0].base.btype = save;
1054 hammer_modify_node_done(cursor->node);
1055 } else if (i == node->count + 1) {
1057 * If i == node->count + 1 the search terminated to
1058 * the RIGHT of the right boundary but to the LEFT
1059 * of the parent's right boundary. If we aren't
1060 * inserting we can stop here.
1062 * Note that the last element in this case is
1063 * elms[i-2] prior to adjustments to 'i'.
1065 --i;
1066 if ((flags & (HAMMER_CURSOR_INSERT |
1067 HAMMER_CURSOR_PRUNING)) == 0) {
1068 cursor->index = i;
1069 return (ENOENT);
1073 * Correct a right-hand boundary mismatch.
1074 * (actual push-down record is i-2 prior to
1075 * adjustments to i).
1077 * We can only do this if we can upgrade the lock,
1078 * and synchronized as a background cursor (i.e.
1079 * inserting or pruning).
1081 * WARNING: We can only do this if inserting, i.e.
1082 * we are running on the backend.
1084 if ((error = hammer_cursor_upgrade(cursor)) != 0)
1085 return(error);
1086 elm = &node->elms[i];
1087 KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
1088 hammer_modify_node(cursor->trans, cursor->node,
1089 &elm->base, sizeof(elm->base));
1090 elm->base = *cursor->right_bound;
1091 hammer_modify_node_done(cursor->node);
1092 --i;
1093 } else {
1095 * The push-down index is now i - 1. If we had
1096 * terminated on the right boundary this will point
1097 * us at the last element.
1099 --i;
1101 cursor->index = i;
1102 elm = &node->elms[i];
1104 if (hammer_debug_btree) {
1105 kprintf("RESULT-I %016llx[%d] %016llx %02x "
1106 "key=%016llx cre=%016llx lo=%02x\n",
1107 cursor->node->node_offset,
1109 elm->internal.base.obj_id,
1110 elm->internal.base.rec_type,
1111 elm->internal.base.key,
1112 elm->internal.base.create_tid,
1113 elm->internal.base.localization
1118 * We better have a valid subtree offset.
1120 KKASSERT(elm->internal.subtree_offset != 0);
1123 * Handle insertion and deletion requirements.
1125 * If inserting split full nodes. The split code will
1126 * adjust cursor->node and cursor->index if the current
1127 * index winds up in the new node.
1129 * If inserting and a left or right edge case was detected,
1130 * we cannot correct the left or right boundary and must
1131 * prepend and append an empty leaf node in order to make
1132 * the boundary correction.
1134 * If we run out of space we set enospc and continue on
1135 * to a leaf to provide the spike code with a good point
1136 * of entry.
1138 if ((flags & HAMMER_CURSOR_INSERT) && enospc == 0) {
1139 if (btree_node_is_full(node)) {
1140 error = btree_split_internal(cursor);
1141 if (error) {
1142 if (error != ENOSPC)
1143 goto done;
1144 enospc = 1;
1147 * reload stale pointers
1149 i = cursor->index;
1150 node = cursor->node->ondisk;
1155 * Push down (push into new node, existing node becomes
1156 * the parent) and continue the search.
1158 error = hammer_cursor_down(cursor);
1159 /* node may have become stale */
1160 if (error)
1161 goto done;
1162 node = cursor->node->ondisk;
1166 * We are at a leaf, do a linear search of the key array.
1168 * On success the index is set to the matching element and 0
1169 * is returned.
1171 * On failure the index is set to the insertion point and ENOENT
1172 * is returned.
1174 * Boundaries are not stored in leaf nodes, so the index can wind
1175 * up to the left of element 0 (index == 0) or past the end of
1176 * the array (index == node->count). It is also possible that the
1177 * leaf might be empty.
1179 ++hammer_stats_btree_iterations;
1180 KKASSERT (node->type == HAMMER_BTREE_TYPE_LEAF);
1181 KKASSERT(node->count <= HAMMER_BTREE_LEAF_ELMS);
1182 if (hammer_debug_btree) {
1183 kprintf("SEARCH-L %016llx count=%d\n",
1184 cursor->node->node_offset,
1185 node->count);
1189 * Try to shortcut the search before dropping into the
1190 * linear loop. Locate the first element where r <= 1.
1192 i = hammer_btree_search_node(&cursor->key_beg, node);
1193 while (i < node->count) {
1194 ++hammer_stats_btree_elements;
1195 elm = &node->elms[i];
1197 r = hammer_btree_cmp(&cursor->key_beg, &elm->leaf.base);
1199 if (hammer_debug_btree > 1)
1200 kprintf(" ELM %p %d r=%d\n", &node->elms[i], i, r);
1203 * We are at a record element. Stop if we've flipped past
1204 * key_beg, not counting the create_tid test. Allow the
1205 * r == 1 case (key_beg > element but differs only by its
1206 * create_tid) to fall through to the AS-OF check.
1208 KKASSERT (elm->leaf.base.btype == HAMMER_BTREE_TYPE_RECORD);
1210 if (r < 0)
1211 goto failed;
1212 if (r > 1) {
1213 ++i;
1214 continue;
1218 * Check our as-of timestamp against the element.
1220 if (flags & HAMMER_CURSOR_ASOF) {
1221 if (hammer_btree_chkts(cursor->asof,
1222 &node->elms[i].base) != 0) {
1223 ++i;
1224 continue;
1226 /* success */
1227 } else {
1228 if (r > 0) { /* can only be +1 */
1229 ++i;
1230 continue;
1232 /* success */
1234 cursor->index = i;
1235 error = 0;
1236 if (hammer_debug_btree) {
1237 kprintf("RESULT-L %016llx[%d] (SUCCESS)\n",
1238 cursor->node->node_offset, i);
1240 goto done;
1244 * The search of the leaf node failed. i is the insertion point.
1246 failed:
1247 if (hammer_debug_btree) {
1248 kprintf("RESULT-L %016llx[%d] (FAILED)\n",
1249 cursor->node->node_offset, i);
1253 * No exact match was found, i is now at the insertion point.
1255 * If inserting split a full leaf before returning. This
1256 * may have the side effect of adjusting cursor->node and
1257 * cursor->index.
1259 cursor->index = i;
1260 if ((flags & HAMMER_CURSOR_INSERT) && enospc == 0 &&
1261 btree_node_is_full(node)) {
1262 error = btree_split_leaf(cursor);
1263 if (error) {
1264 if (error != ENOSPC)
1265 goto done;
1266 enospc = 1;
1269 * reload stale pointers
1271 /* NOT USED
1272 i = cursor->index;
1273 node = &cursor->node->internal;
1278 * We reached a leaf but did not find the key we were looking for.
1279 * If this is an insert we will be properly positioned for an insert
1280 * (ENOENT) or spike (ENOSPC) operation.
1282 error = enospc ? ENOSPC : ENOENT;
1283 done:
1284 return(error);
1288 * Heuristic search for the first element whose comparison is <= 1. May
1289 * return an index whose compare result is > 1 but may only return an index
1290 * whose compare result is <= 1 if it is the first element with that result.
1293 hammer_btree_search_node(hammer_base_elm_t elm, hammer_node_ondisk_t node)
1295 int b;
1296 int s;
1297 int i;
1298 int r;
1301 * Don't bother if the node does not have very many elements
1303 b = 0;
1304 s = node->count;
1305 while (s - b > 4) {
1306 i = b + (s - b) / 2;
1307 ++hammer_stats_btree_elements;
1308 r = hammer_btree_cmp(elm, &node->elms[i].leaf.base);
1309 if (r <= 1) {
1310 s = i;
1311 } else {
1312 b = i;
1315 return(b);
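/*
 * Worked example (illustration only, not part of the original file): with
 * node->count == 16, and assuming the compare at index 8 returns > 1 while
 * the compare at index 12 returns <= 1, the loop above narrows [b,s) as
 * 0..16 -> 8..16 -> 8..12 and stops once s - b <= 4, returning b == 8.
 * The caller then runs its linear scan starting at that index.
 */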
1319 /************************************************************************
1320 * SPLITTING AND MERGING *
1321 ************************************************************************
1323 * These routines do all the dirty work required to split and merge nodes.
1327 * Split an internal node into two nodes and move the separator at the split
1328 * point to the parent.
1330 * (cursor->node, cursor->index) indicates the element the caller intends
1331 * to push into. We will adjust node and index if that element winds
1332 * up in the split node.
1334 * If we are at the root of the filesystem a new root must be created with
1335 * two elements, one pointing to the original root and one pointing to the
1336 * newly allocated split node.
1338 static
1340 btree_split_internal(hammer_cursor_t cursor)
1342 hammer_node_ondisk_t ondisk;
1343 hammer_node_t node;
1344 hammer_node_t parent;
1345 hammer_node_t new_node;
1346 hammer_btree_elm_t elm;
1347 hammer_btree_elm_t parent_elm;
1348 hammer_node_locklist_t locklist = NULL;
1349 hammer_mount_t hmp = cursor->trans->hmp;
1350 int parent_index;
1351 int made_root;
1352 int split;
1353 int error;
1354 int i;
1355 const int esize = sizeof(*elm);
1357 error = hammer_btree_lock_children(cursor, &locklist);
1358 if (error)
1359 goto done;
1360 if ((error = hammer_cursor_upgrade(cursor)) != 0)
1361 goto done;
1362 ++hammer_stats_btree_splits;
1365 * We are splitting but elms[split] will be promoted to the parent,
1366 * leaving the right hand node with one less element. If the
1367 * insertion point will be on the left-hand side adjust the split
1368 * point to give the right hand side one additional element.
1370 node = cursor->node;
1371 ondisk = node->ondisk;
1372 split = (ondisk->count + 1) / 2;
1373 if (cursor->index <= split)
1374 --split;
1377 * If we are at the root of the filesystem, create a new root node
1378 * with 1 element and split normally. Avoid making major
1379 * modifications until we know the whole operation will work.
1381 if (ondisk->parent == 0) {
1382 parent = hammer_alloc_btree(cursor->trans, &error);
1383 if (parent == NULL)
1384 goto done;
1385 hammer_lock_ex(&parent->lock);
1386 hammer_modify_node_noundo(cursor->trans, parent);
1387 ondisk = parent->ondisk;
1388 ondisk->count = 1;
1389 ondisk->parent = 0;
1390 ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
1391 ondisk->elms[0].base = hmp->root_btree_beg;
1392 ondisk->elms[0].base.btype = node->ondisk->type;
1393 ondisk->elms[0].internal.subtree_offset = node->node_offset;
1394 ondisk->elms[1].base = hmp->root_btree_end;
1395 hammer_modify_node_done(parent);
1396 /* ondisk->elms[1].base.btype - not used */
1397 made_root = 1;
1398 parent_index = 0; /* index of current node in parent */
1399 } else {
1400 made_root = 0;
1401 parent = cursor->parent;
1402 parent_index = cursor->parent_index;
1406 * Split node into new_node at the split point.
1408 * B O O O P N N B <-- P = node->elms[split]
1409 * 0 1 2 3 4 5 6 <-- subtree indices
1411 * x x P x x
1412 * s S S s
1413 * / \
1414 * B O O O B B N N B <--- inner boundary points are 'P'
1415 * 0 1 2 3 4 5 6
1418 new_node = hammer_alloc_btree(cursor->trans, &error);
1419 if (new_node == NULL) {
1420 if (made_root) {
1421 hammer_unlock(&parent->lock);
1422 hammer_delete_node(cursor->trans, parent);
1423 hammer_rel_node(parent);
1425 goto done;
1427 hammer_lock_ex(&new_node->lock);
1430 * Create the new node. P becomes the left-hand boundary in the
1431 * new node. Copy the right-hand boundary as well.
1433 * elm is the new separator.
1435 hammer_modify_node_noundo(cursor->trans, new_node);
1436 hammer_modify_node_all(cursor->trans, node);
1437 ondisk = node->ondisk;
1438 elm = &ondisk->elms[split];
1439 bcopy(elm, &new_node->ondisk->elms[0],
1440 (ondisk->count - split + 1) * esize);
1441 new_node->ondisk->count = ondisk->count - split;
1442 new_node->ondisk->parent = parent->node_offset;
1443 new_node->ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
1444 KKASSERT(ondisk->type == new_node->ondisk->type);
1447 * Cleanup the original node. Elm (P) becomes the new boundary,
1448 * its subtree_offset was moved to the new node. If we had created
1449 * a new root its parent pointer may have changed.
1451 elm->internal.subtree_offset = 0;
1452 ondisk->count = split;
1455 * Insert the separator into the parent, fixup the parent's
1456 * reference to the original node, and reference the new node.
1457 * The separator is P.
1459 * Remember that base.count does not include the right-hand boundary.
1461 hammer_modify_node_all(cursor->trans, parent);
1462 ondisk = parent->ondisk;
1463 KKASSERT(ondisk->count != HAMMER_BTREE_INT_ELMS);
1464 parent_elm = &ondisk->elms[parent_index+1];
1465 bcopy(parent_elm, parent_elm + 1,
1466 (ondisk->count - parent_index) * esize);
1467 parent_elm->internal.base = elm->base; /* separator P */
1468 parent_elm->internal.base.btype = new_node->ondisk->type;
1469 parent_elm->internal.subtree_offset = new_node->node_offset;
1470 ++ondisk->count;
1471 hammer_modify_node_done(parent);
1474 * The children of new_node need their parent pointer set to new_node.
1475 * The children have already been locked by
1476 * hammer_btree_lock_children().
1478 for (i = 0; i < new_node->ondisk->count; ++i) {
1479 elm = &new_node->ondisk->elms[i];
1480 error = btree_set_parent(cursor->trans, new_node, elm);
1481 if (error) {
1482 panic("btree_split_internal: btree-fixup problem");
1485 hammer_modify_node_done(new_node);
1488 * The filesystem's root B-Tree pointer may have to be updated.
1490 if (made_root) {
1491 hammer_volume_t volume;
1493 volume = hammer_get_root_volume(hmp, &error);
1494 KKASSERT(error == 0);
1496 hammer_modify_volume_field(cursor->trans, volume,
1497 vol0_btree_root);
1498 volume->ondisk->vol0_btree_root = parent->node_offset;
1499 hammer_modify_volume_done(volume);
1500 node->ondisk->parent = parent->node_offset;
1501 if (cursor->parent) {
1502 hammer_unlock(&cursor->parent->lock);
1503 hammer_rel_node(cursor->parent);
1505 cursor->parent = parent; /* lock'd and ref'd */
1506 hammer_rel_volume(volume, 0);
1508 hammer_modify_node_done(node);
1512 * Ok, now adjust the cursor depending on which element the original
1513 * index was pointing at. If we are >= the split point the push node
1514 * is now in the new node.
1516 * NOTE: If we are at the split point itself we cannot stay with the
1517 * original node because the push index will point at the right-hand
1518 * boundary, which is illegal.
1520 * NOTE: The cursor's parent or parent_index must be adjusted for
1521 * the case where a new parent (new root) was created, and the case
1522 * where the cursor is now pointing at the split node.
1524 if (cursor->index >= split) {
1525 cursor->parent_index = parent_index + 1;
1526 cursor->index -= split;
1527 hammer_unlock(&cursor->node->lock);
1528 hammer_rel_node(cursor->node);
1529 cursor->node = new_node; /* locked and ref'd */
1530 } else {
1531 cursor->parent_index = parent_index;
1532 hammer_unlock(&new_node->lock);
1533 hammer_rel_node(new_node);
1537 * Fixup left and right bounds
1539 parent_elm = &parent->ondisk->elms[cursor->parent_index];
1540 cursor->left_bound = &parent_elm[0].internal.base;
1541 cursor->right_bound = &parent_elm[1].internal.base;
1542 KKASSERT(hammer_btree_cmp(cursor->left_bound,
1543 &cursor->node->ondisk->elms[0].internal.base) <= 0);
1544 KKASSERT(hammer_btree_cmp(cursor->right_bound,
1545 &cursor->node->ondisk->elms[cursor->node->ondisk->count].internal.base) >= 0);
1547 done:
1548 hammer_btree_unlock_children(&locklist);
1549 hammer_cursor_downgrade(cursor);
1550 return (error);
1554 * Same as the above, but splits a full leaf node.
1558 static
1560 btree_split_leaf(hammer_cursor_t cursor)
1562 hammer_node_ondisk_t ondisk;
1563 hammer_node_t parent;
1564 hammer_node_t leaf;
1565 hammer_mount_t hmp;
1566 hammer_node_t new_leaf;
1567 hammer_btree_elm_t elm;
1568 hammer_btree_elm_t parent_elm;
1569 hammer_base_elm_t mid_boundary;
1570 int parent_index;
1571 int made_root;
1572 int split;
1573 int error;
1574 const size_t esize = sizeof(*elm);
1576 if ((error = hammer_cursor_upgrade(cursor)) != 0)
1577 return(error);
1578 ++hammer_stats_btree_splits;
1580 KKASSERT(hammer_btree_cmp(cursor->left_bound,
1581 &cursor->node->ondisk->elms[0].leaf.base) <= 0);
1582 KKASSERT(hammer_btree_cmp(cursor->right_bound,
1583 &cursor->node->ondisk->elms[cursor->node->ondisk->count-1].leaf.base) > 0);
1586 * Calculate the split point. If the insertion point will be on
1587 * the left-hand side adjust the split point to give the right
1588 * hand side one additional element.
1590 * Spikes are made up of two leaf elements which cannot be
1591 * safely split.
1593 leaf = cursor->node;
1594 ondisk = leaf->ondisk;
1595 split = (ondisk->count + 1) / 2;
1596 if (cursor->index <= split)
1597 --split;
1598 error = 0;
1599 hmp = leaf->hmp;
1601 elm = &ondisk->elms[split];
1603 KKASSERT(hammer_btree_cmp(cursor->left_bound, &elm[-1].leaf.base) <= 0);
1604 KKASSERT(hammer_btree_cmp(cursor->left_bound, &elm->leaf.base) <= 0);
1605 KKASSERT(hammer_btree_cmp(cursor->right_bound, &elm->leaf.base) > 0);
1606 KKASSERT(hammer_btree_cmp(cursor->right_bound, &elm[1].leaf.base) > 0);
1609 * If we are at the root of the tree, create a new root node with
1610 * 1 element and split normally. Avoid making major modifications
1611 * until we know the whole operation will work.
1613 if (ondisk->parent == 0) {
1614 parent = hammer_alloc_btree(cursor->trans, &error);
1615 if (parent == NULL)
1616 goto done;
1617 hammer_lock_ex(&parent->lock);
1618 hammer_modify_node_noundo(cursor->trans, parent);
1619 ondisk = parent->ondisk;
1620 ondisk->count = 1;
1621 ondisk->parent = 0;
1622 ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
1623 ondisk->elms[0].base = hmp->root_btree_beg;
1624 ondisk->elms[0].base.btype = leaf->ondisk->type;
1625 ondisk->elms[0].internal.subtree_offset = leaf->node_offset;
1626 ondisk->elms[1].base = hmp->root_btree_end;
1627 /* ondisk->elms[1].base.btype = not used */
1628 hammer_modify_node_done(parent);
1629 made_root = 1;
1630 parent_index = 0; /* insertion point in parent */
1631 } else {
1632 made_root = 0;
1633 parent = cursor->parent;
1634 parent_index = cursor->parent_index;
1638 * Split leaf into new_leaf at the split point. Select a separator
1639 * value in-between the two leaves but with a bent towards the right
1640 * leaf since comparisons use an 'elm >= separator' inequality.
1642 * L L L L L L L L
1644 * x x P x x
1645 * s S S s
1646 * / \
1647 * L L L L L L L L
1649 new_leaf = hammer_alloc_btree(cursor->trans, &error);
1650 if (new_leaf == NULL) {
1651 if (made_root) {
1652 hammer_unlock(&parent->lock);
1653 hammer_delete_node(cursor->trans, parent);
1654 hammer_rel_node(parent);
1656 goto done;
1658 hammer_lock_ex(&new_leaf->lock);
1661 * Create the new node and copy the leaf elements from the split
1662 * point on to the new node.
1664 hammer_modify_node_all(cursor->trans, leaf);
1665 hammer_modify_node_noundo(cursor->trans, new_leaf);
1666 ondisk = leaf->ondisk;
1667 elm = &ondisk->elms[split];
1668 bcopy(elm, &new_leaf->ondisk->elms[0], (ondisk->count - split) * esize);
1669 new_leaf->ondisk->count = ondisk->count - split;
1670 new_leaf->ondisk->parent = parent->node_offset;
1671 new_leaf->ondisk->type = HAMMER_BTREE_TYPE_LEAF;
1672 KKASSERT(ondisk->type == new_leaf->ondisk->type);
1673 hammer_modify_node_done(new_leaf);
1676 * Cleanup the original node. Because this is a leaf node and
1677 * leaf nodes do not have a right-hand boundary, there
1678 * aren't any special edge cases to clean up. We just fixup the
1679 * count.
1681 ondisk->count = split;
1684 * Insert the separator into the parent, fixup the parent's
1685 * reference to the original node, and reference the new node.
1686 * The separator is P.
1688 * Remember that base.count does not include the right-hand boundary.
1689 * We are copying parent_index+1 to parent_index+2, not +0 to +1.
1691 hammer_modify_node_all(cursor->trans, parent);
1692 ondisk = parent->ondisk;
1693 KKASSERT(split != 0);
1694 KKASSERT(ondisk->count != HAMMER_BTREE_INT_ELMS);
1695 parent_elm = &ondisk->elms[parent_index+1];
1696 bcopy(parent_elm, parent_elm + 1,
1697 (ondisk->count - parent_index) * esize);
1699 hammer_make_separator(&elm[-1].base, &elm[0].base, &parent_elm->base);
1700 parent_elm->internal.base.btype = new_leaf->ondisk->type;
1701 parent_elm->internal.subtree_offset = new_leaf->node_offset;
1702 mid_boundary = &parent_elm->base;
1703 ++ondisk->count;
1704 hammer_modify_node_done(parent);
1707 * The filesystem's root B-Tree pointer may have to be updated.
1709 if (made_root) {
1710 hammer_volume_t volume;
1712 volume = hammer_get_root_volume(hmp, &error);
1713 KKASSERT(error == 0);
1715 hammer_modify_volume_field(cursor->trans, volume,
1716 vol0_btree_root);
1717 volume->ondisk->vol0_btree_root = parent->node_offset;
1718 hammer_modify_volume_done(volume);
1719 leaf->ondisk->parent = parent->node_offset;
1720 if (cursor->parent) {
1721 hammer_unlock(&cursor->parent->lock);
1722 hammer_rel_node(cursor->parent);
1724 cursor->parent = parent; /* lock'd and ref'd */
1725 hammer_rel_volume(volume, 0);
1727 hammer_modify_node_done(leaf);
1730 * Ok, now adjust the cursor depending on which element the original
1731 * index was pointing at. If we are >= the split point the push node
1732 * is now in the new node.
1734 * NOTE: If we are at the split point itself we need to select the
1735 * old or new node based on where key_beg's insertion point will be.
1736 * If we pick the wrong side the inserted element will wind up in
1737 * the wrong leaf node and outside that node's bounds.
1739 if (cursor->index > split ||
1740 (cursor->index == split &&
1741 hammer_btree_cmp(&cursor->key_beg, mid_boundary) >= 0)) {
1742 cursor->parent_index = parent_index + 1;
1743 cursor->index -= split;
1744 hammer_unlock(&cursor->node->lock);
1745 hammer_rel_node(cursor->node);
1746 cursor->node = new_leaf;
1747 } else {
1748 cursor->parent_index = parent_index;
1749 hammer_unlock(&new_leaf->lock);
1750 hammer_rel_node(new_leaf);
1754 * Fixup left and right bounds
1756 parent_elm = &parent->ondisk->elms[cursor->parent_index];
1757 cursor->left_bound = &parent_elm[0].internal.base;
1758 cursor->right_bound = &parent_elm[1].internal.base;
1761 * Assert that the bounds are correct.
1763 KKASSERT(hammer_btree_cmp(cursor->left_bound,
1764 &cursor->node->ondisk->elms[0].leaf.base) <= 0);
1765 KKASSERT(hammer_btree_cmp(cursor->right_bound,
1766 &cursor->node->ondisk->elms[cursor->node->ondisk->count-1].leaf.base) > 0);
1767 KKASSERT(hammer_btree_cmp(cursor->left_bound, &cursor->key_beg) <= 0);
1768 KKASSERT(hammer_btree_cmp(cursor->right_bound, &cursor->key_beg) > 0);
1770 done:
1771 hammer_cursor_downgrade(cursor);
1772 return (error);
1776 * Recursively correct the right-hand boundary's create_tid to (tid) as
1777 * long as the rest of the key matches. We have to recurse upward in
1778 * the tree as well as down the left side of each parent's right node.
1780 * Return EDEADLK if we were only partially successful, forcing the caller
1781 * to try again. The original cursor is not modified. This routine can
1782 * also fail with EDEADLK if it is forced to throw away a portion of its
1783 * record history.
1785 * The caller must pass a downgraded cursor to us (otherwise we can't dup it).
1787 struct hammer_rhb {
1788 TAILQ_ENTRY(hammer_rhb) entry;
1789 hammer_node_t node;
1790 int index;
1793 TAILQ_HEAD(hammer_rhb_list, hammer_rhb);
1796 hammer_btree_correct_rhb(hammer_cursor_t cursor, hammer_tid_t tid)
1798 struct hammer_rhb_list rhb_list;
1799 hammer_base_elm_t elm;
1800 hammer_node_t orig_node;
1801 struct hammer_rhb *rhb;
1802 int orig_index;
1803 int error;
1805 TAILQ_INIT(&rhb_list);
1808 * Save our position so we can restore it on return. This also
1809 * gives us a stable 'elm'.
1811 orig_node = cursor->node;
1812 hammer_ref_node(orig_node);
1813 hammer_lock_sh(&orig_node->lock);
1814 orig_index = cursor->index;
1815 elm = &orig_node->ondisk->elms[orig_index].base;
1818 * Now build a list of parents going up, allocating a rhb
1819 * structure for each one.
1821 while (cursor->parent) {
1823 * Stop if we no longer have any right-bounds to fix up
1825 if (elm->obj_id != cursor->right_bound->obj_id ||
1826 elm->rec_type != cursor->right_bound->rec_type ||
1827 elm->key != cursor->right_bound->key) {
1828 break;
1832 * Stop if the right-hand bound's create_tid does not
1833 * need to be corrected.
1835 if (cursor->right_bound->create_tid >= tid)
1836 break;
1838 rhb = kmalloc(sizeof(*rhb), M_HAMMER, M_WAITOK|M_ZERO);
1839 rhb->node = cursor->parent;
1840 rhb->index = cursor->parent_index;
1841 hammer_ref_node(rhb->node);
1842 hammer_lock_sh(&rhb->node->lock);
1843 TAILQ_INSERT_HEAD(&rhb_list, rhb, entry);
1845 hammer_cursor_up(cursor);
1849 * Now safely adjust the right hand bound for each rhb. This may
1850 * also require taking the right side of the tree and iterating down
1851 * ITS left side.
1853 error = 0;
1854 while (error == 0 && (rhb = TAILQ_FIRST(&rhb_list)) != NULL) {
1855 error = hammer_cursor_seek(cursor, rhb->node, rhb->index);
1856 if (error)
1857 break;
1858 TAILQ_REMOVE(&rhb_list, rhb, entry);
1859 hammer_unlock(&rhb->node->lock);
1860 hammer_rel_node(rhb->node);
1861 kfree(rhb, M_HAMMER);
1863 switch (cursor->node->ondisk->type) {
1864 case HAMMER_BTREE_TYPE_INTERNAL:
1866 * Right-boundary for parent at internal node
1867 * is one element to the right of the element whose
1868 * right boundary needs adjusting. We must then
1869 * traverse down the left side correcting any left
1870 * bounds (which may now be too far to the left).
1872 ++cursor->index;
1873 error = hammer_btree_correct_lhb(cursor, tid);
1874 break;
1875 default:
1876 panic("hammer_btree_correct_rhb(): Bad node type");
1877 error = EINVAL;
1878 break;
1883 * Cleanup
1885 while ((rhb = TAILQ_FIRST(&rhb_list)) != NULL) {
1886 TAILQ_REMOVE(&rhb_list, rhb, entry);
1887 hammer_unlock(&rhb->node->lock);
1888 hammer_rel_node(rhb->node);
1889 kfree(rhb, M_HAMMER);
1891 error = hammer_cursor_seek(cursor, orig_node, orig_index);
1892 hammer_unlock(&orig_node->lock);
1893 hammer_rel_node(orig_node);
1894 return (error);
1898 * Similar to rhb (in fact, rhb calls lhb), but corrects the left hand
1899 * bound going downward starting at the current cursor position.
1901 * This function does not restore the cursor after use.
1904 hammer_btree_correct_lhb(hammer_cursor_t cursor, hammer_tid_t tid)
1906 struct hammer_rhb_list rhb_list;
1907 hammer_base_elm_t elm;
1908 hammer_base_elm_t cmp;
1909 struct hammer_rhb *rhb;
1910 int error;
1912 TAILQ_INIT(&rhb_list);
1914 cmp = &cursor->node->ondisk->elms[cursor->index].base;
1917 * Record the node and traverse down the left-hand side for all
1918 * matching records needing a boundary correction.
1920 error = 0;
1921 for (;;) {
1922 rhb = kmalloc(sizeof(*rhb), M_HAMMER, M_WAITOK|M_ZERO);
1923 rhb->node = cursor->node;
1924 rhb->index = cursor->index;
1925 hammer_ref_node(rhb->node);
1926 hammer_lock_sh(&rhb->node->lock);
1927 TAILQ_INSERT_HEAD(&rhb_list, rhb, entry);
1929 if (cursor->node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
1931 * Nothing to traverse down if we are at the right
1932 * boundary of an internal node.
1934 if (cursor->index == cursor->node->ondisk->count)
1935 break;
1936 } else {
1937 elm = &cursor->node->ondisk->elms[cursor->index].base;
1938 if (elm->btype == HAMMER_BTREE_TYPE_RECORD)
1939 break;
1940 panic("Illegal leaf record type %02x", elm->btype);
1942 error = hammer_cursor_down(cursor);
1943 if (error)
1944 break;
1946 elm = &cursor->node->ondisk->elms[cursor->index].base;
1947 if (elm->obj_id != cmp->obj_id ||
1948 elm->rec_type != cmp->rec_type ||
1949 elm->key != cmp->key) {
1950 break;
1952 if (elm->create_tid >= tid)
1953 break;
1958 * Now we can safely adjust the left-hand boundary from the bottom-up.
1959 * The last element we remove from the list is the caller's right hand
1960 * boundary, which must also be adjusted.
1962 while (error == 0 && (rhb = TAILQ_FIRST(&rhb_list)) != NULL) {
1963 error = hammer_cursor_seek(cursor, rhb->node, rhb->index);
1964 if (error)
1965 break;
1966 TAILQ_REMOVE(&rhb_list, rhb, entry);
1967 hammer_unlock(&rhb->node->lock);
1968 hammer_rel_node(rhb->node);
1969 kfree(rhb, M_HAMMER);
1971 elm = &cursor->node->ondisk->elms[cursor->index].base;
1972 if (cursor->node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
1973 hammer_modify_node(cursor->trans, cursor->node,
1974 &elm->create_tid,
1975 sizeof(elm->create_tid));
1976 elm->create_tid = tid;
1977 hammer_modify_node_done(cursor->node);
1978 } else {
1979 panic("hammer_btree_correct_lhb(): Bad element type");
1984 * Cleanup
1986 while ((rhb = TAILQ_FIRST(&rhb_list)) != NULL) {
1987 TAILQ_REMOVE(&rhb_list, rhb, entry);
1988 hammer_unlock(&rhb->node->lock);
1989 hammer_rel_node(rhb->node);
1990 kfree(rhb, M_HAMMER);
1992 return (error);
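/*
 * The list built in the first loop above runs from the caller's starting
 * element down the left-hand edge of the subtree for as long as the
 * element bases keep matching and their create_tids are stale.  Because
 * entries are inserted at the head of the list, the second loop pops the
 * deepest node first and rewrites the internal elements' create_tid
 * (their left-hand boundary) bottom-up, finishing with the element the
 * caller positioned the cursor on.
 */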
1996 * Attempt to remove the locked, empty or want-to-be-empty B-Tree node at
1997 * (cursor->node). Returns 0 on success, EDEADLK if we could not complete
1998 * the operation due to a deadlock, or some other error.
2000 * This routine is always called with an empty, locked leaf but may recurse
2001 * into want-to-be-empty parents as part of its operation.
2003 * It should also be noted that when removing empty leaves we must be sure
2004 * to test and update mirror_tid because another thread may have deadlocked
2005 * against us (or someone else) trying to propagate it up and cannot retry once
2006 * the node has been deleted.
2008 * On return the cursor may end up pointing to an internal node, suitable
2009 * for further iteration but not for an immediate insertion or deletion.
2011 static int
2012 btree_remove(hammer_cursor_t cursor)
2014 hammer_node_ondisk_t ondisk;
2015 hammer_btree_elm_t elm;
2016 hammer_node_t node;
2017 hammer_node_t parent;
2018 const int esize = sizeof(*elm);
2019 int error;
2021 node = cursor->node;
2024 * When deleting the root of the filesystem convert it to
2025 * an empty leaf node. Internal nodes cannot be empty.
2027 ondisk = node->ondisk;
2028 if (ondisk->parent == 0) {
2029 KKASSERT(cursor->parent == NULL);
2030 hammer_modify_node_all(cursor->trans, node);
2031 KKASSERT(ondisk == node->ondisk);
2032 ondisk->type = HAMMER_BTREE_TYPE_LEAF;
2033 ondisk->count = 0;
2034 hammer_modify_node_done(node);
2035 cursor->index = 0;
2036 return(0);
2039 parent = cursor->parent;
2042 * If another thread deadlocked trying to propagate mirror_tid up
2043 * we have to finish the job before deleting the node. XXX
2045 if (parent->ondisk->mirror_tid < node->ondisk->mirror_tid &&
2046 (cursor->trans->hmp->hflags & (HMNT_MASTERID|HMNT_SLAVE))) {
2047 hammer_btree_mirror_propagate(cursor->trans,
2048 parent,
2049 cursor->parent_index,
2050 node->ondisk->mirror_tid);
2055 * Attempt to remove the parent's reference to the child. If the
2056 * parent would become empty we have to recurse. If we fail we
2057 * leave the parent pointing to an empty leaf node.
2059 if (parent->ondisk->count == 1) {
2061 * This special cursor_up_locked() call leaves the original
2062 * node exclusively locked and referenced, leaves the
2063 * original parent locked (as the new node), and locks the
2064 * new parent. It can return EDEADLK.
2066 error = hammer_cursor_up_locked(cursor);
2067 if (error == 0) {
2068 error = btree_remove(cursor);
2069 if (error == 0) {
2070 hammer_modify_node_all(cursor->trans, node);
2071 ondisk = node->ondisk;
2072 ondisk->type = HAMMER_BTREE_TYPE_DELETED;
2073 ondisk->count = 0;
2074 hammer_modify_node_done(node);
2075 hammer_flush_node(node);
2076 hammer_delete_node(cursor->trans, node);
2077 } else {
2078 kprintf("Warning: BTREE_REMOVE: Defering "
2079 "parent removal1 @ %016llx, skipping\n",
2080 node->node_offset);
2082 hammer_unlock(&node->lock);
2083 hammer_rel_node(node);
2084 } else {
2085 kprintf("Warning: BTREE_REMOVE: Defering parent "
2086 "removal2 @ %016llx, skipping\n",
2087 node->node_offset);
2089 } else {
2090 KKASSERT(parent->ondisk->count > 1);
2093 * Delete the subtree reference in the parent
2095 hammer_modify_node_all(cursor->trans, parent);
2096 ondisk = parent->ondisk;
2097 KKASSERT(ondisk->type == HAMMER_BTREE_TYPE_INTERNAL);
2099 elm = &ondisk->elms[cursor->parent_index];
2100 KKASSERT(elm->internal.subtree_offset == node->node_offset);
2101 KKASSERT(ondisk->count > 0);
2102 bcopy(&elm[1], &elm[0],
2103 (ondisk->count - cursor->parent_index) * esize);
2104 --ondisk->count;
2105 hammer_modify_node_done(parent);
2106 hammer_flush_node(node);
2107 hammer_delete_node(cursor->trans, node);
2110 * cursor->node is now invalid; cursor up to make the
2111 * cursor valid again.
2113 error = hammer_cursor_up(cursor);
2115 return (error);
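/*
 * Note on the recursion above: when the parent holds only the single
 * subtree reference being removed, the parent itself becomes empty, so
 * btree_remove() is re-invoked on the parent before the child is
 * physically deleted.  If the upward locking attempt (or the recursion)
 * fails with EDEADLK the child is simply left in place as an empty but
 * still valid leaf and the warning above is printed; a later deletion
 * attempt can reap it.  In the common case (parent count > 1) the
 * element is spliced out of the parent with bcopy() and the child is
 * deleted immediately.
 */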
2119 * Propagate a mirror TID update upwards through the B-Tree to the root.
2121 * A locked internal node must be passed in. The node will remain locked
2122 * on return.
2124 * This function syncs mirror_tid at the specified internal node's element,
2125 * adjusts the node's aggregation mirror_tid, and then recurses upwards.
2128 hammer_btree_mirror_propagate(hammer_transaction_t trans, hammer_node_t node,
2129 int index, hammer_tid_t mirror_tid)
2131 hammer_btree_internal_elm_t elm;
2132 hammer_node_t parent;
2133 int parent_index;
2134 int error;
2136 KKASSERT (node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL);
2139 * Adjust the node's element
2141 elm = &node->ondisk->elms[index].internal;
2142 if (elm->mirror_tid >= mirror_tid)
2143 return(0);
2144 hammer_modify_node(trans, node, &elm->mirror_tid,
2145 sizeof(elm->mirror_tid));
2146 elm->mirror_tid = mirror_tid;
2147 hammer_modify_node_done(node);
2150 * Adjust the node's mirror_tid aggregator
2152 if (node->ondisk->mirror_tid >= mirror_tid)
2153 return(0);
2154 hammer_modify_node_field(trans, node, mirror_tid);
2155 node->ondisk->mirror_tid = mirror_tid;
2156 hammer_modify_node_done(node);
2158 error = 0;
2160 if (node->ondisk->parent &&
2161 (trans->hmp->hflags & (HMNT_MASTERID|HMNT_SLAVE))) {
2162 parent = hammer_btree_get_parent(node, &parent_index,
2163 &error, 1);
2164 if (parent) {
2165 hammer_btree_mirror_propagate(trans, parent,
2166 parent_index, mirror_tid);
2167 hammer_unlock(&parent->lock);
2168 hammer_rel_node(parent);
2171 return(error);
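/*
 * The recursion above terminates early whenever the node's element at
 * the given index, or the node's own aggregate mirror_tid, is already
 * >= the tid being propagated, so repeated flushes of the same subtree
 * only touch ancestors that are actually stale.  Propagation toward the
 * root is also skipped entirely unless the mount participates in
 * mirroring (HMNT_MASTERID or HMNT_SLAVE is set).
 */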
2174 hammer_node_t
2175 hammer_btree_get_parent(hammer_node_t node, int *parent_indexp, int *errorp,
2176 int try_exclusive)
2178 hammer_node_t parent;
2179 hammer_btree_elm_t elm;
2180 int i;
2183 * Get the parent node
2185 parent = hammer_get_node(node->hmp, node->ondisk->parent, 0, errorp);
2186 if (*errorp) {
2187 KKASSERT(parent == NULL);
2188 return(NULL);
2190 KKASSERT ((parent->flags & HAMMER_NODE_DELETED) == 0);
2193 * Lock the parent
2195 if (try_exclusive) {
2196 if (hammer_lock_ex_try(&parent->lock)) {
2197 hammer_rel_node(parent);
2198 *errorp = EDEADLK;
2199 return(NULL);
2201 } else {
2202 hammer_lock_sh(&parent->lock);
2206 * Figure out which element in the parent is pointing to the
2207 * child.
2209 if (node->ondisk->count) {
2210 i = hammer_btree_search_node(&node->ondisk->elms[0].base,
2211 parent->ondisk);
2212 } else {
2213 i = 0;
2215 while (i < parent->ondisk->count) {
2216 elm = &parent->ondisk->elms[i];
2217 if (elm->internal.subtree_offset == node->node_offset)
2218 break;
2219 ++i;
2221 if (i == parent->ondisk->count) {
2222 hammer_unlock(&parent->lock);
2223 panic("Bad B-Tree link: parent %p node %p\n", parent, node);
2225 *parent_indexp = i;
2226 KKASSERT(*errorp == 0);
2227 return(parent);
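/*
 * hammer_btree_get_parent() returns the parent node referenced and
 * locked (shared, or exclusive when try_exclusive is non-zero, in which
 * case a lock collision returns NULL with EDEADLK).  The caller is
 * responsible for the matching hammer_unlock()/hammer_rel_node(), as
 * hammer_btree_mirror_propagate() demonstrates above.
 */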
2231 * The element (elm) has been moved to a new internal node (node).
2233 * If the element represents a pointer to another B-Tree node (internal
2234 * or leaf), that node's parent pointer must be adjusted to the element's new location.
2236 * XXX deadlock potential here with our exclusive locks
2239 btree_set_parent(hammer_transaction_t trans, hammer_node_t node,
2240 hammer_btree_elm_t elm)
2242 hammer_node_t child;
2243 int error;
2245 error = 0;
2247 switch(elm->base.btype) {
2248 case HAMMER_BTREE_TYPE_INTERNAL:
2249 case HAMMER_BTREE_TYPE_LEAF:
2250 child = hammer_get_node(node->hmp, elm->internal.subtree_offset,
2251 0, &error);
2252 if (error == 0) {
2253 hammer_modify_node_field(trans, child, parent);
2254 child->ondisk->parent = node->node_offset;
2255 hammer_modify_node_done(child);
2256 hammer_rel_node(child);
2258 break;
2259 default:
2260 break;
2262 return(error);
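/*
 * Only elements whose btype references another B-Tree node
 * (HAMMER_BTREE_TYPE_INTERNAL or HAMMER_BTREE_TYPE_LEAF) have a child
 * whose ondisk->parent needs rewriting; any other element type falls
 * through the default case above untouched.
 */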
2266 * Exclusively lock all the children of node. This is used by the split
2267 * code to prevent anyone from accessing the children of a cursor node
2268 * while we fix-up its parent offset.
2270 * If we don't lock the children we can really mess up cursors which block
2271 * trying to cursor-up into our node.
2273 * On failure EDEADLK (or some other error) is returned. If a deadlock
2274 * error is returned the cursor is adjusted to block on termination.
2277 hammer_btree_lock_children(hammer_cursor_t cursor,
2278 struct hammer_node_locklist **locklistp)
2280 hammer_node_t node;
2281 hammer_node_locklist_t item;
2282 hammer_node_ondisk_t ondisk;
2283 hammer_btree_elm_t elm;
2284 hammer_node_t child;
2285 int error;
2286 int i;
2288 node = cursor->node;
2289 ondisk = node->ondisk;
2290 error = 0;
2293 * We really do not want to block on I/O with exclusive locks held, so
2294 * pre-get the children before trying to lock the mess.
2296 for (i = 0; i < ondisk->count; ++i) {
2297 ++hammer_stats_btree_elements;
2298 elm = &ondisk->elms[i];
2299 if (elm->base.btype != HAMMER_BTREE_TYPE_LEAF &&
2300 elm->base.btype != HAMMER_BTREE_TYPE_INTERNAL) {
2301 continue;
2303 child = hammer_get_node(node->hmp,
2304 elm->internal.subtree_offset,
2305 0, &error);
2306 if (child)
2307 hammer_rel_node(child);
2311 * Do it for real
2313 for (i = 0; error == 0 && i < ondisk->count; ++i) {
2314 ++hammer_stats_btree_elements;
2315 elm = &ondisk->elms[i];
2317 switch(elm->base.btype) {
2318 case HAMMER_BTREE_TYPE_INTERNAL:
2319 case HAMMER_BTREE_TYPE_LEAF:
2320 KKASSERT(elm->internal.subtree_offset != 0);
2321 child = hammer_get_node(node->hmp,
2322 elm->internal.subtree_offset,
2323 0, &error);
2324 break;
2325 default:
2326 child = NULL;
2327 break;
2329 if (child) {
2330 if (hammer_lock_ex_try(&child->lock) != 0) {
2331 if (cursor->deadlk_node == NULL) {
2332 cursor->deadlk_node = child;
2333 hammer_ref_node(cursor->deadlk_node);
2335 error = EDEADLK;
2336 hammer_rel_node(child);
2337 } else {
2338 item = kmalloc(sizeof(*item),
2339 M_HAMMER, M_WAITOK);
2340 item->next = *locklistp;
2341 item->node = child;
2342 *locklistp = item;
2346 if (error)
2347 hammer_btree_unlock_children(locklistp);
2348 return(error);
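/*
 * A typical caller pattern for the helper above (an illustrative sketch,
 * not lifted verbatim from this file):
 *
 *	struct hammer_node_locklist *locklist = NULL;
 *
 *	error = hammer_btree_lock_children(cursor, &locklist);
 *	if (error == 0) {
 *		(fix up the children's parent offsets while everything
 *		 is exclusively locked)
 *	}
 *	hammer_btree_unlock_children(&locklist);
 *
 * Calling hammer_btree_unlock_children() after a failed lock pass is
 * harmless because hammer_btree_lock_children() already unwinds its own
 * list on error, leaving *locklistp NULL.
 */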
2353 * Release previously obtained node locks.
2355 void
2356 hammer_btree_unlock_children(struct hammer_node_locklist **locklistp)
2358 hammer_node_locklist_t item;
2360 while ((item = *locklistp) != NULL) {
2361 *locklistp = item->next;
2362 hammer_unlock(&item->node->lock);
2363 hammer_rel_node(item->node);
2364 kfree(item, M_HAMMER);
2368 /************************************************************************
2369 * MISCELLANEOUS SUPPORT *
2370 ************************************************************************/
2373 * Compare two B-Tree elements, returning -N, 0, or +N (similar to strcmp).
2375 * Note that for this particular function a return value of -1, 0, or +1
2376 * can denote a match if create_tid is otherwise discounted. A create_tid
2377 * of zero is considered to be 'infinity' in comparisons.
2379 * See also hammer_rec_rb_compare() and hammer_rec_cmp() in hammer_object.c.
2382 hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2)
2384 if (key1->localization < key2->localization)
2385 return(-5);
2386 if (key1->localization > key2->localization)
2387 return(5);
2389 if (key1->obj_id < key2->obj_id)
2390 return(-4);
2391 if (key1->obj_id > key2->obj_id)
2392 return(4);
2394 if (key1->rec_type < key2->rec_type)
2395 return(-3);
2396 if (key1->rec_type > key2->rec_type)
2397 return(3);
2399 if (key1->key < key2->key)
2400 return(-2);
2401 if (key1->key > key2->key)
2402 return(2);
2405 * A create_tid of zero indicates a record which is undeletable
2406 * and must be considered to have a value of positive infinity.
2408 if (key1->create_tid == 0) {
2409 if (key2->create_tid == 0)
2410 return(0);
2411 return(1);
2413 if (key2->create_tid == 0)
2414 return(-1);
2415 if (key1->create_tid < key2->create_tid)
2416 return(-1);
2417 if (key1->create_tid > key2->create_tid)
2418 return(1);
2419 return(0);
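/*
 * Example comparisons (illustrative):
 *
 *	localizations equal, key1->obj_id < key2->obj_id	-> -4
 *	equal through rec_type, key1->key > key2->key		-> +2
 *	equal through key, key1->create_tid == 0 (infinity),
 *	key2->create_tid != 0					-> +1
 *	all fields equal					->  0
 *
 * Callers that want to treat elements with the same localization,
 * obj_id, rec_type and key as a match regardless of create_tid can
 * therefore test for an absolute return value <= 1.
 */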
2423 * Test a timestamp against an element to determine whether the
2424 * element is visible. A timestamp of 0 means 'infinity'.
2427 hammer_btree_chkts(hammer_tid_t asof, hammer_base_elm_t base)
2429 if (asof == 0) {
2430 if (base->delete_tid)
2431 return(1);
2432 return(0);
2434 if (asof < base->create_tid)
2435 return(-1);
2436 if (base->delete_tid && asof >= base->delete_tid)
2437 return(1);
2438 return(0);
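/*
 * Example (illustrative): an element with create_tid = 100 and
 * delete_tid = 200 is visible to as-of queries with 100 <= asof < 200
 * (return 0), not yet created for asof < 100 (return -1), and already
 * deleted for asof >= 200 (return 1).  An asof of 0 requests the
 * current "infinite" view, in which only elements with a zero
 * delete_tid remain visible.
 */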
2442 * Create a separator halfway between key1 and key2. For fields just
2443 * one unit apart, the separator will match key2. key1 is on the left-hand
2444 * side and key2 is on the right-hand side.
2446 * key2 must be >= the separator. It is ok for the separator to match key2.
2448 * NOTE: Even if key1 does not match key2, the separator may wind up matching
2449 * key2.
2451 * NOTE: It might be beneficial to scrap this whole mess and simply
2452 * set the separator to key2.
2454 #define MAKE_SEPARATOR(key1, key2, dest, field) \
2455 dest->field = key1->field + ((key2->field - key1->field + 1) >> 1);
2457 static void
2458 hammer_make_separator(hammer_base_elm_t key1, hammer_base_elm_t key2,
2459 hammer_base_elm_t dest)
2461 bzero(dest, sizeof(*dest));
2463 dest->rec_type = key2->rec_type;
2464 dest->key = key2->key;
2465 dest->obj_id = key2->obj_id;
2466 dest->create_tid = key2->create_tid;
2468 MAKE_SEPARATOR(key1, key2, dest, localization);
2469 if (key1->localization == key2->localization) {
2470 MAKE_SEPARATOR(key1, key2, dest, obj_id);
2471 if (key1->obj_id == key2->obj_id) {
2472 MAKE_SEPARATOR(key1, key2, dest, rec_type);
2473 if (key1->rec_type == key2->rec_type) {
2474 MAKE_SEPARATOR(key1, key2, dest, key);
2476 * Don't bother creating a separator for
2477 * create_tid, which also conveniently avoids
2478 * having to handle the create_tid == 0
2479 * (infinity) case. Just leave create_tid
2480 * set to key2.
2482 * Worst case, dest matches key2 exactly,
2483 * which is acceptable.
2490 #undef MAKE_SEPARATOR
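/*
 * Worked example (illustrative): if key1 and key2 agree on localization,
 * obj_id and rec_type but key1->key = 0x10 and key2->key = 0x20, the
 * separator key becomes 0x10 + ((0x20 - 0x10 + 1) >> 1) = 0x18.  For
 * adjacent keys (0x10 and 0x11) the separator collapses to 0x11, i.e. it
 * matches key2, which is explicitly allowed by the NOTEs above.
 */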
2493 * Return whether a generic internal or leaf node is full
2495 static int
2496 btree_node_is_full(hammer_node_ondisk_t node)
2498 switch(node->type) {
2499 case HAMMER_BTREE_TYPE_INTERNAL:
2500 if (node->count == HAMMER_BTREE_INT_ELMS)
2501 return(1);
2502 break;
2503 case HAMMER_BTREE_TYPE_LEAF:
2504 if (node->count == HAMMER_BTREE_LEAF_ELMS)
2505 return(1);
2506 break;
2507 default:
2508 panic("illegal btree subtype");
2510 return(0);
2513 #if 0
2514 static int
2515 btree_max_elements(u_int8_t type)
2517 if (type == HAMMER_BTREE_TYPE_LEAF)
2518 return(HAMMER_BTREE_LEAF_ELMS);
2519 if (type == HAMMER_BTREE_TYPE_INTERNAL)
2520 return(HAMMER_BTREE_INT_ELMS);
2521 panic("btree_max_elements: bad type %d\n", type);
2523 #endif
2525 void
2526 hammer_print_btree_node(hammer_node_ondisk_t ondisk)
2528 hammer_btree_elm_t elm;
2529 int i;
2531 kprintf("node %p count=%d parent=%016llx type=%c\n",
2532 ondisk, ondisk->count, ondisk->parent, ondisk->type);
2535 * Dump both boundary elements if this is an internal node
2537 if (ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
2538 for (i = 0; i <= ondisk->count; ++i) {
2539 elm = &ondisk->elms[i];
2540 hammer_print_btree_elm(elm, ondisk->type, i);
2542 } else {
2543 for (i = 0; i < ondisk->count; ++i) {
2544 elm = &ondisk->elms[i];
2545 hammer_print_btree_elm(elm, ondisk->type, i);
2550 void
2551 hammer_print_btree_elm(hammer_btree_elm_t elm, u_int8_t type, int i)
2553 kprintf(" %2d", i);
2554 kprintf("\tobj_id = %016llx\n", elm->base.obj_id);
2555 kprintf("\tkey = %016llx\n", elm->base.key);
2556 kprintf("\tcreate_tid = %016llx\n", elm->base.create_tid);
2557 kprintf("\tdelete_tid = %016llx\n", elm->base.delete_tid);
2558 kprintf("\trec_type = %04x\n", elm->base.rec_type);
2559 kprintf("\tobj_type = %02x\n", elm->base.obj_type);
2560 kprintf("\tbtype = %02x (%c)\n",
2561 elm->base.btype,
2562 (elm->base.btype ? elm->base.btype : '?'));
2563 kprintf("\tlocalization = %02x\n", elm->base.localization);
2565 switch(type) {
2566 case HAMMER_BTREE_TYPE_INTERNAL:
2567 kprintf("\tsubtree_off = %016llx\n",
2568 elm->internal.subtree_offset);
2569 break;
2570 case HAMMER_BTREE_TYPE_RECORD:
2571 kprintf("\tdata_offset = %016llx\n", elm->leaf.data_offset);
2572 kprintf("\tdata_len = %08x\n", elm->leaf.data_len);
2573 kprintf("\tdata_crc = %08x\n", elm->leaf.data_crc);
2574 break;