HAMMER 25/many: Pruning code
[dragonfly/netmp.git] / sys/vfs/hammer/hammer_btree.c
blob 6f752c3e78c70fd401c54d8a5b38d5d003f02fb0
1 /*
2 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
34 * $DragonFly: src/sys/vfs/hammer/hammer_btree.c,v 1.28 2008/02/05 07:58:43 dillon Exp $
38 * HAMMER B-Tree index
40 * HAMMER implements a modified B+Tree. In documentation this will
41 * simply be referred to as the HAMMER B-Tree. Basically a HAMMER B-Tree
42 * looks like a B+Tree (a B-Tree which stores its records only at the leaves
43 * of the tree), but adds two additional boundary elements which describe
44 * the left-most and right-most element a node is able to represent. In
45 * other words, we have boundary elements at the two ends of a B-Tree node
46 * instead of sub-tree pointers.
48 * A B-Tree internal node looks like this:
50 * B N N N N N N B <-- boundary and internal elements
51 * S S S S S S S <-- subtree pointers
53 * A B-Tree leaf node basically looks like this:
55 * L L L L L L L L <-- leaf elements
57 * The radix for an internal node is 1 less than a leaf but we get a
58 * number of significant benefits for our troubles.
60 * The big benefit to using a B-Tree containing boundary information
61 * is that it is possible to cache pointers into the middle of the tree
62 * and not have to start searches, insertions, OR deletions at the root
63 * node. In particular, searches are able to progress in a definitive
64 * direction from any point in the tree without revisiting nodes. This
65 * greatly improves the efficiency of many operations, most especially
66 * record appends.
68 * B-Trees also make the stacking of trees fairly straightforward.
70 * SPIKES: Two leaf elements denoting a sub-range of keys may represent
71 * a spike, or a recursion into another cluster. Most standard B-Tree
72 * searches traverse spikes. The ending spike element is range-inclusive
73 * and does not operate quite like a right-bound.
75 * INSERTIONS: A search performed with the intention of doing
76 * an insert will guarantee that the terminal leaf node is not full by
77 * splitting full nodes. Splits occur top-down during the dive down the
78 * B-Tree.
80 * DELETIONS: A deletion makes no attempt to proactively balance the
81 * tree and will recursively remove nodes that become empty. Empty
82 * nodes are not allowed and a deletion may recurse upwards from the leaf.
83 * Rather than allow a deadlock, a deletion may terminate early by setting
84 * an internal node's element's subtree_offset to 0. The deletion will
85 * then be resumed the next time a search encounters the element.
87 #include "hammer.h"
88 #include <sys/buf.h>
89 #include <sys/buf2.h>
91 static int btree_search(hammer_cursor_t cursor, int flags);
92 static int btree_split_internal(hammer_cursor_t cursor);
93 static int btree_split_leaf(hammer_cursor_t cursor);
94 static int btree_remove(hammer_cursor_t cursor);
95 static int btree_remove_deleted_element(hammer_cursor_t cursor);
96 static int btree_set_parent(hammer_node_t node, hammer_btree_elm_t elm);
97 static int btree_node_is_almost_full(hammer_node_ondisk_t node);
98 static int btree_node_is_full(hammer_node_ondisk_t node);
99 static void hammer_make_separator(hammer_base_elm_t key1,
100 hammer_base_elm_t key2, hammer_base_elm_t dest);
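/*
 * Conceptual sketch of the boundary property described in the header
 * comment above, expressed as a predicate on an INTERNAL node: elements
 * 0..count-1 are bracketed by elms[0].base on the left and by
 * elms[count].base on the right, the right boundary being non-inclusive.
 * Leaf nodes store no boundaries so the check does not apply to them.
 * This helper is illustrative only and is not used by the code below.
 */
static __inline int
btree_node_brackets_key(hammer_node_ondisk_t node, hammer_base_elm_t key)
{
	KKASSERT(node->type == HAMMER_BTREE_TYPE_INTERNAL);
	return (hammer_btree_cmp(key, &node->elms[0].base) >= 0 &&
		hammer_btree_cmp(key, &node->elms[node->count].base) < 0);
}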
103 * Iterate records after a search. The cursor is iterated forwards past
104 * the current record until a record matching the key-range requirements
105 * is found. ENOENT is returned if the iteration goes past the ending
106 * key.
108 * The iteration is inclusive of key_beg and can be inclusive or exclusive
109 * of key_end depending on whether HAMMER_CURSOR_END_INCLUSIVE is set.
111 * When doing an as-of search (cursor->asof != 0), key_beg.create_tid
112 * may be modified by B-Tree functions.
114 * cursor->key_beg may or may not be modified by this function during
115 * the iteration. XXX future - in case of an inverted lock we may have
116 * to reinitiate the lookup and set key_beg to properly pick up where we
117 * left off.
119 * NOTE! EDEADLK *CANNOT* be returned by this procedure.
122 hammer_btree_iterate(hammer_cursor_t cursor)
124 hammer_node_ondisk_t node;
125 hammer_btree_elm_t elm;
126 int error;
127 int r;
128 int s;
131 * Skip past the current record
133 node = cursor->node->ondisk;
134 if (node == NULL)
135 return(ENOENT);
136 if (cursor->index < node->count &&
137 (cursor->flags & HAMMER_CURSOR_ATEDISK)) {
138 ++cursor->index;
142 * Loop until an element is found or we are done.
144 for (;;) {
146 * We iterate up the tree and then index over one element
147 * while we are at the last element in the current node.
149 * NOTE: This can pop us up to another cluster.
151 * If we are at the root of the root cluster, cursor_up
152 * returns ENOENT.
154 * NOTE: hammer_cursor_up() will adjust cursor->key_beg
155 * when told to re-search for the cluster tag.
157 * XXX this could be optimized by storing the information in
158 * the parent reference.
160 * XXX we can lose the node lock temporarily, this could mess
161 * up our scan.
163 if (cursor->index == node->count) {
164 error = hammer_cursor_up(cursor);
165 if (error)
166 break;
167 /* reload stale pointer */
168 node = cursor->node->ondisk;
169 KKASSERT(cursor->index != node->count);
170 ++cursor->index;
171 continue;
175 * Check internal or leaf element. Determine if the record
176 * at the cursor has gone beyond the end of our range.
178 * Generally we recurse down through internal nodes. An
179 * internal node can only be returned if INCLUSTER is set
180 * and the node represents a cluster-push record.
182 if (node->type == HAMMER_BTREE_TYPE_INTERNAL) {
183 elm = &node->elms[cursor->index];
184 r = hammer_btree_cmp(&cursor->key_end, &elm[0].base);
185 s = hammer_btree_cmp(&cursor->key_beg, &elm[1].base);
186 if (hammer_debug_btree) {
187 kprintf("BRACKETL %d:%d:%08x[%d] %016llx %02x %016llx %d\n",
188 cursor->node->cluster->volume->vol_no,
189 cursor->node->cluster->clu_no,
190 cursor->node->node_offset,
191 cursor->index,
192 elm[0].internal.base.obj_id,
193 elm[0].internal.base.rec_type,
194 elm[0].internal.base.key,
197 kprintf("BRACKETR %d:%d:%08x[%d] %016llx %02x %016llx %d\n",
198 cursor->node->cluster->volume->vol_no,
199 cursor->node->cluster->clu_no,
200 cursor->node->node_offset,
201 cursor->index + 1,
202 elm[1].internal.base.obj_id,
203 elm[1].internal.base.rec_type,
204 elm[1].internal.base.key,
209 if (r < 0) {
210 error = ENOENT;
211 break;
213 if (r == 0 && (cursor->flags &
214 HAMMER_CURSOR_END_INCLUSIVE) == 0) {
215 error = ENOENT;
216 break;
218 KKASSERT(s <= 0);
221 * When iterating try to clean up any deleted
222 * internal elements left over from btree_remove()
223 * deadlocks, but it is ok if we can't.
225 if (elm->internal.subtree_offset == 0) {
226 btree_remove_deleted_element(cursor);
227 /* note: elm also invalid */
228 } else if (elm->internal.subtree_offset != 0) {
229 error = hammer_cursor_down(cursor);
230 if (error)
231 break;
232 KKASSERT(cursor->index == 0);
234 /* reload stale pointer */
235 node = cursor->node->ondisk;
236 continue;
237 } else {
238 elm = &node->elms[cursor->index];
239 r = hammer_btree_cmp(&cursor->key_end, &elm->base);
240 if (hammer_debug_btree) {
241 kprintf("ELEMENT %d:%d:%08x:%d %c %016llx %02x %016llx %d\n",
242 cursor->node->cluster->volume->vol_no,
243 cursor->node->cluster->clu_no,
244 cursor->node->node_offset,
245 cursor->index,
246 (elm[0].leaf.base.btype ?
247 elm[0].leaf.base.btype : '?'),
248 elm[0].leaf.base.obj_id,
249 elm[0].leaf.base.rec_type,
250 elm[0].leaf.base.key,
254 if (r < 0) {
255 error = ENOENT;
256 break;
260 * We support both end-inclusive and
261 * end-exclusive searches.
263 if (r == 0 &&
264 (cursor->flags & HAMMER_CURSOR_END_INCLUSIVE) == 0) {
265 error = ENOENT;
266 break;
269 switch(elm->leaf.base.btype) {
270 case HAMMER_BTREE_TYPE_RECORD:
271 if ((cursor->flags & HAMMER_CURSOR_ASOF) &&
272 hammer_btree_chkts(cursor->asof, &elm->base)) {
273 ++cursor->index;
274 continue;
276 break;
277 case HAMMER_BTREE_TYPE_SPIKE_BEG:
279 * NOTE: This code assumes that the spike
280 * ending element immediately follows the
281 * spike beginning element.
284 * We must cursor-down via the SPIKE_END
285 * element, otherwise cursor->parent will
286 * not be set correctly for deletions.
288 * fall-through to avoid an improper
289 * termination from the conditional above.
291 KKASSERT(cursor->index + 1 < node->count);
292 ++elm;
293 KKASSERT(elm->leaf.base.btype ==
294 HAMMER_BTREE_TYPE_SPIKE_END);
295 ++cursor->index;
296 /* fall through */
297 case HAMMER_BTREE_TYPE_SPIKE_END:
299 * The SPIKE_END element is inclusive, NOT
300 * like a boundary, so be careful with the
301 * match check.
303 * This code assumes that a preceding SPIKE_BEG
304 * has already been checked.
306 if (cursor->flags & HAMMER_CURSOR_INCLUSTER)
307 break;
308 error = hammer_cursor_down(cursor);
309 if (error)
310 break;
311 KKASSERT(cursor->index == 0);
312 /* reload stale pointer */
313 node = cursor->node->ondisk;
316 * If the cluster root is empty it and its
317 * related spike can be deleted. Ignore
318 * errors. Cursor
320 if (node->count == 0) {
321 error = hammer_cursor_upgrade(cursor);
322 if (error == 0)
323 error = btree_remove(cursor);
324 hammer_cursor_downgrade(cursor);
325 error = 0;
326 /* reload stale pointer */
327 node = cursor->node->ondisk;
329 continue;
330 default:
331 error = EINVAL;
332 break;
334 if (error)
335 break;
338 * node pointer invalid after loop
342 * Return entry
344 if (hammer_debug_btree) {
345 int i = cursor->index;
346 hammer_btree_elm_t elm = &cursor->node->ondisk->elms[i];
347 kprintf("ITERATE %p:%d %016llx %02x %016llx\n",
348 cursor->node, i,
349 elm->internal.base.obj_id,
350 elm->internal.base.rec_type,
351 elm->internal.base.key
354 return(0);
356 return(error);
360 * Iterate in the reverse direction. This is used by the pruning code to
361 * avoid overlapping records.
364 hammer_btree_iterate_reverse(hammer_cursor_t cursor)
366 hammer_node_ondisk_t node;
367 hammer_btree_elm_t elm;
368 int error;
369 int r;
370 int s;
373 * Skip past the current record. For various reasons the cursor
374 * may end up set to -1 or set to point at the end of the current
375 * node. These cases must be addressed.
377 node = cursor->node->ondisk;
378 if (node == NULL)
379 return(ENOENT);
380 if (cursor->index != -1 &&
381 (cursor->flags & HAMMER_CURSOR_ATEDISK)) {
382 --cursor->index;
384 if (cursor->index == cursor->node->ondisk->count)
385 --cursor->index;
388 * Loop until an element is found or we are done.
390 for (;;) {
392 * We iterate up the tree and then index over one element
393 * while we are at the last element in the current node.
395 * NOTE: This can pop us up to another cluster.
397 * If we are at the root of the root cluster, cursor_up
398 * returns ENOENT.
400 * NOTE: hammer_cursor_up() will adjust cursor->key_beg
401 * when told to re-search for the cluster tag.
403 * XXX this could be optimized by storing the information in
404 * the parent reference.
406 * XXX we can lose the node lock temporarily, this could mess
407 * up our scan.
409 if (cursor->index == -1) {
410 error = hammer_cursor_up(cursor);
411 if (error) {
412 cursor->index = 0; /* sanity */
413 break;
415 /* reload stale pointer */
416 node = cursor->node->ondisk;
417 KKASSERT(cursor->index != node->count);
418 --cursor->index;
419 continue;
423 * Check internal or leaf element. Determine if the record
424 * at the cursor has gone beyond the end of our range.
426 * Generally we recurse down through internal nodes. An
427 * internal node can only be returned if INCLUSTER is set
428 * and the node represents a cluster-push record.
430 KKASSERT(cursor->index != node->count);
431 if (node->type == HAMMER_BTREE_TYPE_INTERNAL) {
432 elm = &node->elms[cursor->index];
433 r = hammer_btree_cmp(&cursor->key_end, &elm[0].base);
434 s = hammer_btree_cmp(&cursor->key_beg, &elm[1].base);
435 if (hammer_debug_btree) {
436 kprintf("BRACKETL %d:%d:%08x[%d] %016llx %02x %016llx %d\n",
437 cursor->node->cluster->volume->vol_no,
438 cursor->node->cluster->clu_no,
439 cursor->node->node_offset,
440 cursor->index,
441 elm[0].internal.base.obj_id,
442 elm[0].internal.base.rec_type,
443 elm[0].internal.base.key,
446 kprintf("BRACKETR %d:%d:%08x[%d] %016llx %02x %016llx %d\n",
447 cursor->node->cluster->volume->vol_no,
448 cursor->node->cluster->clu_no,
449 cursor->node->node_offset,
450 cursor->index + 1,
451 elm[1].internal.base.obj_id,
452 elm[1].internal.base.rec_type,
453 elm[1].internal.base.key,
458 if (s >= 0) {
459 error = ENOENT;
460 break;
462 KKASSERT(r >= 0);
465 * When iterating try to clean up any deleted
466 * internal elements left over from btree_remove()
467 * deadlocks, but it is ok if we can't.
469 if (elm->internal.subtree_offset == 0) {
470 btree_remove_deleted_element(cursor);
471 /* note: elm also invalid */
472 } else if (elm->internal.subtree_offset != 0) {
473 error = hammer_cursor_down(cursor);
474 if (error)
475 break;
476 KKASSERT(cursor->index == 0);
477 cursor->index = cursor->node->ondisk->count - 1;
479 /* reload stale pointer */
480 node = cursor->node->ondisk;
481 continue;
482 } else {
483 elm = &node->elms[cursor->index];
484 s = hammer_btree_cmp(&cursor->key_beg, &elm->base);
485 if (hammer_debug_btree) {
486 kprintf("ELEMENT %d:%d:%08x:%d %c %016llx %02x %016llx %d\n",
487 cursor->node->cluster->volume->vol_no,
488 cursor->node->cluster->clu_no,
489 cursor->node->node_offset,
490 cursor->index,
491 (elm[0].leaf.base.btype ?
492 elm[0].leaf.base.btype : '?'),
493 elm[0].leaf.base.obj_id,
494 elm[0].leaf.base.rec_type,
495 elm[0].leaf.base.key,
499 if (s > 0) {
500 error = ENOENT;
501 break;
504 switch(elm->leaf.base.btype) {
505 case HAMMER_BTREE_TYPE_RECORD:
506 if ((cursor->flags & HAMMER_CURSOR_ASOF) &&
507 hammer_btree_chkts(cursor->asof, &elm->base)) {
508 --cursor->index;
509 continue;
511 break;
512 case HAMMER_BTREE_TYPE_SPIKE_BEG:
514 * Skip the spike BEG record. We will hit
515 * the END record first since we are
516 * iterating backwards.
518 --cursor->index;
519 continue;
520 case HAMMER_BTREE_TYPE_SPIKE_END:
522 * The SPIKE_END element is inclusive, NOT
523 * like a boundary, so be careful with the
524 * match check.
526 * This code assumes that a preceding SPIKE_BEG
527 * has already been checked.
529 if (cursor->flags & HAMMER_CURSOR_INCLUSTER)
530 break;
531 error = hammer_cursor_down(cursor);
532 if (error)
533 break;
534 KKASSERT(cursor->index == 0);
535 /* reload stale pointer */
536 node = cursor->node->ondisk;
539 * If the cluster root is empty it and its
540 * related spike can be deleted. Ignore
541 * errors. Cursor
543 if (node->count == 0) {
544 error = hammer_cursor_upgrade(cursor);
545 if (error == 0)
546 error = btree_remove(cursor);
547 hammer_cursor_downgrade(cursor);
548 error = 0;
549 /* reload stale pointer */
550 node = cursor->node->ondisk;
552 cursor->index = node->count - 1;
553 continue;
554 default:
555 error = EINVAL;
556 break;
558 if (error)
559 break;
562 * node pointer invalid after loop
566 * Return entry
568 if (hammer_debug_btree) {
569 int i = cursor->index;
570 hammer_btree_elm_t elm = &cursor->node->ondisk->elms[i];
571 kprintf("ITERATE %p:%d %016llx %02x %016llx\n",
572 cursor->node, i,
573 elm->internal.base.obj_id,
574 elm->internal.base.rec_type,
575 elm->internal.base.key
578 return(0);
580 return(error);
584 * Lookup cursor->key_beg. 0 is returned on success, ENOENT if the entry
585 * could not be found, EDEADLK if inserting and a retry is needed, and a
586 * fatal error otherwise. When retrying, the caller must terminate the
587 * cursor and reinitialize it. EDEADLK cannot be returned if not inserting.
589 * The cursor is suitably positioned for a deletion on success, and suitably
590 * positioned for an insertion on ENOENT if HAMMER_CURSOR_INSERT was
591 * specified.
593 * The cursor may begin anywhere, the search will traverse clusters in
594 * either direction to locate the requested element.
596 * Most of the logic implementing historical searches is handled here. We
597 * do an initial lookup with create_tid set to the asof TID. Due to the
598 * way records are laid out, a backwards iteration may be required if
599 * ENOENT is returned to locate the historical record. Here's the
600 * problem:
602 * create_tid: 10 15 20
603 * LEAF1 LEAF2
604 * records: (11) (18)
606 * Let's say we want to do a lookup AS-OF timestamp 17. We will traverse
607 * LEAF2 but the only record in LEAF2 has a create_tid of 18, which is
608 * not visible and thus causes ENOENT to be returned. We really need
609 * to check record 11 in LEAF1. If it also fails then the search fails
610 * (e.g. it might represent the range 11-16 and thus still not match our
611 * AS-OF timestamp of 17).
613 * If this case occurs btree_search() will set HAMMER_CURSOR_CREATE_CHECK
614 * and the cursor->create_check TID if an iteration might be needed.
615 * In the above example create_check would be set to 14.
618 hammer_btree_lookup(hammer_cursor_t cursor)
620 int error;
622 if (cursor->flags & HAMMER_CURSOR_ASOF) {
623 KKASSERT((cursor->flags & HAMMER_CURSOR_INSERT) == 0);
624 cursor->key_beg.create_tid = cursor->asof;
625 for (;;) {
626 cursor->flags &= ~HAMMER_CURSOR_CREATE_CHECK;
627 error = btree_search(cursor, 0);
628 if (error != ENOENT ||
629 (cursor->flags & HAMMER_CURSOR_CREATE_CHECK) == 0) {
631 * Stop if no error.
632 * Stop if error other than ENOENT.
633 * Stop if ENOENT and not special case.
635 break;
637 if (hammer_debug_btree) {
638 kprintf("CREATE_CHECK %016llx\n",
639 cursor->create_check);
641 cursor->key_beg.create_tid = cursor->create_check;
642 /* loop */
644 } else {
645 error = btree_search(cursor, 0);
647 if (error == 0 && cursor->flags)
648 error = hammer_btree_extract(cursor, cursor->flags);
649 return(error);
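/*
 * Conceptual usage sketch for the as-of path above, assuming the caller
 * has already initialized the cursor and its key range elsewhere.  The
 * helper name is illustrative only.  Setting HAMMER_CURSOR_ASOF makes
 * hammer_btree_lookup() drive the CREATE_CHECK retry loop itself, and
 * GET_RECORD in cursor->flags makes the final hammer_btree_extract()
 * call load the matching record.
 */
static __inline int
btree_lookup_asof_example(hammer_cursor_t cursor, hammer_tid_t asof_tid)
{
	cursor->asof = asof_tid;
	cursor->flags |= HAMMER_CURSOR_ASOF | HAMMER_CURSOR_GET_RECORD;
	return(hammer_btree_lookup(cursor));
}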
653 * Execute the logic required to start an iteration. The first record
654 * located within the specified range is returned and iteration control
655 * flags are adjusted for successive hammer_btree_iterate() calls.
658 hammer_btree_first(hammer_cursor_t cursor)
660 int error;
662 error = hammer_btree_lookup(cursor);
663 if (error == ENOENT) {
664 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
665 error = hammer_btree_iterate(cursor);
667 cursor->flags |= HAMMER_CURSOR_ATEDISK;
668 return(error);
672 * Similarly but for an iteration in the reverse direction.
675 hammer_btree_last(hammer_cursor_t cursor)
677 struct hammer_base_elm save;
678 int error;
680 save = cursor->key_beg;
681 cursor->key_beg = cursor->key_end;
682 error = hammer_btree_lookup(cursor);
683 cursor->key_beg = save;
684 if (error == ENOENT ||
685 (cursor->flags & HAMMER_CURSOR_END_INCLUSIVE) == 0) {
686 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
687 error = hammer_btree_iterate_reverse(cursor);
689 cursor->flags |= HAMMER_CURSOR_ATEDISK;
690 return(error);
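/*
 * Conceptual usage sketch for the iteration interface above, assuming
 * the caller has initialized the cursor and set key_beg/key_end
 * elsewhere.  The helper name is illustrative only.  hammer_btree_first()
 * positions the cursor on the first in-range element; each record is
 * then pulled with hammer_btree_extract() and ATEDISK tells the next
 * hammer_btree_iterate() call to skip past it.
 */
static __inline int
btree_iterate_example(hammer_cursor_t cursor)
{
	int error;

	error = hammer_btree_first(cursor);
	while (error == 0) {
		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_RECORD);
		if (error)
			break;
		/* ... consume cursor->record here ... */
		cursor->flags |= HAMMER_CURSOR_ATEDISK;
		error = hammer_btree_iterate(cursor);
	}
	return((error == ENOENT) ? 0 : error);
}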
694 * Extract the record and/or data associated with the cursor's current
695 * position. Any prior record or data stored in the cursor is replaced.
696 * The cursor must be positioned at a leaf node.
698 * NOTE: Most extractions occur at the leaf of the B-Tree. The only
699 * extraction allowed at an internal element is at a cluster-push.
700 * Cluster-push elements have records but no data.
703 hammer_btree_extract(hammer_cursor_t cursor, int flags)
705 hammer_node_ondisk_t node;
706 hammer_btree_elm_t elm;
707 hammer_cluster_t cluster;
708 u_int64_t buf_type;
709 int32_t cloff;
710 int32_t roff;
711 int error;
714 * A cluster record type has no data reference, the information
715 * is stored directly in the record and B-Tree element.
717 * The case where the data reference resolves to the same buffer
718 * as the record reference must be handled.
720 node = cursor->node->ondisk;
721 elm = &node->elms[cursor->index];
722 cluster = cursor->node->cluster;
723 cursor->flags &= ~HAMMER_CURSOR_DATA_EMBEDDED;
724 cursor->data = NULL;
727 * There is nothing to extract for an internal element.
729 if (node->type == HAMMER_BTREE_TYPE_INTERNAL)
730 return(EINVAL);
732 KKASSERT(node->type == HAMMER_BTREE_TYPE_LEAF);
735 * Leaf element.
737 if ((flags & HAMMER_CURSOR_GET_RECORD)) {
738 cloff = elm->leaf.rec_offset;
739 cursor->record = hammer_bread(cluster, cloff,
740 HAMMER_FSBUF_RECORDS, &error,
741 &cursor->record_buffer);
742 } else {
743 cloff = 0;
744 error = 0;
746 if ((flags & HAMMER_CURSOR_GET_DATA) && error == 0) {
747 if (elm->leaf.base.btype != HAMMER_BTREE_TYPE_RECORD) {
749 * Only records have data references. Spike elements
750 * do not.
752 cursor->data = NULL;
753 } else if ((cloff ^ elm->leaf.data_offset) & ~HAMMER_BUFMASK) {
755 * The data is not in the same buffer as the last
756 * record we cached, but it could still be embedded
757 * in a record. Note that we may not have loaded the
758 * record's buffer above, depending on flags.
760 if ((elm->leaf.rec_offset ^ elm->leaf.data_offset) &
761 ~HAMMER_BUFMASK) {
762 if (elm->leaf.data_len & HAMMER_BUFMASK)
763 buf_type = HAMMER_FSBUF_DATA;
764 else
765 buf_type = 0; /* pure data buffer */
766 } else {
767 buf_type = HAMMER_FSBUF_RECORDS;
769 cursor->data = hammer_bread(cluster,
770 elm->leaf.data_offset,
771 buf_type, &error,
772 &cursor->data_buffer);
773 } else {
775 * Data in same buffer as record. Note that we
776 * leave any existing data_buffer intact, even
777 * though we don't use it in this case, in case
778 * other records extracted during an iteration
779 * go back to it.
781 * The data must be embedded in the record for this
782 * case to be hit.
784 * Just assume the buffer type is correct.
786 cursor->data = (void *)
787 ((char *)cursor->record_buffer->ondisk +
788 (elm->leaf.data_offset & HAMMER_BUFMASK));
789 roff = (char *)cursor->data - (char *)cursor->record;
790 KKASSERT (roff >= 0 && roff < HAMMER_RECORD_SIZE);
791 cursor->flags |= HAMMER_CURSOR_DATA_EMBEDDED;
794 return(error);
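/*
 * Conceptual sketch of the buffer-overlap test open-coded above: two
 * cluster-relative offsets refer to the same underlying filesystem
 * buffer exactly when they agree in every bit above HAMMER_BUFMASK,
 * which is what the XOR/mask expression checks.  Illustrative only.
 */
static __inline int
btree_offsets_share_buffer(int32_t off1, int32_t off2)
{
	return (((off1 ^ off2) & ~HAMMER_BUFMASK) == 0);
}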
799 * Insert a leaf element into the B-Tree at the current cursor position.
800 * The cursor is positioned such that the elements at and beyond the cursor
801 * are shifted to make room for the new record.
803 * The caller must call hammer_btree_lookup() with the HAMMER_CURSOR_INSERT
804 * flag set and that call must return ENOENT before this function can be
805 * called.
807 * ENOSPC is returned if there is no room to insert a new record.
810 hammer_btree_insert(hammer_cursor_t cursor, hammer_btree_elm_t elm)
812 hammer_node_ondisk_t node;
813 int i;
814 int error;
816 if ((error = hammer_cursor_upgrade(cursor)) != 0)
817 return(error);
820 * Insert the element at the leaf node and update the count in the
821 * parent. It is possible for parent to be NULL, indicating that
822 * the root of the B-Tree in the cluster is a leaf. It is also
823 * possible for the leaf to be empty.
825 * Remember that the right-hand boundary is not included in the
826 * count.
828 hammer_modify_node(cursor->node);
829 node = cursor->node->ondisk;
830 i = cursor->index;
831 KKASSERT(elm->base.btype != 0);
832 KKASSERT(node->type == HAMMER_BTREE_TYPE_LEAF);
833 KKASSERT(node->count < HAMMER_BTREE_LEAF_ELMS);
834 if (i != node->count) {
835 bcopy(&node->elms[i], &node->elms[i+1],
836 (node->count - i) * sizeof(*elm));
838 node->elms[i] = *elm;
839 ++node->count;
842 * Debugging sanity checks. Note that the element to the left
843 * can match the element we are inserting if it is a SPIKE_END,
844 * because spike-ends represent a non-inclusive end to a range.
846 KKASSERT(hammer_btree_cmp(cursor->left_bound, &elm->leaf.base) <= 0);
847 KKASSERT(hammer_btree_cmp(cursor->right_bound, &elm->leaf.base) > 0);
848 if (i) {
849 KKASSERT(hammer_btree_cmp(&node->elms[i-1].leaf.base, &elm->leaf.base) < 0);
851 if (i != node->count - 1)
852 KKASSERT(hammer_btree_cmp(&node->elms[i+1].leaf.base, &elm->leaf.base) > 0);
854 return(0);
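/*
 * Conceptual usage sketch of the insertion protocol described above; the
 * helper name is illustrative and the element is assumed to have been
 * fully constructed by the caller.  A lookup with HAMMER_CURSOR_INSERT
 * set must return ENOENT before hammer_btree_insert() may be called; a
 * lookup that succeeds means the key is already present.
 */
static __inline int
btree_insert_example(hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	int error;

	cursor->flags |= HAMMER_CURSOR_INSERT;
	error = hammer_btree_lookup(cursor);
	if (error == ENOENT)
		error = hammer_btree_insert(cursor, elm);
	else if (error == 0)
		error = EEXIST;
	return(error);
}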
858 * Insert a cluster spike into the B-Tree at the current cursor position.
859 * The caller pre-positions the insertion cursor at ncluster's
860 * left bound in the originating cluster. Both the originating cluster
861 * and the target cluster must be serialized, EDEADLK is fatal.
863 * Basically we have to lay down the two spike elements and assert that
864 * the leaf's right bound does not bisect the ending element. The ending
865 * spike element is non-inclusive, just like a boundary. The target cluster's
866 * clu_btree_parent_offset may have to be adjusted.
868 * NOTE: Serialization is usually accomplished by virtue of being the
869 * initial accessor of a cluster.
872 hammer_btree_insert_cluster(hammer_cursor_t cursor, hammer_cluster_t ncluster,
873 int32_t rec_offset)
875 hammer_node_ondisk_t node;
876 hammer_btree_elm_t elm;
877 hammer_cluster_t ocluster;
878 const int esize = sizeof(*elm);
879 int error;
880 int i;
881 int32_t node_offset;
883 if ((error = hammer_cursor_upgrade(cursor)) != 0)
884 return(error);
885 hammer_modify_node(cursor->node);
886 node = cursor->node->ondisk;
887 node_offset = cursor->node->node_offset;
888 i = cursor->index;
890 KKASSERT(node->type == HAMMER_BTREE_TYPE_LEAF);
891 KKASSERT(node->count <= HAMMER_BTREE_LEAF_ELMS - 2);
892 KKASSERT(i >= 0 && i <= node->count);
895 * Make sure the spike is legal or the B-Tree code will get really
896 * confused.
898 * XXX the right bound may bisect the two spike elements. We
899 * need code here to 'fix' the right bound going up the tree
900 * instead of an assertion.
902 KKASSERT(hammer_btree_cmp(&ncluster->ondisk->clu_btree_beg,
903 cursor->left_bound) >= 0);
904 KKASSERT(hammer_btree_cmp(&ncluster->ondisk->clu_btree_end,
905 cursor->right_bound) <= 0);
906 if (i != node->count) {
907 KKASSERT(hammer_btree_cmp(&ncluster->ondisk->clu_btree_end,
908 &node->elms[i].leaf.base) <= 0);
911 elm = &node->elms[i];
912 bcopy(elm, elm + 2, (node->count - i) * esize);
913 bzero(elm, 2 * esize);
914 node->count += 2;
916 elm[0].leaf.base = ncluster->ondisk->clu_btree_beg;
917 elm[0].leaf.base.btype = HAMMER_BTREE_TYPE_SPIKE_BEG;
918 elm[0].leaf.rec_offset = rec_offset;
919 elm[0].leaf.spike_clu_no = ncluster->clu_no;
920 elm[0].leaf.spike_vol_no = ncluster->volume->vol_no;
922 elm[1].leaf.base = ncluster->ondisk->clu_btree_end;
923 elm[1].leaf.base.btype = HAMMER_BTREE_TYPE_SPIKE_END;
924 elm[1].leaf.rec_offset = rec_offset;
925 elm[1].leaf.spike_clu_no = ncluster->clu_no;
926 elm[1].leaf.spike_vol_no = ncluster->volume->vol_no;
929 * SPIKE_END must be inclusive, not exclusive.
931 KKASSERT(elm[1].leaf.base.create_tid != 1);
932 --elm[1].leaf.base.create_tid;
935 * The target cluster's parent offset may have to be updated.
937 * NOTE: Modifying a cluster header does not mark it open, and
938 * flushing it will only clear an existing open flag if the cluster
939 * has been validated.
941 if (hammer_debug_general & 0x40) {
942 kprintf("INSERT CLUSTER %d:%d -> %d:%d ",
943 ncluster->ondisk->clu_btree_parent_vol_no,
944 ncluster->ondisk->clu_btree_parent_clu_no,
945 ncluster->volume->vol_no,
946 ncluster->clu_no);
949 ocluster = cursor->node->cluster;
950 if (ncluster->ondisk->clu_btree_parent_offset != node_offset ||
951 ncluster->ondisk->clu_btree_parent_clu_no != ocluster->clu_no ||
952 ncluster->ondisk->clu_btree_parent_vol_no != ocluster->volume->vol_no) {
953 hammer_modify_cluster(ncluster);
954 ncluster->ondisk->clu_btree_parent_offset = node_offset;
955 ncluster->ondisk->clu_btree_parent_clu_no = ocluster->clu_no;
956 ncluster->ondisk->clu_btree_parent_vol_no = ocluster->volume->vol_no;
957 if (hammer_debug_general & 0x40)
958 kprintf("(offset fixup)\n");
959 } else {
960 if (hammer_debug_general & 0x40)
961 kprintf("(offset unchanged)\n");
964 return(0);
968 * Delete a record from the B-Tree at the current cursor position.
969 * The cursor is positioned such that the current element is the one
970 * to be deleted.
972 * On return the cursor will be positioned after the deleted element and
973 * MAY point to an internal node. It will be suitable for the continuation
974 * of an iteration but not for an insertion or deletion.
976 * Deletions will attempt to partially rebalance the B-Tree in an upward
977 * direction, but will terminate rather than deadlock. Empty leaves are
978 * not allowed except at the root node of a cluster. An early termination
979 * will leave an internal node with an element whose subtree_offset is 0,
980 * a case detected and handled by btree_search().
982 * This function can return EDEADLK, requiring the caller to retry the
983 * operation after clearing the deadlock.
986 hammer_btree_delete(hammer_cursor_t cursor)
988 hammer_node_ondisk_t ondisk;
989 hammer_node_t node;
990 hammer_node_t parent;
991 int error;
992 int i;
994 if ((error = hammer_cursor_upgrade(cursor)) != 0)
995 return(error);
998 * Delete the element from the leaf node.
1000 * Remember that leaf nodes do not have boundaries.
1002 node = cursor->node;
1003 ondisk = node->ondisk;
1004 i = cursor->index;
1006 KKASSERT(ondisk->type == HAMMER_BTREE_TYPE_LEAF);
1007 KKASSERT(i >= 0 && i < ondisk->count);
1008 hammer_modify_node(node);
1009 if (i + 1 != ondisk->count) {
1010 bcopy(&ondisk->elms[i+1], &ondisk->elms[i],
1011 (ondisk->count - i - 1) * sizeof(ondisk->elms[0]));
1013 --ondisk->count;
1016 * Validate local parent
1018 if (ondisk->parent) {
1019 parent = cursor->parent;
1021 KKASSERT(parent != NULL);
1022 KKASSERT(parent->node_offset == ondisk->parent);
1023 KKASSERT(parent->cluster == node->cluster);
1027 * If the leaf becomes empty it must be detached from the parent,
1028 * potentially recursing through to the cluster root.
1030 * This may reposition the cursor at one of the parents of the
1031 * current node.
1033 * Ignore deadlock errors, that simply means that btree_remove
1034 * was unable to recurse and had to leave the subtree_offset
1035 * in the parent set to 0.
1037 KKASSERT(cursor->index <= ondisk->count);
1038 if (ondisk->count == 0) {
1039 do {
1040 error = btree_remove(cursor);
1041 } while (error == EAGAIN);
1042 if (error == EDEADLK)
1043 error = 0;
1044 } else {
1045 error = 0;
1047 KKASSERT(cursor->parent == NULL ||
1048 cursor->parent_index < cursor->parent->ondisk->count);
1049 return(error);
1053 * PRIMARY B-TREE SEARCH SUPPORT PROCEDURE
1055 * Search a cluster's B-Tree for cursor->key_beg, return the matching node.
1057 * The search can begin ANYWHERE in the B-Tree. As a first step the search
1058 * iterates up the tree as necessary to properly position itself prior to
1059 * actually doing the search.
1061 * INSERTIONS: The search will split full nodes and leaves on its way down
1062 * and guarantee that the leaf it ends up on is not full. If we run out
1063 * of space the search continues to the leaf (to position the cursor for
1064 * the spike), but ENOSPC is returned.
1066 * The search is only guaranteed to end up on a leaf if an error code of 0
1067 * is returned, or if inserting and an error code of ENOENT is returned.
1068 * Otherwise it can stop at an internal node. On success a search returns
1069 * a leaf node unless INCLUSTER is set and the search located a cluster push
1070 * node (which is an internal node).
1072 * COMPLEXITY WARNING! This is the core B-Tree search code for the entire
1073 * filesystem, and it is not simple code. Please note the following facts:
1075 * - Internal node recursions have a boundary on the left AND right. The
1076 * right boundary is non-inclusive. The create_tid is a generic part
1077 * of the key for internal nodes.
1079 * - Leaf nodes contain terminal elements AND spikes. A spike recurses into
1080 * another cluster and contains two leaf elements.. a beginning and an
1081 * ending element. The SPIKE_END element is RANGE-EXCLUSIVE, just like a
1082 * boundary. This means that it is possible to have two elements
1083 * (a spike ending element and a record) side by side with the same key.
1085 * - Because the SPIKE_END element is range inclusive, it cannot match the
1086 * right boundary of the parent node. SPIKE_BEG and SPIKE_END elements
1087 * always come in pairs, and always exist side by side in the same leaf.
1089 * - Filesystem lookups typically set HAMMER_CURSOR_ASOF, indicating a
1090 * historical search. ASOF and INSERT are mutually exclusive. When
1091 * doing an as-of lookup btree_search() checks for a right-edge boundary
1092 * case. If while recursing down the left-edge differs from the key
1093 * by ONLY its create_tid, HAMMER_CURSOR_CREATE_CHECK is set along
1094 * with cursor->create_check. This is used by btree_lookup() to iterate.
1095 * The iteration is backwards because as-of searches can wind up going
1096 * down the wrong branch of the B-Tree.
1098 static
1100 btree_search(hammer_cursor_t cursor, int flags)
1102 hammer_node_ondisk_t node;
1103 hammer_cluster_t cluster;
1104 hammer_btree_elm_t elm;
1105 int error;
1106 int enospc = 0;
1107 int i;
1108 int r;
1109 int s;
1111 flags |= cursor->flags;
1113 if (hammer_debug_btree) {
1114 kprintf("SEARCH %d:%d:%08x[%d] %016llx %02x key=%016llx cre=%016llx\n",
1115 cursor->node->cluster->volume->vol_no,
1116 cursor->node->cluster->clu_no,
1117 cursor->node->node_offset,
1118 cursor->index,
1119 cursor->key_beg.obj_id,
1120 cursor->key_beg.rec_type,
1121 cursor->key_beg.key,
1122 cursor->key_beg.create_tid
1127 * Move our cursor up the tree until we find a node whose range covers
1128 * the key we are trying to locate. This may move us between
1129 * clusters.
1131 * The left bound is inclusive, the right bound is non-inclusive.
1132 * It is ok to cursor up too far when cursoring across a cluster
1133 * boundary.
1135 * First see if we can skip the whole cluster. hammer_cursor_up()
1136 * handles both cases but this way we don't check the cluster
1137 * bounds when going up the tree within a cluster.
1139 * NOTE: If INCLUSTER is set and we are at the root of the cluster,
1140 * hammer_cursor_up() will return ENOENT.
1142 cluster = cursor->node->cluster;
1143 for (;;) {
1144 r = hammer_btree_cmp(&cursor->key_beg, &cluster->clu_btree_beg);
1145 s = hammer_btree_cmp(&cursor->key_beg, &cluster->clu_btree_end);
1147 if (r >= 0 && s < 0)
1148 break;
1149 error = hammer_cursor_toroot(cursor);
1150 if (error)
1151 goto done;
1152 KKASSERT(cursor->parent);
1153 error = hammer_cursor_up(cursor);
1154 if (error)
1155 goto done;
1156 cluster = cursor->node->cluster;
1158 for (;;) {
1159 r = hammer_btree_cmp(&cursor->key_beg, cursor->left_bound);
1160 s = hammer_btree_cmp(&cursor->key_beg, cursor->right_bound);
1161 if (r >= 0 && s < 0)
1162 break;
1163 KKASSERT(cursor->parent);
1164 error = hammer_cursor_up(cursor);
1165 if (error)
1166 goto done;
1170 * The delete-checks below are based on node, not parent. Set the
1171 * initial delete-check based on the parent.
1173 if (r == 1) {
1174 KKASSERT(cursor->left_bound->create_tid != 1);
1175 cursor->create_check = cursor->left_bound->create_tid - 1;
1176 cursor->flags |= HAMMER_CURSOR_CREATE_CHECK;
1180 * We had better have ended up with a node somewhere, and our second
1181 * while loop had better not have traversed up a cluster.
1183 KKASSERT(cursor->node != NULL && cursor->node->cluster == cluster);
1186 * If we are inserting we can't start at a full node if the parent
1187 * is also full (because there is no way to split the node),
1188 * continue running up the tree until the requirement is satisfied
1189 * or we hit the root of the current cluster.
1191 * (If inserting we aren't doing an as-of search so we don't have
1192 * to worry about create_check).
1194 while ((flags & HAMMER_CURSOR_INSERT) && enospc == 0) {
1195 if (cursor->node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
1196 if (btree_node_is_full(cursor->node->ondisk) == 0)
1197 break;
1198 } else {
1199 if (btree_node_is_almost_full(cursor->node->ondisk) ==0)
1200 break;
1202 if (cursor->node->ondisk->parent == 0 ||
1203 cursor->parent->ondisk->count != HAMMER_BTREE_INT_ELMS) {
1204 break;
1206 error = hammer_cursor_up(cursor);
1207 /* cluster and node may now become stale */
1208 if (error)
1209 goto done;
1211 /* cluster = cursor->node->cluster; not needed until next cluster = */
1213 new_cluster:
1215 * Push down through internal nodes to locate the requested key.
1217 cluster = cursor->node->cluster;
1218 node = cursor->node->ondisk;
1219 while (node->type == HAMMER_BTREE_TYPE_INTERNAL) {
1221 * Scan the node to find the subtree index to push down into.
1222 * We go one-past, then back-up.
1224 * We must proactively remove deleted elements which may
1225 * have been left over from a deadlocked btree_remove().
1227 * The left and right boundaries are included in the loop
1228 * in order to detect edge cases.
1230 * If the separator only differs by create_tid (r == 1)
1231 * and we are doing an as-of search, we may end up going
1232 * down a branch to the left of the one containing the
1233 * desired key. This requires numerous special cases.
1235 if (hammer_debug_btree) {
1236 kprintf("SEARCH-I %d:%d:%08x count=%d\n",
1237 cursor->node->cluster->volume->vol_no,
1238 cursor->node->cluster->clu_no,
1239 cursor->node->node_offset,
1240 node->count);
1242 for (i = 0; i <= node->count; ++i) {
1243 elm = &node->elms[i];
1244 r = hammer_btree_cmp(&cursor->key_beg, &elm->base);
1245 if (hammer_debug_btree > 2) {
1246 kprintf(" IELM %p %d r=%d\n",
1247 &node->elms[i], i, r);
1249 if (r < 0)
1250 break;
1251 if (r == 1) {
1252 KKASSERT(elm->base.create_tid != 1);
1253 cursor->create_check = elm->base.create_tid - 1;
1254 cursor->flags |= HAMMER_CURSOR_CREATE_CHECK;
1257 if (hammer_debug_btree) {
1258 kprintf("SEARCH-I preI=%d/%d r=%d\n",
1259 i, node->count, r);
1263 * These cases occur when the parent's idea of the boundary
1264 * is wider than the child's idea of the boundary, and
1265 * require special handling. If not inserting we can
1266 * terminate the search early for these cases but the
1267 * child's boundaries cannot be unconditionally modified.
1269 if (i == 0) {
1271 * If i == 0 the search terminated to the LEFT of the
1272 * left_boundary but to the RIGHT of the parent's left
1273 * boundary.
1275 u_int8_t save;
1277 elm = &node->elms[0];
1280 * If we aren't inserting we can stop here.
1282 if ((flags & HAMMER_CURSOR_INSERT) == 0) {
1283 cursor->index = 0;
1284 return(ENOENT);
1288 * Correct a left-hand boundary mismatch.
1290 * We can only do this if we can upgrade the lock.
1292 if ((error = hammer_cursor_upgrade(cursor)) != 0)
1293 return(error);
1294 hammer_modify_node(cursor->node);
1295 save = node->elms[0].base.btype;
1296 node->elms[0].base = *cursor->left_bound;
1297 node->elms[0].base.btype = save;
1298 } else if (i == node->count + 1) {
1300 * If i == node->count + 1 the search terminated to
1301 * the RIGHT of the right boundary but to the LEFT
1302 * of the parent's right boundary. If we aren't
1303 * inserting we can stop here.
1305 * Note that the last element in this case is
1306 * elms[i-2] prior to adjustments to 'i'.
1308 --i;
1309 if ((flags & HAMMER_CURSOR_INSERT) == 0) {
1310 cursor->index = i;
1311 return (ENOENT);
1315 * Correct a right-hand boundary mismatch.
1316 * (actual push-down record is i-2 prior to
1317 * adjustments to i).
1319 * We can only do this if we can upgrade the lock.
1321 if ((error = hammer_cursor_upgrade(cursor)) != 0)
1322 return(error);
1323 elm = &node->elms[i];
1324 hammer_modify_node(cursor->node);
1325 elm->base = *cursor->right_bound;
1326 --i;
1327 } else {
1329 * The push-down index is now i - 1. If we had
1330 * terminated on the right boundary this will point
1331 * us at the last element.
1333 --i;
1335 cursor->index = i;
1336 elm = &node->elms[i];
1338 if (hammer_debug_btree) {
1339 kprintf("RESULT-I %d:%d:%08x[%d] %016llx %02x "
1340 "key=%016llx cre=%016llx\n",
1341 cursor->node->cluster->volume->vol_no,
1342 cursor->node->cluster->clu_no,
1343 cursor->node->node_offset,
1345 elm->internal.base.obj_id,
1346 elm->internal.base.rec_type,
1347 elm->internal.base.key,
1348 elm->internal.base.create_tid
1353 * When searching try to clean up any deleted
1354 * internal elements left over from btree_remove()
1355 * deadlocks.
1357 * If we fail and we are doing an insertion lookup,
1358 * we have to return EDEADLK, because an insertion lookup
1359 * must terminate at a leaf.
1361 if (elm->internal.subtree_offset == 0) {
1362 error = btree_remove_deleted_element(cursor);
1363 if (error == 0)
1364 goto new_cluster;
1365 if (error == EDEADLK &&
1366 (flags & HAMMER_CURSOR_INSERT) == 0) {
1367 error = ENOENT;
1369 return(error);
1374 * Handle insertion and deletion requirements.
1376 * If inserting split full nodes. The split code will
1377 * adjust cursor->node and cursor->index if the current
1378 * index winds up in the new node.
1380 * If inserting and a left or right edge case was detected,
1381 * we cannot correct the left or right boundary and must
1382 * prepend and append an empty leaf node in order to make
1383 * the boundary correction.
1385 * If we run out of space we set enospc and continue on
1386 * to a leaf to provide the spike code with a good point
1387 * of entry. Enospc is reset if we cross a cluster boundary.
1389 if ((flags & HAMMER_CURSOR_INSERT) && enospc == 0) {
1390 if (btree_node_is_full(node)) {
1391 error = btree_split_internal(cursor);
1392 if (error) {
1393 if (error != ENOSPC)
1394 goto done;
1395 enospc = 1;
1398 * reload stale pointers
1400 i = cursor->index;
1401 node = cursor->node->ondisk;
1406 * Push down (push into new node, existing node becomes
1407 * the parent) and continue the search.
1409 error = hammer_cursor_down(cursor);
1410 /* node and cluster become stale */
1411 if (error)
1412 goto done;
1413 node = cursor->node->ondisk;
1414 cluster = cursor->node->cluster;
1418 * We are at a leaf, do a linear search of the key array.
1420 * If we encounter a spike element type within the necessary
1421 * range we push into it. Note that SPIKE_END is non-inclusive
1422 * of the spike range.
1424 * On success the index is set to the matching element and 0
1425 * is returned.
1427 * On failure the index is set to the insertion point and ENOENT
1428 * is returned.
1430 * Boundaries are not stored in leaf nodes, so the index can wind
1431 * up to the left of element 0 (index == 0) or past the end of
1432 * the array (index == node->count).
1434 KKASSERT (node->type == HAMMER_BTREE_TYPE_LEAF);
1435 KKASSERT(node->count <= HAMMER_BTREE_LEAF_ELMS);
1436 if (hammer_debug_btree) {
1437 kprintf("SEARCH-L %d:%d:%08x count=%d\n",
1438 cursor->node->cluster->volume->vol_no,
1439 cursor->node->cluster->clu_no,
1440 cursor->node->node_offset,
1441 node->count);
1444 for (i = 0; i < node->count; ++i) {
1445 elm = &node->elms[i];
1447 r = hammer_btree_cmp(&cursor->key_beg, &elm->leaf.base);
1449 if (hammer_debug_btree > 1)
1450 kprintf(" ELM %p %d r=%d\n", &node->elms[i], i, r);
1452 if (elm->leaf.base.btype == HAMMER_BTREE_TYPE_SPIKE_BEG) {
1454 * SPIKE_BEG. Stop if we are to the left of the
1455 * spike begin element.
1457 * If we are not the last element in the leaf continue
1458 * the loop looking for the SPIKE_END. If we are
1459 * the last element, however, then push into the
1460 * spike.
1462 * If doing an as-of search a Spike demark on a
1463 * create_tid boundary must be pushed into and an
1464 * iteration will be forced if it turned out to be
1465 * the wrong choice.
1467 * If not doing an as-of search exact comparisons
1468 * must be used.
1470 * enospc must be reset because we have crossed a
1471 * cluster boundary.
1473 if (r < 0)
1474 goto failed;
1477 * Set the create_check if the spike element
1478 * only differs by its create_tid.
1480 if (r == 1) {
1481 cursor->create_check = elm->base.create_tid - 1;
1482 cursor->flags |= HAMMER_CURSOR_CREATE_CHECK;
1484 if (i != node->count - 1)
1485 continue;
1486 panic("btree_search: illegal spike, no SPIKE_END "
1487 "in leaf node! %p\n", cursor->node);
1489 if (elm->leaf.base.btype == HAMMER_BTREE_TYPE_SPIKE_END) {
1491 * SPIKE_END. We can only hit this case if we are
1492 * greater or equal to SPIKE_BEG.
1494 * If we are <= SPIKE_END we must push into
1495 * it, otherwise continue the search. The SPIKE_END
1496 * element is range-inclusive.
1498 * enospc must be reset because we have crossed a
1499 * cluster boundary.
1501 if (r > 0) {
1503 * Continue the search but check for a
1504 * create_tid boundary. Because the
1505 * SPIKE_END is inclusive we do not have
1506 * to subtract 1 to force an iteration to
1507 * go down the spike.
1509 if (r == 1) {
1510 cursor->create_check =
1511 elm->base.create_tid - 1;
1512 cursor->flags |=
1513 HAMMER_CURSOR_CREATE_CHECK;
1515 continue;
1517 if (flags & HAMMER_CURSOR_INCLUSTER)
1518 goto success;
1519 cursor->index = i;
1520 error = hammer_cursor_down(cursor);
1521 enospc = 0;
1522 if (error)
1523 goto done;
1524 goto new_cluster;
1528 * We are at a record element. Stop if we've flipped past
1529 * key_beg, not counting the create_tid test. Allow the
1530 * r == 1 case (key_beg > element but differs only by its
1531 * create_tid) to fall through to the AS-OF check.
1533 KKASSERT (elm->leaf.base.btype == HAMMER_BTREE_TYPE_RECORD);
1535 if (r < 0)
1536 goto failed;
1537 if (r > 1)
1538 continue;
1541 * Check our as-of timestamp against the element.
1543 if (flags & HAMMER_CURSOR_ASOF) {
1544 if (hammer_btree_chkts(cursor->asof,
1545 &node->elms[i].base) != 0) {
1546 continue;
1548 /* success */
1549 } else {
1550 if (r > 0) /* can only be +1 */
1551 continue;
1552 /* success */
1554 success:
1555 cursor->index = i;
1556 error = 0;
1557 if (hammer_debug_btree) {
1558 kprintf("RESULT-L %d:%d:%08x[%d] (SUCCESS)\n",
1559 cursor->node->cluster->volume->vol_no,
1560 cursor->node->cluster->clu_no,
1561 cursor->node->node_offset,
1564 goto done;
1568 * The search of the leaf node failed. i is the insertion point.
1570 failed:
1571 if (hammer_debug_btree) {
1572 kprintf("RESULT-L %d:%d:%08x[%d] (FAILED)\n",
1573 cursor->node->cluster->volume->vol_no,
1574 cursor->node->cluster->clu_no,
1575 cursor->node->node_offset,
1580 * No exact match was found, i is now at the insertion point.
1582 * If inserting split a full leaf before returning. This
1583 * may have the side effect of adjusting cursor->node and
1584 * cursor->index.
1586 * For now the leaf must have at least 2 free elements to accommodate
1587 * the insertion of a spike during recovery. See the
1588 * hammer_btree_insert_cluster() function.
1590 cursor->index = i;
1591 if ((flags & HAMMER_CURSOR_INSERT) && enospc == 0 &&
1592 btree_node_is_almost_full(node)) {
1593 error = btree_split_leaf(cursor);
1594 if (error) {
1595 if (error != ENOSPC)
1596 goto done;
1597 enospc = 1;
1600 * reload stale pointers
1602 /* NOT USED
1603 i = cursor->index;
1604 node = &cursor->node->internal;
1609 * We reached a leaf but did not find the key we were looking for.
1610 * If this is an insert we will be properly positioned for an insert
1611 * (ENOENT) or spike (ENOSPC) operation.
1613 error = enospc ? ENOSPC : ENOENT;
1614 done:
1615 return(error);
1619 /************************************************************************
1620 * SPLITTING AND MERGING *
1621 ************************************************************************
1623 * These routines do all the dirty work required to split and merge nodes.
1627 * Split an internal node into two nodes and move the separator at the split
1628 * point to the parent.
1630 * (cursor->node, cursor->index) indicates the element the caller intends
1631 * to push into. We will adjust node and index if that element winds
1632 * up in the split node.
1634 * If we are at the root of a cluster a new root must be created with two
1635 * elements, one pointing to the original root and one pointing to the
1636 * newly allocated split node.
1638 * NOTE! Being at the root of a cluster is different from being at the
1639 * root of the root cluster. cursor->parent will not be NULL and
1640 * cursor->node->ondisk.parent must be tested against 0. Theoretically
1641 * we could propagate the algorithm into the parent and deal with multiple
1642 * 'roots' in the cluster header, but it's easier not to.
1644 static
1646 btree_split_internal(hammer_cursor_t cursor)
1648 hammer_node_ondisk_t ondisk;
1649 hammer_node_t node;
1650 hammer_node_t parent;
1651 hammer_node_t new_node;
1652 hammer_btree_elm_t elm;
1653 hammer_btree_elm_t parent_elm;
1654 hammer_node_locklist_t locklist = NULL;
1655 int parent_index;
1656 int made_root;
1657 int split;
1658 int error;
1659 int i;
1660 const int esize = sizeof(*elm);
1662 if ((error = hammer_cursor_upgrade(cursor)) != 0)
1663 return(error);
1664 if ((cursor->flags & HAMMER_CURSOR_RECOVER) == 0) {
1665 error = hammer_btree_lock_children(cursor, &locklist);
1666 if (error)
1667 goto done;
1671 * We are splitting but elms[split] will be promoted to the parent,
1672 * leaving the right hand node with one less element. If the
1673 * insertion point will be on the left-hand side adjust the split
1674 * point to give the right hand side one additional node.
1676 node = cursor->node;
1677 ondisk = node->ondisk;
1678 split = (ondisk->count + 1) / 2;
1679 if (cursor->index <= split)
1680 --split;
1683 * If we are at the root of the cluster, create a new root node with
1684 * 1 element and split normally. Avoid making major modifications
1685 * until we know the whole operation will work.
1687 * The root of the cluster is different from the root of the root
1688 * cluster. Use the node's on-disk structure's parent offset to
1689 * detect the case.
1691 if (ondisk->parent == 0) {
1692 parent = hammer_alloc_btree(node->cluster, &error);
1693 if (parent == NULL)
1694 goto done;
1695 hammer_lock_ex(&parent->lock);
1696 hammer_modify_node(parent);
1697 ondisk = parent->ondisk;
1698 ondisk->count = 1;
1699 ondisk->parent = 0;
1700 ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
1701 ondisk->elms[0].base = node->cluster->clu_btree_beg;
1702 ondisk->elms[0].base.btype = node->ondisk->type;
1703 ondisk->elms[0].internal.subtree_offset = node->node_offset;
1704 ondisk->elms[1].base = node->cluster->clu_btree_end;
1705 /* ondisk->elms[1].base.btype - not used */
1706 made_root = 1;
1707 parent_index = 0; /* index of current node in parent */
1708 } else {
1709 made_root = 0;
1710 parent = cursor->parent;
1711 parent_index = cursor->parent_index;
1712 KKASSERT(parent->cluster == node->cluster);
1716 * Split node into new_node at the split point.
1718 * B O O O P N N B <-- P = node->elms[split]
1719 * 0 1 2 3 4 5 6 <-- subtree indices
1721 * x x P x x
1722 * s S S s
1723 * / \
1724 * B O O O B B N N B <--- inner boundary points are 'P'
1725 * 0 1 2 3 4 5 6
1728 new_node = hammer_alloc_btree(node->cluster, &error);
1729 if (new_node == NULL) {
1730 if (made_root) {
1731 hammer_unlock(&parent->lock);
1732 parent->flags |= HAMMER_NODE_DELETED;
1733 hammer_rel_node(parent);
1735 goto done;
1737 hammer_lock_ex(&new_node->lock);
1740 * Create the new node. P becomes the left-hand boundary in the
1741 * new node. Copy the right-hand boundary as well.
1743 * elm is the new separator.
1745 hammer_modify_node(new_node);
1746 hammer_modify_node(node);
1747 ondisk = node->ondisk;
1748 elm = &ondisk->elms[split];
1749 bcopy(elm, &new_node->ondisk->elms[0],
1750 (ondisk->count - split + 1) * esize);
1751 new_node->ondisk->count = ondisk->count - split;
1752 new_node->ondisk->parent = parent->node_offset;
1753 new_node->ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
1754 KKASSERT(ondisk->type == new_node->ondisk->type);
1757 * Cleanup the original node. Elm (P) becomes the new boundary,
1758 * its subtree_offset was moved to the new node. If we had created
1759 * a new root its parent pointer may have changed.
1761 elm->internal.subtree_offset = 0;
1762 ondisk->count = split;
1765 * Insert the separator into the parent, fixup the parent's
1766 * reference to the original node, and reference the new node.
1767 * The separator is P.
1769 * Remember that base.count does not include the right-hand boundary.
1771 hammer_modify_node(parent);
1772 ondisk = parent->ondisk;
1773 KKASSERT(ondisk->count != HAMMER_BTREE_INT_ELMS);
1774 parent_elm = &ondisk->elms[parent_index+1];
1775 bcopy(parent_elm, parent_elm + 1,
1776 (ondisk->count - parent_index) * esize);
1777 parent_elm->internal.base = elm->base; /* separator P */
1778 parent_elm->internal.base.btype = new_node->ondisk->type;
1779 parent_elm->internal.subtree_offset = new_node->node_offset;
1780 ++ondisk->count;
1783 * The children of new_node need their parent pointer set to new_node.
1784 * The children have already been locked by
1785 * hammer_btree_lock_children().
1787 for (i = 0; i < new_node->ondisk->count; ++i) {
1788 elm = &new_node->ondisk->elms[i];
1789 error = btree_set_parent(new_node, elm);
1790 if (error) {
1791 panic("btree_split_internal: btree-fixup problem");
1796 * The cluster's root pointer may have to be updated.
1798 if (made_root) {
1799 hammer_modify_cluster(node->cluster);
1800 node->cluster->ondisk->clu_btree_root = parent->node_offset;
1801 node->ondisk->parent = parent->node_offset;
1802 if (cursor->parent) {
1803 hammer_unlock(&cursor->parent->lock);
1804 hammer_rel_node(cursor->parent);
1806 cursor->parent = parent; /* lock'd and ref'd */
1811 * Ok, now adjust the cursor depending on which element the original
1812 * index was pointing at. If we are >= the split point the push node
1813 * is now in the new node.
1815 * NOTE: If we are at the split point itself we cannot stay with the
1816 * original node because the push index will point at the right-hand
1817 * boundary, which is illegal.
1819 * NOTE: The cursor's parent or parent_index must be adjusted for
1820 * the case where a new parent (new root) was created, and the case
1821 * where the cursor is now pointing at the split node.
1823 if (cursor->index >= split) {
1824 cursor->parent_index = parent_index + 1;
1825 cursor->index -= split;
1826 hammer_unlock(&cursor->node->lock);
1827 hammer_rel_node(cursor->node);
1828 cursor->node = new_node; /* locked and ref'd */
1829 } else {
1830 cursor->parent_index = parent_index;
1831 hammer_unlock(&new_node->lock);
1832 hammer_rel_node(new_node);
1836 * Fixup left and right bounds
1838 parent_elm = &parent->ondisk->elms[cursor->parent_index];
1839 cursor->left_bound = &parent_elm[0].internal.base;
1840 cursor->right_bound = &parent_elm[1].internal.base;
1841 KKASSERT(hammer_btree_cmp(cursor->left_bound,
1842 &cursor->node->ondisk->elms[0].internal.base) <= 0);
1843 KKASSERT(hammer_btree_cmp(cursor->right_bound,
1844 &cursor->node->ondisk->elms[cursor->node->ondisk->count].internal.base) >= 0);
1846 done:
1847 hammer_btree_unlock_children(&locklist);
1848 hammer_cursor_downgrade(cursor);
1849 return (error);
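/*
 * Conceptual sketch of the split-point selection used by
 * btree_split_internal() above and btree_split_leaf() below: promote the
 * middle element, but when the caller's insertion index falls on the
 * left-hand side give the right-hand node one additional element.  For
 * example, count=8 and index=2 yields split=3.  btree_split_leaf()
 * additionally backs the split point off of a SPIKE_END so a spike pair
 * is never separated.  Illustrative only.
 */
static __inline int
btree_split_point(int count, int index)
{
	int split = (count + 1) / 2;

	if (index <= split)
		--split;
	return(split);
}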
1853 * Same as the above, but splits a full leaf node.
1855 * This function
1857 static
1859 btree_split_leaf(hammer_cursor_t cursor)
1861 hammer_node_ondisk_t ondisk;
1862 hammer_node_t parent;
1863 hammer_node_t leaf;
1864 hammer_node_t new_leaf;
1865 hammer_btree_elm_t elm;
1866 hammer_btree_elm_t parent_elm;
1867 hammer_base_elm_t mid_boundary;
1868 hammer_node_locklist_t locklist = NULL;
1869 int parent_index;
1870 int made_root;
1871 int split;
1872 int error;
1873 int i;
1874 const size_t esize = sizeof(*elm);
1876 if ((error = hammer_cursor_upgrade(cursor)) != 0)
1877 return(error);
1878 if ((cursor->flags & HAMMER_CURSOR_RECOVER) == 0) {
1879 error = hammer_btree_lock_children(cursor, &locklist);
1880 if (error)
1881 goto done;
1885 * Calculate the split point. If the insertion point will be on
1886 * the left-hand side, adjust the split point to give the right-hand
1887 * side one additional element.
1889 * Spikes are made up of two leaf elements which cannot be
1890 * safely split.
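 *
 * For example (values hypothetical): with count = 8 the initial split
 * is (8 + 1) / 2 = 4; if the insertion index is <= 4 the split is
 * pulled back to 3 so the right-hand leaf receives the extra elements.
 * If elms[split] turns out to be a SPIKE_END the split is backed up
 * one more so the SPIKE_BEG/SPIKE_END pair stays together.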
1892 leaf = cursor->node;
1893 ondisk = leaf->ondisk;
1894 split = (ondisk->count + 1) / 2;
1895 if (cursor->index <= split)
1896 --split;
1897 error = 0;
1899 elm = &ondisk->elms[split];
1900 if (elm->leaf.base.btype == HAMMER_BTREE_TYPE_SPIKE_END) {
1901 KKASSERT(split &&
1902 elm[-1].leaf.base.btype == HAMMER_BTREE_TYPE_SPIKE_BEG);
1903 --split;
1907 * If we are at the root of the tree, create a new root node with
1908 * 1 element and split normally. Avoid making major modifications
1909 * until we know the whole operation will work.
1911 if (ondisk->parent == 0) {
1912 parent = hammer_alloc_btree(leaf->cluster, &error);
1913 if (parent == NULL)
1914 goto done;
1915 hammer_lock_ex(&parent->lock);
1916 hammer_modify_node(parent);
1917 ondisk = parent->ondisk;
1918 ondisk->count = 1;
1919 ondisk->parent = 0;
1920 ondisk->type = HAMMER_BTREE_TYPE_INTERNAL;
1921 ondisk->elms[0].base = leaf->cluster->clu_btree_beg;
1922 ondisk->elms[0].base.btype = leaf->ondisk->type;
1923 ondisk->elms[0].internal.subtree_offset = leaf->node_offset;
1924 ondisk->elms[1].base = leaf->cluster->clu_btree_end;
1925 /* ondisk->elms[1].base.btype = not used */
1926 made_root = 1;
1927 parent_index = 0; /* insertion point in parent */
1928 } else {
1929 made_root = 0;
1930 parent = cursor->parent;
1931 parent_index = cursor->parent_index;
1932 KKASSERT(parent->cluster == leaf->cluster);
1936 * Split leaf into new_leaf at the split point. Select a separator
1937 * value in-between the two leafs but with a bent towards the right
1938 * leaf since comparisons use an 'elm >= separator' inequality.
1940 * L L L L L L L L
1942 * x x P x x
1943 * s S S s
1944 * / \
1945 * L L L L L L L L
1947 new_leaf = hammer_alloc_btree(leaf->cluster, &error);
1948 if (new_leaf == NULL) {
1949 if (made_root) {
1950 hammer_unlock(&parent->lock);
1951 parent->flags |= HAMMER_NODE_DELETED;
1952 hammer_rel_node(parent);
1954 goto done;
1956 hammer_lock_ex(&new_leaf->lock);
1959 * Create the new node. P (elm) becomes the left-hand boundary in the
1960 * new node. Copy the right-hand boundary as well.
1962 hammer_modify_node(leaf);
1963 hammer_modify_node(new_leaf);
1964 ondisk = leaf->ondisk;
1965 elm = &ondisk->elms[split];
1966 bcopy(elm, &new_leaf->ondisk->elms[0], (ondisk->count - split) * esize);
1967 new_leaf->ondisk->count = ondisk->count - split;
1968 new_leaf->ondisk->parent = parent->node_offset;
1969 new_leaf->ondisk->type = HAMMER_BTREE_TYPE_LEAF;
1970 KKASSERT(ondisk->type == new_leaf->ondisk->type);
1973 * Cleanup the original node. Because this is a leaf node and
1974 * leaf nodes do not have a right-hand boundary, there
1975 * aren't any special edge cases to clean up. We just fixup the
1976 * count.
1978 ondisk->count = split;
1981 * Insert the separator into the parent, fixup the parent's
1982 * reference to the original node, and reference the new node.
1983 * The separator is P.
1985 * Remember that base.count does not include the right-hand boundary.
1986 * We are copying parent_index+1 to parent_index+2, not +0 to +1.
1988 hammer_modify_node(parent);
1989 ondisk = parent->ondisk;
1990 KKASSERT(ondisk->count != HAMMER_BTREE_INT_ELMS);
1991 parent_elm = &ondisk->elms[parent_index+1];
1992 bcopy(parent_elm, parent_elm + 1,
1993 (ondisk->count - parent_index) * esize);
1996 * Create the separator. XXX At the moment use exactly the
1997 * right-hand element if this is a recovery operation in order
1998 * to guarantee that it does not bisect the spike elements in a
1999 * later call to hammer_btree_insert_cluster().
2001 if (cursor->flags & HAMMER_CURSOR_RECOVER) {
2002 parent_elm->base = elm[0].base;
2003 } else {
2004 hammer_make_separator(&elm[-1].base, &elm[0].base,
2005 &parent_elm->base);
2007 parent_elm->internal.base.btype = new_leaf->ondisk->type;
2008 parent_elm->internal.subtree_offset = new_leaf->node_offset;
2009 mid_boundary = &parent_elm->base;
2010 ++ondisk->count;
2013 * The children of new_leaf need their parent pointer set to new_leaf.
2014 * The children have already been locked by btree_lock_children().
2016 * The leaf's elements are either TYPE_RECORD or TYPE_SPIKE_*. Only
2017 * elements of BTREE_TYPE_SPIKE_END really require any action.
2019 for (i = 0; i < new_leaf->ondisk->count; ++i) {
2020 elm = &new_leaf->ondisk->elms[i];
2021 error = btree_set_parent(new_leaf, elm);
2022 if (error) {
2023 panic("btree_split_leaf: btree-fixup problem");
2028 * The cluster's root pointer may have to be updated.
2030 if (made_root) {
2031 hammer_modify_cluster(leaf->cluster);
2032 leaf->cluster->ondisk->clu_btree_root = parent->node_offset;
2033 leaf->ondisk->parent = parent->node_offset;
2034 if (cursor->parent) {
2035 hammer_unlock(&cursor->parent->lock);
2036 hammer_rel_node(cursor->parent);
2038 cursor->parent = parent; /* lock'd and ref'd */
2042 * Ok, now adjust the cursor depending on which element the original
2043 * index was pointing at. If we are >= the split point the push index
2044 * is now in the new node.
2046 * NOTE: If we are at the split point itself we need to select the
2047 * old or new node based on where key_beg's insertion point will be.
2048 * If we pick the wrong side the inserted element will wind up in
2049 * the wrong leaf node and outside that node's bounds.
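 *
 * For example (values hypothetical): with split = 4 and cursor->index
 * == 4, the cursor follows new_leaf (index 4 - 4 = 0) only if key_beg
 * compares >= the separator (mid_boundary); otherwise it stays on the
 * original leaf at index 4, which is now the position just past the
 * last remaining element.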
2051 if (cursor->index > split ||
2052 (cursor->index == split &&
2053 hammer_btree_cmp(&cursor->key_beg, mid_boundary) >= 0)) {
2054 cursor->parent_index = parent_index + 1;
2055 cursor->index -= split;
2056 hammer_unlock(&cursor->node->lock);
2057 hammer_rel_node(cursor->node);
2058 cursor->node = new_leaf;
2059 } else {
2060 cursor->parent_index = parent_index;
2061 hammer_unlock(&new_leaf->lock);
2062 hammer_rel_node(new_leaf);
2066 * Fixup left and right bounds
2068 parent_elm = &parent->ondisk->elms[cursor->parent_index];
2069 cursor->left_bound = &parent_elm[0].internal.base;
2070 cursor->right_bound = &parent_elm[1].internal.base;
2073 * Note: The right assertion is typically > 0, but if the last element
2074 * is a SPIKE_END it can be == 0 because the spike-end is non-inclusive
2075 * of the range being spiked.
2077 * This may seem a bit odd but it works.
2079 KKASSERT(hammer_btree_cmp(cursor->left_bound,
2080 &cursor->node->ondisk->elms[0].leaf.base) <= 0);
2081 KKASSERT(hammer_btree_cmp(cursor->right_bound,
2082 &cursor->node->ondisk->elms[cursor->node->ondisk->count-1].leaf.base) >= 0);
2084 done:
2085 hammer_btree_unlock_children(&locklist);
2086 hammer_cursor_downgrade(cursor);
2087 return (error);
2091 * Recursively correct the right-hand boundary's create_tid to (tid) as
2092 * long as the rest of the key matches. We have to recurse upward in
2093 * the tree as well as down the left side of each parent's right node.
2095 * Return EDEADLK if we were only partially successful, forcing the caller
2096 * to try again. The original cursor is not modified. This routine can
2097 * also fail with EDEADLK if it is forced to throw away a portion of its
2098 * record history.
2100 * The caller must pass a downgraded cursor to us (otherwise we can't dup it).
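 *
 * A sketch of the walk (illustrative, not exhaustive): starting from an
 * element whose obj_id/rec_type/key match its node's right bound, each
 * parent whose right bound still carries a create_tid older than (tid)
 * is pushed onto a list while cursoring up; the list is then replayed
 * from the topmost entry down, correcting each bound and walking the
 * left edge of the adjacent right-hand subtree via
 * hammer_btree_correct_lhb().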
2102 struct hammer_rhb {
2103 TAILQ_ENTRY(hammer_rhb) entry;
2104 hammer_node_t node;
2105 int index;
2108 TAILQ_HEAD(hammer_rhb_list, hammer_rhb);
2111 hammer_btree_correct_rhb(hammer_cursor_t cursor, hammer_tid_t tid)
2113 struct hammer_rhb_list rhb_list;
2114 hammer_base_elm_t elm;
2115 hammer_node_t orig_node;
2116 struct hammer_rhb *rhb;
2117 int orig_index;
2118 int error;
2120 TAILQ_INIT(&rhb_list);
2123 * Save our position so we can restore it on return. This also
2124 * gives us a stable 'elm'.
2126 orig_node = cursor->node;
2127 hammer_ref_node(orig_node);
2128 hammer_lock_sh(&orig_node->lock);
2129 orig_index = cursor->index;
2130 elm = &orig_node->ondisk->elms[orig_index].base;
2133 * Now build a list of parents going up, allocating a rhb
2134 * structure for each one.
2136 while (cursor->parent) {
2138 * Stop if we no longer have any right-bounds to fix up
2140 if (elm->obj_id != cursor->right_bound->obj_id ||
2141 elm->rec_type != cursor->right_bound->rec_type ||
2142 elm->key != cursor->right_bound->key) {
2143 break;
2147 * Stop if the right-hand bound's create_tid does not
2148 * need to be corrected. Note that if the parent is
2149 * a cluster the bound is pointing at the actual bound
2150 * in the cluster header, not the SPIKE_END element in
2151 * the parent cluster, so we don't have to worry about
2152 * the fact that SPIKE_END is range-inclusive.
2154 if (cursor->right_bound->create_tid >= tid)
2155 break;
2157 KKASSERT(cursor->parent->ondisk->elms[cursor->parent_index].base.btype != HAMMER_BTREE_TYPE_SPIKE_BEG);
2159 rhb = kmalloc(sizeof(*rhb), M_HAMMER, M_WAITOK|M_ZERO);
2160 rhb->node = cursor->parent;
2161 rhb->index = cursor->parent_index;
2162 hammer_ref_node(rhb->node);
2163 hammer_lock_sh(&rhb->node->lock);
2164 TAILQ_INSERT_HEAD(&rhb_list, rhb, entry);
2166 hammer_cursor_up(cursor);
2170 * Now safely adjust the right-hand bound for each rhb. This may
2171 * also require taking the right side of the tree and iterating down
2172 * ITS left side.
2174 error = 0;
2175 while (error == 0 && (rhb = TAILQ_FIRST(&rhb_list)) != NULL) {
2176 error = hammer_cursor_seek(cursor, rhb->node, rhb->index);
2177 kprintf("CORRECT RHB %d:%d:%08x index %d type=%c\n",
2178 rhb->node->cluster->volume->vol_no,
2179 rhb->node->cluster->clu_no, rhb->node->node_offset,
2180 rhb->index, cursor->node->ondisk->type);
2181 if (error)
2182 break;
2183 TAILQ_REMOVE(&rhb_list, rhb, entry);
2184 hammer_unlock(&rhb->node->lock);
2185 hammer_rel_node(rhb->node);
2186 kfree(rhb, M_HAMMER);
2188 switch (cursor->node->ondisk->type) {
2189 case HAMMER_BTREE_TYPE_INTERNAL:
2191 * Right-boundary for parent at internal node
2192 * is one element to the right of the element whose
2193 * right boundary needs adjusting. We must then
2194 * traverse down the left side correcting any left
2195 * bounds (which may now be too far to the left).
2197 ++cursor->index;
2198 error = hammer_btree_correct_lhb(cursor, tid);
2199 break;
2200 case HAMMER_BTREE_TYPE_LEAF:
2202 * Right-boundary for parent at leaf node. Both
2203 * the SPIKE_END and the cluster header must be
2204 * corrected, but we don't have to traverse down
2205 * (there's nothing TO traverse down other than what
2206 * we've already recorded).
2208 * The SPIKE_END is range-inclusive.
2210 error = hammer_cursor_down(cursor);
2211 if (error == 0)
2212 error = hammer_lock_upgrade(&cursor->parent->lock);
2213 if (error == 0) {
2214 kprintf("hammer_btree_correct_rhb-X @%d:%d:%08x\n",
2215 cursor->parent->cluster->volume->vol_no,
2216 cursor->parent->cluster->clu_no,
2217 cursor->parent->node_offset);
2218 hammer_modify_node(cursor->parent);
2219 elm = &cursor->parent->ondisk->elms[cursor->parent_index].base;
2220 KKASSERT(elm->btype == HAMMER_BTREE_TYPE_SPIKE_END);
2221 elm->create_tid = tid - 1;
2222 hammer_modify_cluster(cursor->node->cluster);
2223 cursor->node->cluster->ondisk->clu_btree_end.create_tid = tid;
2224 cursor->node->cluster->clu_btree_end.create_tid = tid;
2226 break;
2227 default:
2228 panic("hammer_btree_correct_rhb(): Bad node type");
2229 error = EINVAL;
2230 break;
2235 * Cleanup
2237 while ((rhb = TAILQ_FIRST(&rhb_list)) != NULL) {
2238 TAILQ_REMOVE(&rhb_list, rhb, entry);
2239 hammer_unlock(&rhb->node->lock);
2240 hammer_rel_node(rhb->node);
2241 kfree(rhb, M_HAMMER);
2243 error = hammer_cursor_seek(cursor, orig_node, orig_index);
2244 hammer_unlock(&orig_node->lock);
2245 hammer_rel_node(orig_node);
2246 return (error);
2250 * Similar to rhb (in fact, rhb calls lhb), but corrects the left hand
2251 * bound going downward starting at the current cursor position.
2253 * This function does not restore the cursor after use.
2256 hammer_btree_correct_lhb(hammer_cursor_t cursor, hammer_tid_t tid)
2258 struct hammer_rhb_list rhb_list;
2259 hammer_base_elm_t elm;
2260 hammer_base_elm_t cmp;
2261 struct hammer_rhb *rhb;
2262 int error;
2264 TAILQ_INIT(&rhb_list);
2266 cmp = &cursor->node->ondisk->elms[cursor->index].base;
2269 * Record the node and traverse down the left-hand side for all
2270 * matching records needing a boundary correction.
2272 error = 0;
2273 for (;;) {
2274 rhb = kmalloc(sizeof(*rhb), M_HAMMER, M_WAITOK|M_ZERO);
2275 rhb->node = cursor->node;
2276 rhb->index = cursor->index;
2277 hammer_ref_node(rhb->node);
2278 hammer_lock_sh(&rhb->node->lock);
2279 TAILQ_INSERT_HEAD(&rhb_list, rhb, entry);
2281 if (cursor->node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
2283 * Nothing to traverse down if we are at the right
2284 * boundary of an internal node.
2286 if (cursor->index == cursor->node->ondisk->count)
2287 break;
2288 } else {
2289 elm = &cursor->node->ondisk->elms[cursor->index].base;
2290 if (elm->btype == HAMMER_BTREE_TYPE_RECORD)
2291 break;
2292 KKASSERT(elm->btype == HAMMER_BTREE_TYPE_SPIKE_BEG);
2294 error = hammer_cursor_down(cursor);
2295 if (error)
2296 break;
2298 elm = &cursor->node->ondisk->elms[cursor->index].base;
2299 if (elm->obj_id != cmp->obj_id ||
2300 elm->rec_type != cmp->rec_type ||
2301 elm->key != cmp->key) {
2302 break;
2304 if (elm->create_tid >= tid)
2305 break;
2310 * Now we can safely adjust the left-hand boundary from the bottom-up.
2311 * The last element we remove from the list is the caller's right hand
2312 * boundary, which must also be adjusted.
2314 while (error == 0 && (rhb = TAILQ_FIRST(&rhb_list)) != NULL) {
2315 error = hammer_cursor_seek(cursor, rhb->node, rhb->index);
2316 if (error)
2317 break;
2318 TAILQ_REMOVE(&rhb_list, rhb, entry);
2319 hammer_unlock(&rhb->node->lock);
2320 hammer_rel_node(rhb->node);
2321 kfree(rhb, M_HAMMER);
2323 elm = &cursor->node->ondisk->elms[cursor->index].base;
2324 if (cursor->node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
2325 kprintf("hammer_btree_correct_lhb-I @%d:%d:%08x @%d\n",
2326 cursor->node->cluster->volume->vol_no,
2327 cursor->node->cluster->clu_no,
2328 cursor->node->node_offset, cursor->index);
2329 hammer_modify_node(cursor->node);
2330 elm->create_tid = tid;
2331 } else if (elm->btype == HAMMER_BTREE_TYPE_SPIKE_BEG) {
2333 * SPIKE_BEG, also correct cluster header. Occurs
2334 * only while we are traversing the left-hand
2335 * boundary.
2337 kprintf("hammer_btree_correct_lhb-B @%d:%d:%08x\n",
2338 cursor->node->cluster->volume->vol_no,
2339 cursor->node->cluster->clu_no,
2340 cursor->node->node_offset);
2341 hammer_modify_node(cursor->node);
2342 elm->create_tid = tid;
2345 * We can only cursor down through SPIKE_END.
2347 ++cursor->index;
2348 error = hammer_cursor_down(cursor);
2349 if (error == 0)
2350 error = hammer_lock_upgrade(&cursor->parent->lock);
2351 if (error == 0) {
2352 hammer_modify_node(cursor->parent);
2353 elm = &cursor->parent->ondisk->elms[cursor->parent_index - 1].base;
2354 KKASSERT(elm->btype == HAMMER_BTREE_TYPE_SPIKE_BEG);
2355 elm->create_tid = tid;
2356 hammer_modify_cluster(cursor->node->cluster);
2357 cursor->node->cluster->ondisk->clu_btree_end.create_tid = tid;
2358 cursor->node->cluster->clu_btree_end.create_tid = tid;
2360 } else {
2361 panic("hammer_btree_correct_lhb(): Bad element type");
2366 * Cleanup
2368 while ((rhb = TAILQ_FIRST(&rhb_list)) != NULL) {
2369 TAILQ_REMOVE(&rhb_list, rhb, entry);
2370 hammer_unlock(&rhb->node->lock);
2371 hammer_rel_node(rhb->node);
2372 kfree(rhb, M_HAMMER);
2374 return (error);
2378 * Attempt to remove the empty B-Tree node at (cursor->node). Returns 0
2379 * on success, EAGAIN if we could not acquire the necessary locks, or some
2380 * other error. This node can be a leaf node or an internal node.
2382 * On return the cursor may end up pointing at an internal node, suitable
2383 * for further iteration but not for an immediate insertion or deletion.
2385 * cursor->node may be an internal node or a leaf node.
2387 * NOTE: If cursor->node has one element it is the parent trying to delete
2388 * that element, make sure cursor->index is properly adjusted on success.
2391 btree_remove(hammer_cursor_t cursor)
2393 hammer_node_ondisk_t ondisk;
2394 hammer_btree_elm_t elm;
2395 hammer_node_t node;
2396 hammer_node_t save;
2397 hammer_node_t parent;
2398 const int esize = sizeof(*elm);
2399 int error;
2402 * If we are at the root of the cluster we must be able to
2403 * successfully delete the HAMMER_BTREE_SPIKE_* leaf elements in
2404 * the parent in order to be able to destroy the cluster.
2406 node = cursor->node;
2408 if (node->ondisk->parent == 0) {
2409 hammer_modify_node(node);
2410 ondisk = node->ondisk;
2411 ondisk->type = HAMMER_BTREE_TYPE_LEAF;
2412 ondisk->count = 0;
2413 cursor->index = 0;
2414 error = 0;
2417 * When trying to delete a cluster we need to exclusively
2418 * lock the cluster root, its parent (leaf in parent cluster),
2419 * AND the parent of that leaf if it's going to be empty,
2420 * because we can't leave around an empty leaf.
2422 * XXX this is messy due to potentially recursive locks.
2423 * Downgrade the cursor, get a second shared lock on the
2424 * node (which cannot deadlock because we only own shared
2425 * locks), then cursor-up and re-upgrade everything. If the
2426 * upgrades return EDEADLK, don't try to remove the cluster
2427 * at this time.
2429 if ((parent = cursor->parent) != NULL) {
2430 hammer_cursor_downgrade(cursor);
2431 save = node;
2432 hammer_ref_node(save);
2433 hammer_lock_sh(&save->lock);
2436 * After the cursor up save has the empty root node
2437 * of the target cluster to be deleted, cursor->node
2438 * is at the leaf containing the spikes, and
2439 * cursor->parent is the parent of that leaf.
2441 * cursor->node and cursor->parent are both in the
2442 * parent cluster of the cluster being deleted.
2444 error = hammer_cursor_up(cursor);
2446 if (error == 0)
2447 error = hammer_cursor_upgrade(cursor);
2448 if (error == 0)
2449 error = hammer_lock_upgrade(&save->lock);
2451 if (error) {
2452 /* may be EDEADLK */
2453 kprintf("BTREE_REMOVE: Cannot delete cluster\n");
2454 Debugger("BTREE_REMOVE");
2455 } else {
2457 * cursor->node is now the leaf in the parent
2458 * cluster containing the spike elements.
2460 * The cursor should be pointing at the
2461 * SPIKE_END element.
2463 * Remove the spike elements and recurse
2464 * if the leaf becomes empty.
2466 node = cursor->node;
2467 hammer_modify_node(node);
2468 ondisk = node->ondisk;
2469 KKASSERT(cursor->index > 0);
2470 --cursor->index;
2471 elm = &ondisk->elms[cursor->index];
2472 KKASSERT(elm[0].leaf.base.btype ==
2473 HAMMER_BTREE_TYPE_SPIKE_BEG);
2474 KKASSERT(elm[1].leaf.base.btype ==
2475 HAMMER_BTREE_TYPE_SPIKE_END);
2478 * Ok, remove it and the underlying record.
2480 hammer_free_record(node->cluster,
2481 elm->leaf.rec_offset,
2482 HAMMER_RECTYPE_CLUSTER);
2483 bcopy(elm + 2, elm, (ondisk->count -
2484 cursor->index - 2) * esize);
2485 ondisk->count -= 2;
2486 save->flags |= HAMMER_NODE_DELETED;
2487 save->cluster->flags |= HAMMER_CLUSTER_DELETED;
2488 hammer_flush_node(save);
2489 hammer_unlock(&save->lock);
2490 hammer_rel_node(save);
2491 if (ondisk->count == 0)
2492 error = EAGAIN;
2495 return(error);
2499 * Zero-out the parent's reference to the child and flag the
2500 * child for destruction. This ensures that the child is not
2501 * reused while other references to it exist.
2503 parent = cursor->parent;
2504 hammer_modify_node(parent);
2505 ondisk = parent->ondisk;
2506 KKASSERT(ondisk->type == HAMMER_BTREE_TYPE_INTERNAL);
2507 elm = &ondisk->elms[cursor->parent_index];
2508 KKASSERT(elm->internal.subtree_offset == node->node_offset);
2509 elm->internal.subtree_offset = 0;
2511 hammer_flush_node(node);
2512 node->flags |= HAMMER_NODE_DELETED;
2515 * If the parent would otherwise not become empty we can physically
2516 * remove the zero'd element. Note however that in order to
2517 * guarantee a valid cursor we still need to be able to cursor up
2518 * because we no longer have a node.
2520 * This collapse will change the parent's boundary elements, making
2521 * them wider. The new boundaries are recursively corrected in
2522 * btree_search().
2524 * XXX we can theoretically recalculate the midpoint but there isn't
2525 * much of a reason to do it.
2527 error = hammer_cursor_up(cursor);
2528 if (error == 0)
2529 error = hammer_cursor_upgrade(cursor);
2531 if (error) {
2532 kprintf("BTREE_REMOVE: Cannot lock parent, skipping\n");
2533 Debugger("BTREE_REMOVE");
2534 return (0);
2538 * Remove the internal element from the parent. The bcopy must
2539 * include the right boundary element.
2541 KKASSERT(parent == cursor->node && ondisk == parent->ondisk);
2542 node = parent;
2543 parent = NULL;
2544 /* ondisk is node's ondisk */
2545 /* elm is node's element */
2548 * Remove the internal element that we zero'd out. Tell the caller
2549 * to loop if the count hits zero (to try to avoid eating up precious kernel
2550 * stack).
2552 KKASSERT(ondisk->count > 0);
2553 bcopy(&elm[1], &elm[0], (ondisk->count - cursor->index) * esize);
2554 --ondisk->count;
2555 if (ondisk->count == 0)
2556 error = EAGAIN;
2557 return(error);
2561 * Attempt to remove the deleted internal element at the current cursor
2562 * position. If we are unable to remove the element we return EDEADLK.
2564 * If the current internal node becomes empty we delete it in the parent
2565 * and cursor up, looping until we finish or we deadlock.
2567 * On return, if successful, the cursor will be pointing at the next
2568 * iterative position in the B-Tree. If unsuccessful the cursor will be
2569 * pointing at the last deleted internal element that could not be
2570 * removed.
2572 static
2574 btree_remove_deleted_element(hammer_cursor_t cursor)
2576 hammer_node_t node;
2577 hammer_btree_elm_t elm;
2578 int error;
2580 if ((error = hammer_cursor_upgrade(cursor)) != 0)
2581 return(error);
2582 node = cursor->node;
2583 elm = &node->ondisk->elms[cursor->index];
2584 if (elm->internal.subtree_offset == 0) {
2585 do {
2586 error = btree_remove(cursor);
2587 kprintf("BTREE REMOVE DELETED ELEMENT %d\n", error);
2588 } while (error == EAGAIN);
2590 return(error);
2594 * The element (elm) has been moved to a new internal node (node).
2596 * If the element represents a pointer to an internal node that node's
2597 * parent must be adjusted to the element's new location.
2599 * If the element represents a spike the target cluster's header must
2600 * be adjusted to point to the element's new location. This only
2601 * applies to HAMMER_SPIKE_END.
2603 * GET_CLUSTER_NORECOVER must be used to avoid a recovery recursion during
2604 * the rebuild of the recovery cluster's B-Tree, which can blow the kernel
2605 * stack.
2607 * XXX deadlock potential here with our exclusive locks
2609 static
2611 btree_set_parent(hammer_node_t node, hammer_btree_elm_t elm)
2613 hammer_volume_t volume;
2614 hammer_cluster_t cluster;
2615 hammer_node_t child;
2616 int error;
2618 error = 0;
2620 switch(elm->base.btype) {
2621 case HAMMER_BTREE_TYPE_INTERNAL:
2622 case HAMMER_BTREE_TYPE_LEAF:
2623 child = hammer_get_node(node->cluster,
2624 elm->internal.subtree_offset, &error);
2625 if (error == 0) {
2626 hammer_modify_node(child);
2627 child->ondisk->parent = node->node_offset;
2628 hammer_rel_node(child);
2630 break;
2631 case HAMMER_BTREE_TYPE_SPIKE_END:
2632 volume = hammer_get_volume(node->cluster->volume->hmp,
2633 elm->leaf.spike_vol_no, &error);
2634 if (error)
2635 break;
2636 cluster = hammer_get_cluster(volume, elm->leaf.spike_clu_no,
2637 &error, GET_CLUSTER_NORECOVER);
2638 hammer_rel_volume(volume, 0);
2639 if (error)
2640 break;
2641 hammer_modify_cluster(cluster);
2642 cluster->ondisk->clu_btree_parent_offset = node->node_offset;
2643 KKASSERT(cluster->ondisk->clu_btree_parent_clu_no ==
2644 node->cluster->clu_no);
2645 KKASSERT(cluster->ondisk->clu_btree_parent_vol_no ==
2646 node->cluster->volume->vol_no);
2647 hammer_rel_cluster(cluster, 0);
2648 break;
2649 default:
2650 break;
2652 return(error);
2656 * Exclusively lock all the children of node. This is used by the split
2657 * code to prevent anyone from accessing the children of a cursor node
2658 * while we fix-up its parent offset.
2660 * If we don't lock the children we can really mess up cursors which block
2661 * trying to cursor-up into our node.
2663 * WARNING: Cannot be used when doing B-tree operations on a recovery
2664 * cluster because the target cluster may require recovery, resulting
2665 * in a deep recursion which blows the kernel stack.
2667 * On failure EDEADLK (or some other error) is returned. If a deadlock
2668 * error is returned the cursor is adjusted to block on termination.
2671 hammer_btree_lock_children(hammer_cursor_t cursor,
2672 struct hammer_node_locklist **locklistp)
2674 hammer_node_t node;
2675 hammer_node_locklist_t item;
2676 hammer_node_ondisk_t ondisk;
2677 hammer_btree_elm_t elm;
2678 hammer_volume_t volume;
2679 hammer_cluster_t cluster;
2680 hammer_node_t child;
2681 int error;
2682 int i;
2684 node = cursor->node;
2685 ondisk = node->ondisk;
2686 error = 0;
2687 for (i = 0; error == 0 && i < ondisk->count; ++i) {
2688 elm = &ondisk->elms[i];
2690 child = NULL;
2691 switch(elm->base.btype) {
2692 case HAMMER_BTREE_TYPE_INTERNAL:
2693 case HAMMER_BTREE_TYPE_LEAF:
2694 child = hammer_get_node(node->cluster,
2695 elm->internal.subtree_offset,
2696 &error);
2697 break;
2698 case HAMMER_BTREE_TYPE_SPIKE_END:
2699 volume = hammer_get_volume(node->cluster->volume->hmp,
2700 elm->leaf.spike_vol_no,
2701 &error);
2702 if (error)
2703 break;
2704 cluster = hammer_get_cluster(volume,
2705 elm->leaf.spike_clu_no,
2706 &error,
2708 hammer_rel_volume(volume, 0);
2709 if (error)
2710 break;
2711 KKASSERT(cluster->ondisk->clu_btree_root != 0);
2712 child = hammer_get_node(cluster,
2713 cluster->ondisk->clu_btree_root,
2714 &error);
2715 hammer_rel_cluster(cluster, 0);
2716 break;
2717 default:
2718 break;
2720 if (child) {
2721 if (hammer_lock_ex_try(&child->lock) != 0) {
2722 if (cursor->deadlk_node == NULL) {
2723 cursor->deadlk_node = node;
2724 hammer_ref_node(cursor->deadlk_node);
2726 error = EDEADLK;
2727 } else {
2728 item = kmalloc(sizeof(*item),
2729 M_HAMMER, M_WAITOK);
2730 item->next = *locklistp;
2731 item->node = child;
2732 *locklistp = item;
2736 if (error)
2737 hammer_btree_unlock_children(locklistp);
2738 return(error);
2743 * Release previously obtained node locks.
2745 void
2746 hammer_btree_unlock_children(struct hammer_node_locklist **locklistp)
2748 hammer_node_locklist_t item;
2750 while ((item = *locklistp) != NULL) {
2751 *locklistp = item->next;
2752 hammer_unlock(&item->node->lock);
2753 hammer_rel_node(item->node);
2754 kfree(item, M_HAMMER);
2758 /************************************************************************
2759 * MISCELLANEOUS SUPPORT *
2760 ************************************************************************/
2763 * Compare two B-Tree elements, return -N, 0, or +N (e.g. similar to strcmp).
2765 * Note that for this particular function a return value of -1, 0, or +1
2766 * can denote a match if create_tid is otherwise discounted. A create_tid
2767 * of zero is considered to be 'infinity' in comparisons.
2769 * See also hammer_rec_rb_compare() and hammer_rec_cmp() in hammer_object.c.
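 *
 * For example (values hypothetical): if key1->obj_id < key2->obj_id the
 * result is -4 regardless of the remaining fields; if the obj_ids and
 * rec_types match but key1->key > key2->key the result is 2; and if
 * only the create_tids differ the result is -1, 0, or +1, with a
 * create_tid of 0 sorting after every non-zero create_tid.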
2772 hammer_btree_cmp(hammer_base_elm_t key1, hammer_base_elm_t key2)
2774 if (key1->obj_id < key2->obj_id)
2775 return(-4);
2776 if (key1->obj_id > key2->obj_id)
2777 return(4);
2779 if (key1->rec_type < key2->rec_type)
2780 return(-3);
2781 if (key1->rec_type > key2->rec_type)
2782 return(3);
2784 if (key1->key < key2->key)
2785 return(-2);
2786 if (key1->key > key2->key)
2787 return(2);
2790 * A create_tid of zero indicates a record which is undeletable
2791 * and must be considered to have a value of positive infinity.
2793 if (key1->create_tid == 0) {
2794 if (key2->create_tid == 0)
2795 return(0);
2796 return(1);
2798 if (key2->create_tid == 0)
2799 return(-1);
2800 if (key1->create_tid < key2->create_tid)
2801 return(-1);
2802 if (key1->create_tid > key2->create_tid)
2803 return(1);
2804 return(0);
2808 * Test a timestamp against an element to determine whether the
2809 * element is visible. A timestamp of 0 means 'infinity'.
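 *
 * For example (values hypothetical): with create_tid = 10 and
 * delete_tid = 20, an asof of 15 returns 0 (visible), an asof of 5
 * returns -1 (not yet created), and an asof of 20 returns 1 (deleted).
 * An asof of 0 sees only elements which have not been deleted.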
2812 hammer_btree_chkts(hammer_tid_t asof, hammer_base_elm_t base)
2814 if (asof == 0) {
2815 if (base->delete_tid)
2816 return(1);
2817 return(0);
2819 if (asof < base->create_tid)
2820 return(-1);
2821 if (base->delete_tid && asof >= base->delete_tid)
2822 return(1);
2823 return(0);
2827 * Create a separator halfway between key1 and key2. For fields just
2828 * one unit apart, the separator will match key2. key1 is on the left-hand
2829 * side and key2 is on the right-hand side.
2831 * create_tid has to be special cased because a value of 0 represents
2832 * infinity.
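 *
 * For example (values hypothetical): with key1->key = 10 and
 * key2->key = 20 the separator key becomes 10 + ((20 - 10 + 1) >> 1),
 * i.e. 15; with adjacent keys 10 and 11 it becomes 11, matching key2
 * as noted above. create_tid is forced to 0 unless obj_id, rec_type
 * and key all match, in which case it is bisected the same way, except
 * that a key2 create_tid of 0 (infinity) yields key1->create_tid + 1.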
2834 #define MAKE_SEPARATOR(key1, key2, dest, field) \
2835 dest->field = key1->field + ((key2->field - key1->field + 1) >> 1);
2837 static void
2838 hammer_make_separator(hammer_base_elm_t key1, hammer_base_elm_t key2,
2839 hammer_base_elm_t dest)
2841 bzero(dest, sizeof(*dest));
2842 MAKE_SEPARATOR(key1, key2, dest, obj_id);
2843 MAKE_SEPARATOR(key1, key2, dest, rec_type);
2844 MAKE_SEPARATOR(key1, key2, dest, key);
2846 if (key1->obj_id == key2->obj_id &&
2847 key1->rec_type == key2->rec_type &&
2848 key1->key == key2->key) {
2849 if (key1->create_tid == 0) {
2851 * Oops, a create_tid of 0 means 'infinity', so
2852 * if everything matches this just isn't legal.
2854 panic("key1->create_tid of 0 is impossible here");
2855 } else if (key2->create_tid == 0) {
2856 dest->create_tid = key1->create_tid + 1;
2857 } else {
2858 MAKE_SEPARATOR(key1, key2, dest, create_tid);
2860 } else {
2861 dest->create_tid = 0;
2865 #undef MAKE_SEPARATOR
2868 * Return whether a generic internal or leaf node is full
2870 static int
2871 btree_node_is_full(hammer_node_ondisk_t node)
2873 switch(node->type) {
2874 case HAMMER_BTREE_TYPE_INTERNAL:
2875 if (node->count == HAMMER_BTREE_INT_ELMS)
2876 return(1);
2877 break;
2878 case HAMMER_BTREE_TYPE_LEAF:
2879 if (node->count == HAMMER_BTREE_LEAF_ELMS)
2880 return(1);
2881 break;
2882 default:
2883 panic("illegal btree subtype");
2885 return(0);
2889 * Return whether a generic internal or leaf node is almost full. This
2890 * routine is used as a helper for search insertions to guarantee at
2891 * least 2 available slots in the internal node(s) leading up to a leaf,
2892 * so hammer_btree_insert_cluster() will function properly.
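 *
 * For example: an internal node is reported as almost full once fewer
 * than two of its HAMMER_BTREE_INT_ELMS slots remain free, i.e. when
 * count > HAMMER_BTREE_INT_ELMS - 2.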
2894 static int
2895 btree_node_is_almost_full(hammer_node_ondisk_t node)
2897 switch(node->type) {
2898 case HAMMER_BTREE_TYPE_INTERNAL:
2899 if (node->count > HAMMER_BTREE_INT_ELMS - 2)
2900 return(1);
2901 break;
2902 case HAMMER_BTREE_TYPE_LEAF:
2903 if (node->count > HAMMER_BTREE_LEAF_ELMS - 2)
2904 return(1);
2905 break;
2906 default:
2907 panic("illegal btree subtype");
2909 return(0);
2912 #if 0
2913 static int
2914 btree_max_elements(u_int8_t type)
2916 if (type == HAMMER_BTREE_TYPE_LEAF)
2917 return(HAMMER_BTREE_LEAF_ELMS);
2918 if (type == HAMMER_BTREE_TYPE_INTERNAL)
2919 return(HAMMER_BTREE_INT_ELMS);
2920 panic("btree_max_elements: bad type %d\n", type);
2922 #endif
2924 void
2925 hammer_print_btree_node(hammer_node_ondisk_t ondisk)
2927 hammer_btree_elm_t elm;
2928 int i;
2930 kprintf("node %p count=%d parent=%d type=%c\n",
2931 ondisk, ondisk->count, ondisk->parent, ondisk->type);
2934 * Dump both boundary elements if an internal node
2936 if (ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
2937 for (i = 0; i <= ondisk->count; ++i) {
2938 elm = &ondisk->elms[i];
2939 hammer_print_btree_elm(elm, ondisk->type, i);
2941 } else {
2942 for (i = 0; i < ondisk->count; ++i) {
2943 elm = &ondisk->elms[i];
2944 hammer_print_btree_elm(elm, ondisk->type, i);
2949 void
2950 hammer_print_btree_elm(hammer_btree_elm_t elm, u_int8_t type, int i)
2952 kprintf(" %2d", i);
2953 kprintf("\tobj_id = %016llx\n", elm->base.obj_id);
2954 kprintf("\tkey = %016llx\n", elm->base.key);
2955 kprintf("\tcreate_tid = %016llx\n", elm->base.create_tid);
2956 kprintf("\tdelete_tid = %016llx\n", elm->base.delete_tid);
2957 kprintf("\trec_type = %04x\n", elm->base.rec_type);
2958 kprintf("\tobj_type = %02x\n", elm->base.obj_type);
2959 kprintf("\tbtype = %02x (%c)\n",
2960 elm->base.btype,
2961 (elm->base.btype ? elm->base.btype : '?'));
2963 switch(type) {
2964 case HAMMER_BTREE_TYPE_INTERNAL:
2965 kprintf("\tsubtree_off = %08x\n",
2966 elm->internal.subtree_offset);
2967 break;
2968 case HAMMER_BTREE_TYPE_SPIKE_BEG:
2969 case HAMMER_BTREE_TYPE_SPIKE_END:
2970 kprintf("\tspike_clu_no = %d\n", elm->leaf.spike_clu_no);
2971 kprintf("\tspike_vol_no = %d\n", elm->leaf.spike_vol_no);
2972 break;
2973 case HAMMER_BTREE_TYPE_RECORD:
2974 kprintf("\trec_offset = %08x\n", elm->leaf.rec_offset);
2975 kprintf("\tdata_offset = %08x\n", elm->leaf.data_offset);
2976 kprintf("\tdata_len = %08x\n", elm->leaf.data_len);
2977 kprintf("\tdata_crc = %08x\n", elm->leaf.data_crc);
2978 break;