/*
 * lib/btree.c	- Simple In-memory B+Tree
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
 * Bits and pieces stolen from Peter Zijlstra's code, which is
 * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch
 *
 * A relatively simple B+Tree implementation.  I have written it as a learning
 * exercise to understand how B+Trees work.  It turned out to be useful as well.
 *
 * B+Trees can be used similarly to Linux radix trees (which don't have anything
 * in common with textbook radix trees, beware).  A prerequisite for them working
 * well is that access to a random tree node is much faster than a large number
 * of operations within each node.
 * Disks have fulfilled the prerequisite for a long time.  More recently DRAM
 * has gained similar properties, as memory access times, when measured in cpu
 * cycles, have increased.  Cacheline sizes have increased as well, which also
 * helps B+Trees.
 *
 * Compared to radix trees, B+Trees are more efficient when dealing with a
 * sparsely populated address space.  Between 25% and 50% of the memory is
 * occupied with valid pointers.  When densely populated, radix trees contain
 * ~98% pointers - hard to beat.  Very sparse radix trees contain only ~2%
 * pointers.
 * This particular implementation stores pointers identified by a long value.
 * Storing NULL pointers is illegal, lookup will return NULL when no entry
 * was found.
 *
 * A trick is used that is not commonly found in textbooks.  The lowest
 * values are to the right, not to the left.  All used slots within a node
 * are on the left, all unused slots contain NUL values.  Most operations
 * simply loop once over all slots and terminate on the first NUL.
 */
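/*
 * Example usage of the raw API defined below (an illustrative sketch only:
 * it assumes process context, GFP_KERNEL allocations and caller-provided
 * locking, ignores error returns, and "item" stands for any non-NULL
 * pointer the caller wants to store):
 *
 *	struct btree_head head;
 *	unsigned long key = 42;
 *
 *	btree_init(&head);
 *	btree_insert(&head, &btree_geo32, &key, item, GFP_KERNEL);
 *	item = btree_lookup(&head, &btree_geo32, &key);
 *	btree_remove(&head, &btree_geo32, &key);
 *	btree_destroy(&head);
 */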
#include <linux/btree.h>
#include <linux/cache.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define NODESIZE MAX(L1_CACHE_BYTES, 128)
struct btree_geo {
	int keylen;
	int no_pairs;
	int no_longs;
};

struct btree_geo btree_geo32 = {
	.keylen = 1,
	.no_pairs = NODESIZE / sizeof(long) / 2,
	.no_longs = NODESIZE / sizeof(long) / 2,
};
EXPORT_SYMBOL_GPL(btree_geo32);
#define LONG_PER_U64 (64 / BITS_PER_LONG)
struct btree_geo btree_geo64 = {
	.keylen = LONG_PER_U64,
	.no_pairs = NODESIZE / sizeof(long) / (1 + LONG_PER_U64),
	.no_longs = LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + LONG_PER_U64)),
};
EXPORT_SYMBOL_GPL(btree_geo64);
struct btree_geo btree_geo128 = {
	.keylen = 2 * LONG_PER_U64,
	.no_pairs = NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64),
	.no_longs = 2 * LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64)),
};
EXPORT_SYMBOL_GPL(btree_geo128);
static struct kmem_cache *btree_cachep;
void *btree_alloc(gfp_t gfp_mask, void *pool_data)
{
	return kmem_cache_alloc(btree_cachep, gfp_mask);
}
EXPORT_SYMBOL_GPL(btree_alloc);
void btree_free(void *element, void *pool_data)
{
	kmem_cache_free(btree_cachep, element);
}
EXPORT_SYMBOL_GPL(btree_free);
static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp)
{
	unsigned long *node;

	node = mempool_alloc(head->mempool, gfp);
	if (likely(node))
		memset(node, 0, NODESIZE);
	return node;
}
static int longcmp(const unsigned long *l1, const unsigned long *l2, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (l1[i] < l2[i])
			return -1;
		if (l1[i] > l2[i])
			return 1;
	}
	return 0;
}
static unsigned long *longcpy(unsigned long *dest, const unsigned long *src,
		size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		dest[i] = src[i];
	return dest;
}
static unsigned long *longset(unsigned long *s, unsigned long c, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		s[i] = c;
	return s;
}
static void dec_key(struct btree_geo *geo, unsigned long *key)
{
	unsigned long val;
	int i;

	for (i = geo->keylen - 1; i >= 0; i--) {
		val = key[i];
		key[i] = val - 1;
		if (val)
			break;
	}
}
static unsigned long *bkey(struct btree_geo *geo, unsigned long *node, int n)
{
	return &node[n * geo->keylen];
}
static void *bval(struct btree_geo *geo, unsigned long *node, int n)
{
	return (void *)node[geo->no_longs + n];
}
static void setkey(struct btree_geo *geo, unsigned long *node, int n,
		unsigned long *key)
{
	longcpy(bkey(geo, node, n), key, geo->keylen);
}
static void setval(struct btree_geo *geo, unsigned long *node, int n,
		void *val)
{
	node[geo->no_longs + n] = (unsigned long) val;
}
static void clearpair(struct btree_geo *geo, unsigned long *node, int n)
{
	longset(bkey(geo, node, n), 0, geo->keylen);
	node[geo->no_longs + n] = 0;
}
static inline void __btree_init(struct btree_head *head)
{
	head->node = NULL;
	head->height = 0;
}
void btree_init_mempool(struct btree_head *head, mempool_t *mempool)
{
	__btree_init(head);
	head->mempool = mempool;
}
EXPORT_SYMBOL_GPL(btree_init_mempool);
int btree_init(struct btree_head *head)
{
	__btree_init(head);
	head->mempool = mempool_create(0, btree_alloc, btree_free, NULL);
	if (!head->mempool)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL_GPL(btree_init);
void btree_destroy(struct btree_head *head)
{
	mempool_destroy(head->mempool);
	head->mempool = NULL;
}
EXPORT_SYMBOL_GPL(btree_destroy);
void *btree_last(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key)
{
	int height = head->height;
	unsigned long *node = head->node;

	if (height == 0)
		return NULL;

	for ( ; height > 1; height--)
		node = bval(geo, node, 0);

	longcpy(key, bkey(geo, node, 0), geo->keylen);
	return bval(geo, node, 0);
}
EXPORT_SYMBOL_GPL(btree_last);
static int keycmp(struct btree_geo *geo, unsigned long *node, int pos,
		unsigned long *key)
{
	return longcmp(bkey(geo, node, pos), key, geo->keylen);
}
static int keyzero(struct btree_geo *geo, unsigned long *key)
{
	int i;

	for (i = 0; i < geo->keylen; i++)
		if (key[i])
			return 0;

	return 1;
}
void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key)
{
	int i, height = head->height;
	unsigned long *node = head->node;

	if (height == 0)
		return NULL;

	for ( ; height > 1; height--) {
		for (i = 0; i < geo->no_pairs; i++)
			if (keycmp(geo, node, i, key) <= 0)
				break;
		if (i == geo->no_pairs)
			return NULL;
		node = bval(geo, node, i);
		if (!node)
			return NULL;
	}

	if (!node)
		return NULL;

	for (i = 0; i < geo->no_pairs; i++)
		if (keycmp(geo, node, i, key) == 0)
			return bval(geo, node, i);
	return NULL;
}
EXPORT_SYMBOL_GPL(btree_lookup);
int btree_update(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, void *val)
{
	int i, height = head->height;
	unsigned long *node = head->node;

	if (height == 0)
		return -ENOENT;

	for ( ; height > 1; height--) {
		for (i = 0; i < geo->no_pairs; i++)
			if (keycmp(geo, node, i, key) <= 0)
				break;
		if (i == geo->no_pairs)
			return -ENOENT;
		node = bval(geo, node, i);
		if (!node)
			return -ENOENT;
	}

	if (!node)
		return -ENOENT;

	for (i = 0; i < geo->no_pairs; i++)
		if (keycmp(geo, node, i, key) == 0) {
			setval(geo, node, i, val);
			return 0;
		}
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(btree_update);
/*
 * Usually this function is quite similar to normal lookup.  But the key of
 * a parent node may be smaller than the smallest key of all its siblings.
 * In such a case we cannot just return NULL, as we have only proven that no
 * key smaller than __key, but larger than this parent key, exists.
 * So we set __key to the parent key and retry.  We have to use the smallest
 * such parent key, which is the last parent key we encountered.
 */
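/*
 * Illustration with made-up keys: a parent slot may still carry key 5
 * after the entry with key 5 was removed, leaving 8 as the smallest key
 * in that subtree.  Asking for the predecessor of 7 descends through that
 * slot and finds nothing <= 6 in the leaf, yet an entry with key 4 could
 * exist under the next slot to the right.  Retrying with __key set to the
 * remembered parent key (5) finds it.
 */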
void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
		unsigned long *__key)
{
	int i, height;
	unsigned long *node, *oldnode;
	unsigned long *retry_key = NULL, key[geo->keylen];

	if (keyzero(geo, __key))
		return NULL;

	if (head->height == 0)
		return NULL;
	longcpy(key, __key, geo->keylen);
retry:
	dec_key(geo, key);

	node = head->node;
	for (height = head->height; height > 1; height--) {
		for (i = 0; i < geo->no_pairs; i++)
			if (keycmp(geo, node, i, key) <= 0)
				break;
		if (i == geo->no_pairs)
			goto miss;
		oldnode = node;
		node = bval(geo, node, i);
		if (!node)
			goto miss;
		retry_key = bkey(geo, oldnode, i);
	}

	if (!node)
		goto miss;

	for (i = 0; i < geo->no_pairs; i++) {
		if (keycmp(geo, node, i, key) <= 0) {
			if (bval(geo, node, i)) {
				longcpy(__key, bkey(geo, node, i), geo->keylen);
				return bval(geo, node, i);
			} else
				goto miss;
		}
	}
miss:
	if (retry_key) {
		longcpy(key, retry_key, geo->keylen);
		retry_key = NULL;
		goto retry;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(btree_get_prev);
static int getpos(struct btree_geo *geo, unsigned long *node,
		unsigned long *key)
{
	int i;

	for (i = 0; i < geo->no_pairs; i++) {
		if (keycmp(geo, node, i, key) <= 0)
			break;
	}
	return i;
}
static int getfill(struct btree_geo *geo, unsigned long *node, int start)
{
	int i;

	for (i = start; i < geo->no_pairs; i++)
		if (!bval(geo, node, i))
			break;
	return i;
}
/*
 * locate the correct leaf node in the btree
 */
static unsigned long *find_level(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, int level)
{
	unsigned long *node = head->node;
	int i, height;

	for (height = head->height; height > level; height--) {
		for (i = 0; i < geo->no_pairs; i++)
			if (keycmp(geo, node, i, key) <= 0)
				break;

		if ((i == geo->no_pairs) || !bval(geo, node, i)) {
			/* right-most key is too large, update it */
			/* FIXME: If the right-most key on higher levels is
			 * always zero, this wouldn't be necessary. */
			i--;
			setkey(geo, node, i, key);
		}
		BUG_ON(i < 0);
		node = bval(geo, node, i);
	}
	BUG_ON(!node);
	return node;
}
static int btree_grow(struct btree_head *head, struct btree_geo *geo,
		gfp_t gfp)
{
	unsigned long *node;
	int fill;

	node = btree_node_alloc(head, gfp);
	if (!node)
		return -ENOMEM;
	if (head->node) {
		fill = getfill(geo, head->node, 0);
		setkey(geo, node, 0, bkey(geo, head->node, fill - 1));
		setval(geo, node, 0, head->node);
	}
	head->node = node;
	head->height++;
	return 0;
}
static void btree_shrink(struct btree_head *head, struct btree_geo *geo)
{
	unsigned long *node;
	int fill;

	if (head->height <= 1)
		return;

	node = head->node;
	fill = getfill(geo, node, 0);
	BUG_ON(fill > 1);
	head->node = bval(geo, node, 0);
	head->height--;
	mempool_free(node, head->mempool);
}
static int btree_insert_level(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, void *val, int level,
		gfp_t gfp)
{
	unsigned long *node;
	int i, pos, fill, err;

	BUG_ON(!val);
	if (head->height < level) {
		err = btree_grow(head, geo, gfp);
		if (err)
			return err;
	}

retry:
	node = find_level(head, geo, key, level);
	pos = getpos(geo, node, key);
	fill = getfill(geo, node, pos);
	/* two identical keys are not allowed */
	BUG_ON(pos < fill && keycmp(geo, node, pos, key) == 0);

	if (fill == geo->no_pairs) {
		/* need to split node */
		unsigned long *new;

		new = btree_node_alloc(head, gfp);
		if (!new)
			return -ENOMEM;
		err = btree_insert_level(head, geo,
				bkey(geo, node, fill / 2 - 1),
				new, level + 1, gfp);
		if (err) {
			mempool_free(new, head->mempool);
			return err;
		}
		for (i = 0; i < fill / 2; i++) {
			setkey(geo, new, i, bkey(geo, node, i));
			setval(geo, new, i, bval(geo, node, i));
			setkey(geo, node, i, bkey(geo, node, i + fill / 2));
			setval(geo, node, i, bval(geo, node, i + fill / 2));
			clearpair(geo, node, i + fill / 2);
		}
		if (fill & 1) {
			setkey(geo, node, i, bkey(geo, node, fill - 1));
			setval(geo, node, i, bval(geo, node, fill - 1));
			clearpair(geo, node, fill - 1);
		}
		goto retry;
	}
	BUG_ON(fill >= geo->no_pairs);

	/* shift and insert */
	for (i = fill; i > pos; i--) {
		setkey(geo, node, i, bkey(geo, node, i - 1));
		setval(geo, node, i, bval(geo, node, i - 1));
	}
	setkey(geo, node, pos, key);
	setval(geo, node, pos, val);

	return 0;
}
int btree_insert(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, void *val, gfp_t gfp)
{
	return btree_insert_level(head, geo, key, val, 1, gfp);
}
EXPORT_SYMBOL_GPL(btree_insert);
static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, int level);
static void merge(struct btree_head *head, struct btree_geo *geo, int level,
		unsigned long *left, int lfill,
		unsigned long *right, int rfill,
		unsigned long *parent, int lpos)
{
	int i;

	for (i = 0; i < rfill; i++) {
		/* Move all keys to the left */
		setkey(geo, left, lfill + i, bkey(geo, right, i));
		setval(geo, left, lfill + i, bval(geo, right, i));
	}
	/* Exchange left and right child in parent */
	setval(geo, parent, lpos, right);
	setval(geo, parent, lpos + 1, left);
	/* Remove left (formerly right) child from parent */
	btree_remove_level(head, geo, bkey(geo, parent, lpos), level + 1);
	mempool_free(right, head->mempool);
}
static void rebalance(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, int level, unsigned long *child, int fill)
{
	unsigned long *parent, *left = NULL, *right = NULL;
	int i, no_left, no_right;

	if (fill == 0) {
		/* Because we don't steal entries from a neighbour, this case
		 * can happen.  Parent node contains a single child, this
		 * node, so merging with a sibling never happens.
		 */
		btree_remove_level(head, geo, key, level + 1);
		mempool_free(child, head->mempool);
		return;
	}

	parent = find_level(head, geo, key, level + 1);
	i = getpos(geo, parent, key);
	BUG_ON(bval(geo, parent, i) != child);

	if (i > 0) {
		left = bval(geo, parent, i - 1);
		no_left = getfill(geo, left, 0);
		if (fill + no_left <= geo->no_pairs) {
			merge(head, geo, level,
					left, no_left,
					child, fill,
					parent, i - 1);
			return;
		}
	}
	if (i + 1 < getfill(geo, parent, i)) {
		right = bval(geo, parent, i + 1);
		no_right = getfill(geo, right, 0);
		if (fill + no_right <= geo->no_pairs) {
			merge(head, geo, level,
					child, fill,
					right, no_right,
					parent, i);
			return;
		}
	}
	/*
	 * We could also try to steal one entry from the left or right
	 * neighbor.  By not doing so we changed the invariant from
	 * "all nodes are at least half full" to "no two neighboring
	 * nodes can be merged", which means that the average fill of
	 * all nodes is still half or better.
	 */
}
static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, int level)
{
	unsigned long *node;
	int i, pos, fill;
	void *ret;

	if (level > head->height) {
		/* we recursed all the way up */
		head->height = 0;
		head->node = NULL;
		return NULL;
	}

	node = find_level(head, geo, key, level);
	pos = getpos(geo, node, key);
	fill = getfill(geo, node, pos);
	if ((level == 1) && (keycmp(geo, node, pos, key) != 0))
		return NULL;
	ret = bval(geo, node, pos);

	/* remove and shift */
	for (i = pos; i < fill - 1; i++) {
		setkey(geo, node, i, bkey(geo, node, i + 1));
		setval(geo, node, i, bval(geo, node, i + 1));
	}
	clearpair(geo, node, fill - 1);

	if (fill - 1 < geo->no_pairs / 2) {
		if (level < head->height)
			rebalance(head, geo, key, level, node, fill - 1);
		else if (fill - 1 == 1)
			btree_shrink(head, geo);
	}

	return ret;
}
void *btree_remove(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key)
{
	if (head->height == 0)
		return NULL;

	return btree_remove_level(head, geo, key, 1);
}
EXPORT_SYMBOL_GPL(btree_remove);
int btree_merge(struct btree_head *target, struct btree_head *victim,
		struct btree_geo *geo, gfp_t gfp)
{
	unsigned long key[geo->keylen];
	unsigned long dup[geo->keylen];
	void *val;
	int err;

	BUG_ON(target == victim);

	if (!(target->node)) {
		/* target is empty, just copy fields over */
		target->node = victim->node;
		target->height = victim->height;
		__btree_init(victim);
		return 0;
	}

	/* TODO: This needs some optimizations.  Currently we do three tree
	 * walks to remove a single object from the victim.
	 */
	for (;;) {
		if (!btree_last(victim, geo, key))
			break;
		val = btree_lookup(victim, geo, key);
		err = btree_insert(target, geo, key, val, gfp);
		if (err)
			return err;
		/* We must make a copy of the key, as the original will get
		 * mangled inside btree_remove. */
		longcpy(dup, key, geo->keylen);
		btree_remove(victim, geo, dup);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(btree_merge);
static size_t __btree_for_each(struct btree_head *head, struct btree_geo *geo,
		unsigned long *node, unsigned long opaque,
		void (*func)(void *elem, unsigned long opaque,
			unsigned long *key, size_t index,
			void *func2),
		void *func2, int reap, int height, size_t count)
{
	int i;
	unsigned long *child;

	for (i = 0; i < geo->no_pairs; i++) {
		child = bval(geo, node, i);
		if (!child)
			break;
		if (height > 1)
			count = __btree_for_each(head, geo, child, opaque,
					func, func2, reap, height - 1, count);
		else
			func(child, opaque, bkey(geo, node, i), count++,
					func2);
	}
	if (reap)
		mempool_free(node, head->mempool);
	return count;
}
static void empty(void *elem, unsigned long opaque, unsigned long *key,
		size_t index, void *func2)
{
}
void visitorl(void *elem, unsigned long opaque, unsigned long *key,
		size_t index, void *__func)
{
	visitorl_t func = __func;

	func(elem, opaque, *key, index);
}
EXPORT_SYMBOL_GPL(visitorl);
void visitor32(void *elem, unsigned long opaque, unsigned long *__key,
		size_t index, void *__func)
{
	visitor32_t func = __func;
	u32 *key = (void *)__key;

	func(elem, opaque, *key, index);
}
EXPORT_SYMBOL_GPL(visitor32);
void visitor64(void *elem, unsigned long opaque, unsigned long *__key,
		size_t index, void *__func)
{
	visitor64_t func = __func;
	u64 *key = (void *)__key;

	func(elem, opaque, *key, index);
}
EXPORT_SYMBOL_GPL(visitor64);
void visitor128(void *elem, unsigned long opaque, unsigned long *__key,
		size_t index, void *__func)
{
	visitor128_t func = __func;
	u64 *key = (void *)__key;

	func(elem, opaque, key[0], key[1], index);
}
EXPORT_SYMBOL_GPL(visitor128);
size_t btree_visitor(struct btree_head *head, struct btree_geo *geo,
		unsigned long opaque,
		void (*func)(void *elem, unsigned long opaque,
			unsigned long *key,
			size_t index, void *func2),
		void *func2)
{
	size_t count = 0;

	if (!func2)
		func = empty;
	if (head->node)
		count = __btree_for_each(head, geo, head->node, opaque, func,
				func2, 0, head->height, 0);
	return count;
}
EXPORT_SYMBOL_GPL(btree_visitor);
size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo,
		unsigned long opaque,
		void (*func)(void *elem, unsigned long opaque,
			unsigned long *key,
			size_t index, void *func2),
		void *func2)
{
	size_t count = 0;

	if (!func2)
		func = empty;
	if (head->node)
		count = __btree_for_each(head, geo, head->node, opaque, func,
				func2, 1, head->height, 0);
	__btree_init(head);
	return count;
}
EXPORT_SYMBOL_GPL(btree_grim_visitor);
static int __init btree_module_init(void)
{
	btree_cachep = kmem_cache_create("btree_node", NODESIZE, 0,
			SLAB_HWCACHE_ALIGN, NULL);
	return 0;
}
static void __exit btree_module_exit(void)
{
	kmem_cache_destroy(btree_cachep);
}
/* If core code starts using btree, initialization should happen even earlier */
module_init(btree_module_init);
module_exit(btree_module_exit);
MODULE_AUTHOR("Joern Engel <joern@logfs.org>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_LICENSE("GPL");