/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-btree-internal.h"
#include "dm-space-map.h"
#include "dm-transaction-manager.h"

#include <linux/export.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "btree"
/*----------------------------------------------------------------
 * Array manipulation
 *--------------------------------------------------------------*/
static void memcpy_disk(void *dest, const void *src, size_t len)
	__dm_written_to_disk(src)
{
	memcpy(dest, src, len);
	__dm_unbless_for_disk(src);
}
static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
			 unsigned index, void *elt)
	__dm_written_to_disk(elt)
{
	if (index < nr_elts)
		memmove(base + (elt_size * (index + 1)),
			base + (elt_size * index),
			(nr_elts - index) * elt_size);

	memcpy_disk(base + (elt_size * index), elt, elt_size);
}
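/*
 * Illustrative sketch (not from the original source): inserting E at
 * index 1 of a four-element array shifts the tail right by one slot
 * before the new element is copied in:
 *
 *	before:	[A][B][C][D]
 *	after:	[A][E][B][C][D]
 *
 * nr_elts counts the entries before the insert, so the memmove moves
 * exactly (nr_elts - index) elements.
 */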
/*----------------------------------------------------------------*/

/* makes the assumption that no two keys are the same. */
static int bsearch(struct btree_node *n, uint64_t key, int want_hi)
{
	int lo = -1, hi = le32_to_cpu(n->header.nr_entries);

	while (hi - lo > 1) {
		int mid = lo + ((hi - lo) / 2);
		uint64_t mid_key = le64_to_cpu(n->keys[mid]);

		if (mid_key == key)
			return mid;

		if (mid_key < key)
			lo = mid;
		else
			hi = mid;
	}

	return want_hi ? hi : lo;
}
int lower_bound(struct btree_node *n, uint64_t key)
{
	return bsearch(n, key, 0);
}
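/*
 * Illustrative sketch (not from the original source): with keys
 * {10, 20, 30}, lower_bound() returns the index of the largest key
 * less than or equal to the search key, or -1 if there is none:
 *
 *	lower_bound(n, 20) == 1		(exact match)
 *	lower_bound(n, 25) == 1		(rounds down to 20)
 *	lower_bound(n, 5)  == -1	(below all keys)
 */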
void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
		  struct dm_btree_value_type *vt)
{
	unsigned i;
	uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);

	if (le32_to_cpu(n->header.flags) & INTERNAL_NODE)
		for (i = 0; i < nr_entries; i++)
			dm_tm_inc(tm, value64(n, i));
	else if (vt->inc)
		for (i = 0; i < nr_entries; i++)
			vt->inc(vt->context, value_ptr(n, i));
}
static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
		     uint64_t key, void *value)
	__dm_written_to_disk(value)
{
	uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);
	__le64 key_le = cpu_to_le64(key);

	if (index > nr_entries ||
	    index >= le32_to_cpu(node->header.max_entries)) {
		DMERR("too many entries in btree node for insert");
		__dm_unbless_for_disk(value);
		return -ENOMEM;
	}

	__dm_bless_for_disk(&key_le);

	array_insert(node->keys, sizeof(*node->keys), nr_entries, index, &key_le);
	array_insert(value_base(node), value_size, nr_entries, index, value);
	node->header.nr_entries = cpu_to_le32(nr_entries + 1);

	return 0;
}
/*----------------------------------------------------------------*/

/*
 * We want 3n entries (for some n).  This works more nicely for repeated
 * insert remove loops than (2n + 1).
 */
static uint32_t calc_max_entries(size_t value_size, size_t block_size)
{
	uint32_t total, n;
	size_t elt_size = sizeof(uint64_t) + value_size; /* key + value */

	block_size -= sizeof(struct node_header);
	total = block_size / elt_size;
	n = total / 3;		/* rounds down */

	return 3 * n;
}
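/*
 * Illustrative arithmetic (not from the original source): with a
 * 4096-byte block and 8-byte values, elt_size is 16; if the node_header
 * occupies 32 bytes, total = (4096 - 32) / 16 = 254 and the returned
 * max_entries = 3 * (254 / 3) = 252.
 */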
int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
{
	int r;
	struct dm_block *b;
	struct btree_node *n;
	size_t block_size;
	uint32_t max_entries;

	r = new_block(info, &b);
	if (r < 0)
		return r;

	block_size = dm_bm_block_size(dm_tm_get_bm(info->tm));
	max_entries = calc_max_entries(info->value_type.size, block_size);

	n = dm_block_data(b);
	memset(n, 0, block_size);
	n->header.flags = cpu_to_le32(LEAF_NODE);
	n->header.nr_entries = cpu_to_le32(0);
	n->header.max_entries = cpu_to_le32(max_entries);
	n->header.value_size = cpu_to_le32(info->value_type.size);

	*root = dm_block_location(b);
	return unlock_block(info, b);
}
EXPORT_SYMBOL_GPL(dm_btree_empty);
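/*
 * Minimal usage sketch (illustrative, not part of this file): create an
 * empty tree and remember its root block for later lookups and inserts.
 * The dm_btree_info setup is assumed to have been done elsewhere.
 *
 *	dm_block_t root;
 *	int r = dm_btree_empty(info, &root);
 *	if (r < 0)
 *		return r;
 *	// root now names a block holding an empty leaf node
 */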
/*----------------------------------------------------------------*/

/*
 * Deletion uses a recursive algorithm; since we have limited stack space
 * we explicitly manage our own stack on the heap.
 */
#define MAX_SPINE_DEPTH 64
struct frame {
	struct dm_block *b;
	struct btree_node *n;
	unsigned level;
	unsigned nr_children;
	unsigned current_child;
};

struct del_stack {
	struct dm_transaction_manager *tm;
	int top;
	struct frame spine[MAX_SPINE_DEPTH];
};
static int top_frame(struct del_stack *s, struct frame **f)
{
	if (s->top < 0) {
		DMERR("btree deletion stack empty");
		return -EINVAL;
	}

	*f = s->spine + s->top;

	return 0;
}
static int unprocessed_frames(struct del_stack *s)
{
	return s->top >= 0;
}
static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
{
	int r;
	uint32_t ref_count;

	if (s->top >= MAX_SPINE_DEPTH - 1) {
		DMERR("btree deletion stack out of memory");
		return -ENOMEM;
	}

	r = dm_tm_ref(s->tm, b, &ref_count);
	if (r)
		return r;

	if (ref_count > 1)
		/*
		 * This is a shared node, so we can just decrement its
		 * reference counter and leave the children.
		 */
		dm_tm_dec(s->tm, b);

	else {
		struct frame *f = s->spine + ++s->top;

		r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
		if (r) {
			s->top--;
			return r;
		}

		f->n = dm_block_data(f->b);
		f->level = level;
		f->nr_children = le32_to_cpu(f->n->header.nr_entries);
		f->current_child = 0;
	}

	return 0;
}
static void pop_frame(struct del_stack *s)
{
	struct frame *f = s->spine + s->top--;

	dm_tm_dec(s->tm, dm_block_location(f->b));
	dm_tm_unlock(s->tm, f->b);
}
static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
{
	return f->level < (info->levels - 1);
}
int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
{
	int r;
	struct del_stack *s;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;
	s->tm = info->tm;
	s->top = -1;

	r = push_frame(s, root, 0);
	if (r)
		goto out;

	while (unprocessed_frames(s)) {
		uint32_t flags;
		struct frame *f;
		dm_block_t b;

		r = top_frame(s, &f);
		if (r)
			goto out;

		if (f->current_child >= f->nr_children) {
			pop_frame(s);
			continue;
		}

		flags = le32_to_cpu(f->n->header.flags);
		if (flags & INTERNAL_NODE) {
			b = value64(f->n, f->current_child);
			f->current_child++;
			r = push_frame(s, b, f->level);
			if (r)
				goto out;

		} else if (is_internal_level(info, f)) {
			b = value64(f->n, f->current_child);
			f->current_child++;
			r = push_frame(s, b, f->level + 1);
			if (r)
				goto out;

		} else {
			if (info->value_type.dec) {
				unsigned i;

				for (i = 0; i < f->nr_children; i++)
					info->value_type.dec(info->value_type.context,
							     value_ptr(f->n, i));
			}
			f->current_child = f->nr_children;
		}
	}

out:
	kfree(s);
	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_del);
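/*
 * Minimal usage sketch (illustrative, not part of this file): drop every
 * reference held by a tree once it is no longer needed.  Shared subtrees
 * are only ref-decremented, not walked.
 *
 *	r = dm_btree_del(info, root);
 *	if (r)
 *		DMERR("btree delete failed");
 */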
/*----------------------------------------------------------------*/

static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
			    int (*search_fn)(struct btree_node *, uint64_t),
			    uint64_t *result_key, void *v, size_t value_size)
{
	int i, r;
	uint32_t flags, nr_entries;

	do {
		r = ro_step(s, block);
		if (r < 0)
			return r;

		i = search_fn(ro_node(s), key);

		flags = le32_to_cpu(ro_node(s)->header.flags);
		nr_entries = le32_to_cpu(ro_node(s)->header.nr_entries);
		if (i < 0 || i >= nr_entries)
			return -ENODATA;

		if (flags & INTERNAL_NODE)
			block = value64(ro_node(s), i);

	} while (!(flags & LEAF_NODE));

	*result_key = le64_to_cpu(ro_node(s)->keys[i]);
	memcpy(v, value_ptr(ro_node(s), i), value_size);

	return 0;
}
int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
		    uint64_t *keys, void *value_le)
{
	unsigned level, last_level = info->levels - 1;
	int r = -ENODATA;
	uint64_t rkey;
	__le64 internal_value_le;
	struct ro_spine spine;

	init_ro_spine(&spine, info);
	for (level = 0; level < info->levels; level++) {
		size_t size;
		void *value_p;

		if (level == last_level) {
			value_p = value_le;
			size = info->value_type.size;

		} else {
			value_p = &internal_value_le;
			size = sizeof(uint64_t);
		}

		r = btree_lookup_raw(&spine, root, keys[level],
				     lower_bound, &rkey,
				     value_p, size);

		if (!r) {
			if (rkey != keys[level]) {
				exit_ro_spine(&spine);
				return -ENODATA;
			}
		} else {
			exit_ro_spine(&spine);
			return r;
		}

		root = le64_to_cpu(internal_value_le);
	}
	exit_ro_spine(&spine);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_lookup);
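/*
 * Minimal usage sketch (illustrative, not part of this file): look up a
 * single key in a one-level tree.  Values come back in little-endian
 * disk format, so the caller converts as needed; do_something() is a
 * hypothetical consumer.
 *
 *	__le64 value_le;
 *	uint64_t key = 123;
 *	r = dm_btree_lookup(info, root, &key, &value_le);
 *	if (!r)
 *		do_something(le64_to_cpu(value_le));
 */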
/*
 * Splits a node by creating a sibling node and shifting half the nodes
 * contents across.  Assumes there is a parent node, and it has room for
 * another child.
 *
 * Before:
 *	+--------+
 *	| Parent |
 *	+--------+
 *	     |
 *	     v
 *	+----------+
 *	| A ++++++ |
 *	+----------+
 *
 * After:
 *	     +--------+
 *	     | Parent |
 *	     +--------+
 *	       |    |
 *	       v    v
 *	+---------+ +-------+
 *	| A* +++  | | B +++ |
 *	+---------+ +-------+
 *
 * Where A* is a shadow of A.
 */
static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
			       unsigned parent_index, uint64_t key)
{
	int r;
	size_t size;
	unsigned nr_left, nr_right;
	struct dm_block *left, *right, *parent;
	struct btree_node *ln, *rn, *pn;
	__le64 location;

	left = shadow_current(s);

	r = new_block(s->info, &right);
	if (r < 0)
		return r;

	ln = dm_block_data(left);
	rn = dm_block_data(right);

	nr_left = le32_to_cpu(ln->header.nr_entries) / 2;
	nr_right = le32_to_cpu(ln->header.nr_entries) - nr_left;

	ln->header.nr_entries = cpu_to_le32(nr_left);

	rn->header.flags = ln->header.flags;
	rn->header.nr_entries = cpu_to_le32(nr_right);
	rn->header.max_entries = ln->header.max_entries;
	rn->header.value_size = ln->header.value_size;
	memcpy(rn->keys, ln->keys + nr_left, nr_right * sizeof(rn->keys[0]));

	size = le32_to_cpu(ln->header.flags) & INTERNAL_NODE ?
		sizeof(uint64_t) : s->info->value_type.size;
	memcpy(value_ptr(rn, 0), value_ptr(ln, nr_left),
	       size * nr_right);

	/*
	 * Patch up the parent
	 */
	parent = shadow_parent(s);

	pn = dm_block_data(parent);
	location = cpu_to_le64(dm_block_location(left));
	__dm_bless_for_disk(&location);
	memcpy_disk(value_ptr(pn, parent_index),
		    &location, sizeof(__le64));

	location = cpu_to_le64(dm_block_location(right));
	__dm_bless_for_disk(&location);

	r = insert_at(sizeof(__le64), pn, parent_index + 1,
		      le64_to_cpu(rn->keys[0]), &location);
	if (r)
		return r;

	if (key < le64_to_cpu(rn->keys[0])) {
		unlock_block(s->info, right);
		s->nodes[1] = left;
	} else {
		unlock_block(s->info, left);
		s->nodes[1] = right;
	}

	return 0;
}
/*
 * Splits a node by creating two new children beneath the given node.
 *
 * Before:
 *	+----------+
 *	| A ++++++ |
 *	+----------+
 *
 * After:
 *	+------------+
 *	| A (shadow) |
 *	+------------+
 *	   |      |
 *	   v      v
 *	+-------+ +-------+
 *	| B +++ | | C +++ |
 *	+-------+ +-------+
 */
static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
{
	int r;
	size_t size;
	unsigned nr_left, nr_right;
	struct dm_block *left, *right, *new_parent;
	struct btree_node *pn, *ln, *rn;
	__le64 val;

	new_parent = shadow_current(s);

	r = new_block(s->info, &left);
	if (r < 0)
		return r;

	r = new_block(s->info, &right);
	if (r < 0) {
		/* FIXME: put left */
		return r;
	}

	pn = dm_block_data(new_parent);
	ln = dm_block_data(left);
	rn = dm_block_data(right);

	nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
	nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;

	ln->header.flags = pn->header.flags;
	ln->header.nr_entries = cpu_to_le32(nr_left);
	ln->header.max_entries = pn->header.max_entries;
	ln->header.value_size = pn->header.value_size;

	rn->header.flags = pn->header.flags;
	rn->header.nr_entries = cpu_to_le32(nr_right);
	rn->header.max_entries = pn->header.max_entries;
	rn->header.value_size = pn->header.value_size;

	memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
	memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));

	size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
		sizeof(__le64) : s->info->value_type.size;
	memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
	memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
	       nr_right * size);

	/* new_parent should just point to l and r now */
	pn->header.flags = cpu_to_le32(INTERNAL_NODE);
	pn->header.nr_entries = cpu_to_le32(2);
	pn->header.max_entries = cpu_to_le32(
		calc_max_entries(sizeof(__le64),
				 dm_bm_block_size(
					 dm_tm_get_bm(s->info->tm))));
	pn->header.value_size = cpu_to_le32(sizeof(__le64));

	val = cpu_to_le64(dm_block_location(left));
	__dm_bless_for_disk(&val);
	pn->keys[0] = ln->keys[0];
	memcpy_disk(value_ptr(pn, 0), &val, sizeof(__le64));

	val = cpu_to_le64(dm_block_location(right));
	__dm_bless_for_disk(&val);
	pn->keys[1] = rn->keys[0];
	memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));

	/*
	 * rejig the spine.  This is ugly, since it knows too
	 * much about the spine
	 */
	if (s->nodes[0] != new_parent) {
		unlock_block(s->info, s->nodes[0]);
		s->nodes[0] = new_parent;
	}
	if (key < le64_to_cpu(rn->keys[0])) {
		unlock_block(s->info, right);
		s->nodes[1] = left;
	} else {
		unlock_block(s->info, left);
		s->nodes[1] = right;
	}
	s->count = 2;

	return 0;
}
static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
			    struct dm_btree_value_type *vt,
			    uint64_t key, unsigned *index)
{
	int r, i = *index, top = 1;
	struct btree_node *node;

	for (;;) {
		r = shadow_step(s, root, vt);
		if (r < 0)
			return r;

		node = dm_block_data(shadow_current(s));

		/*
		 * We have to patch up the parent node, ugly, but I don't
		 * see a way to do this automatically as part of the spine
		 * op.
		 */
		if (shadow_has_parent(s) && i >= 0) { /* FIXME: second clause unness. */
			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));

			__dm_bless_for_disk(&location);
			memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i),
				    &location, sizeof(__le64));
		}

		node = dm_block_data(shadow_current(s));

		if (node->header.nr_entries == node->header.max_entries) {
			if (top)
				r = btree_split_beneath(s, key);
			else
				r = btree_split_sibling(s, root, i, key);

			if (r < 0)
				return r;
		}

		node = dm_block_data(shadow_current(s));

		i = lower_bound(node, key);

		if (le32_to_cpu(node->header.flags) & LEAF_NODE)
			break;

		if (i < 0) {
			/* change the bounds on the lowest key */
			node->keys[0] = cpu_to_le64(key);
			i = 0;
		}

		root = value64(node, i);
		top = 0;
	}

	if (i < 0 || le64_to_cpu(node->keys[i]) != key)
		i++;

	*index = i;
	return 0;
}
static int insert(struct dm_btree_info *info, dm_block_t root,
		  uint64_t *keys, void *value, dm_block_t *new_root,
		  int *inserted)
		  __dm_written_to_disk(value)
{
	int r, need_insert;
	unsigned level, index = -1, last_level = info->levels - 1;
	dm_block_t block = root;
	struct shadow_spine spine;
	struct btree_node *n;
	struct dm_btree_value_type le64_type;

	le64_type.context = NULL;
	le64_type.size = sizeof(__le64);
	le64_type.inc = NULL;
	le64_type.dec = NULL;
	le64_type.equal = NULL;

	init_shadow_spine(&spine, info);

	for (level = 0; level < (info->levels - 1); level++) {
		r = btree_insert_raw(&spine, block, &le64_type, keys[level], &index);
		if (r < 0)
			goto bad;

		n = dm_block_data(shadow_current(&spine));
		need_insert = ((index >= le32_to_cpu(n->header.nr_entries)) ||
			       (le64_to_cpu(n->keys[index]) != keys[level]));

		if (need_insert) {
			dm_block_t new_tree;
			__le64 new_le;

			r = dm_btree_empty(info, &new_tree);
			if (r < 0)
				goto bad;

			new_le = cpu_to_le64(new_tree);
			__dm_bless_for_disk(&new_le);

			r = insert_at(sizeof(uint64_t), n, index,
				      keys[level], &new_le);
			if (r)
				goto bad;
		}

		if (level < last_level)
			block = value64(n, index);
	}

	r = btree_insert_raw(&spine, block, &info->value_type,
			     keys[level], &index);
	if (r < 0)
		goto bad;

	n = dm_block_data(shadow_current(&spine));
	need_insert = ((index >= le32_to_cpu(n->header.nr_entries)) ||
		       (le64_to_cpu(n->keys[index]) != keys[level]));

	if (need_insert) {
		if (inserted)
			*inserted = 1;

		r = insert_at(info->value_type.size, n, index,
			      keys[level], value);
		if (r)
			goto bad_unblessed;
	} else {
		if (inserted)
			*inserted = 0;

		if (info->value_type.dec &&
		    (!info->value_type.equal ||
		     !info->value_type.equal(
			     info->value_type.context,
			     value_ptr(n, index),
			     value))) {
			info->value_type.dec(info->value_type.context,
					     value_ptr(n, index));
		}
		memcpy_disk(value_ptr(n, index),
			    value, info->value_type.size);
	}

	*new_root = shadow_root(&spine);
	exit_shadow_spine(&spine);

	return 0;

bad:
	__dm_unbless_for_disk(value);
bad_unblessed:
	exit_shadow_spine(&spine);
	return r;
}
int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
		    uint64_t *keys, void *value, dm_block_t *new_root)
		    __dm_written_to_disk(value)
{
	return insert(info, root, keys, value, new_root, NULL);
}
EXPORT_SYMBOL_GPL(dm_btree_insert);
int dm_btree_insert_notify(struct dm_btree_info *info, dm_block_t root,
			   uint64_t *keys, void *value, dm_block_t *new_root,
			   int *inserted)
			   __dm_written_to_disk(value)
{
	return insert(info, root, keys, value, new_root, inserted);
}
EXPORT_SYMBOL_GPL(dm_btree_insert_notify);
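/*
 * Minimal usage sketch (illustrative, not part of this file): insert a
 * value and keep the returned root, since shadowing may relocate the
 * tree.  The caller blesses the value buffer before handing it over.
 *
 *	uint64_t key = 123;
 *	__le64 value_le = cpu_to_le64(456);
 *	dm_block_t new_root;
 *	__dm_bless_for_disk(&value_le);
 *	r = dm_btree_insert(info, root, &key, &value_le, &new_root);
 *	if (!r)
 *		root = new_root;
 */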
/*----------------------------------------------------------------*/

static int find_highest_key(struct ro_spine *s, dm_block_t block,
			    uint64_t *result_key, dm_block_t *next_block)
{
	int i, r;
	uint32_t flags;

	do {
		r = ro_step(s, block);
		if (r < 0)
			return r;

		flags = le32_to_cpu(ro_node(s)->header.flags);
		i = le32_to_cpu(ro_node(s)->header.nr_entries);
		if (!i)
			return -ENODATA;
		else
			i--;

		*result_key = le64_to_cpu(ro_node(s)->keys[i]);
		if (next_block || flags & INTERNAL_NODE)
			block = value64(ro_node(s), i);

	} while (flags & INTERNAL_NODE);

	if (next_block)
		*next_block = block;
	return 0;
}
int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
			      uint64_t *result_keys)
{
	int r = 0, count = 0, level;
	struct ro_spine spine;

	init_ro_spine(&spine, info);
	for (level = 0; level < info->levels; level++) {
		r = find_highest_key(&spine, root, result_keys + level,
				     level == info->levels - 1 ? NULL : &root);
		if (r == -ENODATA) {
			r = 0;
			break;

		} else if (r)
			break;

		count++;
	}
	exit_ro_spine(&spine);

	return r ? r : count;
}
EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);
/*
 * FIXME: We shouldn't use a recursive algorithm when we have limited stack
 * space.  Also this only works for single level trees.
 */
static int walk_node(struct ro_spine *s, dm_block_t block,
		     int (*fn)(void *context, uint64_t *keys, void *leaf),
		     void *context)
{
	int r;
	unsigned i, nr;
	struct btree_node *n;
	uint64_t keys;

	r = ro_step(s, block);
	n = ro_node(s);

	nr = le32_to_cpu(n->header.nr_entries);
	for (i = 0; i < nr; i++) {
		if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
			r = walk_node(s, value64(n, i), fn, context);
			if (r)
				goto out;
		} else {
			keys = le64_to_cpu(*key_ptr(n, i));
			r = fn(context, &keys, value_ptr(n, i));
			if (r)
				goto out;
		}
	}

out:
	ro_pop(s);
	return r;
}
int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
		  int (*fn)(void *context, uint64_t *keys, void *leaf),
		  void *context)
{
	int r;
	struct ro_spine spine;

	BUG_ON(info->levels > 1);
	init_ro_spine(&spine, info);
	r = walk_node(&spine, root, fn, context);
	exit_ro_spine(&spine);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_walk);
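/*
 * Minimal usage sketch (illustrative, not part of this file): a walk
 * callback that counts leaf entries.  Returning non-zero from the
 * callback aborts the walk.
 *
 *	static int count_entry(void *context, uint64_t *keys, void *leaf)
 *	{
 *		(*(unsigned *) context)++;
 *		return 0;
 *	}
 *
 *	unsigned count = 0;
 *	r = dm_btree_walk(info, root, count_entry, &count);
 */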