#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rbtree_augmented.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <asm/timex.h>

#define __param(type, name, init, msg)		\
	static type name = init;		\
	module_param(name, type, 0444);		\
	MODULE_PARM_DESC(name, msg);

__param(int, nnodes, 100, "Number of nodes in the rb-tree");
__param(int, perf_loops, 100000, "Number of iterations modifying the rb-tree");
__param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree");

struct test_node {
	u32 key;
	struct rb_node rb;

	/* following fields used for testing augmented rbtree functionality */
	u32 val;
	u32 augmented;
};

static struct rb_root_cached root = RB_ROOT_CACHED;
static struct test_node *nodes = NULL;

static struct rnd_state rnd;
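
/*
 * Plain rbtree insertion: walk down from the root comparing keys to find
 * the parent and link pointer, then link the new node and rebalance.
 */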
static void insert(struct test_node *node, struct rb_root_cached *root)
{
	struct rb_node **new = &root->rb_root.rb_node, *parent = NULL;
	u32 key = node->key;

	while (*new) {
		parent = *new;
		if (key < rb_entry(parent, struct test_node, rb)->key)
			new = &parent->rb_left;
		else
			new = &parent->rb_right;
	}

	rb_link_node(&node->rb, parent, new);
	rb_insert_color(&node->rb, &root->rb_root);
}
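
/*
 * Cached-root variant: additionally track whether the new node was linked
 * along a left-only path, so rb_insert_color_cached() can update the
 * cached leftmost pointer.
 */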
static void insert_cached(struct test_node *node, struct rb_root_cached *root)
{
	struct rb_node **new = &root->rb_root.rb_node, *parent = NULL;
	u32 key = node->key;
	bool leftmost = true;

	while (*new) {
		parent = *new;
		if (key < rb_entry(parent, struct test_node, rb)->key)
			new = &parent->rb_left;
		else {
			new = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, parent, new);
	rb_insert_color_cached(&node->rb, root, leftmost);
}

static inline void erase(struct test_node *node, struct rb_root_cached *root)
{
	rb_erase(&node->rb, &root->rb_root);
}

static inline void erase_cached(struct test_node *node, struct rb_root_cached *root)
{
	rb_erase_cached(&node->rb, root);
}
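
/*
 * A node's augmented value is the maximum val over its subtree; it can be
 * recomputed from the node's own val and the cached augmented values of
 * its two children.
 */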
static inline u32 augment_recompute(struct test_node *node)
{
	u32 max = node->val, child_augmented;
	if (node->rb.rb_left) {
		child_augmented = rb_entry(node->rb.rb_left, struct test_node,
					   rb)->augmented;
		if (max < child_augmented)
			max = child_augmented;
	}
	if (node->rb.rb_right) {
		child_augmented = rb_entry(node->rb.rb_right, struct test_node,
					   rb)->augmented;
		if (max < child_augmented)
			max = child_augmented;
	}
	return max;
}
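
/*
 * Generate the propagate/copy/rotate callbacks that keep the augmented
 * values consistent across tree rotations and rebalancing.
 */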
RB_DECLARE_CALLBACKS(static, augment_callbacks, struct test_node, rb,
		     u32, augmented, augment_recompute)
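
/*
 * Augmented insertion: each visited ancestor's augmented value is updated
 * on the way down; the generated callbacks then keep the values correct
 * through the rebalancing rotations.
 */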
static void insert_augmented(struct test_node *node,
			     struct rb_root_cached *root)
{
	struct rb_node **new = &root->rb_root.rb_node, *rb_parent = NULL;
	u32 key = node->key;
	u32 val = node->val;
	struct test_node *parent;

	while (*new) {
		rb_parent = *new;
		parent = rb_entry(rb_parent, struct test_node, rb);
		if (parent->augmented < val)
			parent->augmented = val;
		if (key < parent->key)
			new = &parent->rb.rb_left;
		else
			new = &parent->rb.rb_right;
	}

	node->augmented = val;
	rb_link_node(&node->rb, rb_parent, new);
	rb_insert_augmented(&node->rb, &root->rb_root, &augment_callbacks);
}

static void insert_augmented_cached(struct test_node *node,
				    struct rb_root_cached *root)
{
	struct rb_node **new = &root->rb_root.rb_node, *rb_parent = NULL;
	u32 key = node->key;
	u32 val = node->val;
	struct test_node *parent;
	bool leftmost = true;

	while (*new) {
		rb_parent = *new;
		parent = rb_entry(rb_parent, struct test_node, rb);
		if (parent->augmented < val)
			parent->augmented = val;
		if (key < parent->key)
			new = &parent->rb.rb_left;
		else {
			new = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	node->augmented = val;
	rb_link_node(&node->rb, rb_parent, new);
	rb_insert_augmented_cached(&node->rb, root,
				   leftmost, &augment_callbacks);
}

static void erase_augmented(struct test_node *node, struct rb_root_cached *root)
{
	rb_erase_augmented(&node->rb, &root->rb_root, &augment_callbacks);
}

static void erase_augmented_cached(struct test_node *node,
				   struct rb_root_cached *root)
{
	rb_erase_augmented_cached(&node->rb, root, &augment_callbacks);
}
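
/* Refill the node array with fresh pseudo-random keys and values. */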
static void init(void)
{
	int i;
	for (i = 0; i < nnodes; i++) {
		nodes[i].key = prandom_u32_state(&rnd);
		nodes[i].val = prandom_u32_state(&rnd);
	}
}
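
/* Bit 0 of __rb_parent_color encodes the node's color: 0 is red, 1 is black. */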
static bool is_red(struct rb_node *rb)
{
	return !(rb->__rb_parent_color & 1);
}

static int black_path_count(struct rb_node *rb)
{
	int count;
	for (count = 0; rb; rb = rb_parent(rb))
		count += !is_red(rb);
	return count;
}

static void check_postorder_foreach(int nr_nodes)
{
	struct test_node *cur, *n;
	int count = 0;
	rbtree_postorder_for_each_entry_safe(cur, n, &root.rb_root, rb)
		count++;

	WARN_ON_ONCE(count != nr_nodes);
}

static void check_postorder(int nr_nodes)
{
	struct rb_node *rb;
	int count = 0;
	for (rb = rb_first_postorder(&root.rb_root); rb; rb = rb_next_postorder(rb))
		count++;

	WARN_ON_ONCE(count != nr_nodes);
}
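
/*
 * Verify the red-black invariants over an inorder walk: keys are sorted,
 * no red node has a red parent (and the root is black), every node with
 * fewer than two children sits at the same black depth, the node count
 * matches the caller's expectation and is consistent with the tree's
 * black height. Postorder iteration is checked as well.
 */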
static void check(int nr_nodes)
{
	struct rb_node *rb;
	int count = 0, blacks = 0;
	u32 prev_key = 0;

	for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
		struct test_node *node = rb_entry(rb, struct test_node, rb);
		WARN_ON_ONCE(node->key < prev_key);
		WARN_ON_ONCE(is_red(rb) &&
			     (!rb_parent(rb) || is_red(rb_parent(rb))));
		if (!count)
			blacks = black_path_count(rb);
		else
			WARN_ON_ONCE((!rb->rb_left || !rb->rb_right) &&
				     blacks != black_path_count(rb));
		prev_key = node->key;
		count++;
	}

	WARN_ON_ONCE(count != nr_nodes);
	WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root.rb_root))) - 1);

	check_postorder(nr_nodes);
	check_postorder_foreach(nr_nodes);
}

static void check_augmented(int nr_nodes)
{
	struct rb_node *rb;

	check(nr_nodes);
	for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
		struct test_node *node = rb_entry(rb, struct test_node, rb);
		WARN_ON_ONCE(node->augmented != augment_recompute(node));
	}
}

static int __init rbtree_test_init(void)
{
	int i, j;
	cycles_t time1, time2, time;
	struct rb_node *node;

	nodes = kmalloc(nnodes * sizeof(*nodes), GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;

	printk(KERN_ALERT "rbtree testing");

	prandom_seed_state(&rnd, 3141592653589793238ULL);
	init();
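
	/*
	 * Performance tests: each total is measured over perf_loops outer
	 * iterations and divided by perf_loops, giving average cycles per
	 * iteration.
	 */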
	time1 = get_cycles();

	for (i = 0; i < perf_loops; i++) {
		for (j = 0; j < nnodes; j++)
			insert(nodes + j, &root);
		for (j = 0; j < nnodes; j++)
			erase(nodes + j, &root);
	}

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk(" -> test 1 (latency of nnodes insert+delete): %llu cycles\n",
	       (unsigned long long)time);

	time1 = get_cycles();

	for (i = 0; i < perf_loops; i++) {
		for (j = 0; j < nnodes; j++)
			insert_cached(nodes + j, &root);
		for (j = 0; j < nnodes; j++)
			erase_cached(nodes + j, &root);
	}

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk(" -> test 2 (latency of nnodes cached insert+delete): %llu cycles\n",
	       (unsigned long long)time);

	for (i = 0; i < nnodes; i++)
		insert(nodes + i, &root);

	time1 = get_cycles();

	for (i = 0; i < perf_loops; i++) {
		for (node = rb_first(&root.rb_root); node; node = rb_next(node))
			;
	}

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk(" -> test 3 (latency of inorder traversal): %llu cycles\n",
	       (unsigned long long)time);

	time1 = get_cycles();

	for (i = 0; i < perf_loops; i++)
		node = rb_first(&root.rb_root);

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk(" -> test 4 (latency to fetch first node)\n");
	printk("        non-cached: %llu cycles\n", (unsigned long long)time);

	time1 = get_cycles();

	for (i = 0; i < perf_loops; i++)
		node = rb_first_cached(&root);

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk("        cached: %llu cycles\n", (unsigned long long)time);

	for (i = 0; i < nnodes; i++)
		erase(nodes + i, &root);
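
	/*
	 * Correctness checks: build the tree up and tear it down one node
	 * at a time, validating the red-black invariants at every
	 * intermediate size.
	 */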
	for (i = 0; i < check_loops; i++) {
		init();
		for (j = 0; j < nnodes; j++) {
			check(j);
			insert(nodes + j, &root);
		}
		for (j = 0; j < nnodes; j++) {
			check(nnodes - j);
			erase(nodes + j, &root);
		}
		check(0);
	}

	printk(KERN_ALERT "augmented rbtree testing");

	init();

	time1 = get_cycles();

	for (i = 0; i < perf_loops; i++) {
		for (j = 0; j < nnodes; j++)
			insert_augmented(nodes + j, &root);
		for (j = 0; j < nnodes; j++)
			erase_augmented(nodes + j, &root);
	}

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk(" -> test 1 (latency of nnodes insert+delete): %llu cycles\n", (unsigned long long)time);

	time1 = get_cycles();

	for (i = 0; i < perf_loops; i++) {
		for (j = 0; j < nnodes; j++)
			insert_augmented_cached(nodes + j, &root);
		for (j = 0; j < nnodes; j++)
			erase_augmented_cached(nodes + j, &root);
	}

	time2 = get_cycles();
	time = time2 - time1;

	time = div_u64(time, perf_loops);
	printk(" -> test 2 (latency of nnodes cached insert+delete): %llu cycles\n", (unsigned long long)time);

	for (i = 0; i < check_loops; i++) {
		init();
		for (j = 0; j < nnodes; j++) {
			check_augmented(j);
			insert_augmented(nodes + j, &root);
		}
		for (j = 0; j < nnodes; j++) {
			check_augmented(nnodes - j);
			erase_augmented(nodes + j, &root);
		}
		check_augmented(0);
	}

	kfree(nodes);

	return -EAGAIN; /* Fail will directly unload the module */
}

static void __exit rbtree_test_exit(void)
{
	printk(KERN_ALERT "test exit\n");
}

module_init(rbtree_test_init)
module_exit(rbtree_test_exit)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michel Lespinasse");
MODULE_DESCRIPTION("Red Black Tree test");