ana-net.git: src/xt_critbit.c
/*
 * Lightweight Autonomic Network Architecture
 *
 * Original userspace code from D. J. Bernstein (http://cr.yp.to/critbit.html).
 * Added a critbit_get method hack: instead of copying strings into the nodes
 * (as in the original version), we now hold a reference to the string and
 * fetch the container structure on lookups. By doing this, we only need to
 * guarantee that the string is aligned on a power-of-two boundary. Added RCU
 * and kmem_cache aligned node allocation support.
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss Federal Institute of Technology (ETH Zurich)
 * Subject to the GPL.
 *
 * Compared to a hash table, a crit-bit tree has comparable speed and two big
 * advantages. The first advantage is that a crit-bit tree supports more fast
 * operations: finding the smallest string, for example. The second advantage
 * is that a crit-bit tree guarantees good performance: it doesn't have any
 * tricky slowdowns for unusual (or malicious) data.
 *
 * Crit-bit trees are faster than comparison-based structures such as AVL
 * trees and B-trees. They're also simpler, especially for variable-length
 * strings.
 *
 * Crit-bit trees have the disadvantage of not (yet!) being widely
 * appreciated. Very few textbooks explain them, and very few libraries
 * implement them. (D. J. Bernstein, http://cr.yp.to/critbit.html)
 */
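/*
 * Usage sketch (illustrative assumption, not part of the original file):
 * since lookups return a pointer to the stored key rather than a copy, a
 * caller can embed the key in an aligned structure of its own and recover
 * the enclosing object with container_of(). The struct below is
 * hypothetical:
 *
 *	struct my_entry {
 *		char name[32];	<- key as first member, hence aligned
 *		int value;
 *	} ____cacheline_aligned;
 *
 *	char *key = critbit_get(&tree, "foo");
 *	if (key)
 *		entry = container_of(key, struct my_entry, name[0]);
 */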
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/rcupdate.h>
#include <linux/atomic.h>

#include "xt_critbit.h"
typedef long intptr_t;

struct critbit_node {
	void *child[2];		/* tagged pointers: bit 0 set for internal nodes */
	struct rcu_head rcu;
	u32 byte;		/* index of the byte holding the critical bit */
	u8 otherbits;		/* inverted mask of the critical bit */
} ____cacheline_aligned;

struct critbit_node_cache {
	struct kmem_cache *cache;
	atomic_t refcnt;
};

static struct critbit_node_cache critbit_cache = { 0 };
static void critbit_ctor(void *obj)
{
	struct critbit_node *node = obj;

	node->child[0] = node->child[1] = NULL;
}

static inline struct critbit_node *critbit_alloc_node_aligned(gfp_t flags)
{
	struct critbit_node *cn;
#ifndef __USE_KMALLOC
	cn = kmem_cache_alloc(critbit_cache.cache, flags);
	if (likely(cn))
		__module_get(THIS_MODULE);
#else
	cn = kmalloc(sizeof(*cn), flags);
	if (likely(cn)) {
		critbit_ctor(cn);
		__module_get(THIS_MODULE);
	}
#endif
	return cn;
}

static inline void critbit_free_node(struct critbit_node *p)
{
#ifndef __USE_KMALLOC
	kmem_cache_free(critbit_cache.cache, p);
#else
	kfree(p);
#endif
	module_put(THIS_MODULE);
}
int __critbit_contains(struct critbit_tree *tree, const char *elem)
{
	const u8 *ubytes = (void *) elem;
	const size_t ulen = strlen(elem);
	u8 c, *p;
	struct critbit_node *q;
	int direction;

	if (unlikely(!rcu_read_lock_held())) {
		printk(KERN_ERR "No rcu_read_lock held!\n");
		BUG();
	}
	p = rcu_dereference_raw(tree->root);
	if (!p)
		return 0;
	/* Internal nodes are tagged with bit 0 set; walk down to a leaf. */
	while (1 & (intptr_t) p) {
		c = 0;
		q = (void *) (p - 1);
		if (q->byte < ulen)
			c = ubytes[q->byte];
		/* direction is 1 iff the critical bit is set in c */
		direction = (1 + (q->otherbits | c)) >> 8;
		p = rcu_dereference_raw(q->child[direction]);
	}
	return (0 == strcmp(elem, (char *) p));
}
EXPORT_SYMBOL(__critbit_contains);

int critbit_contains(struct critbit_tree *tree, const char *elem)
{
	int ret;

	rcu_read_lock();
	ret = __critbit_contains(tree, elem);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(critbit_contains);
char *__critbit_get(struct critbit_tree *tree, const char *elem)
{
	const u8 *ubytes = (void *) elem;
	const size_t ulen = strlen(elem);
	u8 c, *p;
	struct critbit_node *q;
	int direction;

	if (unlikely(!rcu_read_lock_held())) {
		printk(KERN_ERR "No rcu_read_lock held!\n");
		BUG();
	}
	p = rcu_dereference_raw(tree->root);
	if (!p)
		return NULL;
	while (1 & (intptr_t) p) {
		c = 0;
		q = (void *) (p - 1);
		if (q->byte < ulen)
			c = ubytes[q->byte];
		direction = (1 + (q->otherbits | c)) >> 8;
		p = rcu_dereference_raw(q->child[direction]);
	}
	return (0 == strcmp(elem, (char *) p)) ? (char *) p : NULL;
}
EXPORT_SYMBOL(__critbit_get);

char *critbit_get(struct critbit_tree *tree, const char *elem)
{
	char *ret;

	rcu_read_lock();
	ret = __critbit_get(tree, elem);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(critbit_get);
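/*
 * Example (sketch, names hypothetical): if the enclosing object is to be
 * used after the lookup, take the RCU read lock around both the lookup and
 * the use, so that a writer who deletes the element and defers its freeing
 * by a grace period cannot free it underneath the reader:
 *
 *	rcu_read_lock();
 *	key = __critbit_get(&tree, "foo");
 *	if (key) {
 *		entry = container_of(key, struct my_entry, name[0]);
 *		... use entry only inside this read-side section ...
 *	}
 *	rcu_read_unlock();
 */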
int __critbit_insert(struct critbit_tree *tree, char *elem)
{
	const u8 *const ubytes = (void *) elem;
	const size_t ulen = strlen(elem);
	u8 c, *p = rcu_dereference_raw(tree->root);
	u32 newbyte, newotherbits;
	struct critbit_node *q, *newnode;
	int direction, newdirection;
	void **wherep;

	if (unlikely(!IS_ALIGNED((unsigned long) elem, SMP_CACHE_BYTES))) {
		printk(KERN_ERR "[lana] Your string is not power "
		       "of two aligned!\n");
		BUG();
	}
	if (!p) {
		rcu_assign_pointer(tree->root, elem);
		return 0;
	}

	/* Walk down to the best-matching leaf. */
	while (1 & (intptr_t) p) {
		c = 0;
		q = (void *) (p - 1);
		if (q->byte < ulen)
			c = ubytes[q->byte];
		direction = (1 + (q->otherbits | c)) >> 8;
		p = rcu_dereference_raw(q->child[direction]);
	}

	/* Find the first byte where the new element differs from the leaf. */
	for (newbyte = 0; newbyte < ulen; ++newbyte) {
		if (p[newbyte] != ubytes[newbyte]) {
			newotherbits = p[newbyte] ^ ubytes[newbyte];
			goto different_byte_found;
		}
	}
	if (p[newbyte] != 0) {
		newotherbits = p[newbyte];
		goto different_byte_found;
	}
	return -EEXIST;

different_byte_found:
	/* Keep only the most significant differing bit, then invert the mask. */
	while (newotherbits & (newotherbits - 1))
		newotherbits &= newotherbits - 1;
	newotherbits ^= 255;
	c = p[newbyte];
	newdirection = (1 + (newotherbits | c)) >> 8;

	newnode = critbit_alloc_node_aligned(GFP_ATOMIC | __GFP_ZERO);
	if (!newnode)
		return -ENOMEM;
	newnode->byte = newbyte;
	newnode->otherbits = newotherbits;
	newnode->child[1 - newdirection] = elem;

	/* Find the insertion point, ordered by critical bit. */
	for (wherep = &tree->root;;) {
		u8 *p = *wherep;

		if (!(1 & (intptr_t) p))
			break;
		q = (void *) (p - 1);
		if (q->byte > newbyte)
			break;
		if (q->byte == newbyte && q->otherbits > newotherbits)
			break;
		c = 0;
		if (q->byte < ulen)
			c = ubytes[q->byte];
		direction = (1 + (q->otherbits | c)) >> 8;
		wherep = q->child + direction;
	}

	newnode->child[newdirection] = *wherep;
	rcu_assign_pointer(*wherep, (void *) (1 + (char *) newnode));
	return 0;
}
EXPORT_SYMBOL(__critbit_insert);

int critbit_insert(struct critbit_tree *tree, char *elem)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&tree->wr_lock, flags);
	ret = __critbit_insert(tree, elem);
	spin_unlock_irqrestore(&tree->wr_lock, flags);
	return ret;
}
EXPORT_SYMBOL(critbit_insert);
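/*
 * Example (sketch under the alignment requirement above): elements must be
 * SMP_CACHE_BYTES aligned before insertion, e.g. by allocating the entry
 * from a SLAB_HWCACHE_ALIGN kmem_cache with the key as first member. The
 * cache and entry names are hypothetical:
 *
 *	cache = kmem_cache_create("my_entries", sizeof(struct my_entry),
 *				  0, SLAB_HWCACHE_ALIGN, NULL);
 *	entry = kmem_cache_alloc(cache, GFP_KERNEL);
 *	strlcpy(entry->name, "foo", sizeof(entry->name));
 *	err = critbit_insert(&tree, entry->name);
 */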
static void critbit_do_free_rcu(struct rcu_head *rp)
{
	struct critbit_node *p = container_of(rp, struct critbit_node, rcu);

	critbit_free_node(p);
}

int __critbit_delete(struct critbit_tree *tree, const char *elem)
{
	const u8 *ubytes = (void *) elem;
	const size_t ulen = strlen(elem);
	u8 c, *p = rcu_dereference_raw(tree->root);
	void **wherep = &tree->root;
	void **whereq = NULL;
	struct critbit_node *q = NULL;
	int direction = 0;

	if (!p)
		return 0;
	while (1 & (intptr_t) p) {
		whereq = wherep;
		q = (void *) (p - 1);
		c = 0;
		if (q->byte < ulen)
			c = ubytes[q->byte];
		direction = (1 + (q->otherbits | c)) >> 8;
		wherep = q->child + direction;
		p = *wherep;
	}

	if (0 != strcmp(elem, (char *) p))
		return -ENOENT;
	/* Here, we could decrement a refcount to the elem. */
	if (!whereq) {
		tree->root = NULL;
		return 0;
	}

	/* Unlink the parent node and free it after a grace period. */
	rcu_assign_pointer(*whereq, q->child[1 - direction]);
	call_rcu(&q->rcu, critbit_do_free_rcu);
	return 0;
}
EXPORT_SYMBOL(__critbit_delete);

int critbit_delete(struct critbit_tree *tree, const char *elem)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&tree->wr_lock, flags);
	ret = __critbit_delete(tree, elem);
	spin_unlock_irqrestore(&tree->wr_lock, flags);
	return ret;
}
EXPORT_SYMBOL(critbit_delete);
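/*
 * Note (assumption about caller responsibility): delete unlinks and frees
 * only the internal node; freeing the element itself is left to the caller,
 * who must defer it by a grace period if RCU readers may still hold a
 * pointer to it. The rcu member and callback below are hypothetical:
 *
 *	err = critbit_delete(&tree, entry->name);
 *	if (!err)
 *		call_rcu(&entry->rcu, my_entry_free_rcu);
 */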
static int critbit_node_cache_init(void)
{
	atomic_set(&critbit_cache.refcnt, 1);
	critbit_cache.cache = kmem_cache_create("critbit",
						sizeof(struct critbit_node),
						0, SLAB_HWCACHE_ALIGN |
						SLAB_MEM_SPREAD |
						SLAB_RECLAIM_ACCOUNT,
						critbit_ctor);
	if (!critbit_cache.cache)
		return -ENOMEM;
	return 0;
}

static void critbit_node_cache_destroy(void)
{
	rcu_barrier();
	kmem_cache_destroy(critbit_cache.cache);
}

void get_critbit_cache(void)
{
	/* The first user initializes the shared node cache. */
	if (unlikely(!atomic_read(&critbit_cache.refcnt))) {
		if (critbit_node_cache_init())
			panic("No mem left for critbit cache!\n");
	} else
		atomic_inc(&critbit_cache.refcnt);
}
EXPORT_SYMBOL(get_critbit_cache);

void put_critbit_cache(void)
{
	if (likely(!atomic_dec_and_test(&critbit_cache.refcnt)))
		return;
	critbit_node_cache_destroy();
}
EXPORT_SYMBOL(put_critbit_cache);
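/*
 * Example module wiring (sketch, not part of the original file): users take
 * a reference on the shared node cache for the lifetime of their tree; only
 * tree->root and tree->wr_lock are assumed here, as used above:
 *
 *	static struct critbit_tree tree;
 *
 *	static int __init my_module_init(void)
 *	{
 *		get_critbit_cache();
 *		tree.root = NULL;
 *		spin_lock_init(&tree.wr_lock);
 *		return 0;
 *	}
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		put_critbit_cache();
 *	}
 */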