/*
 * Lightweight Autonomic Network Architecture
 *
 * Original userspace code from D. J. Bernstein (http://cr.yp.to/critbit.html).
 * Added a critbit_get method: instead of copying strings into the nodes
 * (original version), we now hold a reference to the string and fetch the
 * container structure on lookups. By doing this, we only need to guarantee
 * that the string is aligned on a power-of-two boundary. Added RCU and
 * kmem_cache aligned node allocation support.
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss federal institute of technology (ETH Zurich)
 * Subject to the GPL.
 *
 * Compared to a hash table, a crit-bit tree has comparable speed and two
 * big advantages. The first advantage is that a crit-bit tree supports more
 * fast operations: finding the smallest string, for example. The second
 * advantage is that a crit-bit tree guarantees good performance: it doesn't
 * have any tricky slowdowns for unusual (or malicious) data.
 *
 * Crit-bit trees are faster than comparison-based structures such as AVL
 * trees and B-trees. They're also simpler, especially for variable-length
 * strings.
 *
 * Crit-bit trees have the disadvantage of not (yet!) being widely
 * appreciated. Very few textbooks explain them, and very few libraries
 * implement them. (D. J. Bernstein, http://cr.yp.to/critbit.html)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/rcupdate.h>
#include <linux/atomic.h>

#include "xt_critbit.h"

/* The kernel headers provide no intptr_t; we only need it for the
 * pointer-tagging arithmetic below. */
typedef long intptr_t;
/*
 * Internal tree nodes. External nodes are the user-supplied strings
 * themselves; the two are distinguished by the least significant pointer
 * bit, which is set for (suitably aligned) internal nodes.
 */
struct critbit_node {
        void *child[2];
        struct rcu_head rcu;
        u32 byte;               /* index of the differing byte */
        u8 otherbits;           /* inverted mask of the critical bit */
} ____cacheline_aligned;

struct critbit_node_cache {
        struct kmem_cache *cache;
        atomic_t refcnt;
};

static struct critbit_node_cache critbit_cache = { 0 };
static inline struct critbit_node *critbit_alloc_node_aligned(gfp_t flags)
{
        /* Pin the module for each live node, so that it cannot be
         * removed while allocations are outstanding. */
        __module_get(THIS_MODULE);
        return kmem_cache_alloc(critbit_cache.cache, flags);
}

static inline void critbit_free_node(struct critbit_node *p)
{
        kmem_cache_free(critbit_cache.cache, p);
        module_put(THIS_MODULE);
}
int __critbit_contains(struct critbit_tree *tree, const char *elem)
{
        const u8 *ubytes = (void *) elem;
        const size_t ulen = strlen(elem);
        u8 c, *p;
        struct critbit_node *q;
        int direction;

        if (unlikely(!rcu_read_lock_held())) {
                printk(KERN_ERR "No rcu_read_lock held!\n");
                BUG();
        }

        p = rcu_dereference_raw(tree->root);
        if (!p)
                return 0;
        /* Walk down to the closest external node. otherbits has all bits
         * set except the critical one, so the addition overflows into
         * bit 8 iff the critical bit of c is set. */
        while (1 & (intptr_t) p) {
                c = 0;
                q = (void *) (p - 1);
                if (q->byte < ulen)
                        c = ubytes[q->byte];
                direction = (1 + (q->otherbits | c)) >> 8;
                p = rcu_dereference_raw(q->child[direction]);
        }

        return (0 == strcmp(elem, (char *) p));
}
EXPORT_SYMBOL(__critbit_contains);
int critbit_contains(struct critbit_tree *tree, const char *elem)
{
        int ret;

        rcu_read_lock();
        ret = __critbit_contains(tree, elem);
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL(critbit_contains);
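
/*
 * Usage sketch (illustrative, not part of the original file; the tree and
 * names are assumptions): contexts that already run under rcu_read_lock(),
 * e.g. a packet processing path, can call __critbit_contains() directly;
 * all others use the critbit_contains() wrapper:
 *
 *      rcu_read_lock();
 *      if (__critbit_contains(&my_tree, name))
 *              handle_match();
 *      rcu_read_unlock();
 */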
char *__critbit_get(struct critbit_tree *tree, const char *elem)
{
        const u8 *ubytes = (void *) elem;
        const size_t ulen = strlen(elem);
        u8 c, *p;
        struct critbit_node *q;
        int direction;

        if (unlikely(!rcu_read_lock_held())) {
                printk(KERN_ERR "No rcu_read_lock held!\n");
                BUG();
        }

        p = rcu_dereference_raw(tree->root);
        if (!p)
                return NULL;
        /* Same walk as in __critbit_contains(), but hand the stored
         * string back to the caller on a match. */
        while (1 & (intptr_t) p) {
                c = 0;
                q = (void *) (p - 1);
                if (q->byte < ulen)
                        c = ubytes[q->byte];
                direction = (1 + (q->otherbits | c)) >> 8;
                p = rcu_dereference_raw(q->child[direction]);
        }

        return (0 == strcmp(elem, (char *) p)) ? (char *) p : NULL;
}
EXPORT_SYMBOL(__critbit_get);
char *critbit_get(struct critbit_tree *tree, const char *elem)
{
        char *ret;

        rcu_read_lock();
        ret = __critbit_get(tree, elem);
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL(critbit_get);
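
/*
 * Usage sketch (illustrative, not part of the original file) of the get
 * method from the header comment: since the tree stores a pointer into the
 * caller's object rather than a copy of the string, a successful lookup
 * can recover the enclosing structure via container_of(). struct entry and
 * its fields are assumptions for this example:
 *
 *      struct entry {
 *              char name[32] ____cacheline_aligned;
 *              int value;
 *      };
 *
 *      char *str;
 *      struct entry *e = NULL;
 *
 *      rcu_read_lock();
 *      str = __critbit_get(&my_tree, "foo");
 *      if (str)
 *              e = container_of(str, struct entry, name[0]);
 *      rcu_read_unlock();
 */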
int __critbit_insert(struct critbit_tree *tree, char *elem)
{
        const u8 *const ubytes = (void *) elem;
        const size_t ulen = strlen(elem);
        u8 c, *p = rcu_dereference_raw(tree->root);
        u32 newbyte, newotherbits;
        struct critbit_node *q, *newnode;
        int direction, newdirection;
        void **wherep;

        /* The low pointer bit tags internal nodes, so external strings
         * must at least keep it clear; we demand cacheline alignment. */
        if (unlikely(!IS_ALIGNED((unsigned long) elem, SMP_CACHE_BYTES))) {
                printk(KERN_ERR "[lana] Your string is not power "
                       "of two boundary aligned!\n");
                BUG();
        }

        if (!p) {
                rcu_assign_pointer(tree->root, elem);
                return 0;
        }

        while (1 & (intptr_t) p) {
                c = 0;
                q = (void *) (p - 1);
                if (q->byte < ulen)
                        c = ubytes[q->byte];
                direction = (1 + (q->otherbits | c)) >> 8;
                p = rcu_dereference_raw(q->child[direction]);
        }

        /* Find the first byte that differs from the best match. */
        for (newbyte = 0; newbyte < ulen; ++newbyte) {
                if (p[newbyte] != ubytes[newbyte]) {
                        newotherbits = p[newbyte] ^ ubytes[newbyte];
                        goto different_byte_found;
                }
        }
        if (p[newbyte] != 0) {
                newotherbits = p[newbyte];
                goto different_byte_found;
        }

        return -EEXIST;

different_byte_found:
        /* Isolate the most significant differing bit, then invert it
         * into the otherbits mask used by the walk above. */
        while (newotherbits & (newotherbits - 1))
                newotherbits &= newotherbits - 1;
        newotherbits ^= 255;
        c = p[newbyte];
        newdirection = (1 + (newotherbits | c)) >> 8;

        newnode = critbit_alloc_node_aligned(GFP_ATOMIC);
        if (!newnode)
                return -ENOMEM;
        newnode->byte = newbyte;
        newnode->otherbits = newotherbits;
        newnode->child[1 - newdirection] = elem;

        /* Find the proper insertion point, keeping internal nodes
         * ordered by (byte, otherbits). */
        for (wherep = &tree->root;;) {
                u8 *p = *wherep;

                if (!(1 & (intptr_t) p))
                        break;
                q = (void *) (p - 1);
                if (q->byte > newbyte)
                        break;
                if (q->byte == newbyte && q->otherbits > newotherbits)
                        break;
                c = 0;
                if (q->byte < ulen)
                        c = ubytes[q->byte];
                direction = (1 + (q->otherbits | c)) >> 8;
                wherep = q->child + direction;
        }

        newnode->child[newdirection] = *wherep;
        rcu_assign_pointer(*wherep, (void *) (1 + (char *) newnode));

        return 0;
}
EXPORT_SYMBOL(__critbit_insert);
int critbit_insert(struct critbit_tree *tree, char *elem)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&tree->wr_lock, flags);
        ret = __critbit_insert(tree, elem);
        spin_unlock_irqrestore(&tree->wr_lock, flags);

        return ret;
}
EXPORT_SYMBOL(critbit_insert);
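
/*
 * Usage sketch (illustrative, not part of the original file): inserted
 * strings must stay alive and cacheline aligned for as long as they are
 * in the tree. One way to satisfy the alignment is a dedicated slab cache
 * with SLAB_HWCACHE_ALIGN and the name as first member; entry_cache and
 * struct entry are assumptions:
 *
 *      entry_cache = kmem_cache_create("entries", sizeof(struct entry),
 *                                      0, SLAB_HWCACHE_ALIGN, NULL);
 *      e = kmem_cache_zalloc(entry_cache, GFP_KERNEL);
 *      strlcpy(e->name, "foo", sizeof(e->name));
 *      err = critbit_insert(&my_tree, e->name);
 */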
static void critbit_do_free_rcu(struct rcu_head *rp)
{
        struct critbit_node *p = container_of(rp, struct critbit_node, rcu);
        critbit_free_node(p);
}
int __critbit_delete(struct critbit_tree *tree, const char *elem)
{
        const u8 *ubytes = (void *) elem;
        const size_t ulen = strlen(elem);
        u8 c, *p = rcu_dereference_raw(tree->root);
        void **wherep = &tree->root;
        void **whereq = NULL;
        struct critbit_node *q = NULL;
        int direction = 0;

        if (!p)
                return 0;

        while (1 & (intptr_t) p) {
                whereq = wherep;
                q = (void *) (p - 1);
                c = 0;
                if (q->byte < ulen)
                        c = ubytes[q->byte];
                direction = (1 + (q->otherbits | c)) >> 8;
                wherep = q->child + direction;
                p = *wherep;
        }

        if (0 != strcmp(elem, (char *) p))
                return -ENOENT;
        /* Here, we could decrement a refcount to the elem. */
        if (!whereq) {
                /* elem was the only member; the tree becomes empty. */
                tree->root = NULL;
                return 0;
        }

        /* Replace the parent node with the surviving sibling and free
         * the internal node after a grace period. */
        rcu_assign_pointer(*whereq, q->child[1 - direction]);
        call_rcu(&q->rcu, critbit_do_free_rcu);

        return 0;
}
EXPORT_SYMBOL(__critbit_delete);
int critbit_delete(struct critbit_tree *tree, const char *elem)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&tree->wr_lock, flags);
        ret = __critbit_delete(tree, elem);
        spin_unlock_irqrestore(&tree->wr_lock, flags);

        return ret;
}
EXPORT_SYMBOL(critbit_delete);
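
/*
 * Usage sketch (illustrative, not part of the original file): only the
 * internal node is RCU-freed here; the element string belongs to the
 * caller. Readers may still hold a pointer handed out by __critbit_get(),
 * so wait a grace period before freeing the containing object:
 *
 *      err = critbit_delete(&my_tree, e->name);
 *      if (!err) {
 *              synchronize_rcu();
 *              kmem_cache_free(entry_cache, e);
 *      }
 */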
static void critbit_ctor(void *obj)
{
        struct critbit_node *node = obj;

        node->child[0] = node->child[1] = NULL;
}
static int critbit_node_cache_init(void)
{
        atomic_set(&critbit_cache.refcnt, 1);
        critbit_cache.cache = kmem_cache_create("critbit",
                                                sizeof(struct critbit_node),
                                                0, SLAB_HWCACHE_ALIGN,
                                                critbit_ctor);
        if (!critbit_cache.cache)
                return -ENOMEM;

        return 0;
}
static void critbit_node_cache_destroy(void)
{
        synchronize_rcu();
        kmem_cache_destroy(critbit_cache.cache);
}
void get_critbit_cache(void)
{
        /* Lazily set up the shared node cache on first use; afterwards,
         * just take another reference. */
        if (unlikely(!atomic_read(&critbit_cache.refcnt))) {
                if (critbit_node_cache_init())
                        panic("No mem left for critbit cache!\n");
        } else
                atomic_inc(&critbit_cache.refcnt);
}
EXPORT_SYMBOL(get_critbit_cache);
void put_critbit_cache(void)
{
        if (likely(!atomic_dec_and_test(&critbit_cache.refcnt)))
                return;
        critbit_node_cache_destroy();
}
EXPORT_SYMBOL(put_critbit_cache);
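
/*
 * Usage sketch (illustrative, not part of the original file): a module
 * using critbit trees holds a reference on the shared node cache for its
 * whole lifetime, so the cache exists as long as any tree does:
 *
 *      static int __init mymod_init(void)
 *      {
 *              get_critbit_cache();
 *              return 0;
 *      }
 *
 *      static void __exit mymod_exit(void)
 *      {
 *              put_critbit_cache();
 *      }
 */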