restricted access to procfs
[ana-net.git] / src / xt_fblock.c
blobdc913bfcddfcc1dadedda0f7c4269ea927d83506
1 /*
2 * Lightweight Autonomic Network Architecture
4 * Global LANA IDP translation tables, core backend.
6 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
7 * Swiss federal institute of technology (ETH Zurich)
8 * Subject to the GPL.
9 */
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/rcupdate.h>
14 #include <linux/atomic.h>
15 #include <linux/types.h>
16 #include <linux/cpu.h>
17 #include <linux/spinlock.h>
18 #include <linux/rwlock.h>
19 #include <linux/slab.h>
20 #include <linux/proc_fs.h>
22 #include "xt_fblock.h"
23 #include "xt_idp.h"
24 #include "xt_hash.h"
25 #include "xt_critbit.h"
/* One namespace entry: maps an fblock instance name to its IDP number;
 * stored in the idpmap critbit tree and freed via RCU. */
struct idp_elem {
	char name[FBNAMSIZ];	/* fblock instance name, the lookup key */
	idp_t idp;		/* IDP number currently bound to the name */
	struct rcu_head rcu;	/* for deferred kfree via call_rcu() */
} ____cacheline_aligned;
/* name -> IDP translation table, keyed by fblock name */
static struct critbit_tree idpmap;
/* IDP -> struct fblock chained hash table; readers use RCU,
 * writers serialize on fblmap_head_lock */
static struct fblock **fblmap_head = NULL;
static spinlock_t fblmap_head_lock;
/* monotonically increasing source of fresh IDP numbers */
static atomic64_t idp_counter;
/* slab cache backing struct fblock allocations */
static struct kmem_cache *fblock_cache = NULL;
extern struct proc_dir_entry *lana_proc_dir;
/* /proc entry listing all registered fblocks */
static struct proc_dir_entry *fblocks_proc;
44 static inline idp_t provide_new_fblock_idp(void)
46 return (idp_t) atomic64_inc_return(&idp_counter);
49 static int register_to_fblock_namespace(char *name, idp_t val)
51 struct idp_elem *elem;
53 if (critbit_contains(&idpmap, name))
54 return -EEXIST;
55 elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
56 if (!elem)
57 return -ENOMEM;
58 strlcpy(elem->name, name, sizeof(elem->name));
59 elem->idp = val;
61 return critbit_insert(&idpmap, elem->name);
64 static void fblock_namespace_do_free_rcu(struct rcu_head *rp)
66 struct idp_elem *p = container_of(rp, struct idp_elem, rcu);
67 kfree(p);
70 static int unregister_from_fblock_namespace(char *name)
72 int ret;
73 struct idp_elem *elem;
75 elem = struct_of(critbit_get(&idpmap, name), struct idp_elem);
76 if (!elem)
77 return -ENOENT;
78 ret = critbit_delete(&idpmap, elem->name);
79 if (ret)
80 return ret;
81 call_rcu(&elem->rcu, fblock_namespace_do_free_rcu);
83 return 0;
86 /* Called within RCU read lock! */
87 idp_t __get_fblock_namespace_mapping(char *name)
89 struct idp_elem *elem = struct_of(__critbit_get(&idpmap, name),
90 struct idp_elem);
91 if (unlikely(!elem))
92 return IDP_UNKNOWN;
93 smp_rmb();
94 return elem->idp;
96 EXPORT_SYMBOL_GPL(__get_fblock_namespace_mapping);
98 idp_t get_fblock_namespace_mapping(char *name)
100 idp_t ret;
101 rcu_read_lock();
102 ret = __get_fblock_namespace_mapping(name);
103 rcu_read_unlock();
104 return ret;
106 EXPORT_SYMBOL_GPL(get_fblock_namespace_mapping);
108 /* Called within RCU read lock! */
109 int __change_fblock_namespace_mapping(char *name, idp_t new)
111 struct idp_elem *elem = struct_of(__critbit_get(&idpmap, name),
112 struct idp_elem);
113 if (unlikely(!elem))
114 return -ENOENT;
115 elem->idp = new;
116 smp_wmb();
117 return 0;
119 EXPORT_SYMBOL_GPL(__change_fblock_namespace_mapping);
121 int change_fblock_namespace_mapping(char *name, idp_t new)
123 int ret;
124 rcu_read_lock();
125 ret = __change_fblock_namespace_mapping(name, new);
126 rcu_read_unlock();
127 return ret;
129 EXPORT_SYMBOL_GPL(change_fblock_namespace_mapping);
131 /* Caller needs to do a put_fblock() after his work is done! */
132 /* Called within RCU read lock! */
133 struct fblock *__search_fblock(idp_t idp)
135 struct fblock *p0;
137 p0 = rcu_dereference_raw(fblmap_head[hash_idp(idp)]);
138 if (!p0)
139 return NULL;
140 while (p0) {
141 if (p0->idp == idp) {
142 get_fblock(p0);
143 return p0;
145 p0 = rcu_dereference_raw(p0->next);
148 return NULL;
150 EXPORT_SYMBOL_GPL(__search_fblock);
152 struct fblock *search_fblock(idp_t idp)
154 struct fblock *ret;
156 if (unlikely(idp == IDP_UNKNOWN))
157 return NULL;
158 rcu_read_lock();
159 ret = __search_fblock(idp);
160 rcu_read_unlock();
162 return ret;
164 EXPORT_SYMBOL_GPL(search_fblock);
167 * fb1 on top of fb2 in the stack
169 int __fblock_bind(struct fblock *fb1, struct fblock *fb2)
171 int ret;
172 struct fblock_bind_msg msg;
173 /* Hack: we let the fb think that this belongs to his own chain to
174 * get the reference back to itself. */
175 struct fblock_notifier fbn;
177 memset(&fbn, 0, sizeof(fbn));
178 memset(&msg, 0, sizeof(msg));
180 get_fblock(fb1);
181 get_fblock(fb2);
183 msg.dir = TYPE_EGRESS;
184 msg.idp = fb2->idp;
185 fbn.self = fb1;
186 ret = fb1->ops->event_rx(&fbn.nb, FBLOCK_BIND_IDP, &msg);
187 if (ret != NOTIFY_OK) {
188 put_fblock(fb1);
189 put_fblock(fb2);
190 return -EBUSY;
193 msg.dir = TYPE_INGRESS;
194 msg.idp = fb1->idp;
195 fbn.self = fb2;
196 ret = fb2->ops->event_rx(&fbn.nb, FBLOCK_BIND_IDP, &msg);
197 if (ret != NOTIFY_OK) {
198 /* Release previous binding */
199 msg.dir = TYPE_EGRESS;
200 msg.idp = fb2->idp;
201 fbn.self = fb1;
202 ret = fb1->ops->event_rx(&fbn.nb, FBLOCK_UNBIND_IDP, &msg);
203 if (ret != NOTIFY_OK)
204 panic("Cannot release previously bound fblock!\n");
205 put_fblock(fb1);
206 put_fblock(fb2);
207 return -EBUSY;
210 ret = subscribe_to_remote_fblock(fb1, fb2);
211 if (ret) {
212 __fblock_unbind(fb1, fb2);
213 return -ENOMEM;
216 ret = subscribe_to_remote_fblock(fb2, fb1);
217 if (ret) {
218 __fblock_unbind(fb1, fb2);
219 return -ENOMEM;
222 /* We don't give refcount back! */
223 return 0;
225 EXPORT_SYMBOL_GPL(__fblock_bind);
/* RCU-protected wrapper around __fblock_bind(). */
int fblock_bind(struct fblock *fb1, struct fblock *fb2)
{
	int err;

	rcu_read_lock();
	err = __fblock_bind(fb1, fb2);
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fblock_bind);
238 * fb1 on top of fb2 in the stack
240 int __fblock_unbind(struct fblock *fb1, struct fblock *fb2)
242 int ret;
243 struct fblock_bind_msg msg;
244 /* Hack: we let the fb think that this belongs to his own chain to
245 * get the reference back to itself. */
246 struct fblock_notifier fbn;
248 /* We still have refcnt, we drop it on exit! */
250 memset(&fbn, 0, sizeof(fbn));
251 memset(&msg, 0, sizeof(msg));
253 msg.dir = TYPE_EGRESS;
254 msg.idp = fb2->idp;
255 fbn.self = fb1;
256 ret = fb1->ops->event_rx(&fbn.nb, FBLOCK_UNBIND_IDP, &msg);
257 if (ret != NOTIFY_OK) {
258 /* We are not bound to fb2 */
259 return -EBUSY;
262 msg.dir = TYPE_INGRESS;
263 msg.idp = fb1->idp;
264 fbn.self = fb2;
265 ret = fb2->ops->event_rx(&fbn.nb, FBLOCK_UNBIND_IDP, &msg);
266 if (ret != NOTIFY_OK) {
267 /* We are not bound to fb1, but fb1 was bound to us, so only
268 * release fb1 */
269 put_fblock(fb1);
270 return -EBUSY;
273 unsubscribe_from_remote_fblock(fb1, fb2);
274 unsubscribe_from_remote_fblock(fb2, fb1);
276 put_fblock(fb2);
277 put_fblock(fb1);
279 return 0;
281 EXPORT_SYMBOL_GPL(__fblock_unbind);
/* RCU-protected wrapper around __fblock_unbind(). */
int fblock_unbind(struct fblock *fb1, struct fblock *fb2)
{
	int err;

	rcu_read_lock();
	err = __fblock_unbind(fb1, fb2);
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fblock_unbind);
294 * register_fblock is called when the idp is preknown to the
295 * caller and has already been registered previously. The previous
296 * registration has then called unregister_fblock to remove the
297 * fblock but to keep the namespace and idp number.
299 int register_fblock(struct fblock *p, idp_t idp)
301 struct fblock *p0;
302 unsigned long flags;
304 spin_lock_irqsave(&fblmap_head_lock, flags);
305 p->idp = idp;
306 p0 = rcu_dereference_raw(fblmap_head[hash_idp(p->idp)]);
307 if (!p0)
308 rcu_assign_pointer(fblmap_head[hash_idp(p->idp)], p);
309 else {
310 p->next = p0->next;
311 rcu_assign_pointer(p0->next, p);
313 spin_unlock_irqrestore(&fblmap_head_lock, flags);
314 return 0;
316 EXPORT_SYMBOL_GPL(register_fblock);
319 * register_fblock_namespace is called when a new functional block
320 * instance is registered to the system. Then, its name will be
321 * registered into the namespace and it receives a new idp number.
323 int register_fblock_namespace(struct fblock *p)
325 struct fblock *p0;
326 unsigned long flags;
328 spin_lock_irqsave(&fblmap_head_lock, flags);
329 p->idp = provide_new_fblock_idp();
330 p0 = rcu_dereference_raw(fblmap_head[hash_idp(p->idp)]);
331 if (!p0)
332 rcu_assign_pointer(fblmap_head[hash_idp(p->idp)], p);
333 else {
334 p->next = p0->next;
335 rcu_assign_pointer(p0->next, p);
337 spin_unlock_irqrestore(&fblmap_head_lock, flags);
338 return register_to_fblock_namespace(p->name, p->idp);
340 EXPORT_SYMBOL_GPL(register_fblock_namespace);
342 static void free_fblock_rcu(struct rcu_head *rp)
344 struct fblock *p = container_of(rp, struct fblock, rcu);
345 put_fblock(p);
349 * unregister_fblock releases the functional block _only_ from the idp to
350 * fblock translation table, but not from the namespace. The idp can then
351 * later be reused, e.g. by another fblock.
353 int unregister_fblock(struct fblock *p)
355 int ret = -ENOENT;
356 struct fblock *p0;
357 unsigned long flags;
359 spin_lock_irqsave(&fblmap_head_lock, flags);
360 p0 = rcu_dereference_raw(fblmap_head[hash_idp(p->idp)]);
361 if (p0 == p)
362 rcu_assign_pointer(fblmap_head[hash_idp(p->idp)], p->next);
363 else if (p0) {
364 struct fblock *p1;
365 while ((p1 = rcu_dereference_raw(p0->next))) {
366 if (p1 == p) {
367 rcu_assign_pointer(p0->next, p1->next);
368 ret = 0;
369 break;
371 p0 = p1;
374 spin_unlock_irqrestore(&fblmap_head_lock, flags);
375 call_rcu(&p->rcu, free_fblock_rcu);
376 return ret;
378 EXPORT_SYMBOL_GPL(unregister_fblock);
381 * Removes the functional block from the system along with its namespace
382 * mapping.
384 void unregister_fblock_namespace(struct fblock *p)
386 struct fblock *p0;
387 unsigned long flags;
389 spin_lock_irqsave(&fblmap_head_lock, flags);
390 p0 = rcu_dereference_raw(fblmap_head[hash_idp(p->idp)]);
391 if (p0 == p)
392 rcu_assign_pointer(fblmap_head[hash_idp(p->idp)], p->next);
393 else if (p0) {
394 struct fblock *p1;
395 while ((p1 = rcu_dereference_raw(p0->next))) {
396 if (p1 == p) {
397 rcu_assign_pointer(p0->next, p->next);
398 break;
400 p0 = p1;
403 spin_unlock_irqrestore(&fblmap_head_lock, flags);
404 unregister_from_fblock_namespace(p->name);
405 call_rcu(&p->rcu, free_fblock_rcu);
407 EXPORT_SYMBOL_GPL(unregister_fblock_namespace);
409 int xchg_fblock_idp(idp_t idp, struct fblock *new)
411 return 0;
413 EXPORT_SYMBOL_GPL(xchg_fblock_idp);
/* TODO: not implemented yet — placeholder that always succeeds. */
int xchg_fblock(struct fblock *old, struct fblock *new)
{
	return 0;
}
EXPORT_SYMBOL_GPL(xchg_fblock);
421 /* If state changes on 'remote' fb, we ('us') want to be notified. */
422 int subscribe_to_remote_fblock(struct fblock *us, struct fblock *remote)
424 struct fblock_notifier *fn = kmalloc(sizeof(*fn), GFP_ATOMIC);
425 if (!fn)
426 return -ENOMEM;
427 write_lock(&us->lock);
428 fn->self = us;
429 fn->remote = remote;
430 init_fblock_subscriber(us, &fn->nb);
431 fn->next = us->notifiers;
432 us->notifiers = fn;
433 write_unlock(&us->lock);
434 return fblock_register_foreign_subscriber(remote, &us->notifiers->nb);
436 EXPORT_SYMBOL_GPL(subscribe_to_remote_fblock);
438 void unsubscribe_from_remote_fblock(struct fblock *us, struct fblock *remote)
440 int found = 0;
441 struct fblock_notifier *fn;
443 if (unlikely(!us->notifiers))
444 return;
445 write_lock(&us->lock);
446 fn = us->notifiers;
447 if (fn->remote == remote)
448 us->notifiers = us->notifiers->next;
449 else {
450 struct fblock_notifier *f1;
451 while ((f1 = fn->next)) {
452 if (f1->remote == remote) {
453 found = 1;
454 fn->next = f1->next;
455 fn = f1; /* free f1 */
456 break;
457 } else
458 fn = f1;
461 write_unlock(&us->lock);
462 if (found) {
463 fblock_unregister_foreign_subscriber(remote, &fn->nb);
464 kfree(fn);
467 EXPORT_SYMBOL_GPL(unsubscribe_from_remote_fblock);
469 static void ctor_fblock(void *obj)
471 struct fblock *p = obj;
472 atomic_set(&p->refcnt, 1);
473 rwlock_init(&p->lock);
474 p->idp = IDP_UNKNOWN;
475 p->next = NULL;
476 p->private_data = NULL;
477 p->ops = NULL;
478 p->notifiers = NULL;
479 p->others = NULL;
482 struct fblock *alloc_fblock(gfp_t flags)
484 return kmem_cache_alloc(fblock_cache, flags);
486 EXPORT_SYMBOL_GPL(alloc_fblock);
488 int init_fblock(struct fblock *fb, char *name, void *priv,
489 struct fblock_ops *ops)
491 write_lock(&fb->lock);
492 strlcpy(fb->name, name, sizeof(fb->name));
493 fb->private_data = priv;
494 fb->ops = ops;
495 fb->others = kmalloc(sizeof(*(fb->others)), GFP_ATOMIC);
496 if (!fb->others)
497 return -ENOMEM;
498 ATOMIC_INIT_NOTIFIER_HEAD(&fb->others->subscribers);
499 write_unlock(&fb->lock);
500 return 0;
502 EXPORT_SYMBOL_GPL(init_fblock);
504 void kfree_fblock(struct fblock *p)
506 kmem_cache_free(fblock_cache, p);
508 EXPORT_SYMBOL_GPL(kfree_fblock);
510 void cleanup_fblock(struct fblock *fb)
512 notify_fblock_subscribers(fb, FBLOCK_DOWN, &fb->idp);
513 fb->factory->dtor(fb);
514 kfree(fb->others);
516 EXPORT_SYMBOL_GPL(cleanup_fblock);
518 void cleanup_fblock_ctor(struct fblock *fb)
520 kfree(fb->others);
522 EXPORT_SYMBOL_GPL(cleanup_fblock_ctor);
524 static int procfs_fblocks(char *page, char **start, off_t offset,
525 int count, int *eof, void *data)
527 int i;
528 off_t len = 0;
529 struct fblock *fb;
531 len += sprintf(page + len, "name type addr idp refcnt\n");
532 rcu_read_lock();
533 for (i = 0; i < HASHTSIZ; ++i) {
534 fb = rcu_dereference_raw(fblmap_head[i]);
535 while (fb) {
536 len += sprintf(page + len, "%s %s %p %u %d\n",
537 fb->name, fb->factory->type,
538 fb, fb->idp, atomic_read(&fb->refcnt));
539 fb = rcu_dereference_raw(fb->next);
542 rcu_read_unlock();
544 /* FIXME: fits in page? */
545 *eof = 1;
546 return len;
549 int init_fblock_tables(void)
551 int ret = 0;
553 get_critbit_cache();
554 critbit_init_tree(&idpmap);
556 fblmap_head_lock = __SPIN_LOCK_UNLOCKED(fblmap_head_lock);
557 fblmap_head = kzalloc(sizeof(*fblmap_head) * HASHTSIZ, GFP_KERNEL);
558 if (!fblmap_head)
559 goto err;
560 fblock_cache = kmem_cache_create("fblock", sizeof(struct fblock),
561 0, SLAB_HWCACHE_ALIGN, ctor_fblock);
562 if (!fblock_cache)
563 goto err2;
564 atomic64_set(&idp_counter, 0);
565 fblocks_proc = create_proc_read_entry("fblocks", 0400, lana_proc_dir,
566 procfs_fblocks, NULL);
567 if (!fblocks_proc)
568 goto err3;
569 return 0;
570 err3:
571 kmem_cache_destroy(fblock_cache);
572 err2:
573 kfree(fblmap_head);
574 err:
575 put_critbit_cache();
576 return ret;
578 EXPORT_SYMBOL_GPL(init_fblock_tables);
580 void cleanup_fblock_tables(void)
582 remove_proc_entry("fblocks", lana_proc_dir);
583 put_critbit_cache();
584 kfree(fblmap_head);
585 synchronize_rcu();
586 kmem_cache_destroy(fblock_cache);
588 EXPORT_SYMBOL_GPL(cleanup_fblock_tables);