[ana-net.git] / src / xt_fblock.c
/*
 * Lightweight Autonomic Network Architecture
 *
 * Global LANA IDP translation tables, core backend.
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss Federal Institute of Technology (ETH Zurich)
 * Subject to the GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/radix-tree.h>

#include "xt_fblock.h"
#include "xt_idp.h"
#include "xt_critbit.h"

/* Element of the name -> IDP translation table. */
struct idp_elem {
        char name[FBNAMSIZ];
        idp_t idp;
        struct rcu_head rcu;
} ____cacheline_aligned;

/* Name -> IDP mapping */
static struct critbit_tree idpmap;

/* IDP -> fblock mapping */
RADIX_TREE(fblmap, GFP_ATOMIC);

static atomic64_t idp_counter;
static struct kmem_cache *fblock_cache = NULL;

extern struct proc_dir_entry *lana_proc_dir;
static struct proc_dir_entry *fblocks_proc;

const char *path_names[] = {
        "ingress",
        "egress",
};
EXPORT_SYMBOL(path_names);
static inline idp_t provide_new_fblock_idp(void)
{
        return (idp_t) atomic64_inc_return(&idp_counter);
}

static int register_to_fblock_namespace(char *name, idp_t val)
{
        int ret;
        struct idp_elem *elem;

        if (critbit_contains(&idpmap, name))
                return -EEXIST;
        elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
        if (!elem)
                return -ENOMEM;
        strlcpy(elem->name, name, sizeof(elem->name));
        elem->idp = val;
        ret = critbit_insert(&idpmap, elem->name);
        if (ret)
                kfree(elem); /* don't leak the element if the insert fails */
        return ret;
}
static void fblock_namespace_do_free_rcu(struct rcu_head *rp)
{
        struct idp_elem *p = container_of(rp, struct idp_elem, rcu);
        kfree(p);
}

static int unregister_from_fblock_namespace(char *name)
{
        int ret;
        struct idp_elem *elem;

        elem = struct_of(critbit_get(&idpmap, name), struct idp_elem);
        if (!elem)
                return -ENOENT;
        ret = critbit_delete(&idpmap, elem->name);
        if (ret)
                return ret;
        call_rcu(&elem->rcu, fblock_namespace_do_free_rcu);
        return 0;
}
/* Called within RCU read lock! */
idp_t __get_fblock_namespace_mapping(char *name)
{
        struct idp_elem *elem = struct_of(__critbit_get(&idpmap, name),
                                          struct idp_elem);
        if (unlikely(!elem))
                return IDP_UNKNOWN;
        smp_rmb();
        return elem->idp;
}
EXPORT_SYMBOL_GPL(__get_fblock_namespace_mapping);

idp_t get_fblock_namespace_mapping(char *name)
{
        idp_t ret;

        rcu_read_lock();
        ret = __get_fblock_namespace_mapping(name);
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(get_fblock_namespace_mapping);
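
/*
 * Example (illustrative sketch, not part of this module): resolving a
 * block's IDP by its name; the instance name "fb1" is made up.
 *
 *      idp_t id = get_fblock_namespace_mapping("fb1");
 *      if (id == IDP_UNKNOWN)
 *              return -ENOENT;
 */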
/* Called within RCU read lock! */
int __change_fblock_namespace_mapping(char *name, idp_t new)
{
        struct idp_elem *elem = struct_of(__critbit_get(&idpmap, name),
                                          struct idp_elem);
        if (unlikely(!elem))
                return -ENOENT;
        elem->idp = new;
        smp_wmb();
        return 0;
}
EXPORT_SYMBOL_GPL(__change_fblock_namespace_mapping);

int change_fblock_namespace_mapping(char *name, idp_t new)
{
        int ret;

        rcu_read_lock();
        ret = __change_fblock_namespace_mapping(name, new);
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(change_fblock_namespace_mapping);
struct fblock *search_fblock(idp_t idp)
{
        struct fblock *ret;

        if (unlikely(idp == IDP_UNKNOWN))
                return NULL;
        rcu_read_lock();
        ret = __search_fblock(idp);
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(search_fblock);

/* Note: the caller needs to do a put_fblock when done! */
struct fblock *search_fblock_n(char *name)
{
        idp_t id;
        struct fblock *ret;

        if (unlikely(!name))
                return NULL;
        rcu_read_lock();
        id = __get_fblock_namespace_mapping(name);
        ret = __search_fblock(id);
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(search_fblock_n);
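
/*
 * Example (sketch, not part of this module): looking up a block by name
 * and dropping the reference again; "eth0-fb" is a hypothetical name.
 *
 *      struct fblock *fb = search_fblock_n("eth0-fb");
 *      if (!fb)
 *              return -ENOENT;
 *      ... use fb ...
 *      put_fblock(fb);
 */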
/* opt_string must be of the form "key=val" and 0-terminated */
int __fblock_set_option(struct fblock *fb, char *opt_string)
{
        int ret = 0;
        char *val = opt_string;
        struct fblock_opt_msg msg;
        /* Hack: we let the fb think that this belongs to its own chain to
         * get the reference back to itself. */
        struct fblock_notifier fbn;

        memset(&fbn, 0, sizeof(fbn));
        memset(&msg, 0, sizeof(msg));

        /* Split "key=val" in place: key and val each end up 0-terminated. */
        msg.key = opt_string;
        while (*val != '=' && *val != '\0')
                val++;
        if (*val == '\0')
                return -EINVAL;
        val++;
        *(val - 1) = '\0';
        msg.val = val;
        fbn.self = fb;

        get_fblock(fb);
        ret = fb->event_rx(&fbn.nb, FBLOCK_SET_OPT, &msg);
        put_fblock(fb);

        return ret;
}
EXPORT_SYMBOL_GPL(__fblock_set_option);

int fblock_set_option(struct fblock *fb, char *opt_string)
{
        int ret;

        if (unlikely(!opt_string || !fb))
                return -EINVAL;
        rcu_read_lock();
        ret = __fblock_set_option(fb, opt_string);
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(fblock_set_option);
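
/*
 * Example (sketch, not part of this module): setting an option on a
 * block. The string is split in place, so it must be writable;
 * "mtu=1500" is a made-up key/value pair.
 *
 *      char opt[] = "mtu=1500";
 *      int ret = fblock_set_option(fb, opt);
 */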
/* Must already hold spin_lock */
static void fblock_update_selfref(struct fblock_notifier *head,
                                  struct fblock *self)
{
        while (rcu_dereference_raw(head) != NULL) {
                rcu_assign_pointer(head->self, self);
                rcu_assign_pointer(head, head->next);
        }
}

/*
 * Migrates src to dst; both must be of the same type. Working data is
 * transferred to dst and dropped from src. src gets dst's old data, so
 * that on free, we do not need to explicitly ignore src's private data
 * and dst's remaining data.
 */
void fblock_migrate_p(struct fblock *dst, struct fblock *src)
{
        void *priv_old;

        get_fblock(dst);
        get_fblock(src);

        rcu_assign_pointer(priv_old, dst->private_data);
        rcu_assign_pointer(dst->private_data, src->private_data);
        rcu_assign_pointer(src->private_data, priv_old);

        put_fblock(dst);
        put_fblock(src);
}
EXPORT_SYMBOL_GPL(fblock_migrate_p);

void fblock_migrate_r(struct fblock *dst, struct fblock *src)
{
        struct fblock_notifier *not_old;
        struct fblock_subscrib *sub_old;

        get_fblock(dst);
        get_fblock(src);

        spin_lock(&dst->lock);
        spin_lock(&src->lock);

        dst->idp = src->idp;
        strlcpy(dst->name, src->name, sizeof(dst->name));

        rcu_assign_pointer(not_old, dst->notifiers);
        rcu_assign_pointer(dst->notifiers, src->notifiers);
        rcu_assign_pointer(src->notifiers, not_old);

        fblock_update_selfref(dst->notifiers, dst);
        fblock_update_selfref(src->notifiers, src);

        rcu_assign_pointer(sub_old, dst->others);
        rcu_assign_pointer(dst->others, src->others);
        rcu_assign_pointer(src->others, sub_old);

        /* Swap the reference counts of dst and src. */
        atomic_xchg(&dst->refcnt, atomic_xchg(&src->refcnt,
                    atomic_read(&dst->refcnt)));

        spin_unlock(&src->lock);
        spin_unlock(&dst->lock);

        put_fblock(dst);
        put_fblock(src);
}
EXPORT_SYMBOL_GPL(fblock_migrate_r);
/*
 * Binds fb1 on top of fb2 in the stack.
 */
int __fblock_bind(struct fblock *fb1, struct fblock *fb2)
{
        int ret;
        struct fblock_bind_msg msg;
        /* Hack: we let the fb think that this belongs to its own chain to
         * get the reference back to itself. */
        struct fblock_notifier fbn;

        memset(&fbn, 0, sizeof(fbn));
        memset(&msg, 0, sizeof(msg));

        get_fblock(fb1);
        get_fblock(fb2);

        msg.dir = TYPE_EGRESS;
        msg.idp = fb2->idp;
        fbn.self = fb1;
        ret = fb1->event_rx(&fbn.nb, FBLOCK_BIND_IDP, &msg);
        if (ret != NOTIFY_OK) {
                put_fblock(fb1);
                put_fblock(fb2);
                return -EBUSY;
        }

        msg.dir = TYPE_INGRESS;
        msg.idp = fb1->idp;
        fbn.self = fb2;
        ret = fb2->event_rx(&fbn.nb, FBLOCK_BIND_IDP, &msg);
        if (ret != NOTIFY_OK) {
                /* Release the previous binding */
                msg.dir = TYPE_EGRESS;
                msg.idp = fb2->idp;
                fbn.self = fb1;
                ret = fb1->event_rx(&fbn.nb, FBLOCK_UNBIND_IDP, &msg);
                if (ret != NOTIFY_OK)
                        panic("Cannot release previously bound fblock!\n");
                put_fblock(fb1);
                put_fblock(fb2);
                return -EBUSY;
        }

        ret = subscribe_to_remote_fblock(fb1, fb2);
        if (ret) {
                __fblock_unbind(fb1, fb2);
                return -ENOMEM;
        }

        ret = subscribe_to_remote_fblock(fb2, fb1);
        if (ret) {
                __fblock_unbind(fb1, fb2);
                return -ENOMEM;
        }

        /* We don't give the refcount back; __fblock_unbind drops it! */
        return 0;
}
EXPORT_SYMBOL_GPL(__fblock_bind);

int fblock_bind(struct fblock *fb1, struct fblock *fb2)
{
        int ret;

        rcu_read_lock();
        ret = __fblock_bind(fb1, fb2);
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(fblock_bind);
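
/*
 * Example (sketch, not part of this module): stacking fb_top on top of
 * fb_bottom; both names are hypothetical. fblock_bind returns -EBUSY if
 * either side refuses the binding.
 *
 *      int ret = fblock_bind(fb_top, fb_bottom);
 *      if (ret)
 *              return ret;
 *      ...
 *      ret = fblock_unbind(fb_top, fb_bottom);
 */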
/*
 * Unbinds fb1 from fb2; fb1 is on top of fb2 in the stack.
 */
int __fblock_unbind(struct fblock *fb1, struct fblock *fb2)
{
        int ret;
        struct fblock_bind_msg msg;
        /* Hack: we let the fb think that this belongs to its own chain to
         * get the reference back to itself. */
        struct fblock_notifier fbn;

        /* We still have the refcnt from the bind; we drop it on exit! */

        memset(&fbn, 0, sizeof(fbn));
        memset(&msg, 0, sizeof(msg));

        msg.dir = TYPE_EGRESS;
        msg.idp = fb2->idp;
        fbn.self = fb1;
        ret = fb1->event_rx(&fbn.nb, FBLOCK_UNBIND_IDP, &msg);
        if (ret != NOTIFY_OK) {
                /* We are not bound to fb2 */
                return -EBUSY;
        }

        msg.dir = TYPE_INGRESS;
        msg.idp = fb1->idp;
        fbn.self = fb2;
        ret = fb2->event_rx(&fbn.nb, FBLOCK_UNBIND_IDP, &msg);
        if (ret != NOTIFY_OK) {
                /* We are not bound to fb1, but fb1 was bound to us, so only
                 * release fb1 */
                put_fblock(fb1);
                return -EBUSY;
        }

        unsubscribe_from_remote_fblock(fb1, fb2);
        unsubscribe_from_remote_fblock(fb2, fb1);

        put_fblock(fb2);
        put_fblock(fb1);

        return 0;
}
EXPORT_SYMBOL_GPL(__fblock_unbind);

int fblock_unbind(struct fblock *fb1, struct fblock *fb2)
{
        int ret;

        rcu_read_lock();
        ret = __fblock_unbind(fb1, fb2);
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(fblock_unbind);
/*
 * register_fblock is called when the idp is already known to the
 * caller and has been registered previously. The previous registration
 * has then called unregister_fblock to remove the fblock, but kept the
 * namespace entry and idp number.
 */
int register_fblock(struct fblock *p, idp_t idp)
{
        p->idp = idp;
        return radix_tree_insert(&fblmap, idp, p);
}
EXPORT_SYMBOL_GPL(register_fblock);

/*
 * register_fblock_namespace is called when a new functional block
 * instance is registered with the system. Its name is then registered
 * in the namespace and it receives a new idp number.
 */
int register_fblock_namespace(struct fblock *p)
{
        int ret;

        p->idp = provide_new_fblock_idp();
        ret = radix_tree_insert(&fblmap, p->idp, p);
        if (ret < 0)
                return ret;
        return register_to_fblock_namespace(p->name, p->idp);
}
EXPORT_SYMBOL_GPL(register_fblock_namespace);
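
/*
 * Example (sketch, not part of this module): registering a freshly built
 * instance, with rollback on failure; the error-path helpers are the
 * ones defined further below in this file.
 *
 *      int ret = register_fblock_namespace(fb);
 *      if (ret) {
 *              cleanup_fblock_ctor(fb);
 *              kfree_fblock(fb);
 *              return ret;
 *      }
 */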
void free_fblock_rcu(struct rcu_head *rp)
{
        struct fblock *p = container_of(rp, struct fblock, rcu);
        cleanup_fblock(p);
        kfree_fblock(p);
}
EXPORT_SYMBOL_GPL(free_fblock_rcu);

/*
 * unregister_fblock releases the functional block _only_ from the idp-to-
 * fblock translation table, but not from the namespace. The idp can then
 * later be reused, e.g. by another fblock.
 */
void unregister_fblock(struct fblock *p)
{
        radix_tree_delete(&fblmap, p->idp);
        put_fblock(p);
}
EXPORT_SYMBOL_GPL(unregister_fblock);

/*
 * Removes the functional block from the system along with its namespace
 * mapping.
 */
static void __unregister_fblock_namespace(struct fblock *p, int rcu)
{
        radix_tree_delete(&fblmap, p->idp);
        unregister_from_fblock_namespace(p->name);
        if (rcu)
                put_fblock(p);
}

void unregister_fblock_namespace(struct fblock *p)
{
        __unregister_fblock_namespace(p, 1);
}
EXPORT_SYMBOL_GPL(unregister_fblock_namespace);

void unregister_fblock_namespace_no_rcu(struct fblock *p)
{
        __unregister_fblock_namespace(p, 0);
}
EXPORT_SYMBOL_GPL(unregister_fblock_namespace_no_rcu);
/* If state changes on the 'remote' fb, we ('us') want to be notified. */
int subscribe_to_remote_fblock(struct fblock *us, struct fblock *remote)
{
        struct fblock_notifier *fn = kmalloc(sizeof(*fn), GFP_ATOMIC);
        if (!fn)
                return -ENOMEM;

        /* hold references on both sides of the subscription */
        get_fblock(us);
        get_fblock(remote);

        spin_lock(&us->lock);
        fn->self = us;
        fn->remote = remote->idp;
        init_fblock_subscriber(us, &fn->nb);
        fn->next = rcu_dereference_raw(us->notifiers);
        rcu_assign_pointer(us->notifiers, fn);
        spin_unlock(&us->lock);

        return fblock_register_foreign_subscriber(remote,
                        &rcu_dereference_raw(us->notifiers)->nb);
}
EXPORT_SYMBOL_GPL(subscribe_to_remote_fblock);

void unsubscribe_from_remote_fblock(struct fblock *us, struct fblock *remote)
{
        int found = 0;
        struct fblock_notifier *fn;

        if (unlikely(!rcu_dereference_raw(us->notifiers)))
                return;
        spin_lock(&us->lock);
        fn = rcu_dereference_raw(us->notifiers);
        if (fn->remote == remote->idp) {
                /* head matches: unlink it, unregister and free it below */
                found = 1;
                rcu_assign_pointer(us->notifiers, us->notifiers->next);
        } else {
                struct fblock_notifier *f1;
                while ((f1 = fn->next)) {
                        if (f1->remote == remote->idp) {
                                found = 1;
                                fn->next = f1->next;
                                fn = f1; /* free f1 */
                                break;
                        } else
                                fn = f1;
                }
        }
        spin_unlock(&us->lock);
        if (found) {
                fblock_unregister_foreign_subscriber(remote, &fn->nb);
                kfree(fn);
        }

        /* drop the references taken at subscribe time */
        put_fblock(us);
        put_fblock(remote);
}
EXPORT_SYMBOL_GPL(unsubscribe_from_remote_fblock);
static void ctor_fblock(void *obj)
{
        struct fblock *p = obj;

        memset(p, 0, sizeof(*p));
        spin_lock_init(&p->lock);
        p->idp = IDP_UNKNOWN;
}

struct fblock *alloc_fblock(gfp_t flags)
{
        struct fblock *fb;
#ifndef __USE_KMALLOC
        fb = kmem_cache_alloc(fblock_cache, flags);
        if (likely(fb))
                __module_get(THIS_MODULE);
#else
        fb = kmalloc(sizeof(*fb), flags);
        if (fb) {
                ctor_fblock(fb);
                __module_get(THIS_MODULE);
        }
#endif
        return fb;
}
EXPORT_SYMBOL_GPL(alloc_fblock);

int init_fblock(struct fblock *fb, char *name, void __percpu *priv)
{
        spin_lock(&fb->lock);
        strlcpy(fb->name, name, sizeof(fb->name));
        rcu_assign_pointer(fb->private_data, priv);
        fb->others = kmalloc(sizeof(*(fb->others)), GFP_ATOMIC);
        if (!fb->others) {
                spin_unlock(&fb->lock); /* must not return with the lock held */
                return -ENOMEM;
        }
        ATOMIC_INIT_NOTIFIER_HEAD(&fb->others->subscribers);
        spin_unlock(&fb->lock);
        atomic_set(&fb->refcnt, 1);
        return 0;
}
EXPORT_SYMBOL_GPL(init_fblock);
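
/*
 * Example (sketch, not part of this module): the usual allocation and
 * initialization sequence; the name "my-fb" and the percpu pointer priv
 * are made up.
 *
 *      struct fblock *fb = alloc_fblock(GFP_ATOMIC);
 *      if (!fb)
 *              return -ENOMEM;
 *      if (init_fblock(fb, "my-fb", priv)) {
 *              kfree_fblock(fb);
 *              return -ENOMEM;
 *      }
 */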
void kfree_fblock(struct fblock *p)
{
#ifndef __USE_KMALLOC
        kmem_cache_free(fblock_cache, p);
#else
        kfree(p);
#endif
        module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(kfree_fblock);

void cleanup_fblock(struct fblock *fb)
{
        notify_fblock_subscribers(fb, FBLOCK_DOWN, &fb->idp);
        if (fb->factory)
                fb->factory->dtor(fb);
        kfree(rcu_dereference_raw(fb->others));
}
EXPORT_SYMBOL_GPL(cleanup_fblock);

/* Like cleanup_fblock, but without notifying subscribers or calling the
 * factory dtor. */
void cleanup_fblock_ctor(struct fblock *fb)
{
        kfree(rcu_dereference_raw(fb->others));
}
EXPORT_SYMBOL_GPL(cleanup_fblock_ctor);
static int procfs_fblocks(char *page, char **start, off_t offset,
                          int count, int *eof, void *data)
{
        int i, has_sub;
        off_t len = 0;
        struct fblock *fb;
        struct fblock_notifier *fn;
        long long max = atomic64_read(&idp_counter);

        rcu_read_lock();
        for (i = 0; i <= max; ++i) {
                fb = radix_tree_lookup(&fblmap, i);
                if (!fb)
                        continue;
                has_sub = 0;
                len += sprintf(page + len, "%s %s %p %u %d [",
                               fb->name, fb->factory ? fb->factory->type : "vlink",
                               fb, fb->idp,
                               atomic_read(&fb->refcnt));
                fn = rcu_dereference_raw(fb->notifiers);
                while (fn) {
                        len += sprintf(page + len, "%u ", fn->remote);
                        fn = rcu_dereference_raw(fn->next);
                        has_sub = 1;
                }
                /* Overwrite the trailing space, if we printed one. */
                len += sprintf(page + len - has_sub, "]\n");
        }
        rcu_read_unlock();

        /* FIXME: fits in page? */
        *eof = 1;
        return len;
}
int init_fblock_tables(void)
{
        int ret = -ENOMEM;

        get_critbit_cache();
        critbit_init_tree(&idpmap);
        fblock_cache = kmem_cache_create("fblock", sizeof(struct fblock),
                                         0, SLAB_HWCACHE_ALIGN |
                                         SLAB_MEM_SPREAD | SLAB_RECLAIM_ACCOUNT,
                                         ctor_fblock);
        if (!fblock_cache)
                goto err;
        atomic64_set(&idp_counter, 0);
        fblocks_proc = create_proc_read_entry("fblocks", 0400, lana_proc_dir,
                                              procfs_fblocks, NULL);
        if (!fblocks_proc)
                goto err2;
        return 0;
err2:
        kmem_cache_destroy(fblock_cache);
err:
        put_critbit_cache();
        return ret;
}
EXPORT_SYMBOL_GPL(init_fblock_tables);

void cleanup_fblock_tables(void)
{
        remove_proc_entry("fblocks", lana_proc_dir);
        put_critbit_cache();
        rcu_barrier();
        kmem_cache_destroy(fblock_cache);
}
EXPORT_SYMBOL_GPL(cleanup_fblock_tables);