/*
 * Lightweight Autonomic Network Architecture
 *
 * Global LANA IDP translation tables, core backend.
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss federal institute of technology (ETH Zurich)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>

#include "xt_fblock.h"
#include "xt_critbit.h"
31 } ____cacheline_aligned
;
33 static struct critbit_tree idpmap
;
34 static struct fblock
**fblmap_head
= NULL
;
35 static spinlock_t fblmap_head_lock
;
37 static atomic64_t idp_counter
;
39 static struct kmem_cache
*fblock_cache
= NULL
;
41 extern struct proc_dir_entry
*lana_proc_dir
;
42 static struct proc_dir_entry
*fblocks_proc
;
44 static inline idp_t
provide_new_fblock_idp(void)
46 return (idp_t
) atomic64_inc_return(&idp_counter
);
49 static int register_to_fblock_namespace(char *name
, idp_t val
)
51 struct idp_elem
*elem
;
53 if (critbit_contains(&idpmap
, name
))
55 elem
= kzalloc(sizeof(*elem
), GFP_ATOMIC
);
58 strlcpy(elem
->name
, name
, sizeof(elem
->name
));
61 return critbit_insert(&idpmap
, elem
->name
);
64 static void fblock_namespace_do_free_rcu(struct rcu_head
*rp
)
66 struct idp_elem
*p
= container_of(rp
, struct idp_elem
, rcu
);
70 static int unregister_from_fblock_namespace(char *name
)
73 struct idp_elem
*elem
;
75 elem
= struct_of(critbit_get(&idpmap
, name
), struct idp_elem
);
78 ret
= critbit_delete(&idpmap
, elem
->name
);
81 call_rcu(&elem
->rcu
, fblock_namespace_do_free_rcu
);
86 /* Called within RCU read lock! */
87 idp_t
__get_fblock_namespace_mapping(char *name
)
89 struct idp_elem
*elem
= struct_of(__critbit_get(&idpmap
, name
),
96 EXPORT_SYMBOL_GPL(__get_fblock_namespace_mapping
);
98 idp_t
get_fblock_namespace_mapping(char *name
)
102 ret
= __get_fblock_namespace_mapping(name
);
106 EXPORT_SYMBOL_GPL(get_fblock_namespace_mapping
);
108 /* Called within RCU read lock! */
109 int __change_fblock_namespace_mapping(char *name
, idp_t
new)
111 struct idp_elem
*elem
= struct_of(__critbit_get(&idpmap
, name
),
119 EXPORT_SYMBOL_GPL(__change_fblock_namespace_mapping
);
121 int change_fblock_namespace_mapping(char *name
, idp_t
new)
125 ret
= __change_fblock_namespace_mapping(name
, new);
129 EXPORT_SYMBOL_GPL(change_fblock_namespace_mapping
);
131 /* Caller needs to do a put_fblock() after his work is done! */
132 /* Called within RCU read lock! */
133 struct fblock
*__search_fblock(idp_t idp
)
137 p0
= rcu_dereference_raw(fblmap_head
[hash_idp(idp
)]);
141 if (p0
->idp
== idp
) {
145 p0
= rcu_dereference_raw(p0
->next
);
150 EXPORT_SYMBOL_GPL(__search_fblock
);
152 struct fblock
*search_fblock(idp_t idp
)
156 if (unlikely(idp
== IDP_UNKNOWN
))
159 ret
= __search_fblock(idp
);
164 EXPORT_SYMBOL_GPL(search_fblock
);
/* NOTE(review): this block is extraction-mangled; original lines 166,
 * 170-171, 176, 179-182, 184-185, 188-192, 194-195, 200-201, 205-209,
 * 211, 213-215, 217, 219-221 and 223-224 are missing. The surviving
 * fragments are kept byte-identical rather than guessed at.
 * Visible behavior: binds fb1 on top of fb2 — both blocks receive a
 * FBLOCK_BIND_IDP event (egress direction for fb1, ingress for fb2)
 * via their ops->event_rx, then each subscribes to the other; on a
 * half-failed bind the fb1 side is rolled back via FBLOCK_UNBIND_IDP. */
167 * fb1 on top of fb2 in the stack
169 int __fblock_bind(struct fblock
*fb1
, struct fblock
*fb2
)
172 struct fblock_bind_msg msg
;
173 /* Hack: we let the fb think that this belongs to his own chain to
174 * get the reference back to itself. */
175 struct fblock_notifier fbn
;
/* [gap: original line 176 missing] */
177 memset(&fbn
, 0, sizeof(fbn
));
178 memset(&msg
, 0, sizeof(msg
));
/* [gap: original lines 179-182 missing — presumably refcounts and
 * msg/fbn setup; confirm against upstream] */
183 msg
.dir
= TYPE_EGRESS
;
/* [gap: original lines 184-185 missing] */
186 ret
= fb1
->ops
->event_rx(&fbn
.nb
, FBLOCK_BIND_IDP
, &msg
);
187 if (ret
!= NOTIFY_OK
) {
/* [gap: original lines 188-192 missing — error path body] */
193 msg
.dir
= TYPE_INGRESS
;
/* [gap: original lines 194-195 missing] */
196 ret
= fb2
->ops
->event_rx(&fbn
.nb
, FBLOCK_BIND_IDP
, &msg
);
197 if (ret
!= NOTIFY_OK
) {
198 /* Release previous binding */
199 msg
.dir
= TYPE_EGRESS
;
/* [gap: original lines 200-201 missing] */
202 ret
= fb1
->ops
->event_rx(&fbn
.nb
, FBLOCK_UNBIND_IDP
, &msg
);
203 if (ret
!= NOTIFY_OK
)
204 panic("Cannot release previously bound fblock!\n");
/* [gap: original lines 205-209 missing] */
210 ret
= subscribe_to_remote_fblock(fb1
, fb2
);
/* [gap: original line 211 missing — presumably error check] */
212 __fblock_unbind(fb1
, fb2
);
/* [gap: original lines 213-215 missing] */
216 ret
= subscribe_to_remote_fblock(fb2
, fb1
);
/* [gap: original line 217 missing — presumably error check] */
218 __fblock_unbind(fb1
, fb2
);
/* [gap: original lines 219-221 missing] */
222 /* We don't give refcount back! */
/* [gap: original lines 223-224 missing — presumably return and brace] */
225 EXPORT_SYMBOL_GPL(__fblock_bind
);
/* RCU-locked wrapper around __fblock_bind(). */
int fblock_bind(struct fblock *fb1, struct fblock *fb2)
{
	int ret;

	rcu_read_lock();
	ret = __fblock_bind(fb1, fb2);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(fblock_bind);
/* NOTE(review): this block is extraction-mangled; original lines 239,
 * 241-242, 247, 249, 252, 254-255, 259-261, 263-264, 268-272 and
 * 275-280 are missing. The surviving fragments are kept byte-identical
 * rather than guessed at.
 * Visible behavior: unbinds fb1 from fb2 — both sides receive a
 * FBLOCK_UNBIND_IDP event (egress for fb1, ingress for fb2), then the
 * mutual subscriptions are dropped. Reference counts taken at bind
 * time are released here ("drop it on exit"). */
238 * fb1 on top of fb2 in the stack
240 int __fblock_unbind(struct fblock
*fb1
, struct fblock
*fb2
)
243 struct fblock_bind_msg msg
;
244 /* Hack: we let the fb think that this belongs to his own chain to
245 * get the reference back to itself. */
246 struct fblock_notifier fbn
;
248 /* We still have refcnt, we drop it on exit! */
/* [gap: original line 249 missing] */
250 memset(&fbn
, 0, sizeof(fbn
));
251 memset(&msg
, 0, sizeof(msg
));
/* [gap: original line 252 missing] */
253 msg
.dir
= TYPE_EGRESS
;
/* [gap: original lines 254-255 missing] */
256 ret
= fb1
->ops
->event_rx(&fbn
.nb
, FBLOCK_UNBIND_IDP
, &msg
);
257 if (ret
!= NOTIFY_OK
) {
258 /* We are not bound to fb2 */
/* [gap: original lines 259-261 missing — error path body] */
262 msg
.dir
= TYPE_INGRESS
;
/* [gap: original lines 263-264 missing] */
265 ret
= fb2
->ops
->event_rx(&fbn
.nb
, FBLOCK_UNBIND_IDP
, &msg
);
266 if (ret
!= NOTIFY_OK
) {
267 /* We are not bound to fb1, but fb1 was bound to us, so only
/* [gap: original lines 268-272 missing — rest of comment and body] */
273 unsubscribe_from_remote_fblock(fb1
, fb2
);
274 unsubscribe_from_remote_fblock(fb2
, fb1
);
/* [gap: original lines 275-280 missing — presumably put_fblock calls,
 * return and closing brace] */
281 EXPORT_SYMBOL_GPL(__fblock_unbind
);
/* RCU-locked wrapper around __fblock_unbind(). */
int fblock_unbind(struct fblock *fb1, struct fblock *fb2)
{
	int ret;

	rcu_read_lock();
	ret = __fblock_unbind(fb1, fb2);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(fblock_unbind);
294 * register_fblock is called when the idp is preknown to the
295 * caller and has already been registered previously. The previous
296 * registration has then called unregister_fblock to remove the
297 * fblock but to keep the namespace and idp number.
299 int register_fblock(struct fblock
*p
, idp_t idp
)
304 spin_lock_irqsave(&fblmap_head_lock
, flags
);
306 p0
= rcu_dereference_raw(fblmap_head
[hash_idp(p
->idp
)]);
308 rcu_assign_pointer(fblmap_head
[hash_idp(p
->idp
)], p
);
311 rcu_assign_pointer(p0
->next
, p
);
313 spin_unlock_irqrestore(&fblmap_head_lock
, flags
);
316 EXPORT_SYMBOL_GPL(register_fblock
);
319 * register_fblock_namespace is called when a new functional block
320 * instance is registered to the system. Then, its name will be
321 * registered into the namespace and it receives a new idp number.
323 int register_fblock_namespace(struct fblock
*p
)
328 spin_lock_irqsave(&fblmap_head_lock
, flags
);
329 p
->idp
= provide_new_fblock_idp();
330 p0
= rcu_dereference_raw(fblmap_head
[hash_idp(p
->idp
)]);
332 rcu_assign_pointer(fblmap_head
[hash_idp(p
->idp
)], p
);
335 rcu_assign_pointer(p0
->next
, p
);
337 spin_unlock_irqrestore(&fblmap_head_lock
, flags
);
338 return register_to_fblock_namespace(p
->name
, p
->idp
);
340 EXPORT_SYMBOL_GPL(register_fblock_namespace
);
342 static void free_fblock_rcu(struct rcu_head
*rp
)
344 struct fblock
*p
= container_of(rp
, struct fblock
, rcu
);
349 * unregister_fblock releases the functional block _only_ from the idp to
350 * fblock translation table, but not from the namespace. The idp can then
351 * later be reused, e.g. by another fblock.
353 int unregister_fblock(struct fblock
*p
)
359 spin_lock_irqsave(&fblmap_head_lock
, flags
);
360 p0
= rcu_dereference_raw(fblmap_head
[hash_idp(p
->idp
)]);
362 rcu_assign_pointer(fblmap_head
[hash_idp(p
->idp
)], p
->next
);
365 while ((p1
= rcu_dereference_raw(p0
->next
))) {
367 rcu_assign_pointer(p0
->next
, p1
->next
);
374 spin_unlock_irqrestore(&fblmap_head_lock
, flags
);
375 call_rcu(&p
->rcu
, free_fblock_rcu
);
378 EXPORT_SYMBOL_GPL(unregister_fblock
);
381 * Removes the functional block from the system along with its namespace
384 void unregister_fblock_namespace(struct fblock
*p
)
389 spin_lock_irqsave(&fblmap_head_lock
, flags
);
390 p0
= rcu_dereference_raw(fblmap_head
[hash_idp(p
->idp
)]);
392 rcu_assign_pointer(fblmap_head
[hash_idp(p
->idp
)], p
->next
);
395 while ((p1
= rcu_dereference_raw(p0
->next
))) {
397 rcu_assign_pointer(p0
->next
, p
->next
);
403 spin_unlock_irqrestore(&fblmap_head_lock
, flags
);
404 unregister_from_fblock_namespace(p
->name
);
405 call_rcu(&p
->rcu
, free_fblock_rcu
);
407 EXPORT_SYMBOL_GPL(unregister_fblock_namespace
);
/* NOTE(review): the body of this function (original lines 410-412) was
 * lost in extraction; only the signature and export survive. Intended
 * purpose per its name: exchange the fblock registered under @idp with
 * @new — confirm against upstream before use. */
409 int xchg_fblock_idp(idp_t idp
, struct fblock
*new)
413 EXPORT_SYMBOL_GPL(xchg_fblock_idp
);
/* NOTE(review): the body of this function (original lines 416-418) was
 * lost in extraction; only the signature and export survive. Intended
 * purpose per its name: replace @old with @new in place — confirm
 * against upstream before use. */
415 int xchg_fblock(struct fblock
*old
, struct fblock
*new)
419 EXPORT_SYMBOL_GPL(xchg_fblock
);
421 /* If state changes on 'remote' fb, we ('us') want to be notified. */
422 int subscribe_to_remote_fblock(struct fblock
*us
, struct fblock
*remote
)
424 struct fblock_notifier
*fn
= kmalloc(sizeof(*fn
), GFP_ATOMIC
);
427 write_lock(&us
->lock
);
430 init_fblock_subscriber(us
, &fn
->nb
);
431 fn
->next
= us
->notifiers
;
433 write_unlock(&us
->lock
);
434 return fblock_register_foreign_subscriber(remote
, &us
->notifiers
->nb
);
436 EXPORT_SYMBOL_GPL(subscribe_to_remote_fblock
);
438 void unsubscribe_from_remote_fblock(struct fblock
*us
, struct fblock
*remote
)
441 struct fblock_notifier
*fn
;
443 if (unlikely(!us
->notifiers
))
445 write_lock(&us
->lock
);
447 if (fn
->remote
== remote
)
448 us
->notifiers
= us
->notifiers
->next
;
450 struct fblock_notifier
*f1
;
451 while ((f1
= fn
->next
)) {
452 if (f1
->remote
== remote
) {
455 fn
= f1
; /* free f1 */
461 write_unlock(&us
->lock
);
463 fblock_unregister_foreign_subscriber(remote
, &fn
->nb
);
467 EXPORT_SYMBOL_GPL(unsubscribe_from_remote_fblock
);
469 static void ctor_fblock(void *obj
)
471 struct fblock
*p
= obj
;
472 atomic_set(&p
->refcnt
, 1);
473 rwlock_init(&p
->lock
);
474 p
->idp
= IDP_UNKNOWN
;
476 p
->private_data
= NULL
;
482 struct fblock
*alloc_fblock(gfp_t flags
)
484 return kmem_cache_alloc(fblock_cache
, flags
);
486 EXPORT_SYMBOL_GPL(alloc_fblock
);
488 int init_fblock(struct fblock
*fb
, char *name
, void *priv
,
489 struct fblock_ops
*ops
)
491 write_lock(&fb
->lock
);
492 strlcpy(fb
->name
, name
, sizeof(fb
->name
));
493 fb
->private_data
= priv
;
495 fb
->others
= kmalloc(sizeof(*(fb
->others
)), GFP_ATOMIC
);
498 ATOMIC_INIT_NOTIFIER_HEAD(&fb
->others
->subscribers
);
499 write_unlock(&fb
->lock
);
502 EXPORT_SYMBOL_GPL(init_fblock
);
504 void kfree_fblock(struct fblock
*p
)
506 kmem_cache_free(fblock_cache
, p
);
508 EXPORT_SYMBOL_GPL(kfree_fblock
);
510 void cleanup_fblock(struct fblock
*fb
)
512 notify_fblock_subscribers(fb
, FBLOCK_DOWN
, &fb
->idp
);
513 fb
->factory
->dtor(fb
);
516 EXPORT_SYMBOL_GPL(cleanup_fblock
);
/* NOTE(review): the body of this function (original lines 519-521) was
 * lost in extraction; only the signature and export survive. Presumably
 * the counterpart of init_fblock() for objects that never got fully
 * registered (e.g. freeing fb->others) — confirm against upstream. */
518 void cleanup_fblock_ctor(struct fblock
*fb
)
522 EXPORT_SYMBOL_GPL(cleanup_fblock_ctor
);
524 static int procfs_fblocks(char *page
, char **start
, off_t offset
,
525 int count
, int *eof
, void *data
)
531 len
+= sprintf(page
+ len
, "name type addr idp refcnt\n");
533 for (i
= 0; i
< HASHTSIZ
; ++i
) {
534 fb
= rcu_dereference_raw(fblmap_head
[i
]);
536 len
+= sprintf(page
+ len
, "%s %s %p %u %d\n",
537 fb
->name
, fb
->factory
->type
,
538 fb
, fb
->idp
, atomic_read(&fb
->refcnt
));
539 fb
= rcu_dereference_raw(fb
->next
);
544 /* FIXME: fits in page? */
549 int init_fblock_tables(void)
554 critbit_init_tree(&idpmap
);
556 fblmap_head_lock
= __SPIN_LOCK_UNLOCKED(fblmap_head_lock
);
557 fblmap_head
= kzalloc(sizeof(*fblmap_head
) * HASHTSIZ
, GFP_KERNEL
);
560 fblock_cache
= kmem_cache_create("fblock", sizeof(struct fblock
),
561 0, SLAB_HWCACHE_ALIGN
, ctor_fblock
);
564 atomic64_set(&idp_counter
, 0);
565 fblocks_proc
= create_proc_read_entry("fblocks", 0400, lana_proc_dir
,
566 procfs_fblocks
, NULL
);
571 kmem_cache_destroy(fblock_cache
);
578 EXPORT_SYMBOL_GPL(init_fblock_tables
);
580 void cleanup_fblock_tables(void)
582 remove_proc_entry("fblocks", lana_proc_dir
);
586 kmem_cache_destroy(fblock_cache
);
588 EXPORT_SYMBOL_GPL(cleanup_fblock_tables
);