/*
 * Lightweight Autonomic Network Architecture
 *
 * LANA BSD Socket interface for communication with user level.
 * PF_LANA protocol family socket handler.
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss federal institute of technology (ETH Zurich)
 * Subject to the GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/seqlock.h>
#include <linux/bug.h>
#include <linux/percpu.h>
#include <linux/prefetch.h>
#include <linux/atomic.h>
#include <linux/slab.h>
#include <net/sock.h>

#include "xt_fblock.h"
#include "xt_builder.h"
#include "xt_idp.h"
#include "xt_skb.h"
#include "xt_engine.h"

#define AF_LANA         27      /* For now.. */
#define PF_LANA         AF_LANA

/* LANA protocol types on top of the PF_LANA family */
#define LANA_PROTO_AUTO 0       /* Auto-select if none is given */
#define LANA_PROTO_RAW  1       /* LANA raw proto, currently the only one */
/* Total num of protos available */
#define LANA_NPROTO     2

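/*
 * Minimal user-space usage sketch (not part of this module): AF_LANA is
 * not known to libc headers, so the numeric value 27 is used directly.
 * The interface index travels in the first byte of sa_data, which is
 * what lana_raw_bind() below expects:
 *
 *      int fd = socket(27, SOCK_RAW, 0);         // 27 == AF_LANA
 *      struct sockaddr sa = { .sa_family = 27 };
 *      sa.sa_data[0] = if_nametoindex("eth0");   // e.g. eth0
 *      bind(fd, &sa, sizeof(sa));
 */
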
41 /* Protocols in LANA family */
42 struct lana_protocol {
43 int protocol;
44 const struct proto_ops *ops;
45 struct proto *proto;
46 struct module *owner;
struct fb_pflana_priv {
        idp_t port[2];
        seqlock_t lock;
        struct lana_sock *sock_self;
};

struct lana_sock {
        struct sock sk;
        struct fblock *fb;
        int ifindex;
        int bound;
};

static DEFINE_MUTEX(proto_tab_lock);
static struct lana_protocol *proto_tab[LANA_NPROTO] __read_mostly;

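/*
 * Receive path: called by the packet processing engine when an skb
 * reaches this functional block. The skb is queued to the bound
 * socket's receive queue; since the socket terminates the chain, the
 * next IDP is marked unknown and the engine is halted.
 */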
static int fb_pflana_netrx(const struct fblock * const fb,
                           struct sk_buff *skb,
                           enum path_type * const dir)
{
        u8 *skb_head = skb->data;
        int skb_len = skb->len;
        struct sock *sk;
        struct fb_pflana_priv __percpu *fb_priv_cpu;

        fb_priv_cpu = this_cpu_ptr(rcu_dereference_raw(fb->private_data));
        sk = &fb_priv_cpu->sock_self->sk;

        if (skb_shared(skb)) {
                struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
                if (skb_head != skb->data) {
                        skb->data = skb_head;
                        skb->len = skb_len;
                }
                if (nskb == NULL)
                        goto out;
                kfree_skb(skb);
                skb = nskb;
        }
        sock_queue_rcv_skb(sk, skb);
out:
        /* We are last in chain. */
        write_next_idp_to_skb(skb, fb->idp, IDP_UNKNOWN);
        return PPE_HALT;
}

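/*
 * Notifier callback: handles FBLOCK_BIND_IDP/FBLOCK_UNBIND_IDP messages
 * by updating the per-CPU ingress/egress port table under the write
 * side of the seqlock.
 */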
static int fb_pflana_event(struct notifier_block *self, unsigned long cmd,
                           void *args)
{
        int ret = NOTIFY_OK;
        unsigned int cpu;
        struct fblock *fb;
        struct fb_pflana_priv __percpu *fb_priv;

        rcu_read_lock();
        fb = rcu_dereference_raw(container_of(self, struct fblock_notifier,
                                              nb)->self);
        fb_priv = (struct fb_pflana_priv __percpu *)
                  rcu_dereference_raw(fb->private_data);
        rcu_read_unlock();

        switch (cmd) {
        case FBLOCK_BIND_IDP: {
                int bound = 0;
                struct fblock_bind_msg *msg = args;
                get_online_cpus();
                for_each_online_cpu(cpu) {
                        struct fb_pflana_priv *fb_priv_cpu;
                        fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
                        if (fb_priv_cpu->port[msg->dir] == IDP_UNKNOWN) {
                                write_seqlock(&fb_priv_cpu->lock);
                                fb_priv_cpu->port[msg->dir] = msg->idp;
                                write_sequnlock(&fb_priv_cpu->lock);
                                bound = 1;
                        } else {
                                ret = NOTIFY_BAD;
                                break;
                        }
                }
                put_online_cpus();
                if (bound)
                        printk(KERN_INFO "[%s::bsdsock] port %s bound to IDP%u\n",
                               fb->name, path_names[msg->dir], msg->idp);
        } break;
        case FBLOCK_UNBIND_IDP: {
                int unbound = 0;
                struct fblock_bind_msg *msg = args;
                get_online_cpus();
                for_each_online_cpu(cpu) {
                        struct fb_pflana_priv *fb_priv_cpu;
                        fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
                        if (fb_priv_cpu->port[msg->dir] == msg->idp) {
                                write_seqlock(&fb_priv_cpu->lock);
                                fb_priv_cpu->port[msg->dir] = IDP_UNKNOWN;
                                write_sequnlock(&fb_priv_cpu->lock);
                                unbound = 1;
                        } else {
                                ret = NOTIFY_BAD;
                                break;
                        }
                }
                put_online_cpus();
                if (unbound)
                        printk(KERN_INFO "[%s::bsdsock] port %s unbound\n",
                               fb->name, path_names[msg->dir]);
        } break;
        default:
                break;
        }

        return ret;
}

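/*
 * Reads the IDP bound to the given direction via the seqlock retry loop
 * and resolves it to its functional block. The reference returned by
 * search_fblock() is owned by the caller and dropped with put_fblock(),
 * as done in lana_sk_free() below.
 */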
static struct fblock *get_bound_fblock(struct fblock *self,
                                       enum path_type dir)
{
        idp_t fbidp;
        unsigned int seq;
        struct fb_pflana_priv __percpu *fb_priv_cpu;
        fb_priv_cpu = this_cpu_ptr(rcu_dereference_raw(self->private_data));
        do {
                seq = read_seqbegin(&fb_priv_cpu->lock);
                fbidp = fb_priv_cpu->port[dir];
        } while (read_seqretry(&fb_priv_cpu->lock, seq));
        return search_fblock(fbidp);
}

static inline struct lana_sock *to_lana_sk(const struct sock *sk)
{
        return container_of(sk, struct lana_sock, sk);
}

static struct fblock *fb_pflana_build_fblock(char *name);

static int lana_sk_init(struct sock *sk)
{
        int cpu;
        char name[32];
        struct lana_sock *lana = to_lana_sk(sk);

        memset(name, 0, sizeof(name));
        snprintf(name, sizeof(name), "%p", &lana->sk);
        lana->fb = fb_pflana_build_fblock(name);
        if (!lana->fb)
                return -ENOMEM;
        get_online_cpus();
        for_each_online_cpu(cpu) {
                struct fb_pflana_priv *fb_priv_cpu;
                fb_priv_cpu = per_cpu_ptr(lana->fb->private_data, cpu);
                fb_priv_cpu->sock_self = lana;
        }
        put_online_cpus();
        smp_wmb();
        return 0;
}

static void fb_pflana_destroy_fblock(struct fblock *fb);

static void lana_sk_free(struct sock *sk)
{
        struct fblock *fb_bound;
        struct lana_sock *lana;

        lana = to_lana_sk(sk);
        fb_bound = get_bound_fblock(lana->fb, TYPE_INGRESS);
        if (fb_bound) {
                fblock_unbind(fb_bound, lana->fb);
                put_fblock(fb_bound);
        }
        fb_bound = get_bound_fblock(lana->fb, TYPE_EGRESS);
        if (fb_bound) {
                fblock_unbind(lana->fb, fb_bound);
                put_fblock(fb_bound);
        }
        fb_pflana_destroy_fblock(lana->fb);
}

static int lana_raw_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        if (sk) {
                sock->sk = NULL;
                sk->sk_prot->close(sk, 0);
                lana_sk_free(sk);
        }
        return 0;
}

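/*
 * Binding convention: user space passes the target interface index in
 * the first byte of sa_data (see the usage sketch at the top of this
 * file), so indices are limited to what fits into a single byte.
 */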
static int lana_raw_bind(struct socket *sock, struct sockaddr *addr, int len)
{
        int idx;
        struct sock *sk = sock->sk;
        struct net_device *dev = NULL;
        struct lana_sock *lana = to_lana_sk(sk);

        if (len < sizeof(struct sockaddr))
                return -EINVAL;
        if (addr->sa_family != AF_LANA)
                return -EINVAL;

        idx = addr->sa_data[0];
        dev = dev_get_by_index(sock_net(sk), idx);
        if (dev == NULL)
                return -ENODEV;
        lana->ifindex = idx;
        lana->bound = 1;
        dev_put(dev);

        return 0;
}

static unsigned int lana_raw_poll(struct file *file, struct socket *sock,
                                  poll_table *wait)
{
        unsigned int mask = 0;
        struct sock *sk = sock->sk;
        poll_wait(file, sk_sleep(sk), wait);
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;
        return mask;
}

static int lana_raw_sendmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        return sk->sk_prot->sendmsg(iocb, sk, msg, len);
}

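/*
 * Egress path: builds an skb from the user iovec and, instead of
 * handing it to dev_queue_xmit() directly, tags it with the IDP bound
 * to the egress port and injects it into the packet processing engine.
 */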
/* Todo later: send bound dev from fb_eth, not from userspace */
static int lana_proto_sendmsg(struct kiocb *iocb, struct sock *sk,
                              struct msghdr *msg, size_t len)
{
        int err;
        unsigned int seq;
        struct net *net = sock_net(sk);
        struct net_device *dev;
        struct sockaddr *target;
        struct sk_buff *skb;
        struct lana_sock *lana = to_lana_sk(sk);
        struct fblock *fb = lana->fb;
        struct fb_pflana_priv *fb_priv_cpu;

        if (msg->msg_name == NULL)
                return -EDESTADDRREQ;
        if (msg->msg_namelen < sizeof(struct sockaddr))
                return -EINVAL;

        target = (struct sockaddr *) msg->msg_name;
        if (unlikely(target->sa_family != AF_LANA))
                return -EAFNOSUPPORT;

        lock_sock(sk);
        if (sk->sk_bound_dev_if || lana->bound) {
                dev = dev_get_by_index(net, lana->bound ? lana->ifindex :
                                       sk->sk_bound_dev_if);
        } else {
                dev = dev_getfirstbyhwtype(net, ETH_P_ALL); /* FIXME */
        }
        release_sock(sk);

        if (!dev || !(dev->flags & IFF_UP) || unlikely(len > dev->mtu)) {
                err = -EIO;
                goto drop_put;
        }

        skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + len,
                                  msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
                goto drop_put;

        skb_reserve(skb, LL_RESERVED_SPACE(dev));

        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);

        err = memcpy_fromiovec((void *) skb_put(skb, len), msg->msg_iov, len);
        if (err < 0)
                goto drop;

        skb->dev = dev;
        skb->sk = sk;
        skb->protocol = htons(ETH_P_ALL); /* FIXME */
        skb_orphan(skb);
        dev_put(dev);

        /* Instead of dev_queue_xmit(), hand the skb to the engine: */
        rcu_read_lock();
        fb_priv_cpu = this_cpu_ptr(rcu_dereference(fb->private_data));
        do {
                seq = read_seqbegin(&fb_priv_cpu->lock);
                write_next_idp_to_skb(skb, fb->idp,
                                      fb_priv_cpu->port[TYPE_EGRESS]);
        } while (read_seqretry(&fb_priv_cpu->lock, seq));
        rcu_read_unlock();

        process_packet(skb, TYPE_EGRESS);

        return (err >= 0) ? len : err;
drop:
        kfree_skb(skb);
drop_put:
        if (dev)
                dev_put(dev);
        return err;
}

static int lana_proto_recvmsg(struct kiocb *iocb, struct sock *sk,
                              struct msghdr *msg, size_t len, int noblock,
                              int flags, int *addr_len)
{
        int err = 0;
        struct sk_buff *skb;
        size_t copied = 0;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return 0;
                return err;
        }
        msg->msg_namelen = 0;
        if (addr_len)
                *addr_len = msg->msg_namelen;
        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
        if (err == 0)
                sock_recv_ts_and_drops(msg, sk, skb);
        skb_free_datagram(sk, skb);

        return err ? : copied;
}

static int lana_proto_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
        int err = -EPROTONOSUPPORT;

        kfree_skb(skb);
#if 0
        switch (sk->sk_protocol) {
        case LANA_PROTO_RAW:
                err = sock_queue_rcv_skb(sk, skb);
                if (err != 0)
                        kfree_skb(skb);
                break;
        default:
                kfree_skb(skb);
                err = -EPROTONOSUPPORT;
                break;
        }
#endif
        return err ? NET_RX_DROP : NET_RX_SUCCESS;
}

#if 0 /* unused */
static int lana_common_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                                      struct msghdr *msg, size_t len, int flags)
{
        int err = 0;
        long timeout;
        size_t target, chunk, copied = 0;
        struct sock *sk = sock->sk;
        struct sk_buff *skb;

        msg->msg_namelen = 0;
        lock_sock(sk);
        timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
        do {
                skb = skb_dequeue(&sk->sk_receive_queue);
                if (!skb) {
                        if (copied >= target)
                                break;
                        err = sock_error(sk);
                        if (err || sk->sk_shutdown & RCV_SHUTDOWN)
                                break;
                        err = -EAGAIN;
                        if (!timeout)
                                break;
                        timeout = sk_wait_data(sk, &timeout);
                        if (signal_pending(current)) {
                                err = sock_intr_errno(timeout);
                                break;
                        }
                        continue;
                }
                chunk = min_t(size_t, skb->len, len);
                if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        if (!copied)
                                copied = -EFAULT;
                        break;
                }
                copied += chunk;
                len -= chunk;
                sock_recv_ts_and_drops(msg, sk, skb);
                if (!(flags & MSG_PEEK)) {
                        skb_pull(skb, chunk);
                        if (skb->len) {
                                skb_queue_head(&sk->sk_receive_queue, skb);
                                break;
                        }
                        kfree_skb(skb);
                } else {
                        /* put message back and return */
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        break;
                }
        } while (len > 0);

        release_sock(sk);
        return copied ? : err;
}
#endif

static void lana_proto_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
}

static int lana_proto_init(struct sock *sk)
{
        sk->sk_destruct = lana_proto_destruct;
        return 0;
}

static void lana_proto_close(struct sock *sk, long timeout)
{
        sk_common_release(sk);
}

static void lana_proto_hash(struct sock *sk)
{
}

static void lana_proto_unhash(struct sock *sk)
{
}

static int lana_proto_get_port(struct sock *sk, unsigned short sport)
{
        return 0;
}

static struct lana_protocol *pflana_proto_get(int proto)
{
        struct lana_protocol *ret = NULL;

        if (proto < 0 || proto >= LANA_NPROTO)
                return NULL;
        rcu_read_lock();
        ret = rcu_dereference_raw(proto_tab[proto]);
        rcu_read_unlock();

        return ret;
}

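/*
 * Socket creation: PF_LANA sockets are restricted to the initial
 * network namespace; SOCK_RAW with LANA_PROTO_AUTO resolves to the raw
 * protocol and requires CAP_SYS_ADMIN. Each socket gets its own
 * functional block instance via lana_sk_init().
 */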
static int lana_family_create(struct net *net, struct socket *sock,
                              int protocol, int kern)
{
        struct sock *sk;
        struct lana_protocol *lp;
        struct lana_sock *ls;

        if (!net_eq(net, &init_net))
                return -EAFNOSUPPORT;

        if (protocol == LANA_PROTO_AUTO) {
                switch (sock->type) {
                case SOCK_RAW:
                        if (!capable(CAP_SYS_ADMIN))
                                return -EPERM;
                        protocol = LANA_PROTO_RAW;
                        break;
                default:
                        return -EPROTONOSUPPORT;
                }
        }

        lp = pflana_proto_get(protocol);
        if (!lp)
                return -EPROTONOSUPPORT;

        sk = sk_alloc(net, PF_LANA, GFP_KERNEL, lp->proto);
        if (!sk)
                return -ENOMEM;
        if (lana_sk_init(sk) < 0) {
                sock_put(sk);
                return -ENOMEM;
        }

        sock_init_data(sock, sk);
        sock->state = SS_UNCONNECTED;
        sock->ops = lp->ops;

        sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
        sk->sk_protocol = protocol;
        sk->sk_family = PF_LANA;
        sk->sk_type = sock->type;
        sk->sk_prot->init(sk);

        ls = to_lana_sk(sk);
        ls->bound = 0;

        return 0;
}

static const struct net_proto_family lana_family_ops = {
        .family = PF_LANA,
        .create = lana_family_create,
        .owner  = THIS_MODULE,
};

static const struct proto_ops lana_raw_ops = {
        .family      = PF_LANA,
        .owner       = THIS_MODULE,
        .release     = lana_raw_release,
        .recvmsg     = sock_common_recvmsg,
        .sendmsg     = lana_raw_sendmsg,
        .poll        = lana_raw_poll,
        .bind        = lana_raw_bind,
        .setsockopt  = sock_no_setsockopt,
        .getsockopt  = sock_no_getsockopt,
        .connect     = sock_no_connect,
        .socketpair  = sock_no_socketpair,
        .accept      = sock_no_accept,
        .getname     = sock_no_getname,
        .ioctl       = sock_no_ioctl,
        .listen      = sock_no_listen,
        .shutdown    = sock_no_shutdown,
        .mmap        = sock_no_mmap,
        .sendpage    = sock_no_sendpage,
};

static struct proto lana_proto __read_mostly = {
        .name        = "LANA",
        .owner       = THIS_MODULE,
        .obj_size    = sizeof(struct lana_sock),
        .backlog_rcv = lana_proto_backlog_rcv,
        .close       = lana_proto_close,
        .init        = lana_proto_init,
        .recvmsg     = lana_proto_recvmsg,
        .sendmsg     = lana_proto_sendmsg,
        .hash        = lana_proto_hash,
        .unhash      = lana_proto_unhash,
        .get_port    = lana_proto_get_port,
};

static struct lana_protocol lana_proto_raw __read_mostly = {
        .protocol = LANA_PROTO_RAW,
        .ops      = &lana_raw_ops,
        .proto    = &lana_proto,
        .owner    = THIS_MODULE,
};

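/*
 * Protocol registry: other modules can plug additional protocols into
 * the PF_LANA family. Registration publishes the entry via RCU. As a
 * hypothetical sketch (assuming LANA_NPROTO were raised to make room),
 * a new protocol would be wired up like the raw one above:
 *
 *      static struct lana_protocol lana_proto_foo = {
 *              .protocol = LANA_PROTO_FOO,
 *              .ops      = &lana_foo_ops,
 *              .proto    = &lana_foo_proto,
 *              .owner    = THIS_MODULE,
 *      };
 *      err = pflana_proto_register(LANA_PROTO_FOO, &lana_proto_foo);
 */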
int pflana_proto_register(int proto, struct lana_protocol *lp)
{
        int err;

        if (!lp || proto < 0 || proto >= LANA_NPROTO)
                return -EINVAL;
        if (rcu_dereference_raw(proto_tab[proto]))
                return -EBUSY;

        err = proto_register(lp->proto, 1);
        if (err)
                return err;

        mutex_lock(&proto_tab_lock);
        lp->protocol = proto;
        rcu_assign_pointer(proto_tab[proto], lp);
        mutex_unlock(&proto_tab_lock);
        synchronize_rcu();

        if (lp->owner != THIS_MODULE)
                __module_get(lp->owner);
        return 0;
}
EXPORT_SYMBOL(pflana_proto_register);

void pflana_proto_unregister(struct lana_protocol *lp)
{
        if (!lp)
                return;
        if (lp->protocol < 0 || lp->protocol >= LANA_NPROTO)
                return;
        if (!rcu_dereference_raw(proto_tab[lp->protocol]))
                return;

        BUG_ON(proto_tab[lp->protocol] != lp);

        mutex_lock(&proto_tab_lock);
        rcu_assign_pointer(proto_tab[lp->protocol], NULL);
        mutex_unlock(&proto_tab_lock);
        synchronize_rcu();

        proto_unregister(lp->proto);
        if (lp->owner != THIS_MODULE)
                module_put(lp->owner);
}
EXPORT_SYMBOL(pflana_proto_unregister);

static int init_fb_pflana(void)
{
        int ret, i;
        for (i = 0; i < LANA_NPROTO; ++i)
                rcu_assign_pointer(proto_tab[i], NULL);

        ret = pflana_proto_register(LANA_PROTO_RAW, &lana_proto_raw);
        if (ret)
                return ret;

        ret = sock_register(&lana_family_ops);
        if (ret) {
                pflana_proto_unregister(&lana_proto_raw);
                return ret;
        }
        return 0;
}

static void cleanup_fb_pflana(void)
{
        int i;
        sock_unregister(PF_LANA);
        for (i = 0; i < LANA_NPROTO; ++i)
                pflana_proto_unregister(rcu_dereference_raw(proto_tab[i]));
}

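/*
 * Constructs one functional block per socket: allocates per-CPU private
 * data, initializes both ports to IDP_UNKNOWN and registers the block
 * in the LANA namespace. The factory pointer is left NULL since these
 * blocks are created per socket rather than through a block builder.
 */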
static struct fblock *fb_pflana_build_fblock(char *name)
{
        int ret = 0;
        unsigned int cpu;
        struct fblock *fb;
        struct fb_pflana_priv __percpu *fb_priv;

        fb = alloc_fblock(GFP_ATOMIC);
        if (!fb)
                return NULL;
        fb_priv = alloc_percpu(struct fb_pflana_priv);
        if (!fb_priv)
                goto err;
        get_online_cpus();
        for_each_online_cpu(cpu) {
                struct fb_pflana_priv *fb_priv_cpu;
                fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
                seqlock_init(&fb_priv_cpu->lock);
                fb_priv_cpu->port[0] = IDP_UNKNOWN;
                fb_priv_cpu->port[1] = IDP_UNKNOWN;
        }
        put_online_cpus();

        ret = init_fblock(fb, name, fb_priv);
        if (ret)
                goto err2;
        fb->netfb_rx = fb_pflana_netrx;
        fb->event_rx = fb_pflana_event;
        fb->factory = NULL;
        ret = register_fblock_namespace(fb);
        if (ret)
                goto err3;
        __module_get(THIS_MODULE);
        return fb;
err3:
        cleanup_fblock_ctor(fb);
err2:
        free_percpu(fb_priv);
err:
        kfree_fblock(fb);
        return NULL;
}

static void fb_pflana_destroy_fblock(struct fblock *fb)
{
        unregister_fblock_namespace_no_rcu(fb);
        cleanup_fblock(fb);
        free_percpu(rcu_dereference_raw(fb->private_data));
        kfree_fblock(fb);
        module_put(THIS_MODULE);
}

static int __init init_fb_pflana_module(void)
{
        return init_fb_pflana();
}

static void __exit cleanup_fb_pflana_module(void)
{
        synchronize_rcu();
        cleanup_fb_pflana();
}

module_init(init_fb_pflana_module);
module_exit(cleanup_fb_pflana_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkma@tik.ee.ethz.ch>");
MODULE_DESCRIPTION("LANA PF_LANA module");