[PATCH 4/8] Use container_of() to get lana sk from sock
[ana-net.git] / src / fb_pflana.c
blob 1c17c50484949a81ebb97f49832221afbf986346
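
This patch switches the PF_LANA socket code to recovering its lana_sock wrapper from a plain struct sock pointer via container_of(). Since struct sock is embedded as the first member of struct lana_sock (matching .obj_size = sizeof(struct lana_sock) in lana_proto below), the enclosing structure sits at a fixed offset from the member pointer. A minimal sketch of the pattern, mirroring to_lana_sk() further down in this file:

	static inline struct lana_sock *to_lana_sk(const struct sock *sk)
	{
		/* address of the structure embedding 'sk', roughly:
		 * (struct lana_sock *)((char *)sk - offsetof(struct lana_sock, sk))
		 */
		return container_of(sk, struct lana_sock, sk);
	}
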
/*
 * Lightweight Autonomic Network Architecture
 *
 * LANA BSD Socket interface for communication with user level.
 * PF_LANA protocol family socket handler.
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss federal institute of technology (ETH Zurich)
 * Subject to the GPL.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/seqlock.h>
#include <linux/bug.h>
#include <linux/percpu.h>
#include <linux/prefetch.h>
#include <linux/atomic.h>
#include <linux/slab.h>
#include <net/sock.h>

#include "xt_fblock.h"
#include "xt_builder.h"
#include "xt_idp.h"
#include "xt_skb.h"
#include "xt_engine.h"
#include "fb_pflana.h"

struct fb_pflana_priv {
	idp_t port[2];
	seqlock_t lock;
	struct lana_sock *sock_self;
};

struct lana_sock {
	struct sock sk;
	struct fblock *fb;
	int bound;
};

static DEFINE_MUTEX(proto_tab_lock);
static struct lana_protocol *proto_tab[LANA_NPROTO] __read_mostly;

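/*
 * Receive hook of the pflana functional block: shared skbs are cloned
 * first, then the packet is handed to the socket stored in the per-cpu
 * private data via sk_receive_skb(). The block is registered as
 * MODE_SINK, so the packet processing engine is halted (PPE_HALT).
 */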
static int fb_pflana_netrx(const struct fblock * const fb,
			   struct sk_buff *skb,
			   enum path_type * const dir)
{
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	struct sock *sk;
	struct fb_pflana_priv __percpu *fb_priv_cpu;

	fb_priv_cpu = this_cpu_ptr(rcu_dereference_raw(fb->private_data));
	sk = &fb_priv_cpu->sock_self->sk;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		if (nskb == NULL)
			goto out;
		kfree_skb(skb);
		skb = nskb;
	}
	sk_receive_skb(sk, skb, 0);
out:
	write_next_idp_to_skb(skb, fb->idp, IDP_UNKNOWN);
	return PPE_HALT;
}

static int fb_pflana_event(struct notifier_block *self, unsigned long cmd,
			   void *args)
{
	return 0;
}

static struct fblock *get_bound_fblock(struct fblock *self, enum path_type dir)
{
	idp_t fbidp;
	unsigned int seq;
	struct fb_pflana_priv __percpu *fb_priv_cpu;
	fb_priv_cpu = this_cpu_ptr(rcu_dereference_raw(self->private_data));
	do {
		seq = read_seqbegin(&fb_priv_cpu->lock);
		fbidp = fb_priv_cpu->port[dir];
	} while (read_seqretry(&fb_priv_cpu->lock, seq));
	return search_fblock(fbidp);
}

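/*
 * struct sock is embedded as the first member of struct lana_sock (see
 * also lana_proto.obj_size), so container_of() recovers the enclosing
 * lana_sock directly from the sock pointer the core socket code hands
 * around, without any additional lookup.
 */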
static inline struct lana_sock *to_lana_sk(const struct sock *sk)
{
	return container_of(sk, struct lana_sock, sk);
}

static struct fblock *fb_pflana_ctor(char *name);

static int lana_sk_init(struct sock *sk)
{
	int cpu;
	char name[256];
	struct lana_sock *lana = to_lana_sk(sk);

	memset(name, 0, sizeof(name));
	snprintf(name, sizeof(name), "%p", &lana->sk);
	lana->bound = 0;
	lana->fb = fb_pflana_ctor(name);
	if (!lana->fb)
		return -ENOMEM;
	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct fb_pflana_priv *fb_priv_cpu;
		fb_priv_cpu = per_cpu_ptr(lana->fb->private_data, cpu);
		fb_priv_cpu->sock_self = lana;
	}
	put_online_cpus();
	smp_wmb();
	return 0;
}

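/*
 * Per-socket teardown: detach whatever is still bound on the ingress and
 * egress side of the socket's functional block, then remove the block
 * from the fblock namespace.
 */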
static void lana_sk_free(struct sock *sk)
{
	struct fblock *fb_bound;
	struct lana_sock *lana;

	lana = to_lana_sk(sk);
	fb_bound = get_bound_fblock(lana->fb, TYPE_INGRESS);
	if (fb_bound) {
		fblock_unbind(fb_bound, lana->fb);
		put_fblock(fb_bound);
	}
	fb_bound = get_bound_fblock(lana->fb, TYPE_EGRESS);
	if (fb_bound) {
		fblock_unbind(lana->fb, fb_bound);
		put_fblock(fb_bound);
	}
	unregister_fblock_namespace(lana->fb);
}

int lana_raw_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	if (sk) {
		sock->sk = NULL;
		sk->sk_prot->close(sk, 0);
		lana_sk_free(sk);
	}
	return 0;
}

static int lana_proto_recvmsg(struct kiocb *iocb, struct sock *sk,
			      struct msghdr *msg, size_t len, int noblock,
			      int flags, int *addr_len)
{
	int err = 0;
	struct sk_buff *skb;
	size_t copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}
	msg->msg_namelen = 0;
	if (addr_len)
		*addr_len = msg->msg_namelen;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err == 0)
		sock_recv_ts_and_drops(msg, sk, skb);
	skb_free_datagram(sk, skb);

	return err ? : copied;
}

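/*
 * Backlog handler invoked via sk_receive_skb(): queue the skb on the
 * socket's receive queue for LANA_PROTO_RAW, drop everything else.
 */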
static int lana_proto_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int err = -EPROTONOSUPPORT;

	switch (sk->sk_protocol) {
	case LANA_PROTO_RAW:
		err = sock_queue_rcv_skb(sk, skb);
		if (err != 0)
			kfree_skb(skb);
		break;
	default:
		kfree_skb(skb);
		err = -EPROTONOSUPPORT;
		break;
	}

	return err ? NET_RX_DROP : NET_RX_SUCCESS;
}

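/*
 * Common blocking receive path for stream-like PF_LANA protocols: copies
 * queued skbs into the user iovec while honouring MSG_PEEK, MSG_DONTWAIT
 * and the SO_RCVLOWAT watermark.
 */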
int lana_common_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len, int flags)
{
	int err = 0;
	long timeout;
	size_t target, chunk, copied = 0;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;

	msg->msg_namelen = 0;
	lock_sock(sk);
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	do {
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (!skb) {
			if (copied >= target)
				break;
			err = sock_error(sk);
			if (err)
				break;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			err = -EAGAIN;
			if (!timeout)
				break;
			timeout = sk_wait_data(sk, &timeout);
			if (signal_pending(current)) {
				err = sock_intr_errno(timeout);
				goto out;
			}
			continue;
		}
		chunk = min_t(size_t, skb->len, len);
		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (!copied)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		len -= chunk;
		sock_recv_ts_and_drops(msg, sk, skb);
		if (!(flags & MSG_PEEK)) {
			skb_pull(skb, chunk);
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
			kfree_skb(skb);
		} else {
			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (len > 0);
out:
	release_sock(sk);
	return copied ? : err;
}
EXPORT_SYMBOL(lana_common_stream_recvmsg);

static void lana_proto_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}

static int lana_proto_init(struct sock *sk)
{
	sk->sk_destruct = lana_proto_destruct;
	return 0;
}

static void lana_proto_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}

static void lana_proto_hash(struct sock *sk)
{
}

static void lana_proto_unhash(struct sock *sk)
{
}

static int lana_proto_get_port(struct sock *sk, unsigned short sport)
{
	return 0;
}

static struct lana_protocol *pflana_proto_get(int proto)
{
	struct lana_protocol *ret = NULL;

	if (proto < 0 || proto >= LANA_NPROTO)
		return NULL;
	rcu_read_lock();
	ret = rcu_dereference_raw(proto_tab[proto]);
	rcu_read_unlock();

	return ret;
}

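/*
 * socket(2) backend for PF_LANA: look up the protocol handler, allocate
 * the sock and let lana_sk_init() create the per-socket pflana
 * functional block that will feed it.
 */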
static int lana_family_create(struct net *net, struct socket *sock,
			      int protocol, int kern)
{
	struct sock *sk;
	struct lana_protocol *lp;
	struct lana_sock *ls;

	if (!net_eq(net, &init_net))
		return -EAFNOSUPPORT;

	if (protocol == LANA_PROTO_AUTO) {
		switch (sock->type) {
		case SOCK_RAW:
			if (!capable(CAP_SYS_ADMIN))
				return -EPERM;
			protocol = LANA_PROTO_RAW;
			break;
		default:
			return -EPROTONOSUPPORT;
		}
	}

	lp = pflana_proto_get(protocol);
	if (!lp)
		return -EPROTONOSUPPORT;

	sk = sk_alloc(net, PF_LANA, GFP_KERNEL, lp->proto);
	if (!sk)
		return -ENOMEM;
	if (lana_sk_init(sk) < 0) {
		sock_put(sk);
		return -ENOMEM;
	}

	sock_init_data(sock, sk);
	sock->state = SS_UNCONNECTED;
	sock->ops = lp->ops;

	sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
	sk->sk_protocol = protocol;
	sk->sk_family = PF_LANA;
	sk->sk_type = sock->type;
	sk->sk_prot->init(sk);

	ls = to_lana_sk(sk);
	ls->bound = 0;

	return 0;
}

static const struct net_proto_family lana_family_ops = {
	.family = PF_LANA,
	.create = lana_family_create,
	.owner = THIS_MODULE,
};

static const struct proto_ops lana_raw_ops = {
	.family = PF_LANA,
	.owner = THIS_MODULE,
	.release = lana_raw_release,
	.recvmsg = sock_common_recvmsg,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
	.bind = sock_no_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = sock_no_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.sendmsg = sock_no_sendmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static struct proto lana_proto __read_mostly = {
	.name = "LANA",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct lana_sock),
	.backlog_rcv = lana_proto_backlog_rcv,
	.close = lana_proto_close,
	.init = lana_proto_init,
	.recvmsg = lana_proto_recvmsg,
	.hash = lana_proto_hash,
	.unhash = lana_proto_unhash,
	.get_port = lana_proto_get_port,
};

static struct lana_protocol lana_proto_raw __read_mostly = {
	.protocol = LANA_PROTO_RAW,
	.ops = &lana_raw_ops,
	.proto = &lana_proto,
	.owner = THIS_MODULE,
};

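/*
 * Register a PF_LANA protocol handler: entries are published in
 * proto_tab under proto_tab_lock and looked up on the fast path via RCU.
 */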
int pflana_proto_register(int proto, struct lana_protocol *lp)
{
	int err;

	if (!lp || proto < 0 || proto >= LANA_NPROTO)
		return -EINVAL;
	if (rcu_dereference_raw(proto_tab[proto]))
		return -EBUSY;

	err = proto_register(lp->proto, 1);
	if (err)
		return err;

	mutex_lock(&proto_tab_lock);
	lp->protocol = proto;
	rcu_assign_pointer(proto_tab[proto], lp);
	mutex_unlock(&proto_tab_lock);

	if (lp->owner != THIS_MODULE)
		__module_get(lp->owner);
	return 0;
}
EXPORT_SYMBOL(pflana_proto_register);

void pflana_proto_unregister(struct lana_protocol *lp)
{
	if (!lp)
		return;
	if (lp->protocol < 0 || lp->protocol >= LANA_NPROTO)
		return;
	if (!rcu_dereference_raw(proto_tab[lp->protocol]))
		return;

	BUG_ON(proto_tab[lp->protocol] != lp);

	mutex_lock(&proto_tab_lock);
	rcu_assign_pointer(proto_tab[lp->protocol], NULL);
	mutex_unlock(&proto_tab_lock);
	synchronize_rcu();

	proto_unregister(lp->proto);
	if (lp->owner != THIS_MODULE)
		module_put(lp->owner);
}
EXPORT_SYMBOL(pflana_proto_unregister);

static int init_fb_pflana(void)
{
	int ret, i;
	for (i = 0; i < LANA_NPROTO; ++i)
		rcu_assign_pointer(proto_tab[i], NULL);

	/* Default proto types we definitely want to load */
	ret = pflana_proto_register(LANA_PROTO_RAW, &lana_proto_raw);
	if (ret)
		return ret;

	ret = sock_register(&lana_family_ops);
	if (ret) {
		pflana_proto_unregister(&lana_proto_raw);
		return ret;
	}
	return 0;
}

static void cleanup_fb_pflana(void)
{
	int i;
	sock_unregister(PF_LANA);
	for (i = 0; i < LANA_NPROTO; ++i)
		pflana_proto_unregister(rcu_dereference_raw(proto_tab[i]));
}

static struct fblock_factory fb_pflana_factory;

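/*
 * Constructor of the pflana functional block: allocates the block plus
 * its per-cpu private data, initializes both ports to IDP_UNKNOWN and
 * registers the block in the fblock namespace.
 */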
static struct fblock *fb_pflana_ctor(char *name)
{
	int ret = 0;
	unsigned int cpu;
	struct fblock *fb;
	struct fb_pflana_priv __percpu *fb_priv;

	fb = alloc_fblock(GFP_ATOMIC);
	if (!fb)
		return NULL;
	fb_priv = alloc_percpu(struct fb_pflana_priv);
	if (!fb_priv)
		goto err;
	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct fb_pflana_priv *fb_priv_cpu;
		fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
		seqlock_init(&fb_priv_cpu->lock);
		fb_priv_cpu->port[0] = IDP_UNKNOWN;
		fb_priv_cpu->port[1] = IDP_UNKNOWN;
	}
	put_online_cpus();

	ret = init_fblock(fb, name, fb_priv);
	if (ret)
		goto err2;
	fb->netfb_rx = fb_pflana_netrx;
	fb->event_rx = fb_pflana_event;
	fb->factory = &fb_pflana_factory;
	ret = register_fblock_namespace(fb);
	if (ret)
		goto err3;
	__module_get(THIS_MODULE);
	return fb;
err3:
	cleanup_fblock_ctor(fb);
err2:
	free_percpu(fb_priv);
err:
	kfree_fblock(fb);
	fb = NULL;
	return NULL;
}

static void fb_pflana_dtor(struct fblock *fb)
{
	free_percpu(rcu_dereference_raw(fb->private_data));
	module_put(THIS_MODULE);
}

static struct fblock_factory fb_pflana_factory = {
	.type = "pflana",
	.mode = MODE_SINK,
	.ctor = fb_pflana_ctor,
	.dtor = fb_pflana_dtor,
	.owner = THIS_MODULE,
};

static int __init init_fb_pflana_module(void)
{
	int ret;
	ret = init_fb_pflana();
	if (ret)
		return ret;
	ret = register_fblock_type(&fb_pflana_factory);
	if (ret)
		cleanup_fb_pflana();
	return ret;
}

static void __exit cleanup_fb_pflana_module(void)
{
	cleanup_fb_pflana();
	unregister_fblock_type(&fb_pflana_factory);
}

module_init(init_fb_pflana_module);
module_exit(cleanup_fb_pflana_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkma@tik.ee.ethz.ch>");
MODULE_DESCRIPTION("LANA PF_LANA module");
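
For completeness, a minimal user-space sketch of how such a socket would be opened and read. This is an assumption-laden illustration, not part of the patch: it presumes that PF_LANA and LANA_PROTO_AUTO are exported to user space by the LANA headers (neither is defined in this file), that the caller has CAP_SYS_ADMIN (required for SOCK_RAW with LANA_PROTO_AUTO in lana_family_create()), and that the per-socket pflana block has been bound into a LANA stack so that fb_pflana_netrx() actually delivers packets. Sending and bind are stubbed out (sock_no_sendmsg/sock_no_bind), so only receiving is shown:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	/* hypothetical user-space export of PF_LANA/LANA_PROTO_AUTO */
	#include "fb_pflana.h"

	int main(void)
	{
		char buf[2048];
		ssize_t n;
		int fd = socket(PF_LANA, SOCK_RAW, LANA_PROTO_AUTO);
		if (fd < 0) {
			perror("socket");
			return 1;
		}
		/* blocks in lana_proto_recvmsg() until a packet is queued */
		n = recv(fd, buf, sizeof(buf), 0);
		if (n > 0)
			printf("received %zd bytes\n", n);
		close(fd);
		return 0;
	}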