/* git: use sk_receive_skb instead of sk_backlog_rcv
 * repo: ana-net.git / src / fb_eth.c
 * blob: 584954ca32853ead9235803470678d58853f8183
 */
/*
 * Lightweight Autonomic Network Architecture
 *
 * Eth/PHY layer. Redirects all traffic into the LANA stack.
 * Singleton object.
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss federal institute of technology (ETH Zurich)
 * Subject to the GPL.
 */
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/init.h>
15 #include <linux/notifier.h>
16 #include <linux/if_ether.h>
17 #include <linux/if_arp.h>
18 #include <linux/if.h>
19 #include <linux/etherdevice.h>
20 #include <linux/rtnetlink.h>
21 #include <linux/seqlock.h>
23 #include "xt_idp.h"
24 #include "xt_engine.h"
25 #include "xt_skb.h"
26 #include "xt_fblock.h"
27 #include "xt_builder.h"
28 #include "xt_vlink.h"
30 #define IFF_IS_BRIDGED 0x60000
32 struct fb_eth_priv {
33 idp_t port[NUM_TYPES];
34 seqlock_t lock;
37 static int instantiated = 0;
38 static struct fblock *fb;
40 static inline int fb_eth_dev_is_bridged(struct net_device *dev)
42 return (dev->priv_flags & IFF_IS_BRIDGED) == IFF_IS_BRIDGED;
45 static inline void fb_eth_make_dev_bridged(struct net_device *dev)
47 dev->priv_flags |= IFF_IS_BRIDGED;
50 static inline void fb_eth_make_dev_unbridged(struct net_device *dev)
52 dev->priv_flags &= ~IFF_IS_BRIDGED;
/*
 * RX handler registered on each NIC: diverts incoming frames out of the
 * normal kernel path and injects them into the LANA packet processing
 * engine. Always consumes the skb except for loopback traffic.
 */
static rx_handler_result_t fb_eth_handle_frame(struct sk_buff **pskb)
{
	unsigned int seq;
	struct sk_buff *skb = *pskb;
	struct fb_eth_priv __percpu *fb_priv_cpu;

	/* Loopback frames stay on the regular stack. */
	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;
	/* Drop frames with an invalid source MAC. */
	if (unlikely(!is_valid_ether_addr(eth_hdr(skb)->h_source)))
		goto drop;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	fb_priv_cpu = this_cpu_ptr(rcu_dereference(fb->private_data));
	/* Seqlock read side: retry if a bind/unbind raced with us.
	 * NOTE(review): the next IDP is hard-coded to 1 and the bound
	 * ingress port is commented out — confirm whether this is a
	 * temporary debug override. */
	do {
		seq = read_seqbegin(&fb_priv_cpu->lock);
		write_next_idp_to_skb(skb, fb->idp, 1
				      /*fb_priv_cpu->port[TYPE_INGRESS]*/);
	} while (read_seqretry(&fb_priv_cpu->lock, seq));
	/* XXX (from original): freeing when the PPE did NOT drop looks
	 * suspicious — verify skb ownership semantics of process_packet(). */
	if (process_packet(skb, TYPE_INGRESS) != PPE_DROPPED)
		kfree_skb(skb); //XXX
	return RX_HANDLER_CONSUMED;
drop:
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}
85 static int fb_eth_netrx(const struct fblock * const fb,
86 struct sk_buff * const skb,
87 enum path_type * const dir)
89 kfree_skb(skb);
90 return PPE_DROPPED;
93 static int fb_eth_event(struct notifier_block *self, unsigned long cmd,
94 void *args)
96 int ret = NOTIFY_OK;
97 unsigned int cpu;
98 struct fb_eth_priv __percpu *fb_priv;
100 rcu_read_lock();
101 fb_priv = (struct fb_eth_priv __percpu *) rcu_dereference_raw(fb->private_data);
102 rcu_read_unlock();
104 switch (cmd) {
105 case FBLOCK_BIND_IDP: {
106 struct fblock_bind_msg *msg = args;
107 get_online_cpus();
108 for_each_online_cpu(cpu) {
109 struct fb_eth_priv *fb_priv_cpu;
110 fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
111 if (fb_priv_cpu->port[msg->dir] == IDP_UNKNOWN) {
112 write_seqlock(&fb_priv_cpu->lock);
113 fb_priv_cpu->port[msg->dir] = msg->idp;
114 write_sequnlock(&fb_priv_cpu->lock);
115 } else {
116 ret = NOTIFY_BAD;
117 break;
120 put_online_cpus();
121 } break;
122 case FBLOCK_UNBIND_IDP: {
123 struct fblock_bind_msg *msg = args;
124 get_online_cpus();
125 for_each_online_cpu(cpu) {
126 struct fb_eth_priv *fb_priv_cpu;
127 fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
128 if (fb_priv_cpu->port[msg->dir] == msg->idp) {
129 write_seqlock(&fb_priv_cpu->lock);
130 fb_priv_cpu->port[msg->dir] = IDP_UNKNOWN;
131 write_sequnlock(&fb_priv_cpu->lock);
132 } else {
133 ret = NOTIFY_BAD;
134 break;
136 put_online_cpus();
138 put_online_cpus();
139 } break;
140 default:
141 break;
144 return ret;
147 static void cleanup_fb_eth(void)
149 struct net_device *dev;
150 rtnl_lock();
151 for_each_netdev(&init_net, dev) {
152 if (fb_eth_dev_is_bridged(dev)) {
153 netdev_rx_handler_unregister(dev);
154 fb_eth_make_dev_unbridged(dev);
157 rtnl_unlock();
160 static int init_fb_eth(void)
162 int ret = 0, err = 0;
163 struct net_device *dev;
164 rtnl_lock();
165 for_each_netdev(&init_net, dev) {
166 ret = netdev_rx_handler_register(dev, fb_eth_handle_frame,
167 NULL);
168 if (ret) {
169 err = 1;
170 break;
172 fb_eth_make_dev_bridged(dev);
174 rtnl_unlock();
175 if (err) {
176 cleanup_fb_eth();
177 return ret;
179 return 0;
/*
 * Constructor for the singleton eth fblock: allocates the block and its
 * per-CPU private state, registers it in the fblock namespace and hooks
 * all NICs. Returns the new fblock, or NULL on failure or if an instance
 * already exists.
 */
static struct fblock *fb_eth_ctor(char *name)
{
	int i, ret = 0;
	unsigned int cpu;
	struct fb_eth_priv __percpu *fb_priv;

	/* Singleton guard. NOTE(review): plain read, no lock — relies on
	 * the caller serializing ctor invocations; confirm. */
	if (instantiated)
		return NULL;
	fb = alloc_fblock(GFP_ATOMIC);
	if (!fb)
		return NULL;

	fb_priv = alloc_percpu(struct fb_eth_priv);
	if (!fb_priv)
		goto err;

	/* Initialize each CPU's seqlock and mark all ports unbound. */
	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct fb_eth_priv *fb_priv_cpu;
		fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
		seqlock_init(&fb_priv_cpu->lock);
		for (i = 0; i < NUM_TYPES; ++i)
			fb_priv_cpu->port[i] = IDP_UNKNOWN;
	}
	put_online_cpus();

	ret = init_fblock(fb, name, fb_priv);
	if (ret)
		goto err2;
	fb->netfb_rx = fb_eth_netrx;
	fb->event_rx = fb_eth_event;
	ret = register_fblock_namespace(fb);
	if (ret)
		goto err3;
	ret = init_fb_eth();
	if (ret)
		goto err4;
	__module_get(THIS_MODULE);
	instantiated = 1;
	smp_wmb();	/* publish 'instantiated' after full setup */
	return fb;
err4:
	/* NOTE(review): namespace unregister presumably releases the block
	 * (e.g. via RCU); 'fb' is left non-NULL here — verify. */
	unregister_fblock_namespace(fb);
	return NULL;
err3:
	cleanup_fblock_ctor(fb);
err2:
	free_percpu(fb_priv);
err:
	kfree_fblock(fb);
	fb = NULL;
	return NULL;
}
236 static void fb_eth_dtor(struct fblock *fb)
238 free_percpu(rcu_dereference_raw(fb->private_data));
239 module_put(THIS_MODULE);
240 instantiated = 0;
241 cleanup_fb_eth();
244 static struct fblock_factory fb_eth_factory = {
245 .type = "eth",
246 .mode = MODE_SOURCE,
247 .ctor = fb_eth_ctor,
248 .dtor = fb_eth_dtor,
249 .owner = THIS_MODULE,
252 static struct vlink_subsys fb_eth_sys __read_mostly = {
253 .name = "eth",
254 .type = VLINKNLGRP_ETHERNET,
255 .rwsem = __RWSEM_INITIALIZER(fb_eth_sys.rwsem),
258 static int fb_eth_start_hook_dev(struct vlinknlmsg *vhdr, struct nlmsghdr *nlh)
260 return NETLINK_VLINK_RX_NXT;
263 static int fb_eth_stop_hook_dev(struct vlinknlmsg *vhdr, struct nlmsghdr *nlh)
265 return NETLINK_VLINK_RX_NXT;
268 static struct vlink_callback fb_eth_start_hook_dev_cb =
269 VLINK_CALLBACK_INIT(fb_eth_start_hook_dev, NETLINK_VLINK_PRIO_HIGH);
270 static struct vlink_callback fb_eth_stop_hook_dev_cb =
271 VLINK_CALLBACK_INIT(fb_eth_stop_hook_dev, NETLINK_VLINK_PRIO_HIGH);
273 static int __init init_fb_eth_module(void)
275 int ret = 0;
276 ret = vlink_subsys_register(&fb_eth_sys);
277 if (ret)
278 return ret;
280 vlink_add_callback(&fb_eth_sys, &fb_eth_start_hook_dev_cb);
281 vlink_add_callback(&fb_eth_sys, &fb_eth_stop_hook_dev_cb);
283 ret = register_fblock_type(&fb_eth_factory);
284 if (ret)
285 vlink_subsys_unregister_batch(&fb_eth_sys);
286 return ret;
289 static void __exit cleanup_fb_eth_module(void)
291 unregister_fblock_type(&fb_eth_factory);
292 vlink_subsys_unregister_batch(&fb_eth_sys);
293 vlink_subsys_unregister_batch(&fb_eth_sys);
296 module_init(init_fb_eth_module);
297 module_exit(cleanup_fb_eth_module);
299 MODULE_LICENSE("GPL");
300 MODULE_AUTHOR("Daniel Borkmann <dborkma@tik.ee.ethz.ch>");
301 MODULE_DESCRIPTION("Ethernet/PHY link layer bridge driver");