removed ifpps as std tool
[ana-net.git] / src / fb_eth.c
blobfb161a82c7f5d9a24e5751f43f5b3b9db31de092
/*
 * Lightweight Autonomic Network Architecture
 *
 * Eth/PHY layer. Redirects all traffic into the LANA stack.
 * Singleton object.
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss federal institute of technology (ETH Zurich)
 * Subject to the GPL.
 */
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/init.h>
15 #include <linux/notifier.h>
16 #include <linux/if_ether.h>
17 #include <linux/if_arp.h>
18 #include <linux/if.h>
19 #include <linux/etherdevice.h>
20 #include <linux/rtnetlink.h>
21 #include <linux/seqlock.h>
23 #include "xt_idp.h"
24 #include "xt_engine.h"
25 #include "xt_skb.h"
26 #include "xt_fblock.h"
27 #include "xt_builder.h"
28 #include "xt_vlink.h"
30 #define IFF_IS_BRIDGED 0x60000
32 struct fb_eth_priv {
33 idp_t port[2];
34 seqlock_t lock;
37 static int instantiated = 0;
39 static struct fblock *fb;
41 static inline int fb_eth_dev_is_bridged(struct net_device *dev)
43 return (dev->priv_flags & IFF_IS_BRIDGED) == IFF_IS_BRIDGED;
46 static inline void fb_eth_make_dev_bridged(struct net_device *dev)
48 dev->priv_flags |= IFF_IS_BRIDGED;
51 static inline void fb_eth_make_dev_unbridged(struct net_device *dev)
53 dev->priv_flags &= ~IFF_IS_BRIDGED;
56 static rx_handler_result_t fb_eth_handle_frame(struct sk_buff **pskb)
58 unsigned int seq;
59 struct sk_buff *skb = *pskb;
60 struct fb_eth_priv __percpu *fb_priv_cpu;
62 if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
63 return RX_HANDLER_PASS;
64 if (unlikely(!is_valid_ether_addr(eth_hdr(skb)->h_source)))
65 goto drop;
66 skb = skb_share_check(skb, GFP_ATOMIC);
67 if (unlikely(!skb))
68 return RX_HANDLER_CONSUMED;
70 fb_priv_cpu = this_cpu_ptr(rcu_dereference(fb->private_data));
71 do {
72 seq = read_seqbegin(&fb_priv_cpu->lock);
73 write_next_idp_to_skb(skb, fb->idp,
74 fb_priv_cpu->port[TYPE_INGRESS]);
75 } while (read_seqretry(&fb_priv_cpu->lock, seq));
77 process_packet(skb, TYPE_INGRESS);
79 return RX_HANDLER_CONSUMED;
80 drop:
81 kfree_skb(skb);
82 return RX_HANDLER_CONSUMED;
85 static int fb_eth_netrx(const struct fblock * const fb,
86 struct sk_buff * const skb,
87 enum path_type * const dir)
89 if (!skb->dev) {
90 kfree_skb(skb);
91 return PPE_DROPPED;
93 write_next_idp_to_skb(skb, fb->idp, IDP_UNKNOWN);
94 dev_queue_xmit(skb);
95 return PPE_DROPPED;
98 static int fb_eth_event(struct notifier_block *self, unsigned long cmd,
99 void *args)
101 int ret = NOTIFY_OK;
102 unsigned int cpu;
103 struct fb_eth_priv __percpu *fb_priv;
105 rcu_read_lock();
106 fb_priv = (struct fb_eth_priv __percpu *) rcu_dereference_raw(fb->private_data);
107 rcu_read_unlock();
109 switch (cmd) {
110 case FBLOCK_BIND_IDP: {
111 int bound = 0;
112 struct fblock_bind_msg *msg = args;
113 get_online_cpus();
114 for_each_online_cpu(cpu) {
115 struct fb_eth_priv *fb_priv_cpu;
116 fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
117 if (fb_priv_cpu->port[msg->dir] == IDP_UNKNOWN) {
118 write_seqlock(&fb_priv_cpu->lock);
119 fb_priv_cpu->port[msg->dir] = msg->idp;
120 write_sequnlock(&fb_priv_cpu->lock);
121 bound = 1;
122 } else {
123 ret = NOTIFY_BAD;
124 break;
127 put_online_cpus();
128 if (bound)
129 printk(KERN_INFO "[%s::%s] port %s bound to IDP%u\n",
130 fb->name, fb->factory->type,
131 path_names[msg->dir], msg->idp);
132 } break;
133 case FBLOCK_UNBIND_IDP: {
134 int unbound = 0;
135 struct fblock_bind_msg *msg = args;
136 get_online_cpus();
137 for_each_online_cpu(cpu) {
138 struct fb_eth_priv *fb_priv_cpu;
139 fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
140 if (fb_priv_cpu->port[msg->dir] == msg->idp) {
141 write_seqlock(&fb_priv_cpu->lock);
142 fb_priv_cpu->port[msg->dir] = IDP_UNKNOWN;
143 write_sequnlock(&fb_priv_cpu->lock);
144 unbound = 1;
145 } else {
146 ret = NOTIFY_BAD;
147 break;
150 put_online_cpus();
151 if (unbound)
152 printk(KERN_INFO "[%s::%s] port %s unbound\n",
153 fb->name, fb->factory->type,
154 path_names[msg->dir]);
155 } break;
156 default:
157 break;
160 return ret;
163 static void cleanup_fb_eth(void)
165 struct net_device *dev;
166 rtnl_lock();
167 for_each_netdev(&init_net, dev) {
168 if (fb_eth_dev_is_bridged(dev)) {
169 netdev_rx_handler_unregister(dev);
170 fb_eth_make_dev_unbridged(dev);
173 rtnl_unlock();
176 static int init_fb_eth(void)
178 int ret = 0, err = 0;
179 struct net_device *dev;
180 rtnl_lock();
181 for_each_netdev(&init_net, dev) {
182 ret = netdev_rx_handler_register(dev, fb_eth_handle_frame,
183 NULL);
184 if (ret) {
185 err = 1;
186 break;
188 fb_eth_make_dev_bridged(dev);
190 rtnl_unlock();
191 if (err) {
192 cleanup_fb_eth();
193 return ret;
195 return 0;
198 static struct fblock *fb_eth_ctor(char *name)
200 int ret = 0;
201 unsigned int cpu;
202 struct fb_eth_priv __percpu *fb_priv;
204 if (instantiated)
205 return NULL;
206 fb = alloc_fblock(GFP_ATOMIC);
207 if (!fb)
208 return NULL;
210 fb_priv = alloc_percpu(struct fb_eth_priv);
211 if (!fb_priv)
212 goto err;
214 get_online_cpus();
215 for_each_online_cpu(cpu) {
216 struct fb_eth_priv *fb_priv_cpu;
217 fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
218 seqlock_init(&fb_priv_cpu->lock);
219 fb_priv_cpu->port[0] = IDP_UNKNOWN;
220 fb_priv_cpu->port[1] = IDP_UNKNOWN;
222 put_online_cpus();
224 ret = init_fblock(fb, name, fb_priv);
225 if (ret)
226 goto err2;
227 fb->netfb_rx = fb_eth_netrx;
228 fb->event_rx = fb_eth_event;
229 ret = register_fblock_namespace(fb);
230 if (ret)
231 goto err3;
232 ret = init_fb_eth();
233 if (ret)
234 goto err4;
235 __module_get(THIS_MODULE);
236 instantiated = 1;
237 smp_wmb();
238 return fb;
239 err4:
240 unregister_fblock_namespace(fb);
241 return NULL;
242 err3:
243 cleanup_fblock_ctor(fb);
244 err2:
245 free_percpu(fb_priv);
246 err:
247 kfree_fblock(fb);
248 fb = NULL;
249 return NULL;
252 static void fb_eth_dtor(struct fblock *fb)
254 free_percpu(rcu_dereference_raw(fb->private_data));
255 module_put(THIS_MODULE);
256 instantiated = 0;
/*
 * Destructor part that must not run in RCU context: unhooks the RX
 * handler from all devices (takes RTNL internally).
 */
static void fb_eth_dtor_outside_rcu(struct fblock *fb)
{
	cleanup_fb_eth();
}
264 static struct fblock_factory fb_eth_factory = {
265 .type = "eth",
266 .mode = MODE_SOURCE,
267 .ctor = fb_eth_ctor,
268 .dtor = fb_eth_dtor,
269 .dtor_outside_rcu = fb_eth_dtor_outside_rcu,
270 .owner = THIS_MODULE,
273 static struct vlink_subsys fb_eth_sys __read_mostly = {
274 .name = "eth",
275 .type = VLINKNLGRP_ETHERNET,
276 .rwsem = __RWSEM_INITIALIZER(fb_eth_sys.rwsem),
279 static int fb_eth_start_hook_dev(struct vlinknlmsg *vhdr, struct nlmsghdr *nlh)
281 return NETLINK_VLINK_RX_NXT;
284 static int fb_eth_stop_hook_dev(struct vlinknlmsg *vhdr, struct nlmsghdr *nlh)
286 return NETLINK_VLINK_RX_NXT;
289 static struct vlink_callback fb_eth_start_hook_dev_cb =
290 VLINK_CALLBACK_INIT(fb_eth_start_hook_dev, NETLINK_VLINK_PRIO_HIGH);
291 static struct vlink_callback fb_eth_stop_hook_dev_cb =
292 VLINK_CALLBACK_INIT(fb_eth_stop_hook_dev, NETLINK_VLINK_PRIO_HIGH);
294 static int __init init_fb_eth_module(void)
296 int ret = 0;
297 ret = vlink_subsys_register(&fb_eth_sys);
298 if (ret)
299 return ret;
301 vlink_add_callback(&fb_eth_sys, &fb_eth_start_hook_dev_cb);
302 vlink_add_callback(&fb_eth_sys, &fb_eth_stop_hook_dev_cb);
304 ret = register_fblock_type(&fb_eth_factory);
305 if (ret)
306 vlink_subsys_unregister_batch(&fb_eth_sys);
307 return ret;
310 static void __exit cleanup_fb_eth_module(void)
312 unregister_fblock_type(&fb_eth_factory);
313 vlink_subsys_unregister_batch(&fb_eth_sys);
314 vlink_subsys_unregister_batch(&fb_eth_sys);
317 module_init(init_fb_eth_module);
318 module_exit(cleanup_fb_eth_module);
320 MODULE_LICENSE("GPL");
321 MODULE_AUTHOR("Daniel Borkmann <dborkma@tik.ee.ethz.ch>");
322 MODULE_DESCRIPTION("Ethernet/PHY link layer bridge driver");