/*
 * Lightweight Autonomic Network Architecture
 *
 * Eth/PHY layer. Redirects all traffic into the LANA stack.
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss federal institute of technology (ETH Zurich)
 */
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/init.h>
15 #include <linux/notifier.h>
16 #include <linux/if_ether.h>
17 #include <linux/if_arp.h>
19 #include <linux/etherdevice.h>
20 #include <linux/rtnetlink.h>
21 #include <linux/seqlock.h>
24 #include "xt_engine.h"
26 #include "xt_fblock.h"
27 #include "xt_builder.h"
/* Private net_device flag marking devices whose rx_handler we hijacked. */
#define IFF_IS_BRIDGED	0x60000

/* Non-zero once the single eth fblock instance has been constructed.
 * Kernel style: statics live in .bss, do not initialise them to 0. */
static int instantiated;

/* The one fblock instance this module drives; set up in fb_eth_ctor(). */
static struct fblock *fb;
41 static inline int fb_eth_dev_is_bridged(struct net_device
*dev
)
43 return (dev
->priv_flags
& IFF_IS_BRIDGED
) == IFF_IS_BRIDGED
;
46 static inline void fb_eth_make_dev_bridged(struct net_device
*dev
)
48 dev
->priv_flags
|= IFF_IS_BRIDGED
;
51 static inline void fb_eth_make_dev_unbridged(struct net_device
*dev
)
53 dev
->priv_flags
&= ~IFF_IS_BRIDGED
;
56 static rx_handler_result_t
fb_eth_handle_frame(struct sk_buff
**pskb
)
59 struct sk_buff
*skb
= *pskb
;
60 struct fb_eth_priv __percpu
*fb_priv_cpu
;
62 if (unlikely(skb
->pkt_type
== PACKET_LOOPBACK
))
63 return RX_HANDLER_PASS
;
64 if (unlikely(!is_valid_ether_addr(eth_hdr(skb
)->h_source
)))
66 skb
= skb_share_check(skb
, GFP_ATOMIC
);
68 return RX_HANDLER_CONSUMED
;
70 fb_priv_cpu
= this_cpu_ptr(rcu_dereference(fb
->private_data
));
72 seq
= read_seqbegin(&fb_priv_cpu
->lock
);
73 write_next_idp_to_skb(skb
, fb
->idp
,
74 fb_priv_cpu
->port
[TYPE_INGRESS
]);
75 } while (read_seqretry(&fb_priv_cpu
->lock
, seq
));
77 process_packet(skb
, TYPE_INGRESS
);
79 return RX_HANDLER_CONSUMED
;
82 return RX_HANDLER_CONSUMED
;
85 static int fb_eth_netrx(const struct fblock
* const fb
,
86 struct sk_buff
* const skb
,
87 enum path_type
* const dir
)
93 write_next_idp_to_skb(skb
, fb
->idp
, IDP_UNKNOWN
);
98 static int fb_eth_event(struct notifier_block
*self
, unsigned long cmd
,
103 struct fb_eth_priv __percpu
*fb_priv
;
106 fb_priv
= (struct fb_eth_priv __percpu
*) rcu_dereference_raw(fb
->private_data
);
110 case FBLOCK_BIND_IDP
: {
112 struct fblock_bind_msg
*msg
= args
;
114 for_each_online_cpu(cpu
) {
115 struct fb_eth_priv
*fb_priv_cpu
;
116 fb_priv_cpu
= per_cpu_ptr(fb_priv
, cpu
);
117 if (fb_priv_cpu
->port
[msg
->dir
] == IDP_UNKNOWN
) {
118 write_seqlock(&fb_priv_cpu
->lock
);
119 fb_priv_cpu
->port
[msg
->dir
] = msg
->idp
;
120 write_sequnlock(&fb_priv_cpu
->lock
);
129 printk(KERN_INFO
"[%s::%s] port %s bound to IDP%u\n",
130 fb
->name
, fb
->factory
->type
,
131 path_names
[msg
->dir
], msg
->idp
);
133 case FBLOCK_UNBIND_IDP
: {
135 struct fblock_bind_msg
*msg
= args
;
137 for_each_online_cpu(cpu
) {
138 struct fb_eth_priv
*fb_priv_cpu
;
139 fb_priv_cpu
= per_cpu_ptr(fb_priv
, cpu
);
140 if (fb_priv_cpu
->port
[msg
->dir
] == msg
->idp
) {
141 write_seqlock(&fb_priv_cpu
->lock
);
142 fb_priv_cpu
->port
[msg
->dir
] = IDP_UNKNOWN
;
143 write_sequnlock(&fb_priv_cpu
->lock
);
152 printk(KERN_INFO
"[%s::%s] port %s unbound\n",
153 fb
->name
, fb
->factory
->type
,
154 path_names
[msg
->dir
]);
163 static void cleanup_fb_eth(void)
165 struct net_device
*dev
;
167 for_each_netdev(&init_net
, dev
) {
168 if (fb_eth_dev_is_bridged(dev
)) {
169 netdev_rx_handler_unregister(dev
);
170 fb_eth_make_dev_unbridged(dev
);
176 static int init_fb_eth(void)
178 int ret
= 0, err
= 0;
179 struct net_device
*dev
;
181 for_each_netdev(&init_net
, dev
) {
182 ret
= netdev_rx_handler_register(dev
, fb_eth_handle_frame
,
188 fb_eth_make_dev_bridged(dev
);
198 static struct fblock
*fb_eth_ctor(char *name
)
202 struct fb_eth_priv __percpu
*fb_priv
;
206 fb
= alloc_fblock(GFP_ATOMIC
);
210 fb_priv
= alloc_percpu(struct fb_eth_priv
);
215 for_each_online_cpu(cpu
) {
216 struct fb_eth_priv
*fb_priv_cpu
;
217 fb_priv_cpu
= per_cpu_ptr(fb_priv
, cpu
);
218 seqlock_init(&fb_priv_cpu
->lock
);
219 fb_priv_cpu
->port
[0] = IDP_UNKNOWN
;
220 fb_priv_cpu
->port
[1] = IDP_UNKNOWN
;
224 ret
= init_fblock(fb
, name
, fb_priv
);
227 fb
->netfb_rx
= fb_eth_netrx
;
228 fb
->event_rx
= fb_eth_event
;
229 ret
= register_fblock_namespace(fb
);
235 __module_get(THIS_MODULE
);
240 unregister_fblock_namespace(fb
);
243 cleanup_fblock_ctor(fb
);
245 free_percpu(fb_priv
);
252 static void fb_eth_dtor(struct fblock
*fb
)
254 free_percpu(rcu_dereference_raw(fb
->private_data
));
255 module_put(THIS_MODULE
);
259 static void fb_eth_dtor_outside_rcu(struct fblock
*fb
)
264 static struct fblock_factory fb_eth_factory
= {
269 .dtor_outside_rcu
= fb_eth_dtor_outside_rcu
,
270 .owner
= THIS_MODULE
,
273 static struct vlink_subsys fb_eth_sys __read_mostly
= {
275 .type
= VLINKNLGRP_ETHERNET
,
276 .rwsem
= __RWSEM_INITIALIZER(fb_eth_sys
.rwsem
),
279 static int fb_eth_start_hook_dev(struct vlinknlmsg
*vhdr
, struct nlmsghdr
*nlh
)
281 return NETLINK_VLINK_RX_NXT
;
284 static int fb_eth_stop_hook_dev(struct vlinknlmsg
*vhdr
, struct nlmsghdr
*nlh
)
286 return NETLINK_VLINK_RX_NXT
;
/* High-priority vlink callbacks wrapping the start/stop hook handlers;
 * registered with the subsystem in init_fb_eth_module(). */
static struct vlink_callback fb_eth_start_hook_dev_cb =
	VLINK_CALLBACK_INIT(fb_eth_start_hook_dev, NETLINK_VLINK_PRIO_HIGH);
static struct vlink_callback fb_eth_stop_hook_dev_cb =
	VLINK_CALLBACK_INIT(fb_eth_stop_hook_dev, NETLINK_VLINK_PRIO_HIGH);
294 static int __init
init_fb_eth_module(void)
297 ret
= vlink_subsys_register(&fb_eth_sys
);
301 vlink_add_callback(&fb_eth_sys
, &fb_eth_start_hook_dev_cb
);
302 vlink_add_callback(&fb_eth_sys
, &fb_eth_stop_hook_dev_cb
);
304 ret
= register_fblock_type(&fb_eth_factory
);
306 vlink_subsys_unregister_batch(&fb_eth_sys
);
310 static void __exit
cleanup_fb_eth_module(void)
312 unregister_fblock_type(&fb_eth_factory
);
313 vlink_subsys_unregister_batch(&fb_eth_sys
);
314 vlink_subsys_unregister_batch(&fb_eth_sys
);
module_init(init_fb_eth_module);
module_exit(cleanup_fb_eth_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkma@tik.ee.ethz.ch>");
MODULE_DESCRIPTION("Ethernet/PHY link layer bridge driver");