/*
 * Lightweight Autonomic Network Architecture
 *
 * Eth/PHY layer. Redirects all traffic into the LANA stack.
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss federal institute of technology (ETH Zurich)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/percpu.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/seqlock.h>

/*
 * NOTE(review): the extraction lost original lines 18, 22-23 and 25; they
 * may have held further <linux/...> or "xt_*.h" includes — check VCS.
 */
#include "xt_engine.h"
#include "xt_fblock.h"
#include "xt_builder.h"
30 #define IFF_IS_BRIDGED 0x60000
33 idp_t port
[NUM_TYPES
];
37 static int instantiated
= 0;
38 static struct fblock
*fb
;
40 static inline int fb_eth_dev_is_bridged(struct net_device
*dev
)
42 return (dev
->priv_flags
& IFF_IS_BRIDGED
) == IFF_IS_BRIDGED
;
45 static inline void fb_eth_make_dev_bridged(struct net_device
*dev
)
47 dev
->priv_flags
|= IFF_IS_BRIDGED
;
50 static inline void fb_eth_make_dev_unbridged(struct net_device
*dev
)
52 dev
->priv_flags
&= ~IFF_IS_BRIDGED
;
55 static rx_handler_result_t
fb_eth_handle_frame(struct sk_buff
**pskb
)
58 struct sk_buff
*skb
= *pskb
;
59 struct fb_eth_priv __percpu
*fb_priv_cpu
;
61 if (unlikely(skb
->pkt_type
== PACKET_LOOPBACK
))
62 return RX_HANDLER_PASS
;
64 if (unlikely(!is_valid_ether_addr(eth_hdr(skb
)->h_source
)))
67 skb
= skb_share_check(skb
, GFP_ATOMIC
);
69 return RX_HANDLER_CONSUMED
;
71 fb_priv_cpu
= this_cpu_ptr(rcu_dereference(fb
->private_data
));
73 seq
= read_seqbegin(&fb_priv_cpu
->lock
);
74 write_next_idp_to_skb(skb
, fb
->idp
,
75 fb_priv_cpu
->port
[TYPE_INGRESS
]);
76 } while (read_seqretry(&fb_priv_cpu
->lock
, seq
));
77 if (process_packet(skb
, TYPE_INGRESS
) != PPE_DROPPED
)
79 return RX_HANDLER_CONSUMED
;
82 return RX_HANDLER_CONSUMED
;
/*
 * fb_eth_netrx - fblock receive callback invoked by the PPE when a packet
 * traverses this block on a path.
 *
 * NOTE(review): only the signature survived this extraction; the function
 * body (original lines ~88-92) is missing entirely. Recover it from
 * version control — do not re-derive it by guesswork, since it owns the
 * skb and decides the PPE return code.
 */
85 static int fb_eth_netrx(const struct fblock
* const fb
,
86 struct sk_buff
* const skb
,
87 enum path_type
* const dir
)
93 static int fb_eth_event(struct notifier_block
*self
, unsigned long cmd
,
98 struct fb_eth_priv __percpu
*fb_priv
;
101 fb_priv
= (struct fb_eth_priv __percpu
*) rcu_dereference_raw(fb
->private_data
);
105 case FBLOCK_BIND_IDP
: {
106 struct fblock_bind_msg
*msg
= args
;
108 for_each_online_cpu(cpu
) {
109 struct fb_eth_priv
*fb_priv_cpu
;
110 fb_priv_cpu
= per_cpu_ptr(fb_priv
, cpu
);
111 if (fb_priv_cpu
->port
[msg
->dir
] == IDP_UNKNOWN
) {
112 write_seqlock(&fb_priv_cpu
->lock
);
113 fb_priv_cpu
->port
[msg
->dir
] = msg
->idp
;
114 write_sequnlock(&fb_priv_cpu
->lock
);
122 case FBLOCK_UNBIND_IDP
: {
123 struct fblock_bind_msg
*msg
= args
;
125 for_each_online_cpu(cpu
) {
126 struct fb_eth_priv
*fb_priv_cpu
;
127 fb_priv_cpu
= per_cpu_ptr(fb_priv
, cpu
);
128 if (fb_priv_cpu
->port
[msg
->dir
] == msg
->idp
) {
129 write_seqlock(&fb_priv_cpu
->lock
);
130 fb_priv_cpu
->port
[msg
->dir
] = IDP_UNKNOWN
;
131 write_sequnlock(&fb_priv_cpu
->lock
);
147 static void cleanup_fb_eth(void)
149 struct net_device
*dev
;
151 for_each_netdev(&init_net
, dev
) {
152 if (fb_eth_dev_is_bridged(dev
)) {
153 netdev_rx_handler_unregister(dev
);
154 fb_eth_make_dev_unbridged(dev
);
160 static int init_fb_eth(void)
162 int ret
= 0, err
= 0;
163 struct net_device
*dev
;
165 for_each_netdev(&init_net
, dev
) {
166 ret
= netdev_rx_handler_register(dev
, fb_eth_handle_frame
,
172 fb_eth_make_dev_bridged(dev
);
182 static struct fblock
*fb_eth_ctor(char *name
)
186 struct fb_eth_priv __percpu
*fb_priv
;
190 fb
= alloc_fblock(GFP_ATOMIC
);
194 fb_priv
= alloc_percpu(struct fb_eth_priv
);
199 for_each_online_cpu(cpu
) {
200 struct fb_eth_priv
*fb_priv_cpu
;
201 fb_priv_cpu
= per_cpu_ptr(fb_priv
, cpu
);
202 seqlock_init(&fb_priv_cpu
->lock
);
203 for (i
= 0; i
< NUM_TYPES
; ++i
)
204 fb_priv_cpu
->port
[i
] = IDP_UNKNOWN
;
208 ret
= init_fblock(fb
, name
, fb_priv
);
211 fb
->netfb_rx
= fb_eth_netrx
;
212 fb
->event_rx
= fb_eth_event
;
213 ret
= register_fblock_namespace(fb
);
219 __module_get(THIS_MODULE
);
224 unregister_fblock_namespace(fb
);
227 cleanup_fblock_ctor(fb
);
229 free_percpu(fb_priv
);
236 static void fb_eth_dtor(struct fblock
*fb
)
238 free_percpu(rcu_dereference_raw(fb
->private_data
));
239 module_put(THIS_MODULE
);
/*
 * Factory descriptor registered with the fblock builder (xt_builder.h).
 * NOTE(review): this extraction is mangled — original lines 245-248 and
 * 250-251 are missing, so the remaining members (presumably .type, .mode,
 * .ctor = fb_eth_ctor, .dtor = fb_eth_dtor) and the closing "};" must be
 * recovered from version control.
 */
244 static struct fblock_factory fb_eth_factory
= {
249 .owner
= THIS_MODULE
,
/*
 * Virtual-link subsystem descriptor for the ethernet netlink group; the
 * module's vlink callbacks are attached to it at init time.
 * NOTE(review): mangled extraction — original line 253 (presumably the
 * subsystem .name member) and the closing "};" are missing; recover from
 * version control.
 */
252 static struct vlink_subsys fb_eth_sys __read_mostly
= {
254 .type
= VLINKNLGRP_ETHERNET
,
255 .rwsem
= __RWSEM_INITIALIZER(fb_eth_sys
.rwsem
),
258 static int fb_eth_start_hook_dev(struct vlinknlmsg
*vhdr
, struct nlmsghdr
*nlh
)
260 return NETLINK_VLINK_RX_NXT
;
263 static int fb_eth_stop_hook_dev(struct vlinknlmsg
*vhdr
, struct nlmsghdr
*nlh
)
265 return NETLINK_VLINK_RX_NXT
;
268 static struct vlink_callback fb_eth_start_hook_dev_cb
=
269 VLINK_CALLBACK_INIT(fb_eth_start_hook_dev
, NETLINK_VLINK_PRIO_HIGH
);
270 static struct vlink_callback fb_eth_stop_hook_dev_cb
=
271 VLINK_CALLBACK_INIT(fb_eth_stop_hook_dev
, NETLINK_VLINK_PRIO_HIGH
);
273 static int __init
init_fb_eth_module(void)
276 ret
= vlink_subsys_register(&fb_eth_sys
);
280 vlink_add_callback(&fb_eth_sys
, &fb_eth_start_hook_dev_cb
);
281 vlink_add_callback(&fb_eth_sys
, &fb_eth_stop_hook_dev_cb
);
283 ret
= register_fblock_type(&fb_eth_factory
);
285 vlink_subsys_unregister_batch(&fb_eth_sys
);
289 static void __exit
cleanup_fb_eth_module(void)
291 unregister_fblock_type(&fb_eth_factory
);
292 vlink_subsys_unregister_batch(&fb_eth_sys
);
293 vlink_subsys_unregister_batch(&fb_eth_sys
);
296 module_init(init_fb_eth_module
);
297 module_exit(cleanup_fb_eth_module
);
299 MODULE_LICENSE("GPL");
300 MODULE_AUTHOR("Daniel Borkmann <dborkma@tik.ee.ethz.ch>");
301 MODULE_DESCRIPTION("Ethernet/PHY link layer bridge driver");