/*
 * Lightweight Autonomic Network Architecture
 *
 * LANA Berkeley Packet Filter (BPF) module using the BPF JIT compiler.
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss federal institute of technology (ETH Zurich)
 */
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/spinlock.h>
14 #include <linux/notifier.h>
15 #include <linux/rcupdate.h>
16 #include <linux/seqlock.h>
17 #include <linux/spinlock.h>
18 #include <linux/slab.h>
19 #include <linux/percpu.h>
20 #include <linux/prefetch.h>
21 #include <linux/filter.h>
23 #include "xt_fblock.h"
24 #include "xt_builder.h"
27 #include "xt_engine.h"
28 #include "xt_builder.h"
32 struct sk_filter
*filter
;
36 static int fb_bpf_init_filter(struct fb_bpf_priv __percpu
*fb_priv_cpu
,
37 struct sock_fprog
*fprog
, unsigned int cpu
)
40 struct sk_filter
*sf
, *sfold
;
44 if (fprog
->filter
== NULL
)
47 fsize
= sizeof(struct sock_filter
) * fprog
->len
;
49 sf
= kmalloc_node(fsize
+ sizeof(*sf
), GFP_KERNEL
, cpu_to_node(cpu
));
53 memcpy(sf
->insns
, fprog
->filter
, fsize
);
54 atomic_set(&sf
->refcnt
, 1);
56 sf
->bpf_func
= sk_run_filter
;
58 err
= sk_chk_filter(sf
->insns
, sf
->len
);
66 spin_lock_irqsave(&fb_priv_cpu
->flock
, flags
);
67 sfold
= fb_priv_cpu
->filter
;
68 fb_priv_cpu
->filter
= sf
;
69 spin_unlock_irqrestore(&fb_priv_cpu
->flock
, flags
);
79 static int fb_bpf_init_filter_cpus(struct fblock
*fb
, struct sock_fprog
*fprog
)
83 struct fb_bpf_priv __percpu
*fb_priv
;
89 fb_priv
= (struct fb_bpf_priv __percpu
*) rcu_dereference_raw(fb
->private_data
);
93 for_each_online_cpu(cpu
) {
94 struct fb_bpf_priv
*fb_priv_cpu
;
95 fb_priv_cpu
= per_cpu_ptr(fb_priv
, cpu
);
96 err
= fb_bpf_init_filter(fb_priv_cpu
, fprog
, cpu
);
98 printk(KERN_ERR
"[%s::%s] fb_bpf_init_filter error: %d\n",
99 fb
->name
, fb
->factory
->type
, err
);
108 static void fb_bpf_cleanup_filter(struct fb_bpf_priv __percpu
*fb_priv_cpu
)
111 struct sk_filter
*sfold
;
113 spin_lock_irqsave(&fb_priv_cpu
->flock
, flags
);
114 sfold
= fb_priv_cpu
->filter
;
115 fb_priv_cpu
->filter
= NULL
;
116 spin_unlock_irqrestore(&fb_priv_cpu
->flock
, flags
);
124 static void fb_bpf_cleanup_filter_cpus(struct fblock
*fb
)
127 struct fb_bpf_priv __percpu
*fb_priv
;
133 fb_priv
= (struct fb_bpf_priv __percpu
*) rcu_dereference_raw(fb
->private_data
);
137 for_each_online_cpu(cpu
) {
138 struct fb_bpf_priv
*fb_priv_cpu
;
139 fb_priv_cpu
= per_cpu_ptr(fb_priv
, cpu
);
140 fb_bpf_cleanup_filter(fb_priv_cpu
);
145 static int fb_bpf_netrx(const struct fblock
* const fb
,
146 struct sk_buff
* const skb
,
147 enum path_type
* const dir
)
150 unsigned int pkt_len
;
152 struct fb_bpf_priv __percpu
*fb_priv_cpu
;
154 fb_priv_cpu
= this_cpu_ptr(rcu_dereference_raw(fb
->private_data
));
156 spin_lock_irqsave(&fb_priv_cpu
->flock
, flags
);
157 if (fb_priv_cpu
->filter
) {
158 pkt_len
= SK_RUN_FILTER(fb_priv_cpu
->filter
, skb
);
159 if (pkt_len
< skb
->len
) {
160 spin_unlock_irqrestore(&fb_priv_cpu
->flock
, flags
);
165 write_next_idp_to_skb(skb
, fb
->idp
, fb_priv_cpu
->port
[*dir
]);
166 if (fb_priv_cpu
->port
[*dir
] == IDP_UNKNOWN
)
168 spin_unlock_irqrestore(&fb_priv_cpu
->flock
, flags
);
177 static int fb_bpf_event(struct notifier_block
*self
, unsigned long cmd
,
183 struct fb_bpf_priv __percpu
*fb_priv
;
186 fb
= rcu_dereference_raw(container_of(self
, struct fblock_notifier
, nb
)->self
);
187 fb_priv
= (struct fb_bpf_priv __percpu
*) rcu_dereference_raw(fb
->private_data
);
191 case FBLOCK_BIND_IDP
: {
193 struct fblock_bind_msg
*msg
= args
;
195 for_each_online_cpu(cpu
) {
196 struct fb_bpf_priv
*fb_priv_cpu
;
197 fb_priv_cpu
= per_cpu_ptr(fb_priv
, cpu
);
198 spin_lock(&fb_priv_cpu
->flock
);
199 if (fb_priv_cpu
->port
[msg
->dir
] == IDP_UNKNOWN
) {
200 fb_priv_cpu
->port
[msg
->dir
] = msg
->idp
;
204 spin_unlock(&fb_priv_cpu
->flock
);
207 spin_unlock(&fb_priv_cpu
->flock
);
211 printk(KERN_INFO
"[%s::%s] port %s bound to IDP%u\n",
212 fb
->name
, fb
->factory
->type
,
213 path_names
[msg
->dir
], msg
->idp
);
215 case FBLOCK_UNBIND_IDP
: {
217 struct fblock_bind_msg
*msg
= args
;
219 for_each_online_cpu(cpu
) {
220 struct fb_bpf_priv
*fb_priv_cpu
;
221 fb_priv_cpu
= per_cpu_ptr(fb_priv
, cpu
);
222 spin_lock(&fb_priv_cpu
->flock
);
223 if (fb_priv_cpu
->port
[msg
->dir
] == msg
->idp
) {
224 fb_priv_cpu
->port
[msg
->dir
] = IDP_UNKNOWN
;
228 spin_unlock(&fb_priv_cpu
->flock
);
231 spin_unlock(&fb_priv_cpu
->flock
);
235 printk(KERN_INFO
"[%s::%s] port %s unbound\n",
236 fb
->name
, fb
->factory
->type
,
237 path_names
[msg
->dir
]);
246 static struct fblock
*fb_bpf_ctor(char *name
)
251 struct fb_bpf_priv __percpu
*fb_priv
;
253 fb
= alloc_fblock(GFP_ATOMIC
);
257 fb_priv
= alloc_percpu(struct fb_bpf_priv
);
262 for_each_online_cpu(cpu
) {
263 struct fb_bpf_priv
*fb_priv_cpu
;
264 fb_priv_cpu
= per_cpu_ptr(fb_priv
, cpu
);
265 spin_lock_init(&fb_priv_cpu
->flock
);
266 fb_priv_cpu
->port
[0] = IDP_UNKNOWN
;
267 fb_priv_cpu
->port
[1] = IDP_UNKNOWN
;
268 fb_priv_cpu
->filter
= NULL
;
272 ret
= init_fblock(fb
, name
, fb_priv
);
275 fb
->netfb_rx
= fb_bpf_netrx
;
276 fb
->event_rx
= fb_bpf_event
;
277 ret
= register_fblock_namespace(fb
);
280 __module_get(THIS_MODULE
);
283 cleanup_fblock_ctor(fb
);
285 free_percpu(fb_priv
);
291 static void fb_bpf_dtor(struct fblock
*fb
)
293 free_percpu(rcu_dereference_raw(fb
->private_data
));
294 module_put(THIS_MODULE
);
297 static struct fblock_factory fb_bpf_factory
= {
302 .owner
= THIS_MODULE
,
305 static int __init
init_fb_bpf_module(void)
307 return register_fblock_type(&fb_bpf_factory
);
310 static void __exit
cleanup_fb_bpf_module(void)
312 unregister_fblock_type(&fb_bpf_factory
);
315 module_init(init_fb_bpf_module
);
316 module_exit(cleanup_fb_bpf_module
);
318 MODULE_LICENSE("GPL");
319 MODULE_AUTHOR("Daniel Borkmann <dborkma@tik.ee.ethz.ch>");
320 MODULE_DESCRIPTION("LANA Berkeley Packet Filter module");