added proc read func
[ana-net.git] / src / fb_bpf.c
/*
 * Lightweight Autonomic Network Architecture
 *
 * LANA Berkeley Packet Filter (BPF) module using the BPF JIT compiler.
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss Federal Institute of Technology (ETH Zurich)
 * Subject to the GPL.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/seqlock.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/prefetch.h>
#include <linux/filter.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
26 #include "xt_fblock.h"
27 #include "xt_builder.h"
28 #include "xt_idp.h"
29 #include "xt_skb.h"
30 #include "xt_engine.h"
31 #include "xt_builder.h"
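/*
 * Per-CPU private data of a BPF functional block: the bound ingress and
 * egress IDPs, the currently attached filter program, and a lock that
 * serializes filter replacement against packet processing on this CPU.
 */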
struct fb_bpf_priv {
	idp_t port[2];
	struct sk_filter *filter;
	spinlock_t flock;
};
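/*
 * Validates the filter program with sk_chk_filter(), allocates the
 * sk_filter on the memory node of the given CPU, runs it through the
 * BPF JIT compiler and swaps it in under the per-CPU lock; an old
 * filter, if present, is freed afterwards.
 */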
static int fb_bpf_init_filter(struct fb_bpf_priv __percpu *fb_priv_cpu,
			      struct sock_fprog *fprog, unsigned int cpu)
{
	int err;
	struct sk_filter *sf, *sfold;
	unsigned int fsize;
	unsigned long flags;

	if (fprog->filter == NULL)
		return -EINVAL;

	fsize = sizeof(struct sock_filter) * fprog->len;

	sf = kmalloc_node(fsize + sizeof(*sf), GFP_KERNEL, cpu_to_node(cpu));
	if (!sf)
		return -ENOMEM;

	memcpy(sf->insns, fprog->filter, fsize);
	atomic_set(&sf->refcnt, 1);
	sf->len = fprog->len;
	sf->bpf_func = sk_run_filter;

	err = sk_chk_filter(sf->insns, sf->len);
	if (err) {
		kfree(sf);
		return err;
	}

	bpf_jit_compile(sf);

	spin_lock_irqsave(&fb_priv_cpu->flock, flags);
	sfold = fb_priv_cpu->filter;
	fb_priv_cpu->filter = sf;
	spin_unlock_irqrestore(&fb_priv_cpu->flock, flags);

	if (sfold) {
		bpf_jit_free(sfold);
		kfree(sfold);
	}

	return 0;
}
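/*
 * Installs the filter program on every online CPU; stops at the first
 * CPU that fails and reports the error.
 */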
static int fb_bpf_init_filter_cpus(struct fblock *fb, struct sock_fprog *fprog)
{
	int err = 0;
	unsigned int cpu;
	struct fb_bpf_priv __percpu *fb_priv;

	if (!fprog || !fb)
		return -EINVAL;

	rcu_read_lock();
	fb_priv = (struct fb_bpf_priv __percpu *) rcu_dereference_raw(fb->private_data);
	rcu_read_unlock();

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct fb_bpf_priv *fb_priv_cpu;
		fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
		err = fb_bpf_init_filter(fb_priv_cpu, fprog, cpu);
		if (err != 0) {
			printk(KERN_ERR "[%s::%s] fb_bpf_init_filter error: %d\n",
			       fb->name, fb->factory->type, err);
			break;
		}
	}
	put_online_cpus();

	return err;
}
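/* Detaches and frees the filter of a single per-CPU instance. */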
static void fb_bpf_cleanup_filter(struct fb_bpf_priv __percpu *fb_priv_cpu)
{
	unsigned long flags;
	struct sk_filter *sfold;

	spin_lock_irqsave(&fb_priv_cpu->flock, flags);
	sfold = fb_priv_cpu->filter;
	fb_priv_cpu->filter = NULL;
	spin_unlock_irqrestore(&fb_priv_cpu->flock, flags);

	if (sfold) {
		bpf_jit_free(sfold);
		kfree(sfold);
	}
}
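/* Detaches the filters of all online CPUs. */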
static void fb_bpf_cleanup_filter_cpus(struct fblock *fb)
{
	unsigned int cpu;
	struct fb_bpf_priv __percpu *fb_priv;

	if (!fb)
		return;

	rcu_read_lock();
	fb_priv = (struct fb_bpf_priv __percpu *) rcu_dereference_raw(fb->private_data);
	rcu_read_unlock();

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct fb_bpf_priv *fb_priv_cpu;
		fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
		fb_bpf_cleanup_filter(fb_priv_cpu);
	}
	put_online_cpus();
}
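/*
 * Receive handler: runs the classic BPF program over the skb. The
 * program returns the number of bytes to keep, so any return value
 * smaller than skb->len is treated as a drop (packets are never
 * snapped). Packets are also dropped when no next IDP is bound in
 * the current direction.
 */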
static int fb_bpf_netrx(const struct fblock * const fb,
			struct sk_buff * const skb,
			enum path_type * const dir)
{
	int drop = 0;
	unsigned int pkt_len;
	unsigned long flags;
	struct fb_bpf_priv __percpu *fb_priv_cpu;

	fb_priv_cpu = this_cpu_ptr(rcu_dereference_raw(fb->private_data));

	spin_lock_irqsave(&fb_priv_cpu->flock, flags);
	if (fb_priv_cpu->filter) {
		pkt_len = SK_RUN_FILTER(fb_priv_cpu->filter, skb);
		/* No snap, either drop or pass */
		if (pkt_len < skb->len) {
			spin_unlock_irqrestore(&fb_priv_cpu->flock, flags);
			kfree_skb(skb);
			return PPE_DROPPED;
		}
	}
	write_next_idp_to_skb(skb, fb->idp, fb_priv_cpu->port[*dir]);
	if (fb_priv_cpu->port[*dir] == IDP_UNKNOWN)
		drop = 1;
	spin_unlock_irqrestore(&fb_priv_cpu->flock, flags);
	if (drop) {
		kfree_skb(skb);
		return PPE_DROPPED;
	}
	return PPE_SUCCESS;
}
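/*
 * Notifier callback: binds or unbinds the neighboring IDP of this
 * block, per direction, on all per-CPU instances. Returns NOTIFY_BAD
 * if a port is already bound, respectively bound to a different IDP.
 */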
static int fb_bpf_event(struct notifier_block *self, unsigned long cmd,
			void *args)
{
	int ret = NOTIFY_OK;
	unsigned int cpu;
	struct fblock *fb;
	struct fb_bpf_priv __percpu *fb_priv;

	rcu_read_lock();
	fb = rcu_dereference_raw(container_of(self, struct fblock_notifier, nb)->self);
	fb_priv = (struct fb_bpf_priv __percpu *) rcu_dereference_raw(fb->private_data);
	rcu_read_unlock();

	switch (cmd) {
	case FBLOCK_BIND_IDP: {
		int bound = 0;
		struct fblock_bind_msg *msg = args;
		get_online_cpus();
		for_each_online_cpu(cpu) {
			struct fb_bpf_priv *fb_priv_cpu;
			fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
			spin_lock(&fb_priv_cpu->flock);
			if (fb_priv_cpu->port[msg->dir] == IDP_UNKNOWN) {
				fb_priv_cpu->port[msg->dir] = msg->idp;
				bound = 1;
			} else {
				ret = NOTIFY_BAD;
				spin_unlock(&fb_priv_cpu->flock);
				break;
			}
			spin_unlock(&fb_priv_cpu->flock);
		}
		put_online_cpus();
		if (bound)
			printk(KERN_INFO "[%s::%s] port %s bound to IDP%u\n",
			       fb->name, fb->factory->type,
			       path_names[msg->dir], msg->idp);
	} break;
	case FBLOCK_UNBIND_IDP: {
		int unbound = 0;
		struct fblock_bind_msg *msg = args;
		get_online_cpus();
		for_each_online_cpu(cpu) {
			struct fb_bpf_priv *fb_priv_cpu;
			fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
			spin_lock(&fb_priv_cpu->flock);
			if (fb_priv_cpu->port[msg->dir] == msg->idp) {
				fb_priv_cpu->port[msg->dir] = IDP_UNKNOWN;
				unbound = 1;
			} else {
				ret = NOTIFY_BAD;
				spin_unlock(&fb_priv_cpu->flock);
				break;
			}
			spin_unlock(&fb_priv_cpu->flock);
		}
		put_online_cpus();
		if (unbound)
			printk(KERN_INFO "[%s::%s] port %s unbound\n",
			       fb->name, fb->factory->type,
			       path_names[msg->dir]);
	} break;
	default:
		break;
	}

	return ret;
}
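/*
 * procfs read path: reports whether the JIT compiled the filter of the
 * executing CPU's instance and dumps its instructions as
 * "code jt jf k" tuples, one per line.
 */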
static int fb_bpf_proc_show_filter(struct seq_file *seq, void *v)
{
	unsigned long flags;
	struct fblock *fb = v;
	struct fb_bpf_priv __percpu *fb_priv;
	struct fb_bpf_priv *fb_priv_cpu;
	struct sk_filter *sf;

	rcu_read_lock();
	fb_priv = (struct fb_bpf_priv __percpu *) rcu_dereference_raw(fb->private_data);
	rcu_read_unlock();

	get_online_cpus();
	fb_priv_cpu = per_cpu_ptr(fb_priv, smp_processor_id());
	spin_lock_irqsave(&fb_priv_cpu->flock, flags);

	sf = fb_priv_cpu->filter;
	if (sf) {
		unsigned int i;
		if (sf->bpf_func == sk_run_filter)
			seq_puts(seq, "bpf jit: 0\n");
		else
			seq_puts(seq, "bpf jit: 1\n");
		seq_puts(seq, "code:\n");
		for (i = 0; i < sf->len; ++i) {
			char sline[32];
			memset(sline, 0, sizeof(sline));
			snprintf(sline, sizeof(sline),
				 "0x%x %d %d 0x%x\n",
				 sf->insns[i].code,
				 sf->insns[i].jt,
				 sf->insns[i].jf,
				 sf->insns[i].k);
			sline[sizeof(sline) - 1] = 0;
			seq_puts(seq, sline);
		}
	}

	spin_unlock_irqrestore(&fb_priv_cpu->flock, flags);
	put_online_cpus();

	return 0;
}
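/* Single-shot procfs open; the fblock is passed via the proc entry's data. */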
static int fb_bpf_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, fb_bpf_proc_show_filter, PDE(inode)->data);
}

static ssize_t fb_bpf_proc_write(struct file *file, const char __user *user_buffer,
				 size_t count, loff_t *offset)
{
	/* Write path is not implemented yet; claim the input was consumed
	 * so that callers do not retry the write indefinitely. */
	return count;
}
static const struct file_operations fb_bpf_proc_fops = {
	.owner = THIS_MODULE,
	.open = fb_bpf_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = fb_bpf_proc_write,
	.release = single_release,
};
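/*
 * Constructor: allocates the fblock and its per-CPU private data,
 * initializes locks and ports, hooks up the rx/event handlers, creates
 * the procfs entry and registers the block in the namespace. Error
 * paths unwind in reverse order.
 */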
static struct fblock *fb_bpf_ctor(char *name)
{
	int ret = 0;
	unsigned int cpu;
	struct fblock *fb;
	struct fb_bpf_priv __percpu *fb_priv;
	struct proc_dir_entry *fb_proc;

	fb = alloc_fblock(GFP_ATOMIC);
	if (!fb)
		return NULL;

	fb_priv = alloc_percpu(struct fb_bpf_priv);
	if (!fb_priv)
		goto err;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct fb_bpf_priv *fb_priv_cpu;
		fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
		spin_lock_init(&fb_priv_cpu->flock);
		fb_priv_cpu->port[0] = IDP_UNKNOWN;
		fb_priv_cpu->port[1] = IDP_UNKNOWN;
		fb_priv_cpu->filter = NULL;
	}
	put_online_cpus();

	ret = init_fblock(fb, name, fb_priv);
	if (ret)
		goto err2;

	fb->netfb_rx = fb_bpf_netrx;
	fb->event_rx = fb_bpf_event;

	fb_proc = proc_create_data(fb->name, 0444, fblock_proc_dir,
				   &fb_bpf_proc_fops, fb);
	if (!fb_proc)
		goto err3;

	ret = register_fblock_namespace(fb);
	if (ret)
		goto err4;

	__module_get(THIS_MODULE);

	return fb;
err4:
	remove_proc_entry(fb->name, fblock_proc_dir);
err3:
	cleanup_fblock_ctor(fb);
err2:
	free_percpu(fb_priv);
err:
	kfree_fblock(fb);
	return NULL;
}
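/*
 * Destructor: releases attached filters, the per-CPU data, the procfs
 * entry and the module reference.
 */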
static void fb_bpf_dtor(struct fblock *fb)
{
	/* Drop any attached filters before the per-CPU area goes away;
	 * without this the sk_filter instances would leak. */
	fb_bpf_cleanup_filter_cpus(fb);
	free_percpu(rcu_dereference_raw(fb->private_data));
	remove_proc_entry(fb->name, fblock_proc_dir);
	module_put(THIS_MODULE);
}
static struct fblock_factory fb_bpf_factory = {
	.type = "bpf",
	.mode = MODE_DUAL,
	.ctor = fb_bpf_ctor,
	.dtor = fb_bpf_dtor,
	.owner = THIS_MODULE,
};
static int __init init_fb_bpf_module(void)
{
	return register_fblock_type(&fb_bpf_factory);
}

static void __exit cleanup_fb_bpf_module(void)
{
	unregister_fblock_type(&fb_bpf_factory);
}

module_init(init_fb_bpf_module);
module_exit(cleanup_fb_bpf_module);
399 MODULE_LICENSE("GPL");
400 MODULE_AUTHOR("Daniel Borkmann <dborkma@tik.ee.ethz.ch>");
401 MODULE_DESCRIPTION("LANA Berkeley Packet Filter module");