/*
 * Lightweight Autonomic Network Architecture
 *
 * LANA Berkeley Packet Filter (BPF) module using the BPF JIT compiler.
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss federal institute of technology (ETH Zurich)
 * Subject to the GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/seqlock.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/prefetch.h>
#include <linux/filter.h>

#include "xt_fblock.h"
#include "xt_builder.h"
#include "xt_idp.h"
#include "xt_skb.h"
#include "xt_engine.h"

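/*
 * Per-CPU private state of one BPF functional block instance: the two
 * IDP ports (one per path direction), the currently installed socket
 * filter (possibly JIT-compiled), and a spinlock protecting both.
 */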
struct fb_bpf_priv {
	idp_t port[2];
	struct sk_filter *filter;
	spinlock_t flock;
};

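/*
 * Validates and copies a BPF program for a single CPU instance, runs it
 * through sk_chk_filter() and the JIT compiler, then swaps it in under
 * the instance lock. An old filter, if one was installed, is freed
 * after the swap.
 */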
static int fb_bpf_init_filter(struct fb_bpf_priv __percpu *fb_priv_cpu,
			      struct sock_fprog *fprog, unsigned int cpu)
{
	int err;
	struct sk_filter *sf, *sfold;
	unsigned int fsize;
	unsigned long flags;

	if (fprog->filter == NULL)
		return -EINVAL;

	fsize = sizeof(struct sock_filter) * fprog->len;

	sf = kmalloc_node(fsize + sizeof(*sf), GFP_KERNEL, cpu_to_node(cpu));
	if (!sf)
		return -ENOMEM;

	memcpy(sf->insns, fprog->filter, fsize);
	atomic_set(&sf->refcnt, 1);
	sf->len = fprog->len;
	sf->bpf_func = sk_run_filter;

	err = sk_chk_filter(sf->insns, sf->len);
	if (err) {
		kfree(sf);
		return err;
	}

	bpf_jit_compile(sf);

	spin_lock_irqsave(&fb_priv_cpu->flock, flags);
	sfold = fb_priv_cpu->filter;
	fb_priv_cpu->filter = sf;
	spin_unlock_irqrestore(&fb_priv_cpu->flock, flags);

	if (sfold) {
		bpf_jit_free(sfold);
		kfree(sfold);
	}

	return 0;
}

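/*
 * Installs the given filter program on every online CPU instance of
 * the block. Bails out with the first per-CPU error, leaving copies
 * already installed on earlier CPUs in place.
 */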
static int fb_bpf_init_filter_cpus(struct fblock *fb, struct sock_fprog *fprog)
{
	int err = 0;
	unsigned int cpu;
	struct fb_bpf_priv __percpu *fb_priv;

	if (!fprog || !fb)
		return -EINVAL;

	rcu_read_lock();
	fb_priv = (struct fb_bpf_priv __percpu *) rcu_dereference_raw(fb->private_data);
	rcu_read_unlock();

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct fb_bpf_priv *fb_priv_cpu;
		fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
		err = fb_bpf_init_filter(fb_priv_cpu, fprog, cpu);
		if (err != 0) {
			printk(KERN_ERR "[%s::%s] fb_bpf_init_filter error: %d\n",
			       fb->name, fb->factory->type, err);
			break;
		}
	}
	put_online_cpus();

	return err;
}

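/*
 * Detaches and frees the filter of a single CPU instance, including
 * any JIT-generated image.
 */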
static void fb_bpf_cleanup_filter(struct fb_bpf_priv __percpu *fb_priv_cpu)
{
	unsigned long flags;
	struct sk_filter *sfold;

	spin_lock_irqsave(&fb_priv_cpu->flock, flags);
	sfold = fb_priv_cpu->filter;
	fb_priv_cpu->filter = NULL;
	spin_unlock_irqrestore(&fb_priv_cpu->flock, flags);

	if (sfold) {
		bpf_jit_free(sfold);
		kfree(sfold);
	}
}

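/* Tears down the filters of all online CPU instances of the block. */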
static void fb_bpf_cleanup_filter_cpus(struct fblock *fb)
{
	unsigned int cpu;
	struct fb_bpf_priv __percpu *fb_priv;

	if (!fb)
		return;

	rcu_read_lock();
	fb_priv = (struct fb_bpf_priv __percpu *) rcu_dereference_raw(fb->private_data);
	rcu_read_unlock();

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct fb_bpf_priv *fb_priv_cpu;
		fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
		fb_bpf_cleanup_filter(fb_priv_cpu);
	}
	put_online_cpus();
}

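/*
 * Per-packet receive path: runs the installed BPF program over the skb
 * and drops the packet if the filter accepts less than its full length.
 * Surviving packets are tagged with the next IDP for the current path
 * direction; they are also dropped if no port is bound there.
 */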
static int fb_bpf_netrx(const struct fblock * const fb,
			struct sk_buff * const skb,
			enum path_type * const dir)
{
	int drop = 0;
	unsigned int pkt_len;
	unsigned long flags;
	struct fb_bpf_priv __percpu *fb_priv_cpu;

	fb_priv_cpu = this_cpu_ptr(rcu_dereference_raw(fb->private_data));

	spin_lock_irqsave(&fb_priv_cpu->flock, flags);
	if (fb_priv_cpu->filter) {
		pkt_len = SK_RUN_FILTER(fb_priv_cpu->filter, skb);
		if (pkt_len < skb->len) {
			spin_unlock_irqrestore(&fb_priv_cpu->flock, flags);
			kfree_skb(skb);
			return PPE_DROPPED;
		}
	}
	write_next_idp_to_skb(skb, fb->idp, fb_priv_cpu->port[*dir]);
	if (fb_priv_cpu->port[*dir] == IDP_UNKNOWN)
		drop = 1;
	spin_unlock_irqrestore(&fb_priv_cpu->flock, flags);
	if (drop) {
		kfree_skb(skb);
		return PPE_DROPPED;
	}
	return PPE_SUCCESS;
}

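/*
 * Notifier callback for control events: binds or unbinds an IDP port
 * in the given direction on all online CPU instances. Returns
 * NOTIFY_BAD if a CPU instance is already bound on bind, or bound to
 * a different IDP on unbind.
 */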
static int fb_bpf_event(struct notifier_block *self, unsigned long cmd,
			void *args)
{
	int ret = NOTIFY_OK;
	unsigned int cpu;
	struct fblock *fb;
	struct fb_bpf_priv __percpu *fb_priv;

	rcu_read_lock();
	fb = rcu_dereference_raw(container_of(self, struct fblock_notifier, nb)->self);
	fb_priv = (struct fb_bpf_priv __percpu *) rcu_dereference_raw(fb->private_data);
	rcu_read_unlock();

	switch (cmd) {
	case FBLOCK_BIND_IDP: {
		int bound = 0;
		struct fblock_bind_msg *msg = args;
		get_online_cpus();
		for_each_online_cpu(cpu) {
			struct fb_bpf_priv *fb_priv_cpu;
			fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
			spin_lock(&fb_priv_cpu->flock);
			if (fb_priv_cpu->port[msg->dir] == IDP_UNKNOWN) {
				fb_priv_cpu->port[msg->dir] = msg->idp;
				bound = 1;
			} else {
				ret = NOTIFY_BAD;
				spin_unlock(&fb_priv_cpu->flock);
				break;
			}
			spin_unlock(&fb_priv_cpu->flock);
		}
		put_online_cpus();
		if (bound)
			printk(KERN_INFO "[%s::%s] port %s bound to IDP%u\n",
			       fb->name, fb->factory->type,
			       path_names[msg->dir], msg->idp);
		} break;
	case FBLOCK_UNBIND_IDP: {
		int unbound = 0;
		struct fblock_bind_msg *msg = args;
		get_online_cpus();
		for_each_online_cpu(cpu) {
			struct fb_bpf_priv *fb_priv_cpu;
			fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
			spin_lock(&fb_priv_cpu->flock);
			if (fb_priv_cpu->port[msg->dir] == msg->idp) {
				fb_priv_cpu->port[msg->dir] = IDP_UNKNOWN;
				unbound = 1;
			} else {
				ret = NOTIFY_BAD;
				spin_unlock(&fb_priv_cpu->flock);
				break;
			}
			spin_unlock(&fb_priv_cpu->flock);
		}
		put_online_cpus();
		if (unbound)
			printk(KERN_INFO "[%s::%s] port %s unbound\n",
			       fb->name, fb->factory->type,
			       path_names[msg->dir]);
		} break;
	default:
		break;
	}

	return ret;
}

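/*
 * Constructor: allocates the fblock and its per-CPU private data,
 * initializes locks and ports on every online CPU, hooks up the
 * receive and event handlers, and registers the block in the
 * namespace.
 */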
static struct fblock *fb_bpf_ctor(char *name)
{
	int ret = 0;
	unsigned int cpu;
	struct fblock *fb;
	struct fb_bpf_priv __percpu *fb_priv;

	fb = alloc_fblock(GFP_ATOMIC);
	if (!fb)
		return NULL;

	fb_priv = alloc_percpu(struct fb_bpf_priv);
	if (!fb_priv)
		goto err;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct fb_bpf_priv *fb_priv_cpu;
		fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
		spin_lock_init(&fb_priv_cpu->flock);
		fb_priv_cpu->port[0] = IDP_UNKNOWN;
		fb_priv_cpu->port[1] = IDP_UNKNOWN;
		fb_priv_cpu->filter = NULL;
	}
	put_online_cpus();

	ret = init_fblock(fb, name, fb_priv);
	if (ret)
		goto err2;
	fb->netfb_rx = fb_bpf_netrx;
	fb->event_rx = fb_bpf_event;
	ret = register_fblock_namespace(fb);
	if (ret)
		goto err3;
	__module_get(THIS_MODULE);
	return fb;
err3:
	cleanup_fblock_ctor(fb);
err2:
	free_percpu(fb_priv);
err:
	kfree_fblock(fb);
	return NULL;
}

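/* Destructor: releases the per-CPU private data and the module ref. */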
static void fb_bpf_dtor(struct fblock *fb)
{
	free_percpu(rcu_dereference_raw(fb->private_data));
	module_put(THIS_MODULE);
}

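/* Factory registered with the LANA core under the type name "bpf". */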
static struct fblock_factory fb_bpf_factory = {
	.type = "bpf",
	.mode = MODE_DUAL,
	.ctor = fb_bpf_ctor,
	.dtor = fb_bpf_dtor,
	.owner = THIS_MODULE,
};

static int __init init_fb_bpf_module(void)
{
	return register_fblock_type(&fb_bpf_factory);
}

static void __exit cleanup_fb_bpf_module(void)
{
	unregister_fblock_type(&fb_bpf_factory);
}

module_init(init_fb_bpf_module);
module_exit(cleanup_fb_bpf_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkma@tik.ee.ethz.ch>");
MODULE_DESCRIPTION("LANA Berkeley Packet Filter module");