/*
 * netsniff-ng - the packet sniffing beast
 * By Daniel Borkmann <daniel@netsniff-ng.org>
 * Copyright 2009, 2010 Daniel Borkmann.
 * Copyright 2009, 2010 Emmanuel Roullit.
 * Copyright 1990-1996 The Regents of the University of
 * California. All rights reserved. (3-clause BSD license)
 * Subject to the GPL, version 2.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>		/* strlen() */
#include <assert.h>
#include <arpa/inet.h>
#include <sys/socket.h>		/* setsockopt(), SO_ATTACH_FILTER */
#include <fcntl.h>		/* open() */
#include <unistd.h>		/* write(), close() */

#include "bpf.h"
#include "xmalloc.h"
#include "xstring.h"
#include "die.h"
/* Note: libpcap's version of these helpers uses 'unsigned long' instead of
 * a fixed 32 bit type; stick to fixed-width types here so the extraction
 * does not over-read on 64 bit. */
#define EXTRACT_SHORT(packet) \
		((uint16_t) ntohs(*(uint16_t *) packet))
#define EXTRACT_LONG(packet) \
		(ntohl(*(uint32_t *) packet))

#ifndef BPF_MEMWORDS
# define BPF_MEMWORDS 16
#endif
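
/* Disassembles a single BPF instruction into a human readable string.
 * 'n' is the index of the instruction within the program; the result is
 * kept in a static buffer, so it must be consumed before the next call. */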
static char *bpf_dump(const struct sock_filter bpf, int n)
{
	int v;
	const char *fmt, *op;
	static char image[256];
	char operand[64];

	v = bpf.k;
	switch (bpf.code) {
	default:
		op = "unimp";
		fmt = "0x%x";
		v = bpf.code;
		break;
	case BPF_RET | BPF_K:
		op = "ret";
		fmt = "#%d";
		break;
	case BPF_RET | BPF_A:
		op = "ret";
		fmt = "";
		break;
	case BPF_LD | BPF_W | BPF_ABS:
		op = "ld";
		fmt = "[%d]";
		break;
	case BPF_LD | BPF_H | BPF_ABS:
		op = "ldh";
		fmt = "[%d]";
		break;
	case BPF_LD | BPF_B | BPF_ABS:
		op = "ldb";
		fmt = "[%d]";
		break;
	case BPF_LD | BPF_W | BPF_LEN:
		op = "ld";
		fmt = "#pktlen";
		break;
	case BPF_LD | BPF_W | BPF_IND:
		op = "ld";
		fmt = "[x + %d]";
		break;
	case BPF_LD | BPF_H | BPF_IND:
		op = "ldh";
		fmt = "[x + %d]";
		break;
	case BPF_LD | BPF_B | BPF_IND:
		op = "ldb";
		fmt = "[x + %d]";
		break;
	case BPF_LD | BPF_IMM:
		op = "ld";
		fmt = "#0x%x";
		break;
	case BPF_LDX | BPF_IMM:
		op = "ldx";
		fmt = "#0x%x";
		break;
	case BPF_LDX | BPF_MSH | BPF_B:
		op = "ldxb";
		fmt = "4*([%d]&0xf)";
		break;
	case BPF_LD | BPF_MEM:
		op = "ld";
		fmt = "M[%d]";
		break;
	case BPF_LDX | BPF_MEM:
		op = "ldx";
		fmt = "M[%d]";
		break;
	case BPF_ST:
		op = "st";
		fmt = "M[%d]";
		break;
	case BPF_STX:
		op = "stx";
		fmt = "M[%d]";
		break;
	case BPF_JMP | BPF_JA:
		op = "ja";
		fmt = "%d";
		v = n + 1 + bpf.k;
		break;
	case BPF_JMP | BPF_JGT | BPF_K:
		op = "jgt";
		fmt = "#0x%x";
		break;
	case BPF_JMP | BPF_JGE | BPF_K:
		op = "jge";
		fmt = "#0x%x";
		break;
	case BPF_JMP | BPF_JEQ | BPF_K:
		op = "jeq";
		fmt = "#0x%x";
		break;
	case BPF_JMP | BPF_JSET | BPF_K:
		op = "jset";
		fmt = "#0x%x";
		break;
	case BPF_JMP | BPF_JGT | BPF_X:
		op = "jgt";
		fmt = "x";
		break;
	case BPF_JMP | BPF_JGE | BPF_X:
		op = "jge";
		fmt = "x";
		break;
	case BPF_JMP | BPF_JEQ | BPF_X:
		op = "jeq";
		fmt = "x";
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
		op = "jset";
		fmt = "x";
		break;
	case BPF_ALU | BPF_ADD | BPF_X:
		op = "add";
		fmt = "x";
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
		op = "sub";
		fmt = "x";
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
		op = "mul";
		fmt = "x";
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
		op = "div";
		fmt = "x";
		break;
	case BPF_ALU | BPF_AND | BPF_X:
		op = "and";
		fmt = "x";
		break;
	case BPF_ALU | BPF_OR | BPF_X:
		op = "or";
		fmt = "x";
		break;
	case BPF_ALU | BPF_LSH | BPF_X:
		op = "lsh";
		fmt = "x";
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
		op = "rsh";
		fmt = "x";
		break;
	case BPF_ALU | BPF_ADD | BPF_K:
		op = "add";
		fmt = "#%d";
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
		op = "sub";
		fmt = "#%d";
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
		op = "mul";
		fmt = "#%d";
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
		op = "div";
		fmt = "#%d";
		break;
	case BPF_ALU | BPF_AND | BPF_K:
		op = "and";
		fmt = "#0x%x";
		break;
	case BPF_ALU | BPF_OR | BPF_K:
		op = "or";
		fmt = "#0x%x";
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
		op = "lsh";
		fmt = "#%d";
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
		op = "rsh";
		fmt = "#%d";
		break;
	case BPF_ALU | BPF_NEG:
		op = "neg";
		fmt = "";
		break;
	case BPF_MISC | BPF_TAX:
		op = "tax";
		fmt = "";
		break;
	case BPF_MISC | BPF_TXA:
		op = "txa";
		fmt = "";
		break;
	}

	slprintf(operand, sizeof(operand), fmt, v);
	slprintf(image, sizeof(image),
		 (BPF_CLASS(bpf.code) == BPF_JMP &&
		  BPF_OP(bpf.code) != BPF_JA) ?
		 "(%03d) %-8s %-16s jt %d\tjf %d" : "(%03d) %-8s %s",
		 n, op, operand, n + 1 + bpf.jt, n + 1 + bpf.jf);

	return image;
}
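
/* Prints a disassembly of the whole BPF program, one instruction per line. */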
void bpf_dump_all(struct sock_fprog *bpf)
{
	int i;

	for (i = 0; i < bpf->len; ++i)
		printf("%s\n", bpf_dump(bpf->filter[i], i));
}
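
/* Attaches the filter to a packet socket via SO_ATTACH_FILTER. A single
 * "ret 0xffffffff" (accept-all) program is skipped, since it would only
 * add per-packet overhead without filtering anything. */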
void bpf_attach_to_sock(int sock, struct sock_fprog *bpf)
{
	if (bpf->len == 1)
		if (bpf->filter[0].code == BPF_RET &&
		    bpf->filter[0].k == 0xFFFFFFFF)
			return;

	int ret = setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER, bpf,
			     sizeof(*bpf));
	if (ret < 0)
		panic("Cannot attach filter to socket!\n");
}
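
/* Removes a previously attached filter from the socket via SO_DETACH_FILTER. */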
void bpf_detach_from_sock(int sock)
{
	int ret, empty = 0;

	ret = setsockopt(sock, SOL_SOCKET, SO_DETACH_FILTER, &empty,
			 sizeof(empty));
	if (ret < 0)
		panic("Cannot detach filter from socket!\n");
}
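
/* Best-effort: turns on the kernel BPF JIT compiler through
 * /proc/sys/net/core/bpf_jit_enable; silently returns if the proc file
 * is absent or not writable. */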
void enable_kernel_bpf_jit_compiler(void)
{
	int fd;
	ssize_t ret;
	char *file = "/proc/sys/net/core/bpf_jit_enable";

	fd = open(file, O_WRONLY);
	if (fd < 0)
		return;

	ret = write(fd, "1", strlen("1"));
	if (ret > 0)
		printf("BPF JIT\n");

	close(fd);
}
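
/* Sanity checks a BPF program: memory slots and jump targets must stay in
 * bounds, constant division by zero is rejected, and the last instruction
 * must be a return. Returns 0 if the program is invalid, non-zero if it
 * looks usable. */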
int bpf_validate(const struct sock_fprog *bpf)
{
	uint32_t i, from;
	const struct sock_filter *p;

	if (!bpf)
		return 0;
	if (bpf->len < 1)
		return 0;

	for (i = 0; i < bpf->len; ++i) {
		p = &bpf->filter[i];
		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses.
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_IMM:
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
				/*
				 * There's no maximum packet data size
				 * in userland. The runtime packet length
				 * check suffices.
				 */
				break;
			case BPF_MEM:
				if (p->k >= BPF_MEMWORDS)
					return 0;
				break;
			case BPF_LEN:
				break;
			default:
				return 0;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= BPF_MEMWORDS)
				return 0;
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
				/*
				 * Check for constant division by 0.
				 * (BPF_SRC, not BPF_RVAL, selects the
				 * operand source of an ALU insn.)
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't. Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 *
			 * For userland, we don't know that the from
			 * or len are <= BPF_MAXINSNS, but we know that
			 * from <= len, and, except on a 64-bit system,
			 * it's unlikely that len, if it truly reflects
			 * the size of the program we've been handed,
			 * will be anywhere near the maximum size of
			 * a u_int. We also don't check for backward
			 * branches, as we currently support them in
			 * userland for the protochain operation.
			 */
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k >= bpf->len)
					return 0;
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= bpf->len ||
				    from + p->jf >= bpf->len)
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			break;
		default:
			return 0;
		}
	}

	return BPF_CLASS(bpf->filter[bpf->len - 1].code) == BPF_RET;
}
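
/* Userspace BPF interpreter, derived from the classic BSD bpf_filter().
 * Runs 'fcode' against the packet and returns the filter verdict:
 * 0 means drop, anything else is the snapshot length to keep.
 * A NULL or empty program accepts the whole packet (0xFFFFFFFF). */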
uint32_t bpf_run_filter(const struct sock_fprog *fcode, uint8_t *packet,
			size_t plen)
{
	/* XXX: caplen == len */
	uint32_t A, X;
	uint32_t k;
	struct sock_filter *bpf;
	int32_t mem[BPF_MEMWORDS];

	if (fcode == NULL || fcode->filter == NULL || fcode->len == 0)
		return 0xFFFFFFFF;

	A = 0;
	X = 0;

	bpf = fcode->filter;
	--bpf;

	while (1) {
		++bpf;

		switch (bpf->code) {
		default:
			return 0;
		case BPF_RET | BPF_K:
			return (uint32_t) bpf->k;
		case BPF_RET | BPF_A:
			return (uint32_t) A;
		case BPF_LD | BPF_W | BPF_ABS:
			k = bpf->k;
			if (k + sizeof(int32_t) > plen)
				return 0;
			A = EXTRACT_LONG(&packet[k]);
			continue;
		case BPF_LD | BPF_H | BPF_ABS:
			k = bpf->k;
			if (k + sizeof(short) > plen)
				return 0;
			A = EXTRACT_SHORT(&packet[k]);
			continue;
		case BPF_LD | BPF_B | BPF_ABS:
			k = bpf->k;
			if (k >= plen)
				return 0;
			A = packet[k];
			continue;
		case BPF_LD | BPF_W | BPF_LEN:
			A = plen;
			continue;
		case BPF_LDX | BPF_W | BPF_LEN:
			X = plen;
			continue;
		case BPF_LD | BPF_W | BPF_IND:
			k = X + bpf->k;
			if (k + sizeof(int32_t) > plen)
				return 0;
			A = EXTRACT_LONG(&packet[k]);
			continue;
		case BPF_LD | BPF_H | BPF_IND:
			k = X + bpf->k;
			if (k + sizeof(short) > plen)
				return 0;
			A = EXTRACT_SHORT(&packet[k]);
			continue;
		case BPF_LD | BPF_B | BPF_IND:
			k = X + bpf->k;
			if (k >= plen)
				return 0;
			A = packet[k];
			continue;
		case BPF_LDX | BPF_MSH | BPF_B:
			k = bpf->k;
			if (k >= plen)
				return 0;
			X = (packet[bpf->k] & 0xf) << 2;
			continue;
		case BPF_LD | BPF_IMM:
			A = bpf->k;
			continue;
		case BPF_LDX | BPF_IMM:
			X = bpf->k;
			continue;
		case BPF_LD | BPF_MEM:
			A = mem[bpf->k];
			continue;
		case BPF_LDX | BPF_MEM:
			X = mem[bpf->k];
			continue;
		case BPF_ST:
			mem[bpf->k] = A;
			continue;
		case BPF_STX:
			mem[bpf->k] = X;
			continue;
		case BPF_JMP | BPF_JA:
			bpf += bpf->k;
			continue;
		case BPF_JMP | BPF_JGT | BPF_K:
			bpf += (A > bpf->k) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP | BPF_JGE | BPF_K:
			bpf += (A >= bpf->k) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP | BPF_JEQ | BPF_K:
			bpf += (A == bpf->k) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP | BPF_JSET | BPF_K:
			bpf += (A & bpf->k) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP | BPF_JGT | BPF_X:
			bpf += (A > X) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP | BPF_JGE | BPF_X:
			bpf += (A >= X) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP | BPF_JEQ | BPF_X:
			bpf += (A == X) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP | BPF_JSET | BPF_X:
			bpf += (A & X) ? bpf->jt : bpf->jf;
			continue;
		case BPF_ALU | BPF_ADD | BPF_X:
			A += X;
			continue;
		case BPF_ALU | BPF_SUB | BPF_X:
			A -= X;
			continue;
		case BPF_ALU | BPF_MUL | BPF_X:
			A *= X;
			continue;
		case BPF_ALU | BPF_DIV | BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_ALU | BPF_AND | BPF_X:
			A &= X;
			continue;
		case BPF_ALU | BPF_OR | BPF_X:
			A |= X;
			continue;
		case BPF_ALU | BPF_LSH | BPF_X:
			A <<= X;
			continue;
		case BPF_ALU | BPF_RSH | BPF_X:
			A >>= X;
			continue;
		case BPF_ALU | BPF_ADD | BPF_K:
			A += bpf->k;
			continue;
		case BPF_ALU | BPF_SUB | BPF_K:
			A -= bpf->k;
			continue;
		case BPF_ALU | BPF_MUL | BPF_K:
			A *= bpf->k;
			continue;
		case BPF_ALU | BPF_DIV | BPF_K:
			A /= bpf->k;
			continue;
		case BPF_ALU | BPF_AND | BPF_K:
			A &= bpf->k;
			continue;
		case BPF_ALU | BPF_OR | BPF_K:
			A |= bpf->k;
			continue;
		case BPF_ALU | BPF_LSH | BPF_K:
			A <<= bpf->k;
			continue;
		case BPF_ALU | BPF_RSH | BPF_K:
			A >>= bpf->k;
			continue;
		case BPF_ALU | BPF_NEG:
			A = -A;
			continue;
		case BPF_MISC | BPF_TAX:
			X = A;
			continue;
		case BPF_MISC | BPF_TXA:
			A = X;
			continue;
		}
	}
}
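
/* Loads a BPF program from 'rulefile'. Each rule line must look like the
 * output of 'tcpdump -dd', e.g. "{ 0x28, 0, 0, 0x0000000c },"; all other
 * lines are ignored. A NULL rulefile yields a single accept-all
 * instruction. The resulting program is validated and the function
 * panics on syntax or validation errors. */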
void bpf_parse_rules(char *rulefile, struct sock_fprog *bpf)
{
	int ret;
	char buff[256];
	unsigned int code, jt, jf, k;
	struct sock_filter sf_single = { 0x06, 0, 0, 0xFFFFFFFF };
	FILE *fp;

	if (rulefile == NULL) {
		bpf->len = 1;
		bpf->filter = xmalloc(sizeof(sf_single));
		fmemcpy(&bpf->filter[0], &sf_single, sizeof(sf_single));
		return;
	}

	fp = fopen(rulefile, "r");
	if (!fp)
		panic("Cannot read BPF rule file!\n");

	fmemset(buff, 0, sizeof(buff));
	while (fgets(buff, sizeof(buff), fp) != NULL) {
		buff[sizeof(buff) - 1] = 0;
		if (buff[0] != '{') {
			fmemset(buff, 0, sizeof(buff));
			continue;
		}

		/* Scan into full-width temporaries first; sock_filter's
		 * jt/jf are only 8 bit wide, so writing %u straight into
		 * them would clobber the neighbouring fields. */
		ret = sscanf(buff, "{ 0x%x, %u, %u, 0x%08x },",
			     &code, &jt, &jf, &k);
		if (ret != 4)
			panic("BPF syntax error!\n");

		fmemset(&sf_single, 0, sizeof(sf_single));
		sf_single.code = (uint16_t) code;
		sf_single.jt = (uint8_t) jt;
		sf_single.jf = (uint8_t) jf;
		sf_single.k = (uint32_t) k;

		bpf->len++;
		bpf->filter = xrealloc(bpf->filter, 1,
				       bpf->len * sizeof(sf_single));
		fmemcpy(&bpf->filter[bpf->len - 1], &sf_single,
			sizeof(sf_single));

		fmemset(buff, 0, sizeof(buff));
	}

	fclose(fp);
	if (bpf_validate(bpf) == 0)
		panic("This is not a valid BPF program!\n");
}