bpf: indent one space for better visibility
[netsniff-ng.git] / src / bpf.c
blobd80841367e1dfb50309e0848150a61aaae8dec50
1 /*
2 * netsniff-ng - the packet sniffing beast
3 * By Daniel Borkmann <daniel@netsniff-ng.org>
4 * Copyright 2009, 2010 Daniel Borkmann.
5 * Copyright 2009, 2010 Emmanuel Roullit.
6 * Copyright 1990-1996 The Regents of the University of
7 * California. All rights reserved. (3-clause BSD license)
8 * Subject to the GPL, version 2.
9 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#include "bpf.h"
#include "xmalloc.h"
#include "xstring.h"
#include "die.h"
/* Historically libpcap used 'unsigned long' here, which is 8 bytes on
 * LP64: the cast then reads past the 4 bytes that callers bounds-check
 * and passes an oversized value to ntohl(). Use a fixed 32-bit type. */
#define EXTRACT_SHORT(packet) \
	((unsigned short) ntohs(*(unsigned short *) packet))
#define EXTRACT_LONG(packet) \
	(ntohl(*(uint32_t *) packet))
#ifndef BPF_MEMWORDS
# define BPF_MEMWORDS 16
#endif
/*
 * Disassemble a single BPF instruction into human-readable form.
 * 'n' is the instruction's index, used to print jump target labels.
 * Returns a pointer to a static buffer that is overwritten on the
 * next call (not thread-safe).
 */
static char *bpf_dump(const struct sock_filter bpf, int n)
{
	/* Opcode -> mnemonic/operand-format lookup table. */
	static const struct {
		uint16_t code;
		const char *op;
		const char *fmt;
	} tbl[] = {
		{ BPF_RET | BPF_K,		"ret",	"#0x%x" },
		{ BPF_RET | BPF_A,		"ret",	"" },
		{ BPF_LD | BPF_W | BPF_ABS,	"ld",	"[%d]" },
		{ BPF_LD | BPF_H | BPF_ABS,	"ldh",	"[%d]" },
		{ BPF_LD | BPF_B | BPF_ABS,	"ldb",	"[%d]" },
		{ BPF_LD | BPF_W | BPF_LEN,	"ld",	"#pktlen" },
		{ BPF_LD | BPF_W | BPF_IND,	"ld",	"[x + %d]" },
		{ BPF_LD | BPF_H | BPF_IND,	"ldh",	"[x + %d]" },
		{ BPF_LD | BPF_B | BPF_IND,	"ldb",	"[x + %d]" },
		{ BPF_LD | BPF_IMM,		"ld",	"#0x%x" },
		{ BPF_LDX | BPF_IMM,		"ldx",	"#0x%x" },
		{ BPF_LDX | BPF_MSH | BPF_B,	"ldxb",	"4*([%d]&0xf)" },
		{ BPF_LD | BPF_MEM,		"ld",	"M[%d]" },
		{ BPF_LDX | BPF_MEM,		"ldx",	"M[%d]" },
		{ BPF_ST,			"st",	"M[%d]" },
		{ BPF_STX,			"stx",	"M[%d]" },
		{ BPF_JMP | BPF_JA,		"ja",	"%d" },
		{ BPF_JMP | BPF_JGT | BPF_K,	"jgt",	"#0x%x" },
		{ BPF_JMP | BPF_JGE | BPF_K,	"jge",	"#0x%x" },
		{ BPF_JMP | BPF_JEQ | BPF_K,	"jeq",	"#0x%x" },
		{ BPF_JMP | BPF_JSET | BPF_K,	"jset",	"#0x%x" },
		{ BPF_JMP | BPF_JGT | BPF_X,	"jgt",	"x" },
		{ BPF_JMP | BPF_JGE | BPF_X,	"jge",	"x" },
		{ BPF_JMP | BPF_JEQ | BPF_X,	"jeq",	"x" },
		{ BPF_JMP | BPF_JSET | BPF_X,	"jset",	"x" },
		{ BPF_ALU | BPF_ADD | BPF_X,	"add",	"x" },
		{ BPF_ALU | BPF_SUB | BPF_X,	"sub",	"x" },
		{ BPF_ALU | BPF_MUL | BPF_X,	"mul",	"x" },
		{ BPF_ALU | BPF_DIV | BPF_X,	"div",	"x" },
		{ BPF_ALU | BPF_AND | BPF_X,	"and",	"x" },
		{ BPF_ALU | BPF_OR | BPF_X,	"or",	"x" },
		{ BPF_ALU | BPF_LSH | BPF_X,	"lsh",	"x" },
		{ BPF_ALU | BPF_RSH | BPF_X,	"rsh",	"x" },
		{ BPF_ALU | BPF_ADD | BPF_K,	"add",	"#%d" },
		{ BPF_ALU | BPF_SUB | BPF_K,	"sub",	"#%d" },
		{ BPF_ALU | BPF_MUL | BPF_K,	"mul",	"#%d" },
		{ BPF_ALU | BPF_DIV | BPF_K,	"div",	"#%d" },
		{ BPF_ALU | BPF_AND | BPF_K,	"and",	"#0x%x" },
		{ BPF_ALU | BPF_OR | BPF_K,	"or",	"#0x%x" },
		{ BPF_ALU | BPF_LSH | BPF_K,	"lsh",	"#%d" },
		{ BPF_ALU | BPF_RSH | BPF_K,	"rsh",	"#%d" },
		{ BPF_ALU | BPF_NEG,		"neg",	"" },
		{ BPF_MISC | BPF_TAX,		"tax",	"" },
		{ BPF_MISC | BPF_TXA,		"txa",	"" },
	};
	static char image[256];
	char operand[64];
	/* Unknown opcodes fall through as "unimp 0x<code>". */
	const char *op = "unimp";
	const char *fmt = "0x%x";
	int v = bpf.code;
	size_t i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); ++i) {
		if (tbl[i].code == bpf.code) {
			op = tbl[i].op;
			fmt = tbl[i].fmt;
			/* 'ja' prints its absolute target, the rest
			 * print the immediate operand. */
			v = (bpf.code == (BPF_JMP | BPF_JA)) ?
			    (int) (n + 1 + bpf.k) : (int) bpf.k;
			break;
		}
	}

	slprintf(operand, sizeof(operand), fmt, v);
	slprintf(image, sizeof(image),
		 (BPF_CLASS(bpf.code) == BPF_JMP &&
		  BPF_OP(bpf.code) != BPF_JA) ?
		 " L%d: %s %s, L%d, L%d" : " L%d: %s %s",
		 n, op, operand, n + 1 + bpf.jt, n + 1 + bpf.jf);

	return image;
}
/* Print every instruction of 'bpf', one disassembled line per insn. */
void bpf_dump_all(struct sock_fprog *bpf)
{
	int i = 0;

	while (i < bpf->len) {
		printf("%s\n", bpf_dump(bpf->filter[i], i));
		i++;
	}
}
243 void bpf_attach_to_sock(int sock, struct sock_fprog *bpf)
245 if (bpf->len == 1)
246 if (bpf->filter[0].code == BPF_RET &&
247 bpf->filter[0].k == 0xFFFFFFFF)
248 return;
250 int ret = setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER, bpf,
251 sizeof(*bpf));
252 if (ret < 0)
253 panic("Cannot attach filter to socket!\n");
256 void bpf_detach_from_sock(int sock)
258 int ret, empty = 0;
260 ret = setsockopt(sock, SOL_SOCKET, SO_DETACH_FILTER, &empty,
261 sizeof(empty));
262 if (ret < 0)
263 panic("Cannot detach filter from socket!\n");
/*
 * Best-effort: turn on the kernel's BPF JIT compiler via procfs.
 * Silently returns if the knob does not exist (older kernels) or is
 * not writable (insufficient privileges); prints a notice on success.
 */
void enable_kernel_bpf_jit_compiler(void)
{
	int fd;
	ssize_t ret;
	/* String literals are immutable; binding one to a plain
	 * 'char *' invites UB if anything ever writes through it. */
	static const char *file = "/proc/sys/net/core/bpf_jit_enable";

	fd = open(file, O_WRONLY);
	if (fd < 0)
		return;

	ret = write(fd, "1", strlen("1"));
	if (ret > 0)
		printf("BPF JIT\n");

	close(fd);
}
/*
 * Sanity-check a classic BPF program before running or attaching it.
 * Returns 1 if the program is well-formed, 0 otherwise. Rejects NULL
 * or empty programs, out-of-range scratch memory accesses, constant
 * division by zero, jumps outside the code block, and unknown opcodes;
 * the last instruction must be a return.
 */
int bpf_validate(const struct sock_fprog *bpf)
{
	uint32_t i, from;
	const struct sock_filter *p;

	if (!bpf)
		return 0;
	if (bpf->len < 1)
		return 0;

	for (i = 0; i < bpf->len; ++i) {
		p = &bpf->filter[i];

		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses.
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_IMM:
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
				/*
				 * There's no maximum packet data size
				 * in userland. The runtime packet length
				 * check suffices.
				 */
				break;
			case BPF_MEM:
				if (p->k >= BPF_MEMWORDS)
					return 0;
				break;
			case BPF_LEN:
				break;
			default:
				return 0;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= BPF_MEMWORDS)
				return 0;
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
				/*
				 * Check for constant division by 0.
				 * BPF_SRC (mask 0x08) selects K vs X for
				 * ALU ops; the previous BPF_RVAL (mask
				 * 0x18) never matched BPF_K here, so the
				 * check was dead code.
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block.
			 * Unconditional branches carry a full 32-bit
			 * offset, so 'from + p->k' could wrap around;
			 * we therefore compare without performing the
			 * addition ('from <= len' always holds here).
			 * Conditional branches have an 8-bit offset and
			 * 'len' fits in an unsigned short, so their sums
			 * cannot overflow. We do not reject backward
			 * branches, as userland supports them for the
			 * protochain operation.
			 */
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from >= bpf->len ||
				    p->k >= (uint32_t) bpf->len - from)
					return 0;
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= bpf->len ||
				    from + p->jf >= bpf->len)
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			break;
		default:
			return 0;
		}
	}

	return BPF_CLASS(bpf->filter[bpf->len - 1].code) == BPF_RET;
}
/*
 * Safe unaligned big-endian loads. The file's EXTRACT_LONG macro casts
 * to 'unsigned long', which is 8 bytes on LP64 and therefore reads past
 * the 4 bytes that were bounds-checked; memcpy also avoids unaligned
 * access and strict-aliasing violations.
 */
static inline uint32_t bpf_load_word(const uint8_t *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return ntohl(v);
}

static inline uint16_t bpf_load_half(const uint8_t *p)
{
	uint16_t v;

	memcpy(&v, p, sizeof(v));
	return ntohs(v);
}

/*
 * Interpret the classic BPF program 'fcode' over 'packet' of length
 * 'plen' (caplen == len is assumed). Returns the program's verdict:
 * the number of bytes to accept, 0 for a drop. A NULL or empty program
 * accepts everything (0xFFFFFFFF). Out-of-bounds loads, division by
 * zero and unknown opcodes terminate with 0.
 */
uint32_t bpf_run_filter(const struct sock_fprog *fcode, uint8_t *packet,
			size_t plen)
{
	uint32_t A, X;
	uint32_t k;
	struct sock_filter *bpf;
	/* Zero-init scratch memory so M[x] loads before any store read
	 * a defined value instead of garbage. */
	int32_t mem[BPF_MEMWORDS] = { 0 };

	if (fcode == NULL || fcode->filter == NULL || fcode->len == 0)
		return 0xFFFFFFFF;

	A = 0;
	X = 0;

	/* Pre-decrement so the loop header can unconditionally advance. */
	bpf = fcode->filter;
	--bpf;

	while (1) {
		++bpf;

		switch (bpf->code) {
		default:
			return 0;
		case BPF_RET | BPF_K:
			return (uint32_t) bpf->k;
		case BPF_RET | BPF_A:
			return (uint32_t) A;
		case BPF_LD | BPF_W | BPF_ABS:
			/* Bounds checks are written as 'k > plen - size'
			 * so that a huge k cannot wrap the addition. */
			k = bpf->k;
			if (plen < sizeof(uint32_t) ||
			    k > plen - sizeof(uint32_t))
				return 0;
			A = bpf_load_word(&packet[k]);
			continue;
		case BPF_LD | BPF_H | BPF_ABS:
			k = bpf->k;
			if (plen < sizeof(uint16_t) ||
			    k > plen - sizeof(uint16_t))
				return 0;
			A = bpf_load_half(&packet[k]);
			continue;
		case BPF_LD | BPF_B | BPF_ABS:
			k = bpf->k;
			if (k >= plen)
				return 0;
			A = packet[k];
			continue;
		case BPF_LD | BPF_W | BPF_LEN:
			A = plen;
			continue;
		case BPF_LDX | BPF_W | BPF_LEN:
			X = plen;
			continue;
		case BPF_LD | BPF_W | BPF_IND:
			k = X + bpf->k;
			if (plen < sizeof(uint32_t) ||
			    k > plen - sizeof(uint32_t))
				return 0;
			A = bpf_load_word(&packet[k]);
			continue;
		case BPF_LD | BPF_H | BPF_IND:
			k = X + bpf->k;
			if (plen < sizeof(uint16_t) ||
			    k > plen - sizeof(uint16_t))
				return 0;
			A = bpf_load_half(&packet[k]);
			continue;
		case BPF_LD | BPF_B | BPF_IND:
			k = X + bpf->k;
			if (k >= plen)
				return 0;
			A = packet[k];
			continue;
		case BPF_LDX | BPF_MSH | BPF_B:
			/* X = 4 * (low nibble of packet[k]): IP header
			 * length extraction idiom. */
			k = bpf->k;
			if (k >= plen)
				return 0;
			X = (packet[k] & 0xf) << 2;
			continue;
		case BPF_LD | BPF_IMM:
			A = bpf->k;
			continue;
		case BPF_LDX | BPF_IMM:
			X = bpf->k;
			continue;
		case BPF_LD | BPF_MEM:
			A = mem[bpf->k];
			continue;
		case BPF_LDX | BPF_MEM:
			X = mem[bpf->k];
			continue;
		case BPF_ST:
			mem[bpf->k] = A;
			continue;
		case BPF_STX:
			mem[bpf->k] = X;
			continue;
		case BPF_JMP | BPF_JA:
			bpf += bpf->k;
			continue;
		case BPF_JMP | BPF_JGT | BPF_K:
			bpf += (A > bpf->k) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP | BPF_JGE | BPF_K:
			bpf += (A >= bpf->k) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP | BPF_JEQ | BPF_K:
			bpf += (A == bpf->k) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP | BPF_JSET | BPF_K:
			bpf += (A & bpf->k) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP | BPF_JGT | BPF_X:
			bpf += (A > X) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP | BPF_JGE | BPF_X:
			bpf += (A >= X) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP | BPF_JEQ | BPF_X:
			bpf += (A == X) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP | BPF_JSET | BPF_X:
			bpf += (A & X) ? bpf->jt : bpf->jf;
			continue;
		case BPF_ALU | BPF_ADD | BPF_X:
			A += X;
			continue;
		case BPF_ALU | BPF_SUB | BPF_X:
			A -= X;
			continue;
		case BPF_ALU | BPF_MUL | BPF_X:
			A *= X;
			continue;
		case BPF_ALU | BPF_DIV | BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_ALU | BPF_AND | BPF_X:
			A &= X;
			continue;
		case BPF_ALU | BPF_OR | BPF_X:
			A |= X;
			continue;
		case BPF_ALU | BPF_LSH | BPF_X:
			A <<= X;
			continue;
		case BPF_ALU | BPF_RSH | BPF_X:
			A >>= X;
			continue;
		case BPF_ALU | BPF_ADD | BPF_K:
			A += bpf->k;
			continue;
		case BPF_ALU | BPF_SUB | BPF_K:
			A -= bpf->k;
			continue;
		case BPF_ALU | BPF_MUL | BPF_K:
			A *= bpf->k;
			continue;
		case BPF_ALU | BPF_DIV | BPF_K:
			/* A constant 0 divisor would be UB if the program
			 * was never validated; mirror the X-register case
			 * and fail the match instead. */
			if (bpf->k == 0)
				return 0;
			A /= bpf->k;
			continue;
		case BPF_ALU | BPF_AND | BPF_K:
			A &= bpf->k;
			continue;
		case BPF_ALU | BPF_OR | BPF_K:
			A |= bpf->k;
			continue;
		case BPF_ALU | BPF_LSH | BPF_K:
			A <<= bpf->k;
			continue;
		case BPF_ALU | BPF_RSH | BPF_K:
			A >>= bpf->k;
			continue;
		case BPF_ALU | BPF_NEG:
			A = -A;
			continue;
		case BPF_MISC | BPF_TAX:
			X = A;
			continue;
		case BPF_MISC | BPF_TXA:
			A = X;
			continue;
		}
	}
}
/*
 * Build a filter program from 'rulefile', which contains one tcpdump
 * '-dd' style line per instruction ("{ 0x.., j, j, 0x.. },"); other
 * lines are ignored. A NULL rulefile yields the accept-all program.
 * Panics on read, syntax or validation errors.
 * NOTE(review): assumes the caller hands in a zeroed *bpf (len == 0,
 * filter == NULL) since we realloc/append below — confirm at call sites.
 */
void bpf_parse_rules(char *rulefile, struct sock_fprog *bpf)
{
	int ret;
	char buff[256];
	/* 0x06 == BPF_RET | BPF_K: unconditionally accept 0xFFFFFFFF bytes. */
	struct sock_filter sf_single = { 0x06, 0, 0, 0xFFFFFFFF };
	unsigned int tmp_code, tmp_jt, tmp_jf, tmp_k;
	FILE *fp;

	if (rulefile == NULL) {
		bpf->len = 1;
		bpf->filter = xmalloc(sizeof(sf_single));
		fmemcpy(&bpf->filter[0], &sf_single, sizeof(sf_single));
		return;
	}

	fp = fopen(rulefile, "r");
	if (!fp)
		panic("Cannot read BPF rule file!\n");

	fmemset(buff, 0, sizeof(buff));
	while (fgets(buff, sizeof(buff), fp) != NULL) {
		buff[sizeof(buff) - 1] = 0;
		/* Only lines starting with '{' carry an instruction. */
		if (buff[0] != '{') {
			fmemset(buff, 0, sizeof(buff));
			continue;
		}

		fmemset(&sf_single, 0, sizeof(sf_single));
		/* Scan into correctly sized temporaries: the old code
		 * cast &jt/&jf (one-byte fields) to 'unsigned int *',
		 * letting sscanf clobber the neighbouring members and
		 * store the wrong byte on big-endian hosts. */
		ret = sscanf(buff, "{ 0x%x, %u, %u, 0x%08x },",
			     &tmp_code, &tmp_jt, &tmp_jf, &tmp_k);
		if (ret != 4)
			panic("BPF syntax error!\n");
		sf_single.code = (uint16_t) tmp_code;
		sf_single.jt = (uint8_t) tmp_jt;
		sf_single.jf = (uint8_t) tmp_jf;
		sf_single.k = tmp_k;

		bpf->len++;
		bpf->filter = xrealloc(bpf->filter, 1,
				       bpf->len * sizeof(sf_single));
		fmemcpy(&bpf->filter[bpf->len - 1], &sf_single,
			sizeof(sf_single));

		fmemset(buff, 0, sizeof(buff));
	}

	fclose(fp);
	if (bpf_validate(bpf) == 0)
		panic("This is not a valid BPF program!\n");
}