[netsniff-ng.git] / src/bpf.c
/*
 * netsniff-ng - the packet sniffing beast
 * By Daniel Borkmann <daniel@netsniff-ng.org>
 * Copyright 2009, 2010 Daniel Borkmann.
 * Copyright 2009, 2010 Emmanuel Roullit.
 * Copyright 1990-1996 The Regents of the University of
 * California. All rights reserved. (3-clause BSD license)
 * Subject to the GPL, version 2.
 */

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#include "bpf.h"
#include "xmalloc.h"
#include "xutils.h"
#include "die.h"

/* This is a bug in libpcap, they actually use 'unsigned long' instead
 * of short! */
#define EXTRACT_SHORT(packet) \
        ((unsigned short) ntohs(*(unsigned short *) packet))
#define EXTRACT_LONG(packet) \
        (ntohl(*(unsigned long *) packet))

#ifndef BPF_MEMWORDS
# define BPF_MEMWORDS 16
#endif
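
/* Shorthand opcode encodings: each combines the BPF instruction class with
 * its size, jump condition or ALU operation bits, so they can double as
 * indices into the mnemonic table below. */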
#define BPF_LD_B        (BPF_LD | BPF_B)
#define BPF_LD_H        (BPF_LD | BPF_H)
#define BPF_LD_W        (BPF_LD | BPF_W)
#define BPF_LDX_B       (BPF_LDX | BPF_B)
#define BPF_LDX_W       (BPF_LDX | BPF_W)
#define BPF_JMP_JA      (BPF_JMP | BPF_JA)
#define BPF_JMP_JEQ     (BPF_JMP | BPF_JEQ)
#define BPF_JMP_JGT     (BPF_JMP | BPF_JGT)
#define BPF_JMP_JGE     (BPF_JMP | BPF_JGE)
#define BPF_JMP_JSET    (BPF_JMP | BPF_JSET)
#define BPF_ALU_ADD     (BPF_ALU | BPF_ADD)
#define BPF_ALU_SUB     (BPF_ALU | BPF_SUB)
#define BPF_ALU_MUL     (BPF_ALU | BPF_MUL)
#define BPF_ALU_DIV     (BPF_ALU | BPF_DIV)
#define BPF_ALU_MOD     (BPF_ALU | BPF_MOD)
#define BPF_ALU_NEG     (BPF_ALU | BPF_NEG)
#define BPF_ALU_AND     (BPF_ALU | BPF_AND)
#define BPF_ALU_OR      (BPF_ALU | BPF_OR)
#define BPF_ALU_XOR     (BPF_ALU | BPF_XOR)
#define BPF_ALU_LSH     (BPF_ALU | BPF_LSH)
#define BPF_ALU_RSH     (BPF_ALU | BPF_RSH)
#define BPF_MISC_TAX    (BPF_MISC | BPF_TAX)
#define BPF_MISC_TXA    (BPF_MISC | BPF_TXA)
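
/* Mnemonic table for the disassembler, indexed by the opcode shorthands
 * defined above; unknown opcodes leave NULL holes in the array. */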
static const char *op_table[] = {
        [BPF_LD_B]      = "ldb",
        [BPF_LD_H]      = "ldh",
        [BPF_LD_W]      = "ld",
        [BPF_LDX]       = "ldx",
        [BPF_LDX_B]     = "ldxb",
        [BPF_ST]        = "st",
        [BPF_STX]       = "stx",
        [BPF_JMP_JA]    = "ja",
        [BPF_JMP_JEQ]   = "jeq",
        [BPF_JMP_JGT]   = "jgt",
        [BPF_JMP_JGE]   = "jge",
        [BPF_JMP_JSET]  = "jset",
        [BPF_ALU_ADD]   = "add",
        [BPF_ALU_SUB]   = "sub",
        [BPF_ALU_MUL]   = "mul",
        [BPF_ALU_DIV]   = "div",
        [BPF_ALU_MOD]   = "mod",
        [BPF_ALU_NEG]   = "neg",
        [BPF_ALU_AND]   = "and",
        [BPF_ALU_OR]    = "or",
        [BPF_ALU_XOR]   = "xor",
        [BPF_ALU_LSH]   = "lsh",
        [BPF_ALU_RSH]   = "rsh",
        [BPF_RET]       = "ret",
        [BPF_MISC_TAX]  = "tax",
        [BPF_MISC_TXA]  = "txa",
};
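
/* Print every known mnemonic from op_table, one per line, skipping the
 * NULL holes of the sparse array. */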
void bpf_dump_op_table(void)
{
        int i;

        for (i = 0; i < array_size(op_table); ++i) {
                if (op_table[i])
                        printf("%s\n", op_table[i]);
        }
}
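
/* Translate Linux ancillary load offsets (SKF_AD_OFF + SKF_AD_*) used in
 * absolute loads into symbolic operand strings for the disassembler; plain
 * packet offsets fall back to the "[%d]" format. */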
static const char *bpf_dump_linux_k(uint32_t k)
{
        switch (k) {
        default:
                return "[%d]";
        /* Linux specific arguments */
        case (SKF_AD_OFF + SKF_AD_PROTOCOL):
                return "#proto";
        case (SKF_AD_OFF + SKF_AD_PKTTYPE):
                return "#type";
        case (SKF_AD_OFF + SKF_AD_IFINDEX):
                return "#ifidx";
        case (SKF_AD_OFF + SKF_AD_NLATTR):
                return "#nla";
        case (SKF_AD_OFF + SKF_AD_NLATTR_NEST):
                return "#nlan";
        case (SKF_AD_OFF + SKF_AD_MARK):
                return "#mark";
        case (SKF_AD_OFF + SKF_AD_QUEUE):
                return "#queue";
        case (SKF_AD_OFF + SKF_AD_HATYPE):
                return "#hatype";
        case (SKF_AD_OFF + SKF_AD_RXHASH):
                return "#rxhash";
        case (SKF_AD_OFF + SKF_AD_CPU):
                return "#cpu";
        }
}
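
/* Disassemble the instruction at position n into a human-readable line.
 * The result lives in a static buffer, so the caller must consume it
 * before the next call; the function is not reentrant. */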
static char *bpf_dump(const struct sock_filter bpf, int n)
{
        int v;
        const char *fmt, *op;
        static char image[256];
        char operand[64];

        v = bpf.k;
        switch (bpf.code) {
        default:
                op = "unimp";
                fmt = "0x%x";
                v = bpf.code;
                break;
        case BPF_RET | BPF_K:
                op = op_table[BPF_RET];
                fmt = "#0x%x";
                break;
        case BPF_RET | BPF_A:
                op = op_table[BPF_RET];
                fmt = "";
                break;
        case BPF_LD_W | BPF_ABS:
                op = op_table[BPF_LD_W];
                fmt = bpf_dump_linux_k(bpf.k);
                break;
        case BPF_LD_H | BPF_ABS:
                op = op_table[BPF_LD_H];
                fmt = bpf_dump_linux_k(bpf.k);
                break;
        case BPF_LD_B | BPF_ABS:
                op = op_table[BPF_LD_B];
                fmt = bpf_dump_linux_k(bpf.k);
                break;
        case BPF_LD_W | BPF_LEN:
                op = op_table[BPF_LD_W];
                fmt = "#len";
                break;
        case BPF_LD_W | BPF_IND:
                op = op_table[BPF_LD_W];
                fmt = "[x + %d]";
                break;
        case BPF_LD_H | BPF_IND:
                op = op_table[BPF_LD_H];
                fmt = "[x + %d]";
                break;
        case BPF_LD_B | BPF_IND:
                op = op_table[BPF_LD_B];
                fmt = "[x + %d]";
                break;
        case BPF_LD | BPF_IMM:
                op = op_table[BPF_LD_W];
                fmt = "#0x%x";
                break;
        case BPF_LDX | BPF_IMM:
                op = op_table[BPF_LDX];
                fmt = "#0x%x";
                break;
        case BPF_LDX_B | BPF_MSH:
                op = op_table[BPF_LDX_B];
                fmt = "4*([%d]&0xf)";
                break;
        case BPF_LD | BPF_MEM:
                op = op_table[BPF_LD_W];
                fmt = "M[%d]";
                break;
        case BPF_LDX | BPF_MEM:
                op = op_table[BPF_LDX];
                fmt = "M[%d]";
                break;
        case BPF_ST:
                op = op_table[BPF_ST];
                fmt = "M[%d]";
                break;
        case BPF_STX:
                op = op_table[BPF_STX];
                fmt = "M[%d]";
                break;
        case BPF_JMP_JA:
                op = op_table[BPF_JMP_JA];
                fmt = "%d";
                v = n + 1 + bpf.k;
                break;
        case BPF_JMP_JGT | BPF_K:
                op = op_table[BPF_JMP_JGT];
                fmt = "#0x%x";
                break;
        case BPF_JMP_JGE | BPF_K:
                op = op_table[BPF_JMP_JGE];
                fmt = "#0x%x";
                break;
        case BPF_JMP_JEQ | BPF_K:
                op = op_table[BPF_JMP_JEQ];
                fmt = "#0x%x";
                break;
        case BPF_JMP_JSET | BPF_K:
                op = op_table[BPF_JMP_JSET];
                fmt = "#0x%x";
                break;
        case BPF_JMP_JGT | BPF_X:
                op = op_table[BPF_JMP_JGT];
                fmt = "x";
                break;
        case BPF_JMP_JGE | BPF_X:
                op = op_table[BPF_JMP_JGE];
                fmt = "x";
                break;
        case BPF_JMP_JEQ | BPF_X:
                op = op_table[BPF_JMP_JEQ];
                fmt = "x";
                break;
        case BPF_JMP_JSET | BPF_X:
                op = op_table[BPF_JMP_JSET];
                fmt = "x";
                break;
        case BPF_ALU_ADD | BPF_X:
                op = op_table[BPF_ALU_ADD];
                fmt = "x";
                break;
        case BPF_ALU_SUB | BPF_X:
                op = op_table[BPF_ALU_SUB];
                fmt = "x";
                break;
        case BPF_ALU_MUL | BPF_X:
                op = op_table[BPF_ALU_MUL];
                fmt = "x";
                break;
        case BPF_ALU_DIV | BPF_X:
                op = op_table[BPF_ALU_DIV];
                fmt = "x";
                break;
        case BPF_ALU_MOD | BPF_X:
                op = op_table[BPF_ALU_MOD];
                fmt = "x";
                break;
        case BPF_ALU_AND | BPF_X:
                op = op_table[BPF_ALU_AND];
                fmt = "x";
                break;
        case BPF_ALU_OR | BPF_X:
                op = op_table[BPF_ALU_OR];
                fmt = "x";
                break;
        case BPF_ALU_XOR | BPF_X:
                op = op_table[BPF_ALU_XOR];
                fmt = "x";
                break;
        case BPF_ALU_LSH | BPF_X:
                op = op_table[BPF_ALU_LSH];
                fmt = "x";
                break;
        case BPF_ALU_RSH | BPF_X:
                op = op_table[BPF_ALU_RSH];
                fmt = "x";
                break;
        case BPF_ALU_ADD | BPF_K:
                op = op_table[BPF_ALU_ADD];
                fmt = "#%d";
                break;
        case BPF_ALU_SUB | BPF_K:
                op = op_table[BPF_ALU_SUB];
                fmt = "#%d";
                break;
        case BPF_ALU_MUL | BPF_K:
                op = op_table[BPF_ALU_MUL];
                fmt = "#%d";
                break;
        case BPF_ALU_DIV | BPF_K:
                op = op_table[BPF_ALU_DIV];
                fmt = "#%d";
                break;
        case BPF_ALU_MOD | BPF_K:
                op = op_table[BPF_ALU_MOD];
                fmt = "#%d";
                break;
        case BPF_ALU_AND | BPF_K:
                op = op_table[BPF_ALU_AND];
                fmt = "#0x%x";
                break;
        case BPF_ALU_OR | BPF_K:
                op = op_table[BPF_ALU_OR];
                fmt = "#0x%x";
                break;
        case BPF_ALU_XOR | BPF_K:
                op = op_table[BPF_ALU_XOR];
                fmt = "#0x%x";
                break;
        case BPF_ALU_LSH | BPF_K:
                op = op_table[BPF_ALU_LSH];
                fmt = "#%d";
                break;
        case BPF_ALU_RSH | BPF_K:
                op = op_table[BPF_ALU_RSH];
                fmt = "#%d";
                break;
        case BPF_ALU_NEG:
                op = op_table[BPF_ALU_NEG];
                fmt = "";
                break;
        case BPF_MISC_TAX:
                op = op_table[BPF_MISC_TAX];
                fmt = "";
                break;
        case BPF_MISC_TXA:
                op = op_table[BPF_MISC_TXA];
                fmt = "";
                break;
        }

        slprintf(operand, sizeof(operand), fmt, v);
        slprintf(image, sizeof(image),
                 (BPF_CLASS(bpf.code) == BPF_JMP &&
                  BPF_OP(bpf.code) != BPF_JA) ?
                 " L%d: %s %s, L%d, L%d" : " L%d: %s %s",
                 n, op, operand, n + 1 + bpf.jt, n + 1 + bpf.jf);

        return image;
}
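
/* Disassemble and print a complete filter program, one instruction per line. */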
void bpf_dump_all(struct sock_fprog *bpf)
{
        int i;

        for (i = 0; i < bpf->len; ++i)
                printf("%s\n", bpf_dump(bpf->filter[i], i));
}
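
/* Attach a filter program to a socket via SO_ATTACH_FILTER. Attaching is
 * skipped when the program starts with the accept-all instruction
 * { ret #0xffffffff } (as built by bpf_parse_rules() without a rule file),
 * since it would not filter anything anyway. */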
void bpf_attach_to_sock(int sock, struct sock_fprog *bpf)
{
        int ret;

        if (bpf->filter[0].code == BPF_RET &&
            bpf->filter[0].k == 0xFFFFFFFF)
                return;

        ret = setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER,
                         bpf, sizeof(*bpf));
        if (ret < 0)
                panic("Cannot attach filter to socket!\n");
}
void bpf_detach_from_sock(int sock)
{
        int ret, empty = 0;

        ret = setsockopt(sock, SOL_SOCKET, SO_DETACH_FILTER,
                         &empty, sizeof(empty));
        if (ret < 0)
                panic("Cannot detach filter from socket!\n");
}
void enable_kernel_bpf_jit_compiler(void)
{
        int fd;
        ssize_t ret;
        char *file = "/proc/sys/net/core/bpf_jit_enable";

        fd = open(file, O_WRONLY);
        if (fd < 0)
                return;

        ret = write(fd, "1", strlen("1"));
        if (ret > 0)
                printf("BPF JIT\n");

        close(fd);
}
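
/* Sanity check a filter program: scratch memory loads/stores must stay
 * within BPF_MEMWORDS, constant division/modulo by zero is rejected, jump
 * targets must lie inside the program, and the last instruction must be a
 * return. Returns non-zero if the program looks valid, 0 otherwise. */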
int bpf_validate(const struct sock_fprog *bpf)
{
        uint32_t i, from;
        const struct sock_filter *p;

        if (!bpf)
                return 0;
        if (bpf->len < 1)
                return 0;

        for (i = 0; i < bpf->len; ++i) {
                p = &bpf->filter[i];

                switch (BPF_CLASS(p->code)) {
                /* Check that memory operations use valid addresses. */
                case BPF_LD:
                case BPF_LDX:
                        switch (BPF_MODE(p->code)) {
                        case BPF_IMM:
                                break;
                        case BPF_ABS:
                        case BPF_IND:
                        case BPF_MSH:
                                /* There's no maximum packet data size
                                 * in userland. The runtime packet length
                                 * check suffices.
                                 */
                                break;
                        case BPF_MEM:
                                if (p->k >= BPF_MEMWORDS)
                                        return 0;
                                break;
                        case BPF_LEN:
                                break;
                        default:
                                return 0;
                        }
                        break;
                case BPF_ST:
                case BPF_STX:
                        if (p->k >= BPF_MEMWORDS)
                                return 0;
                        break;
                case BPF_ALU:
                        switch (BPF_OP(p->code)) {
                        case BPF_ADD:
                        case BPF_SUB:
                        case BPF_MUL:
                        case BPF_OR:
                        case BPF_XOR:
                        case BPF_AND:
                        case BPF_LSH:
                        case BPF_RSH:
                        case BPF_NEG:
                                break;
                        case BPF_DIV:
                        case BPF_MOD:
                                /* Check for constant division by 0 (undefined
                                 * for div and mod).
                                 */
                                if (BPF_SRC(p->code) == BPF_K && p->k == 0)
                                        return 0;
                                break;
                        default:
                                return 0;
                        }
                        break;
                case BPF_JMP:
                        /* Check that jumps are within the code block,
                         * and that unconditional branches don't go
                         * backwards as a result of an overflow.
                         * Unconditional branches have a 32-bit offset,
                         * so they could overflow; we check to make
                         * sure they don't. Conditional branches have
                         * an 8-bit offset, and the from address is <=
                         * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
                         * is sufficiently small that adding 255 to it
                         * won't overflow.
                         *
                         * We know that len is <= BPF_MAXINSNS, and we
                         * assume that BPF_MAXINSNS is < the maximum size
                         * of a u_int, so that i + 1 doesn't overflow.
                         *
                         * For userland, we don't know that the from
                         * or len are <= BPF_MAXINSNS, but we know that
                         * from <= len, and, except on a 64-bit system,
                         * it's unlikely that len, if it truly reflects
                         * the size of the program we've been handed,
                         * will be anywhere near the maximum size of
                         * a u_int. We also don't check for backward
                         * branches, as we currently support them in
                         * userland for the protochain operation.
                         */
                        from = i + 1;

                        switch (BPF_OP(p->code)) {
                        case BPF_JA:
                                if (from + p->k >= bpf->len)
                                        return 0;
                                break;
                        case BPF_JEQ:
                        case BPF_JGT:
                        case BPF_JGE:
                        case BPF_JSET:
                                if (from + p->jt >= bpf->len ||
                                    from + p->jf >= bpf->len)
                                        return 0;
                                break;
                        default:
                                return 0;
                        }
                        break;
                case BPF_RET:
                        break;
                case BPF_MISC:
                        break;
                default:
                        return 0;
                }
        }

        return BPF_CLASS(bpf->filter[bpf->len - 1].code) == BPF_RET;
}
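
/* Userspace interpreter for a classic BPF program: runs fcode over the
 * packet and returns the filter's return value (the number of bytes to
 * keep, 0 to drop). An empty or missing program yields 0xFFFFFFFF, i.e.
 * accept the whole packet. Unknown opcodes drop the packet, and the Linux
 * ancillary loads (SKF_AD_*) are not supported here. */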
uint32_t bpf_run_filter(const struct sock_fprog *fcode, uint8_t *packet,
                        size_t plen)
{
        /* XXX: caplen == len */
        uint32_t A, X;
        uint32_t k;
        struct sock_filter *bpf;
        int32_t mem[BPF_MEMWORDS];

        if (fcode == NULL || fcode->filter == NULL || fcode->len == 0)
                return 0xFFFFFFFF;

        A = 0;
        X = 0;

        bpf = fcode->filter;
        --bpf;

        while (1) {
                ++bpf;

                switch (bpf->code) {
                default:
                        return 0;
                case BPF_RET | BPF_K:
                        return (uint32_t) bpf->k;
                case BPF_RET | BPF_A:
                        return (uint32_t) A;
                case BPF_LD_W | BPF_ABS:
                        /* No Linux extensions supported here! */
                        k = bpf->k;
                        if (k + sizeof(int32_t) > plen)
                                return 0;
                        A = EXTRACT_LONG(&packet[k]);
                        continue;
                case BPF_LD_H | BPF_ABS:
                        /* No Linux extensions supported here! */
                        k = bpf->k;
                        if (k + sizeof(short) > plen)
                                return 0;
                        A = EXTRACT_SHORT(&packet[k]);
                        continue;
                case BPF_LD_B | BPF_ABS:
                        /* No Linux extensions supported here! */
                        k = bpf->k;
                        if (k >= plen)
                                return 0;
                        A = packet[k];
                        continue;
                case BPF_LD_W | BPF_LEN:
                        A = plen;
                        continue;
                case BPF_LDX_W | BPF_LEN:
                        X = plen;
                        continue;
                case BPF_LD_W | BPF_IND:
                        k = X + bpf->k;
                        if (k + sizeof(int32_t) > plen)
                                return 0;
                        A = EXTRACT_LONG(&packet[k]);
                        continue;
                case BPF_LD_H | BPF_IND:
                        k = X + bpf->k;
                        if (k + sizeof(short) > plen)
                                return 0;
                        A = EXTRACT_SHORT(&packet[k]);
                        continue;
                case BPF_LD_B | BPF_IND:
                        k = X + bpf->k;
                        if (k >= plen)
                                return 0;
                        A = packet[k];
                        continue;
                case BPF_LDX_B | BPF_MSH:
                        k = bpf->k;
                        if (k >= plen)
                                return 0;
                        X = (packet[bpf->k] & 0xf) << 2;
                        continue;
                case BPF_LD | BPF_IMM:
                        A = bpf->k;
                        continue;
                case BPF_LDX | BPF_IMM:
                        X = bpf->k;
                        continue;
                case BPF_LD | BPF_MEM:
                        A = mem[bpf->k];
                        continue;
                case BPF_LDX | BPF_MEM:
                        X = mem[bpf->k];
                        continue;
                case BPF_ST:
                        mem[bpf->k] = A;
                        continue;
                case BPF_STX:
                        mem[bpf->k] = X;
                        continue;
                case BPF_JMP_JA:
                        bpf += bpf->k;
                        continue;
                case BPF_JMP_JGT | BPF_K:
                        bpf += (A > bpf->k) ? bpf->jt : bpf->jf;
                        continue;
                case BPF_JMP_JGE | BPF_K:
                        bpf += (A >= bpf->k) ? bpf->jt : bpf->jf;
                        continue;
                case BPF_JMP_JEQ | BPF_K:
                        bpf += (A == bpf->k) ? bpf->jt : bpf->jf;
                        continue;
                case BPF_JMP_JSET | BPF_K:
                        bpf += (A & bpf->k) ? bpf->jt : bpf->jf;
                        continue;
                case BPF_JMP_JGT | BPF_X:
                        bpf += (A > X) ? bpf->jt : bpf->jf;
                        continue;
                case BPF_JMP_JGE | BPF_X:
                        bpf += (A >= X) ? bpf->jt : bpf->jf;
                        continue;
                case BPF_JMP_JEQ | BPF_X:
                        bpf += (A == X) ? bpf->jt : bpf->jf;
                        continue;
                case BPF_JMP_JSET | BPF_X:
                        bpf += (A & X) ? bpf->jt : bpf->jf;
                        continue;
                case BPF_ALU_ADD | BPF_X:
                        A += X;
                        continue;
                case BPF_ALU_SUB | BPF_X:
                        A -= X;
                        continue;
                case BPF_ALU_MUL | BPF_X:
                        A *= X;
                        continue;
                case BPF_ALU_DIV | BPF_X:
                        if (X == 0)
                                return 0;
                        A /= X;
                        continue;
                case BPF_ALU_MOD | BPF_X:
                        if (X == 0)
                                return 0;
                        A %= X;
                        continue;
                case BPF_ALU_AND | BPF_X:
                        A &= X;
                        continue;
                case BPF_ALU_OR | BPF_X:
                        A |= X;
                        continue;
                case BPF_ALU_XOR | BPF_X:
                        A ^= X;
                        continue;
                case BPF_ALU_LSH | BPF_X:
                        A <<= X;
                        continue;
                case BPF_ALU_RSH | BPF_X:
                        A >>= X;
                        continue;
                case BPF_ALU_ADD | BPF_K:
                        A += bpf->k;
                        continue;
                case BPF_ALU_SUB | BPF_K:
                        A -= bpf->k;
                        continue;
                case BPF_ALU_MUL | BPF_K:
                        A *= bpf->k;
                        continue;
                case BPF_ALU_DIV | BPF_K:
                        A /= bpf->k;
                        continue;
                case BPF_ALU_MOD | BPF_K:
                        A %= bpf->k;
                        continue;
                case BPF_ALU_AND | BPF_K:
                        A &= bpf->k;
                        continue;
                case BPF_ALU_OR | BPF_K:
                        A |= bpf->k;
                        continue;
                case BPF_ALU_XOR | BPF_K:
                        A ^= bpf->k;
                        continue;
                case BPF_ALU_LSH | BPF_K:
                        A <<= bpf->k;
                        continue;
                case BPF_ALU_RSH | BPF_K:
                        A >>= bpf->k;
                        continue;
                case BPF_ALU_NEG:
                        A = -A;
                        continue;
                case BPF_MISC_TAX:
                        X = A;
                        continue;
                case BPF_MISC_TXA:
                        A = X;
                        continue;
                }
        }
}
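
/* Load a filter program from a rule file holding one instruction per line
 * in the form "{ 0x28, 0, 0, 0x0000000c }," (e.g. the C array fragments
 * that tcpdump -dd emits). A NULL rulefile installs a single accept-all
 * instruction instead. Lines not starting with '{' are ignored, the parsed
 * program is validated, and a syntax or validation error is fatal. */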
void bpf_parse_rules(char *rulefile, struct sock_fprog *bpf)
{
        int ret;
        char buff[256];
        struct sock_filter sf_single = { 0x06, 0, 0, 0xFFFFFFFF };
        FILE *fp;

        if (rulefile == NULL) {
                bpf->len = 1;
                bpf->filter = xmalloc(sizeof(sf_single));
                fmemcpy(&bpf->filter[0], &sf_single, sizeof(sf_single));
                return;
        }

        fp = fopen(rulefile, "r");
        if (!fp)
                panic("Cannot read BPF rule file!\n");

        fmemset(buff, 0, sizeof(buff));
        while (fgets(buff, sizeof(buff), fp) != NULL) {
                buff[sizeof(buff) - 1] = 0;

                if (buff[0] != '{') {
                        fmemset(buff, 0, sizeof(buff));
                        continue;
                }

                fmemset(&sf_single, 0, sizeof(sf_single));
                ret = sscanf(buff, "{ 0x%x, %u, %u, 0x%08x },",
                             (unsigned int *) &sf_single.code,
                             (unsigned int *) &sf_single.jt,
                             (unsigned int *) &sf_single.jf,
                             (unsigned int *) &sf_single.k);
                if (ret != 4)
                        panic("BPF syntax error!\n");

                bpf->len++;
                bpf->filter = xrealloc(bpf->filter, 1,
                                       bpf->len * sizeof(sf_single));

                fmemcpy(&bpf->filter[bpf->len - 1], &sf_single,
                        sizeof(sf_single));
                fmemset(buff, 0, sizeof(buff));
        }

        fclose(fp);

        if (bpf_validate(bpf) == 0)
                panic("This is not a valid BPF program!\n");
}