[netsniff-ng-new.git] / bpf.c
/*
 * netsniff-ng - the packet sniffing beast
 * Copyright 2009 - 2012 Daniel Borkmann.
 * Copyright 2009, 2010 Emmanuel Roullit.
 * Copyright 1990-1996 The Regents of the University of
 * California. All rights reserved. (3-clause BSD license)
 * Subject to the GPL, version 2.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#include "bpf.h"
#include "xmalloc.h"
#include "die.h"
#include "str.h"
#include "sysctl.h"
/* Extract big-endian 16/32-bit values from the packet buffer. Fixed-width
 * types are used so that no more bytes are read than the length checks in
 * bpf_run_filter() account for.
 */
#define EXTRACT_SHORT(packet) \
		((uint16_t) ntohs(*(uint16_t *) (packet)))
#define EXTRACT_LONG(packet) \
		(ntohl(*(uint32_t *) (packet)))
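
/*
 * Illustrative example (not part of the original file): EXTRACT_SHORT()
 * pulls a big-endian 16-bit value out of the packet buffer, e.g. the
 * EtherType at offset 12 of an Ethernet header:
 *
 *	uint8_t frame[64] = { 0 };
 *
 *	frame[12] = 0x08;
 *	frame[13] = 0x00;
 *	EXTRACT_SHORT(&frame[12]);	yields 0x0800 (IPv4)
 */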
#ifndef BPF_MEMWORDS
# define BPF_MEMWORDS 16
#endif
#define BPF_LD_B	(BPF_LD | BPF_B)
#define BPF_LD_H	(BPF_LD | BPF_H)
#define BPF_LD_W	(BPF_LD | BPF_W)
#define BPF_LDX_B	(BPF_LDX | BPF_B)
#define BPF_LDX_W	(BPF_LDX | BPF_W)
#define BPF_JMP_JA	(BPF_JMP | BPF_JA)
#define BPF_JMP_JEQ	(BPF_JMP | BPF_JEQ)
#define BPF_JMP_JGT	(BPF_JMP | BPF_JGT)
#define BPF_JMP_JGE	(BPF_JMP | BPF_JGE)
#define BPF_JMP_JSET	(BPF_JMP | BPF_JSET)
#define BPF_ALU_ADD	(BPF_ALU | BPF_ADD)
#define BPF_ALU_SUB	(BPF_ALU | BPF_SUB)
#define BPF_ALU_MUL	(BPF_ALU | BPF_MUL)
#define BPF_ALU_DIV	(BPF_ALU | BPF_DIV)
#define BPF_ALU_MOD	(BPF_ALU | BPF_MOD)
#define BPF_ALU_NEG	(BPF_ALU | BPF_NEG)
#define BPF_ALU_AND	(BPF_ALU | BPF_AND)
#define BPF_ALU_OR	(BPF_ALU | BPF_OR)
#define BPF_ALU_XOR	(BPF_ALU | BPF_XOR)
#define BPF_ALU_LSH	(BPF_ALU | BPF_LSH)
#define BPF_ALU_RSH	(BPF_ALU | BPF_RSH)
#define BPF_MISC_TAX	(BPF_MISC | BPF_TAX)
#define BPF_MISC_TXA	(BPF_MISC | BPF_TXA)
static const char *op_table[] = {
	[BPF_LD_B]	= "ldb",
	[BPF_LD_H]	= "ldh",
	[BPF_LD_W]	= "ld",
	[BPF_LDX]	= "ldx",
	[BPF_LDX_B]	= "ldxb",
	[BPF_ST]	= "st",
	[BPF_STX]	= "stx",
	[BPF_JMP_JA]	= "ja",
	[BPF_JMP_JEQ]	= "jeq",
	[BPF_JMP_JGT]	= "jgt",
	[BPF_JMP_JGE]	= "jge",
	[BPF_JMP_JSET]	= "jset",
	[BPF_ALU_ADD]	= "add",
	[BPF_ALU_SUB]	= "sub",
	[BPF_ALU_MUL]	= "mul",
	[BPF_ALU_DIV]	= "div",
	[BPF_ALU_MOD]	= "mod",
	[BPF_ALU_NEG]	= "neg",
	[BPF_ALU_AND]	= "and",
	[BPF_ALU_OR]	= "or",
	[BPF_ALU_XOR]	= "xor",
	[BPF_ALU_LSH]	= "lsh",
	[BPF_ALU_RSH]	= "rsh",
	[BPF_RET]	= "ret",
	[BPF_MISC_TAX]	= "tax",
	[BPF_MISC_TXA]	= "txa",
};
void bpf_dump_op_table(void)
{
	size_t i;

	for (i = 0; i < array_size(op_table); ++i) {
		if (op_table[i])
			printf("%s\n", op_table[i]);
	}
}
static const char *bpf_dump_linux_k(uint32_t k)
{
	switch (k) {
	default:
		return "[%d]";
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		return "proto";
	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		return "type";
	case SKF_AD_OFF + SKF_AD_IFINDEX:
		return "ifidx";
	case SKF_AD_OFF + SKF_AD_NLATTR:
		return "nla";
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
		return "nlan";
	case SKF_AD_OFF + SKF_AD_MARK:
		return "mark";
	case SKF_AD_OFF + SKF_AD_QUEUE:
		return "queue";
	case SKF_AD_OFF + SKF_AD_HATYPE:
		return "hatype";
	case SKF_AD_OFF + SKF_AD_RXHASH:
		return "rxhash";
	case SKF_AD_OFF + SKF_AD_CPU:
		return "cpu";
	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		return "vlant";
	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		return "vlanp";
	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
		return "poff";
	}
}
static char *__bpf_dump(const struct sock_filter bpf, int n)
{
	int v;
	const char *fmt, *op;
	static char image[256];
	char operand[64];

	v = bpf.k;
	switch (bpf.code) {
	default:
		op = "unimp";
		fmt = "0x%x";
		v = bpf.code;
		break;
	case BPF_RET | BPF_K:
		op = op_table[BPF_RET];
		fmt = "#0x%x";
		break;
	case BPF_RET | BPF_A:
		op = op_table[BPF_RET];
		fmt = "a";
		break;
	case BPF_RET | BPF_X:
		op = op_table[BPF_RET];
		fmt = "x";
		break;
	case BPF_LD_W | BPF_ABS:
		op = op_table[BPF_LD_W];
		fmt = bpf_dump_linux_k(bpf.k);
		break;
	case BPF_LD_H | BPF_ABS:
		op = op_table[BPF_LD_H];
		fmt = bpf_dump_linux_k(bpf.k);
		break;
	case BPF_LD_B | BPF_ABS:
		op = op_table[BPF_LD_B];
		fmt = bpf_dump_linux_k(bpf.k);
		break;
	case BPF_LD_W | BPF_LEN:
		op = op_table[BPF_LD_W];
		fmt = "#len";
		break;
	case BPF_LD_W | BPF_IND:
		op = op_table[BPF_LD_W];
		fmt = "[x + %d]";
		break;
	case BPF_LD_H | BPF_IND:
		op = op_table[BPF_LD_H];
		fmt = "[x + %d]";
		break;
	case BPF_LD_B | BPF_IND:
		op = op_table[BPF_LD_B];
		fmt = "[x + %d]";
		break;
	case BPF_LD | BPF_IMM:
		op = op_table[BPF_LD_W];
		fmt = "#0x%x";
		break;
	case BPF_LDX | BPF_IMM:
		op = op_table[BPF_LDX];
		fmt = "#0x%x";
		break;
	case BPF_LDX_B | BPF_MSH:
		op = op_table[BPF_LDX_B];
		fmt = "4*([%d]&0xf)";
		break;
	case BPF_LD | BPF_MEM:
		op = op_table[BPF_LD_W];
		fmt = "M[%d]";
		break;
	case BPF_LDX | BPF_MEM:
		op = op_table[BPF_LDX];
		fmt = "M[%d]";
		break;
	case BPF_ST:
		op = op_table[BPF_ST];
		fmt = "M[%d]";
		break;
	case BPF_STX:
		op = op_table[BPF_STX];
		fmt = "M[%d]";
		break;
	case BPF_JMP_JA:
		op = op_table[BPF_JMP_JA];
		fmt = "%d";
		v = n + 1 + bpf.k;
		break;
	case BPF_JMP_JGT | BPF_K:
		op = op_table[BPF_JMP_JGT];
		fmt = "#0x%x";
		break;
	case BPF_JMP_JGE | BPF_K:
		op = op_table[BPF_JMP_JGE];
		fmt = "#0x%x";
		break;
	case BPF_JMP_JEQ | BPF_K:
		op = op_table[BPF_JMP_JEQ];
		fmt = "#0x%x";
		break;
	case BPF_JMP_JSET | BPF_K:
		op = op_table[BPF_JMP_JSET];
		fmt = "#0x%x";
		break;
	case BPF_JMP_JGT | BPF_X:
		op = op_table[BPF_JMP_JGT];
		fmt = "x";
		break;
	case BPF_JMP_JGE | BPF_X:
		op = op_table[BPF_JMP_JGE];
		fmt = "x";
		break;
	case BPF_JMP_JEQ | BPF_X:
		op = op_table[BPF_JMP_JEQ];
		fmt = "x";
		break;
	case BPF_JMP_JSET | BPF_X:
		op = op_table[BPF_JMP_JSET];
		fmt = "x";
		break;
	case BPF_ALU_ADD | BPF_X:
		op = op_table[BPF_ALU_ADD];
		fmt = "x";
		break;
	case BPF_ALU_SUB | BPF_X:
		op = op_table[BPF_ALU_SUB];
		fmt = "x";
		break;
	case BPF_ALU_MUL | BPF_X:
		op = op_table[BPF_ALU_MUL];
		fmt = "x";
		break;
	case BPF_ALU_DIV | BPF_X:
		op = op_table[BPF_ALU_DIV];
		fmt = "x";
		break;
	case BPF_ALU_MOD | BPF_X:
		op = op_table[BPF_ALU_MOD];
		fmt = "x";
		break;
	case BPF_ALU_AND | BPF_X:
		op = op_table[BPF_ALU_AND];
		fmt = "x";
		break;
	case BPF_ALU_OR | BPF_X:
		op = op_table[BPF_ALU_OR];
		fmt = "x";
		break;
	case BPF_ALU_XOR | BPF_X:
		op = op_table[BPF_ALU_XOR];
		fmt = "x";
		break;
	case BPF_ALU_LSH | BPF_X:
		op = op_table[BPF_ALU_LSH];
		fmt = "x";
		break;
	case BPF_ALU_RSH | BPF_X:
		op = op_table[BPF_ALU_RSH];
		fmt = "x";
		break;
	case BPF_ALU_ADD | BPF_K:
		op = op_table[BPF_ALU_ADD];
		fmt = "#%d";
		break;
	case BPF_ALU_SUB | BPF_K:
		op = op_table[BPF_ALU_SUB];
		fmt = "#%d";
		break;
	case BPF_ALU_MUL | BPF_K:
		op = op_table[BPF_ALU_MUL];
		fmt = "#%d";
		break;
	case BPF_ALU_DIV | BPF_K:
		op = op_table[BPF_ALU_DIV];
		fmt = "#%d";
		break;
	case BPF_ALU_MOD | BPF_K:
		op = op_table[BPF_ALU_MOD];
		fmt = "#%d";
		break;
	case BPF_ALU_AND | BPF_K:
		op = op_table[BPF_ALU_AND];
		fmt = "#0x%x";
		break;
	case BPF_ALU_OR | BPF_K:
		op = op_table[BPF_ALU_OR];
		fmt = "#0x%x";
		break;
	case BPF_ALU_XOR | BPF_K:
		op = op_table[BPF_ALU_XOR];
		fmt = "#0x%x";
		break;
	case BPF_ALU_LSH | BPF_K:
		op = op_table[BPF_ALU_LSH];
		fmt = "#%d";
		break;
	case BPF_ALU_RSH | BPF_K:
		op = op_table[BPF_ALU_RSH];
		fmt = "#%d";
		break;
	case BPF_ALU_NEG:
		op = op_table[BPF_ALU_NEG];
		fmt = "";
		break;
	case BPF_MISC_TAX:
		op = op_table[BPF_MISC_TAX];
		fmt = "";
		break;
	case BPF_MISC_TXA:
		op = op_table[BPF_MISC_TXA];
		fmt = "";
		break;
	}

	slprintf_nocheck(operand, sizeof(operand), fmt, v);
	slprintf_nocheck(image, sizeof(image),
			 (BPF_CLASS(bpf.code) == BPF_JMP &&
			  BPF_OP(bpf.code) != BPF_JA) ?
			 " L%d: %s %s, L%d, L%d" : " L%d: %s %s",
			 n, op, operand, n + 1 + bpf.jt, n + 1 + bpf.jf);
	return image;
}
void bpf_dump_all(struct sock_fprog *bpf)
{
	int i;

	for (i = 0; i < bpf->len; ++i)
		printf("%s\n", __bpf_dump(bpf->filter[i], i));
}
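
/*
 * Illustrative sketch (not from the original file): for a small "accept
 * IPv4, drop the rest" program, bpf_dump_all() prints one line per
 * instruction in roughly this form:
 *
 *	 L0: ldh [12]
 *	 L1: jeq #0x800, L2, L3
 *	 L2: ret #0xffffffff
 *	 L3: ret #0x0
 */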
void bpf_attach_to_sock(int sock, struct sock_fprog *bpf)
{
	int ret;

	if (bpf->filter[0].code == BPF_RET &&
	    bpf->filter[0].k == 0xFFFFFFFF)
		return;

	ret = setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER,
			 bpf, sizeof(*bpf));
	if (unlikely(ret < 0))
		panic("Cannot attach filter to socket!\n");
}
void bpf_detach_from_sock(int sock)
{
	int ret, empty = 0;

	ret = setsockopt(sock, SOL_SOCKET, SO_DETACH_FILTER,
			 &empty, sizeof(empty));
	if (unlikely(ret < 0))
		panic("Cannot detach filter from socket!\n");
}
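
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * caller parses a rule file, optionally dumps it, attaches it to an
 * already opened packet socket and detaches it again on teardown. The
 * identifiers sock, rulefile and link_type are assumed to come from the
 * caller:
 *
 *	struct sock_fprog prog;
 *
 *	bpf_parse_rules(rulefile, &prog, link_type);
 *	bpf_dump_all(&prog);
 *	bpf_attach_to_sock(sock, &prog);
 *	...
 *	bpf_detach_from_sock(sock);
 */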
int enable_kernel_bpf_jit_compiler(void)
{
	return sysctl_set_int("net/core/bpf_jit_enable", 1);
}
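
/*
 * Equivalent by hand (illustrative note, not part of the original file):
 * the sysctl above corresponds to
 *
 *	echo 1 > /proc/sys/net/core/bpf_jit_enable
 *
 * so that classic BPF programs attached via SO_ATTACH_FILTER are JIT
 * compiled by the kernel on architectures that support it.
 */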
int __bpf_validate(const struct sock_fprog *bpf)
{
	uint32_t i, from;
	const struct sock_filter *p;

	if (!bpf)
		return 0;
	if (bpf->len < 1)
		return 0;

	for (i = 0; i < bpf->len; ++i) {
		p = &bpf->filter[i];
		switch (BPF_CLASS(p->code)) {
		/* Check that memory operations use valid addresses. */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_IMM:
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
				/* There's no maximum packet data size
				 * in userland. The runtime packet length
				 * check suffices.
				 */
				break;
			case BPF_MEM:
				if (p->k >= BPF_MEMWORDS)
					return 0;
				break;
			case BPF_LEN:
				break;
			default:
				return 0;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= BPF_MEMWORDS)
				return 0;
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_XOR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
			case BPF_MOD:
				/* Check for constant division by 0 (undefined
				 * for div and mod).
				 */
				if (BPF_RVAL(p->code) == BPF_K && p->k == 0)
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_JMP:
			/* Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't. Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 *
			 * For userland, we don't know that the from
			 * or len are <= BPF_MAXINSNS, but we know that
			 * from <= len, and, except on a 64-bit system,
			 * it's unlikely that len, if it truly reflects
			 * the size of the program we've been handed,
			 * will be anywhere near the maximum size of
			 * a u_int. We also don't check for backward
			 * branches, as we currently support them in
			 * userland for the protochain operation.
			 */
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k >= bpf->len)
					return 0;
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= bpf->len ||
				    from + p->jf >= bpf->len)
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			break;
		}
	}

	return BPF_CLASS(bpf->filter[bpf->len - 1].code) == BPF_RET;
}
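
/*
 * Illustrative sketch (not part of the original file): __bpf_validate()
 * rejects, among other things, programs whose branch targets fall outside
 * the instruction block or that do not end in a return:
 *
 *	struct sock_filter bad[] = {
 *		{ BPF_JMP | BPF_JA, 0, 0, 10 },    target beyond len == 2
 *		{ BPF_RET | BPF_K, 0, 0, 0 },
 *	};
 *	struct sock_fprog prog = { .len = 2, .filter = bad };
 *
 *	__bpf_validate(&prog);	returns 0
 */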
uint32_t bpf_run_filter(const struct sock_fprog *fcode, uint8_t *packet,
			size_t plen)
{
	/* XXX: caplen == len */
	uint32_t A, X;
	uint32_t k;
	struct sock_filter *bpf;
	int32_t mem[BPF_MEMWORDS] = { 0, };

	if (fcode == NULL || fcode->filter == NULL || fcode->len == 0)
		return 0xFFFFFFFF;

	A = 0;
	X = 0;

	bpf = fcode->filter;
	--bpf;

	while (1) {
		++bpf;

		switch (bpf->code) {
		default:
			return 0;
		case BPF_RET | BPF_K:
			return (uint32_t) bpf->k;
		case BPF_RET | BPF_A:
			return (uint32_t) A;
		case BPF_LD_W | BPF_ABS:
			/* No Linux extensions supported here! */
			k = bpf->k;
			if (k + sizeof(int32_t) > plen)
				return 0;
			A = EXTRACT_LONG(&packet[k]);
			continue;
		case BPF_LD_H | BPF_ABS:
			/* No Linux extensions supported here! */
			k = bpf->k;
			if (k + sizeof(short) > plen)
				return 0;
			A = EXTRACT_SHORT(&packet[k]);
			continue;
		case BPF_LD_B | BPF_ABS:
			/* No Linux extensions supported here! */
			k = bpf->k;
			if (k >= plen)
				return 0;
			A = packet[k];
			continue;
		case BPF_LD_W | BPF_LEN:
			A = plen;
			continue;
		case BPF_LDX_W | BPF_LEN:
			X = plen;
			continue;
		case BPF_LD_W | BPF_IND:
			k = X + bpf->k;
			if (k + sizeof(int32_t) > plen)
				return 0;
			A = EXTRACT_LONG(&packet[k]);
			continue;
		case BPF_LD_H | BPF_IND:
			k = X + bpf->k;
			if (k + sizeof(short) > plen)
				return 0;
			A = EXTRACT_SHORT(&packet[k]);
			continue;
		case BPF_LD_B | BPF_IND:
			k = X + bpf->k;
			if (k >= plen)
				return 0;
			A = packet[k];
			continue;
		case BPF_LDX_B | BPF_MSH:
			k = bpf->k;
			if (k >= plen)
				return 0;
			X = (packet[bpf->k] & 0xf) << 2;
			continue;
		case BPF_LD | BPF_IMM:
			A = bpf->k;
			continue;
		case BPF_LDX | BPF_IMM:
			X = bpf->k;
			continue;
		case BPF_LD | BPF_MEM:
			A = mem[bpf->k];
			continue;
		case BPF_LDX | BPF_MEM:
			X = mem[bpf->k];
			continue;
		case BPF_ST:
			mem[bpf->k] = A;
			continue;
		case BPF_STX:
			mem[bpf->k] = X;
			continue;
		case BPF_JMP_JA:
			bpf += bpf->k;
			continue;
		case BPF_JMP_JGT | BPF_K:
			bpf += (A > bpf->k) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP_JGE | BPF_K:
			bpf += (A >= bpf->k) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP_JEQ | BPF_K:
			bpf += (A == bpf->k) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP_JSET | BPF_K:
			bpf += (A & bpf->k) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP_JGT | BPF_X:
			bpf += (A > X) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP_JGE | BPF_X:
			bpf += (A >= X) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP_JEQ | BPF_X:
			bpf += (A == X) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP_JSET | BPF_X:
			bpf += (A & X) ? bpf->jt : bpf->jf;
			continue;
		case BPF_ALU_ADD | BPF_X:
			A += X;
			continue;
		case BPF_ALU_SUB | BPF_X:
			A -= X;
			continue;
		case BPF_ALU_MUL | BPF_X:
			A *= X;
			continue;
		case BPF_ALU_DIV | BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_ALU_MOD | BPF_X:
			if (X == 0)
				return 0;
			A %= X;
			continue;
		case BPF_ALU_AND | BPF_X:
			A &= X;
			continue;
		case BPF_ALU_OR | BPF_X:
			A |= X;
			continue;
		case BPF_ALU_XOR | BPF_X:
			A ^= X;
			continue;
		case BPF_ALU_LSH | BPF_X:
			A <<= X;
			continue;
		case BPF_ALU_RSH | BPF_X:
			A >>= X;
			continue;
		case BPF_ALU_ADD | BPF_K:
			A += bpf->k;
			continue;
		case BPF_ALU_SUB | BPF_K:
			A -= bpf->k;
			continue;
		case BPF_ALU_MUL | BPF_K:
			A *= bpf->k;
			continue;
		case BPF_ALU_DIV | BPF_K:
			A /= bpf->k;
			continue;
		case BPF_ALU_MOD | BPF_K:
			A %= bpf->k;
			continue;
		case BPF_ALU_AND | BPF_K:
			A &= bpf->k;
			continue;
		case BPF_ALU_OR | BPF_K:
			A |= bpf->k;
			continue;
		case BPF_ALU_XOR | BPF_K:
			A ^= bpf->k;
			continue;
		case BPF_ALU_LSH | BPF_K:
			A <<= bpf->k;
			continue;
		case BPF_ALU_RSH | BPF_K:
			A >>= bpf->k;
			continue;
		case BPF_ALU_NEG:
			A = -A;
			continue;
		case BPF_MISC_TAX:
			X = A;
			continue;
		case BPF_MISC_TXA:
			A = X;
			continue;
		}
	}
}
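
/*
 * Usage sketch (illustrative, not part of the original file): running a
 * parsed program over a captured frame in userland. A return value of 0
 * means the packet is filtered out; any other value is the snapshot
 * length to keep (the catch-all program returns 0xFFFFFFFF, i.e. keep
 * everything). handle_frame() is only a placeholder for the caller's own
 * processing:
 *
 *	uint32_t keep = bpf_run_filter(&prog, packet, packet_len);
 *
 *	if (keep == 0)
 *		return;
 *	if (keep < packet_len)
 *		packet_len = keep;
 *	handle_frame(packet, packet_len);
 */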
void bpf_parse_rules(char *rulefile, struct sock_fprog *bpf, uint32_t link_type)
{
	int ret;
	char buff[256];
	unsigned int code, jt, jf, k;
	struct sock_filter sf_single = { 0x06, 0, 0, 0xFFFFFFFF };
	FILE *fp;

	fmemset(bpf, 0, sizeof(*bpf));

	if (rulefile == NULL) {
		bpf->len = 1;
		bpf->filter = xmalloc(sizeof(sf_single));

		fmemcpy(&bpf->filter[0], &sf_single, sizeof(sf_single));
		return;
	}

	fp = fopen(rulefile, "r");
	if (!fp) {
		bpf_try_compile(rulefile, bpf, link_type);
		return;
	}

	fmemset(buff, 0, sizeof(buff));
	while (fgets(buff, sizeof(buff), fp) != NULL) {
		buff[sizeof(buff) - 1] = 0;

		if (buff[0] != '{') {
			fmemset(buff, 0, sizeof(buff));
			continue;
		}

		/* Scan into full-width temporaries; the struct sock_filter
		 * members are narrower than what %x/%u write to.
		 */
		ret = sscanf(buff, "{ 0x%x, %u, %u, 0x%08x },",
			     &code, &jt, &jf, &k);
		if (unlikely(ret != 4))
			panic("BPF syntax error!\n");

		fmemset(&sf_single, 0, sizeof(sf_single));
		sf_single.code = code;
		sf_single.jt = jt;
		sf_single.jf = jf;
		sf_single.k = k;

		bpf->len++;
		bpf->filter = xrealloc(bpf->filter,
				       bpf->len * sizeof(sf_single));

		fmemcpy(&bpf->filter[bpf->len - 1], &sf_single,
			sizeof(sf_single));
		fmemset(buff, 0, sizeof(buff));
	}

	fclose(fp);

	if (unlikely(__bpf_validate(bpf) == 0))
		panic("This is not a valid BPF program!\n");
}
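
/*
 * Illustrative sketch (not part of the original file): a rule file for
 * bpf_parse_rules() is a list of "{ code, jt, jf, k }," lines matching the
 * sscanf() format above, i.e. the C array initializer style that e.g.
 * `tcpdump -dd <expression>` emits. A file that accepts only IPv4 frames
 * could look like this:
 *
 *	{ 0x28, 0, 0, 0x0000000c },
 *	{ 0x15, 0, 1, 0x00000800 },
 *	{ 0x06, 0, 0, 0xffffffff },
 *	{ 0x06, 0, 0, 0x00000000 },
 */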