/*
 * netsniff-ng - the packet sniffing beast
 * Copyright 2009 - 2012 Daniel Borkmann.
 * Copyright 2009, 2010 Emmanuel Roullit.
 * Copyright 1990-1996 The Regents of the University of
 * California. All rights reserved. (3-clause BSD license)
 * Subject to the GPL, version 2.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/filter.h>

/* Project-local helpers assumed from the netsniff-ng tree: xmalloc()/
 * xrealloc(), fmemset()/fmemcpy(), slprintf_nocheck(), panic(),
 * unlikely(), array_size(), sysctl_set_int() and bpf_try_compile(). */
#include "bpf.h"
#include "xmalloc.h"
#include "xutils.h"
#include "die.h"
/* Read a 16/32-bit big-endian value from packet memory. Note the use of
 * uint32_t rather than unsigned long for the word load: on LP64 systems
 * an unsigned long dereference would read 8 bytes. */
#define EXTRACT_SHORT(packet) \
		((unsigned short) ntohs(*(unsigned short *) (packet)))
#define EXTRACT_LONG(packet) \
		(ntohl(*(uint32_t *) (packet)))
#ifndef BPF_MEMWORDS
# define BPF_MEMWORDS 16	/* scratch memory slots M[0..15] */
#endif
#define BPF_LD_B	(BPF_LD   | BPF_B)
#define BPF_LD_H	(BPF_LD   | BPF_H)
#define BPF_LD_W	(BPF_LD   | BPF_W)
#define BPF_LDX_B	(BPF_LDX  | BPF_B)
#define BPF_LDX_W	(BPF_LDX  | BPF_W)
#define BPF_JMP_JA	(BPF_JMP  | BPF_JA)
#define BPF_JMP_JEQ	(BPF_JMP  | BPF_JEQ)
#define BPF_JMP_JGT	(BPF_JMP  | BPF_JGT)
#define BPF_JMP_JGE	(BPF_JMP  | BPF_JGE)
#define BPF_JMP_JSET	(BPF_JMP  | BPF_JSET)
#define BPF_ALU_ADD	(BPF_ALU  | BPF_ADD)
#define BPF_ALU_SUB	(BPF_ALU  | BPF_SUB)
#define BPF_ALU_MUL	(BPF_ALU  | BPF_MUL)
#define BPF_ALU_DIV	(BPF_ALU  | BPF_DIV)
#define BPF_ALU_MOD	(BPF_ALU  | BPF_MOD)
#define BPF_ALU_NEG	(BPF_ALU  | BPF_NEG)
#define BPF_ALU_AND	(BPF_ALU  | BPF_AND)
#define BPF_ALU_OR	(BPF_ALU  | BPF_OR)
#define BPF_ALU_XOR	(BPF_ALU  | BPF_XOR)
#define BPF_ALU_LSH	(BPF_ALU  | BPF_LSH)
#define BPF_ALU_RSH	(BPF_ALU  | BPF_RSH)
#define BPF_MISC_TAX	(BPF_MISC | BPF_TAX)
#define BPF_MISC_TXA	(BPF_MISC | BPF_TXA)
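
/*
 * A classic BPF opcode is the bitwise OR of its class, size/mode and
 * source fields. For example (constants from <linux/filter.h>):
 *
 *   BPF_LD_H | BPF_ABS   ==  0x00 | 0x08 | 0x20  ==  0x28   ("ldh [k]")
 *   BPF_JMP_JEQ | BPF_K  ==  0x05 | 0x10 | 0x00  ==  0x15   ("jeq #k")
 *
 * The shorthands above exist so that an opcode can double as an index
 * into op_table[] below.
 */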
static const char *op_table[] = {
	[BPF_LD_B]	= "ldb",
	[BPF_LD_H]	= "ldh",
	[BPF_LD_W]	= "ld",
	[BPF_LDX]	= "ldx",
	[BPF_LDX_B]	= "ldxb",
	[BPF_ST]	= "st",
	[BPF_STX]	= "stx",
	[BPF_JMP_JA]	= "ja",
	[BPF_JMP_JEQ]	= "jeq",
	[BPF_JMP_JGT]	= "jgt",
	[BPF_JMP_JGE]	= "jge",
	[BPF_JMP_JSET]	= "jset",
	[BPF_ALU_ADD]	= "add",
	[BPF_ALU_SUB]	= "sub",
	[BPF_ALU_MUL]	= "mul",
	[BPF_ALU_DIV]	= "div",
	[BPF_ALU_MOD]	= "mod",
	[BPF_ALU_NEG]	= "neg",
	[BPF_ALU_AND]	= "and",
	[BPF_ALU_OR]	= "or",
	[BPF_ALU_XOR]	= "xor",
	[BPF_ALU_LSH]	= "lsh",
	[BPF_ALU_RSH]	= "rsh",
	[BPF_RET]	= "ret",
	[BPF_MISC_TAX]	= "tax",
	[BPF_MISC_TXA]	= "txa",
};
void bpf_dump_op_table(void)
{
	size_t i;

	/* The table is sparse: designated initializers leave NULL holes
	 * at unused opcodes, so skip those. */
	for (i = 0; i < array_size(op_table); ++i) {
		if (op_table[i])
			printf("%s\n", op_table[i]);
	}
}
/* Mnemonics for Linux ancillary data loads ("BPF extensions"). */
static const char *bpf_dump_linux_k(uint32_t k)
{
	switch (k) {
	default:
		return "[%d]";
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		return "proto";
	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		return "type";
	case SKF_AD_OFF + SKF_AD_IFINDEX:
		return "ifidx";
	case SKF_AD_OFF + SKF_AD_NLATTR:
		return "nla";
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
		return "nlan";
	case SKF_AD_OFF + SKF_AD_MARK:
		return "mark";
	case SKF_AD_OFF + SKF_AD_QUEUE:
		return "queue";
	case SKF_AD_OFF + SKF_AD_HATYPE:
		return "hatype";
	case SKF_AD_OFF + SKF_AD_RXHASH:
		return "rxhash";
	case SKF_AD_OFF + SKF_AD_CPU:
		return "cpu";
	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		return "vlan_tci";
	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		return "vlan_avail";
	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
		return "poff";
	}
}
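
/*
 * Note on the cases above: SKF_AD_OFF is a negative offset (-0x1000),
 * so "ld [SKF_AD_OFF + SKF_AD_xxx]" can never collide with a real
 * packet offset. The kernel intercepts such loads and returns packet
 * metadata (protocol, mark, queue, CPU, ...) instead of packet bytes.
 */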
static char *__bpf_dump(const struct sock_filter bpf, int n)
{
	int v;
	const char *fmt, *op;
	static char image[256];
	char operand[64];

	v = bpf.k;
	switch (bpf.code) {
	default:
		op = "unimp";
		fmt = "0x%x";
		v = bpf.code;
		break;
	case BPF_RET | BPF_K:
		op = op_table[BPF_RET];
		fmt = "#0x%x";
		break;
	case BPF_RET | BPF_A:
		op = op_table[BPF_RET];
		fmt = "a";
		break;
	case BPF_RET | BPF_X:
		op = op_table[BPF_RET];
		fmt = "x";
		break;
	case BPF_LD_W | BPF_ABS:
		op = op_table[BPF_LD_W];
		fmt = bpf_dump_linux_k(bpf.k);
		break;
	case BPF_LD_H | BPF_ABS:
		op = op_table[BPF_LD_H];
		fmt = bpf_dump_linux_k(bpf.k);
		break;
	case BPF_LD_B | BPF_ABS:
		op = op_table[BPF_LD_B];
		fmt = bpf_dump_linux_k(bpf.k);
		break;
	case BPF_LD_W | BPF_LEN:
		op = op_table[BPF_LD_W];
		fmt = "#pktlen";
		break;
	case BPF_LD_W | BPF_IND:
		op = op_table[BPF_LD_W];
		fmt = "[x + %d]";
		break;
	case BPF_LD_H | BPF_IND:
		op = op_table[BPF_LD_H];
		fmt = "[x + %d]";
		break;
	case BPF_LD_B | BPF_IND:
		op = op_table[BPF_LD_B];
		fmt = "[x + %d]";
		break;
	case BPF_LD | BPF_IMM:
		op = op_table[BPF_LD_W];
		fmt = "#0x%x";
		break;
	case BPF_LDX | BPF_IMM:
		op = op_table[BPF_LDX];
		fmt = "#0x%x";
		break;
	case BPF_LDX_B | BPF_MSH:
		op = op_table[BPF_LDX_B];
		fmt = "4*([%d]&0xf)";
		break;
	case BPF_LD | BPF_MEM:
		op = op_table[BPF_LD_W];
		fmt = "M[%d]";
		break;
	case BPF_LDX | BPF_MEM:
		op = op_table[BPF_LDX];
		fmt = "M[%d]";
		break;
	case BPF_ST:
		op = op_table[BPF_ST];
		fmt = "M[%d]";
		break;
	case BPF_STX:
		op = op_table[BPF_STX];
		fmt = "M[%d]";
		break;
	case BPF_JMP_JA:
		op = op_table[BPF_JMP_JA];
		fmt = "%d";
		v = n + 1 + bpf.k;
		break;
	case BPF_JMP_JGT | BPF_K:
		op = op_table[BPF_JMP_JGT];
		fmt = "#0x%x";
		break;
	case BPF_JMP_JGE | BPF_K:
		op = op_table[BPF_JMP_JGE];
		fmt = "#0x%x";
		break;
	case BPF_JMP_JEQ | BPF_K:
		op = op_table[BPF_JMP_JEQ];
		fmt = "#0x%x";
		break;
	case BPF_JMP_JSET | BPF_K:
		op = op_table[BPF_JMP_JSET];
		fmt = "#0x%x";
		break;
	case BPF_JMP_JGT | BPF_X:
		op = op_table[BPF_JMP_JGT];
		fmt = "x";
		break;
	case BPF_JMP_JGE | BPF_X:
		op = op_table[BPF_JMP_JGE];
		fmt = "x";
		break;
	case BPF_JMP_JEQ | BPF_X:
		op = op_table[BPF_JMP_JEQ];
		fmt = "x";
		break;
	case BPF_JMP_JSET | BPF_X:
		op = op_table[BPF_JMP_JSET];
		fmt = "x";
		break;
	case BPF_ALU_ADD | BPF_X:
		op = op_table[BPF_ALU_ADD];
		fmt = "x";
		break;
	case BPF_ALU_SUB | BPF_X:
		op = op_table[BPF_ALU_SUB];
		fmt = "x";
		break;
	case BPF_ALU_MUL | BPF_X:
		op = op_table[BPF_ALU_MUL];
		fmt = "x";
		break;
	case BPF_ALU_DIV | BPF_X:
		op = op_table[BPF_ALU_DIV];
		fmt = "x";
		break;
	case BPF_ALU_MOD | BPF_X:
		op = op_table[BPF_ALU_MOD];
		fmt = "x";
		break;
	case BPF_ALU_AND | BPF_X:
		op = op_table[BPF_ALU_AND];
		fmt = "x";
		break;
	case BPF_ALU_OR | BPF_X:
		op = op_table[BPF_ALU_OR];
		fmt = "x";
		break;
	case BPF_ALU_XOR | BPF_X:
		op = op_table[BPF_ALU_XOR];
		fmt = "x";
		break;
	case BPF_ALU_LSH | BPF_X:
		op = op_table[BPF_ALU_LSH];
		fmt = "x";
		break;
	case BPF_ALU_RSH | BPF_X:
		op = op_table[BPF_ALU_RSH];
		fmt = "x";
		break;
	case BPF_ALU_ADD | BPF_K:
		op = op_table[BPF_ALU_ADD];
		fmt = "#%d";
		break;
	case BPF_ALU_SUB | BPF_K:
		op = op_table[BPF_ALU_SUB];
		fmt = "#%d";
		break;
	case BPF_ALU_MUL | BPF_K:
		op = op_table[BPF_ALU_MUL];
		fmt = "#%d";
		break;
	case BPF_ALU_DIV | BPF_K:
		op = op_table[BPF_ALU_DIV];
		fmt = "#%d";
		break;
	case BPF_ALU_MOD | BPF_K:
		op = op_table[BPF_ALU_MOD];
		fmt = "#%d";
		break;
	case BPF_ALU_AND | BPF_K:
		op = op_table[BPF_ALU_AND];
		fmt = "#0x%x";
		break;
	case BPF_ALU_OR | BPF_K:
		op = op_table[BPF_ALU_OR];
		fmt = "#0x%x";
		break;
	case BPF_ALU_XOR | BPF_K:
		op = op_table[BPF_ALU_XOR];
		fmt = "#0x%x";
		break;
	case BPF_ALU_LSH | BPF_K:
		op = op_table[BPF_ALU_LSH];
		fmt = "#%d";
		break;
	case BPF_ALU_RSH | BPF_K:
		op = op_table[BPF_ALU_RSH];
		fmt = "#%d";
		break;
	case BPF_ALU_NEG:
		op = op_table[BPF_ALU_NEG];
		fmt = "";
		break;
	case BPF_MISC_TAX:
		op = op_table[BPF_MISC_TAX];
		fmt = "";
		break;
	case BPF_MISC_TXA:
		op = op_table[BPF_MISC_TXA];
		fmt = "";
		break;
	}

	slprintf_nocheck(operand, sizeof(operand), fmt, v);
	slprintf_nocheck(image, sizeof(image),
			 (BPF_CLASS(bpf.code) == BPF_JMP &&
			  BPF_OP(bpf.code) != BPF_JA) ?
			 " L%d: %s %s, L%d, L%d" : " L%d: %s %s",
			 n, op, operand, n + 1 + bpf.jt, n + 1 + bpf.jf);

	return image;
}
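
/*
 * Illustrative output of the dump format above for a hypothetical
 * "accept IPv4" filter (exact operand text depends on the fmt strings):
 *
 *    L0: ldh [12]
 *    L1: jeq #0x800, L2, L3
 *    L2: ret #0xffffffff
 *    L3: ret #0x0
 */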
void bpf_dump_all(struct sock_fprog *bpf)
{
	int i;

	for (i = 0; i < bpf->len; ++i)
		printf("%s\n", __bpf_dump(bpf->filter[i], i));
}
void bpf_attach_to_sock(int sock, struct sock_fprog *bpf)
{
	int ret;

	/* A single "ret #-1" instruction accepts every packet anyway,
	 * so attaching such a filter would only add overhead. */
	if (bpf->filter[0].code == BPF_RET &&
	    bpf->filter[0].k == 0xFFFFFFFF)
		return;

	ret = setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER,
			 bpf, sizeof(*bpf));
	if (unlikely(ret < 0))
		panic("Cannot attach filter to socket!\n");
}
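
/*
 * Typical call sequence (sketch; the file name and LINKTYPE_EN10MB are
 * only examples, not fixed API values):
 *
 *   struct sock_fprog prog;
 *
 *   bpf_parse_rules("rules.bpf", &prog, LINKTYPE_EN10MB);
 *   bpf_attach_to_sock(sock, &prog);
 */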
void bpf_detach_from_sock(int sock)
{
	int ret, empty = 0;

	ret = setsockopt(sock, SOL_SOCKET, SO_DETACH_FILTER,
			 &empty, sizeof(empty));
	if (unlikely(ret < 0))
		panic("Cannot detach filter from socket!\n");
}
int enable_kernel_bpf_jit_compiler(void)
{
	return sysctl_set_int("net/core/bpf_jit_enable", 1);
}
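
/*
 * The call above writes 1 to /proc/sys/net/core/bpf_jit_enable, asking
 * the kernel to JIT-compile attached classic BPF filters to native code
 * instead of interpreting them. It requires root privileges and a
 * kernel built with CONFIG_BPF_JIT.
 */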
int __bpf_validate(const struct sock_fprog *bpf)
{
	uint32_t i, from;
	const struct sock_filter *p;

	if (!bpf)
		return 0;
	if (bpf->len < 1)
		return 0;

	for (i = 0; i < bpf->len; ++i) {
		p = &bpf->filter[i];

		switch (BPF_CLASS(p->code)) {
		/* Check that memory operations use valid addresses. */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_IMM:
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
				/* There's no maximum packet data size
				 * in userland. The runtime packet length
				 * check suffices.
				 */
				break;
			case BPF_MEM:
				if (p->k >= BPF_MEMWORDS)
					return 0;
				break;
			case BPF_LEN:
				break;
			default:
				return 0;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= BPF_MEMWORDS)
				return 0;
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_AND:
			case BPF_OR:
			case BPF_XOR:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
			case BPF_MOD:
				/* Check for constant division by 0 (undefined
				 * for division and modulus).
				 */
				if (BPF_RVAL(p->code) == BPF_K && p->k == 0)
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_JMP:
			/* Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't. Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 *
			 * For userland, we don't know that the from
			 * or len are <= BPF_MAXINSNS, but we know that
			 * from <= len, and, except on a 64-bit system,
			 * it's unlikely that len, if it truly reflects
			 * the size of the program we've been handed,
			 * will be anywhere near the maximum size of
			 * a u_int. We also don't check for backward
			 * branches, as we currently support them in
			 * userland for the protochain operation.
			 */
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k >= bpf->len)
					return 0;
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= bpf->len ||
				    from + p->jf >= bpf->len)
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			break;
		default:
			return 0;
		}
	}

	/* The program must end in a return instruction. */
	return BPF_CLASS(bpf->filter[bpf->len - 1].code) == BPF_RET;
}
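
/*
 * The checks above follow the classic bpf_validate() logic from the
 * BSD/libpcap lineage credited in the file header: bounded scratch
 * memory indices, no constant division or modulus by zero, jump targets
 * inside the program, and a terminating return class.
 */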
uint32_t bpf_run_filter(const struct sock_fprog *fcode, uint8_t *packet,
			size_t plen)
{
	/* XXX: caplen == len */
	uint32_t A, X;
	uint32_t k;
	struct sock_filter *bpf;
	int32_t mem[BPF_MEMWORDS] = { 0, };

	if (fcode == NULL || fcode->filter == NULL || fcode->len == 0)
		return 0xFFFFFFFF;

	A = 0;
	X = 0;

	bpf = fcode->filter;
	--bpf;

	while (1) {
		++bpf;

		switch (bpf->code) {
		default:
			return 0;
		case BPF_RET | BPF_K:
			return (uint32_t) bpf->k;
		case BPF_RET | BPF_A:
			return (uint32_t) A;
		case BPF_LD_W | BPF_ABS:
			/* No Linux extensions supported here! */
			k = bpf->k;
			if (k + sizeof(int32_t) > plen)
				return 0;
			A = EXTRACT_LONG(&packet[k]);
			continue;
		case BPF_LD_H | BPF_ABS:
			/* No Linux extensions supported here! */
			k = bpf->k;
			if (k + sizeof(short) > plen)
				return 0;
			A = EXTRACT_SHORT(&packet[k]);
			continue;
		case BPF_LD_B | BPF_ABS:
			/* No Linux extensions supported here! */
			k = bpf->k;
			if (k >= plen)
				return 0;
			A = packet[k];
			continue;
		case BPF_LD_W | BPF_LEN:
			A = plen;
			continue;
		case BPF_LDX_W | BPF_LEN:
			X = plen;
			continue;
		case BPF_LD_W | BPF_IND:
			k = X + bpf->k;
			if (k + sizeof(int32_t) > plen)
				return 0;
			A = EXTRACT_LONG(&packet[k]);
			continue;
		case BPF_LD_H | BPF_IND:
			k = X + bpf->k;
			if (k + sizeof(short) > plen)
				return 0;
			A = EXTRACT_SHORT(&packet[k]);
			continue;
		case BPF_LD_B | BPF_IND:
			k = X + bpf->k;
			if (k >= plen)
				return 0;
			A = packet[k];
			continue;
		case BPF_LDX_B | BPF_MSH:
			/* X = IP header length of the packet at offset k */
			if (bpf->k >= plen)
				return 0;
			X = (packet[bpf->k] & 0xf) << 2;
			continue;
		case BPF_LD | BPF_IMM:
			A = bpf->k;
			continue;
		case BPF_LDX | BPF_IMM:
			X = bpf->k;
			continue;
		case BPF_LD | BPF_MEM:
			A = mem[bpf->k];
			continue;
		case BPF_LDX | BPF_MEM:
			X = mem[bpf->k];
			continue;
		case BPF_ST:
			mem[bpf->k] = A;
			continue;
		case BPF_STX:
			mem[bpf->k] = X;
			continue;
		case BPF_JMP_JA:
			bpf += bpf->k;
			continue;
		case BPF_JMP_JGT | BPF_K:
			bpf += (A > bpf->k) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP_JGE | BPF_K:
			bpf += (A >= bpf->k) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP_JEQ | BPF_K:
			bpf += (A == bpf->k) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP_JSET | BPF_K:
			bpf += (A & bpf->k) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP_JGT | BPF_X:
			bpf += (A > X) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP_JGE | BPF_X:
			bpf += (A >= X) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP_JEQ | BPF_X:
			bpf += (A == X) ? bpf->jt : bpf->jf;
			continue;
		case BPF_JMP_JSET | BPF_X:
			bpf += (A & X) ? bpf->jt : bpf->jf;
			continue;
		case BPF_ALU_ADD | BPF_X:
			A += X;
			continue;
		case BPF_ALU_SUB | BPF_X:
			A -= X;
			continue;
		case BPF_ALU_MUL | BPF_X:
			A *= X;
			continue;
		case BPF_ALU_DIV | BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_ALU_MOD | BPF_X:
			if (X == 0)
				return 0;
			A %= X;
			continue;
		case BPF_ALU_AND | BPF_X:
			A &= X;
			continue;
		case BPF_ALU_OR | BPF_X:
			A |= X;
			continue;
		case BPF_ALU_XOR | BPF_X:
			A ^= X;
			continue;
		case BPF_ALU_LSH | BPF_X:
			A <<= X;
			continue;
		case BPF_ALU_RSH | BPF_X:
			A >>= X;
			continue;
		case BPF_ALU_ADD | BPF_K:
			A += bpf->k;
			continue;
		case BPF_ALU_SUB | BPF_K:
			A -= bpf->k;
			continue;
		case BPF_ALU_MUL | BPF_K:
			A *= bpf->k;
			continue;
		case BPF_ALU_DIV | BPF_K:
			/* k == 0 is rejected by __bpf_validate() */
			A /= bpf->k;
			continue;
		case BPF_ALU_MOD | BPF_K:
			A %= bpf->k;
			continue;
		case BPF_ALU_AND | BPF_K:
			A &= bpf->k;
			continue;
		case BPF_ALU_OR | BPF_K:
			A |= bpf->k;
			continue;
		case BPF_ALU_XOR | BPF_K:
			A ^= bpf->k;
			continue;
		case BPF_ALU_LSH | BPF_K:
			A <<= bpf->k;
			continue;
		case BPF_ALU_RSH | BPF_K:
			A >>= bpf->k;
			continue;
		case BPF_ALU_NEG:
			A = -A;
			continue;
		case BPF_MISC_TAX:
			X = A;
			continue;
		case BPF_MISC_TXA:
			A = X;
			continue;
		}
	}
}
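
/*
 * Usage sketch for the userland interpreter above:
 *
 *   if (__bpf_validate(&prog))
 *           keep = bpf_run_filter(&prog, frame, frame_len);
 *
 * Following classic BPF semantics, a return value of 0 drops the
 * packet and a non-zero value is the number of bytes to keep, with
 * 0xFFFFFFFF meaning "the whole packet".
 */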
void bpf_parse_rules(char *rulefile, struct sock_fprog *bpf, uint32_t link_type)
{
	int ret;
	char buff[256];
	/* 0x06 == BPF_RET | BPF_K with k == -1: accept the whole packet */
	struct sock_filter sf_single = { 0x06, 0, 0, 0xFFFFFFFF };
	FILE *fp;

	fmemset(bpf, 0, sizeof(*bpf));

	if (rulefile == NULL) {
		bpf->len = 1;
		bpf->filter = xmalloc(sizeof(sf_single));
		fmemcpy(&bpf->filter[0], &sf_single, sizeof(sf_single));
		return;
	}

	fp = fopen(rulefile, "r");
	if (!fp) {
		/* Not an opcode file, treat it as a filter expression
		 * and hand it to the compiler. */
		bpf_try_compile(rulefile, bpf, link_type);
		return;
	}

	fmemset(buff, 0, sizeof(buff));

	while (fgets(buff, sizeof(buff), fp) != NULL) {
		/* sscanf writes full unsigned ints, so parse into
		 * temporaries instead of aliasing the narrow sock_filter
		 * fields (code is 16 bit, jt/jf are 8 bit each). */
		unsigned int code, jt, jf, k;

		buff[sizeof(buff) - 1] = 0;

		if (buff[0] != '{') {
			fmemset(buff, 0, sizeof(buff));
			continue;
		}

		fmemset(&sf_single, 0, sizeof(sf_single));

		ret = sscanf(buff, "{ 0x%x, %u, %u, 0x%08x },",
			     &code, &jt, &jf, &k);
		if (unlikely(ret != 4))
			panic("BPF syntax error!\n");

		sf_single.code = (uint16_t) code;
		sf_single.jt = (uint8_t) jt;
		sf_single.jf = (uint8_t) jf;
		sf_single.k = (uint32_t) k;

		bpf->len++;
		bpf->filter = xrealloc(bpf->filter,
				       bpf->len * sizeof(sf_single));
		fmemcpy(&bpf->filter[bpf->len - 1], &sf_single,
			sizeof(sf_single));

		fmemset(buff, 0, sizeof(buff));
	}

	fclose(fp);

	if (unlikely(__bpf_validate(bpf) == 0))
		panic("This is not a valid BPF program!\n");
}
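
/*
 * Example rule file as consumed by bpf_parse_rules(), in the style of
 * "tcpdump -dd ip" output (accept IPv4, drop everything else; the exact
 * return length in the third instruction varies with tcpdump's snaplen):
 *
 *   { 0x28, 0, 0, 0x0000000c },
 *   { 0x15, 0, 1, 0x00000800 },
 *   { 0x06, 0, 0, 0xffffffff },
 *   { 0x06, 0, 0, 0x00000000 },
 */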