/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 * - The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
#include <linux/ratelimit.h>
/* No hurry in this branch */
static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;
	return NULL;
}
static inline void *load_pointer(const struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	return __load_pointer(skb, k, size);
}
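/*
 * Illustrative sketch only (not from the original file): how a filter can
 * use the special negative offsets handled above. SKF_NET_OFF addresses
 * data relative to the network header and SKF_LL_OFF relative to the
 * link-layer header, e.g.:
 *
 *	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9)
 *
 * loads the IPv4 protocol byte regardless of where the packet's network
 * header starts.
 */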
/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to correct size returned by
 * sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter);
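/*
 * Illustrative only (not part of this file): a receive path typically
 * calls sk_filter() and drops the skb on a non-zero return, e.g.:
 *
 *	if (sk_filter(sk, skb)) {
 *		kfree_skb(skb);	// filter said toss (or trim failed)
 *		return;
 *	}
 *	// skb was accepted, possibly trimmed to the filter's return value
 */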
/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. @skb is the data we are
 * filtering, @fentry is the array of filter instructions.
 * Because all jumps are guaranteed to be before last instruction,
 * and last instruction guaranteed to be a RET, we don't need to check
 * flen. (We used to pass to this function the length of filter)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
			   const struct sock_filter *fentry)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;
	int k;

	/*
	 * Process array of filter instructions.
	 */
	for (;; fentry++) {
#if defined(CONFIG_X86_32)
/* On 32-bit x86, reading fentry->k in place (rather than caching it in a
 * local) likely eases register pressure. */
#define K (fentry->k)
#else
		const u32 K = fentry->k;
#endif
		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += K;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= K;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= K;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			/* K is already a reciprocal, see sk_chk_filter() */
			A = reciprocal_divide(A, K);
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= K;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= K;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= K;
			continue;
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = K;
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_H_ABS:
			k = K;
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_B_ABS:
			k = K;
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			return 0;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LDX_B_MSH:
			/* X = IP header length: low nibble of byte at K, times 4 */
			ptr = load_pointer(skb, K, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_ANC_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case BPF_S_ANC_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case BPF_S_ANC_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case BPF_S_ANC_QUEUE:
			A = skb->queue_mapping;
			continue;
		case BPF_S_ANC_HATYPE:
			if (!skb->dev)
				return 0;
			A = skb->dev->type;
			continue;
		case BPF_S_ANC_RXHASH:
			A = skb->rxhash;
			continue;
		case BPF_S_ANC_CPU:
			A = raw_smp_processor_id();
			continue;
		case BPF_S_ANC_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case BPF_S_ANC_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			/* the attribute must fit inside the linear data */
			if (nla->nla_len > skb->len - A)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		default:
			WARN_RATELIMIT(1, "Unknown code:%u jt:%u jf:%u k:%u\n",
				       fentry->code, fentry->jt,
				       fentry->jf, fentry->k);
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sk_run_filter);
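/*
 * A hedged, illustrative sketch (not part of the original file): the kind
 * of classic BPF program this interpreter executes, written with the
 * BPF_STMT()/BPF_JUMP() helpers from <linux/filter.h>. For a PF_PACKET
 * socket on Ethernet it accepts ARP frames (EtherType 0x0806) and tosses
 * everything else. Note the user supplies raw BPF_* opcodes;
 * sk_chk_filter() translates them to the BPF_S_* values decoded above.
 */
#if 0	/* example only */
static const struct sock_filter example_arp_prog[] = {
	BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),		   /* A = EtherType */
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0806, 0, 1), /* ARP? */
	BPF_STMT(BPF_RET | BPF_K, 0xffff),		   /* accept up to 64K */
	BPF_STMT(BPF_RET | BPF_K, 0),			   /* toss */
};
#endif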
/*
 * A BPF program is able to use 16 cells of memory to store intermediate
 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
 * As we don't want to clear the mem[] array for each packet going through
 * sk_run_filter(), we check that a filter loaded by a user never tries to
 * read a cell if not previously written, and we check all branches to be
 * sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);
	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;
	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_S_ST:
		case BPF_S_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_S_JMP_JA:
			/* a jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* a jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
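/*
 * Illustrative sketch only (not from the original file): a scratch-memory
 * sequence that check_load_and_stores() accepts, because cell 3 is written
 * (BPF_ST) before it is read back (BPF_LD|BPF_MEM). Loading a never-written
 * cell instead would make the whole filter fail with -EINVAL.
 */
#if 0	/* example only */
static const struct sock_filter example_mem_prog[] = {
	BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0),	/* A = skb->len */
	BPF_STMT(BPF_ST, 3),			/* mem[3] = A: marks cell 3 valid */
	BPF_STMT(BPF_LD | BPF_MEM, 3),		/* A = mem[3]: OK, written above */
	BPF_STMT(BPF_RET | BPF_A, 0),		/* accept skb->len bytes */
};
#endif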
/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
		[BPF_RET|BPF_K]          = BPF_S_RET_K,
		[BPF_RET|BPF_A]          = BPF_S_RET_A,
		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
		[BPF_ST]                 = BPF_S_ST,
		[BPF_STX]                = BPF_S_STX,
		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
	};
	int pc;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		code = codes[code];
		if (!code)
			return -EINVAL;
		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			/* pre-compute the reciprocal used by sk_run_filter() */
			ftest->k = reciprocal_value(ftest->k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
			/* ancillary loads are encoded as ABS loads at magic offsets */
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:	\
				code = BPF_S_ANC_##CODE;	\
				break
			switch (ftest->k) {
			ANCILLARY(PROTOCOL);
			ANCILLARY(PKTTYPE);
			ANCILLARY(IFINDEX);
			ANCILLARY(NLATTR);
			ANCILLARY(NLATTR_NEST);
			ANCILLARY(MARK);
			ANCILLARY(QUEUE);
			ANCILLARY(HATYPE);
			ANCILLARY(RXHASH);
			ANCILLARY(CPU);
			}
		}
		ftest->code = code;
	}
	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return check_load_and_stores(filter, flen);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
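/*
 * Illustrative sketch only (not from the original file): a filter that
 * sk_chk_filter() rejects. With flen == 2 and pc == 0, the conditional
 * jump below fails the "pc + jt + 1 >= flen" check (0 + 5 + 1 >= 2),
 * so sk_chk_filter() returns -EINVAL before the filter can ever run.
 */
#if 0	/* example only */
static const struct sock_filter example_bad_prog[] = {
	BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 64, 5, 5),	/* targets out of range */
	BPF_STMT(BPF_RET | BPF_K, 0),
};
#endif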
/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	kfree(fp);
}
EXPORT_SYMBOL(sk_filter_release_rcu);
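/*
 * For context, a hedged sketch (not from this file) of how this callback
 * is typically scheduled: the last reference holder queues the free via
 * call_rcu(), so readers still inside an rcu_read_lock() section in
 * sk_filter() never see the memory disappear under them:
 *
 *	if (atomic_dec_and_test(&fp->refcnt))
 *		call_rcu(&fp->rcu, sk_filter_release_rcu);
 */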
/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter, a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize + sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize + sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;
	fp->bpf_func = sk_run_filter;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);
	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
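/*
 * Illustrative userspace sketch only (not part of this file): attaching
 * a filter reaches this function through setsockopt(SO_ATTACH_FILTER).
 * example_arp_prog is the hypothetical program sketched earlier.
 *
 *	struct sock_fprog fprog = {
 *		.len	= 4,
 *		.filter	= example_arp_prog,
 *	};
 *	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *		       &fprog, sizeof(fprog)) < 0)
 *		perror("SO_ATTACH_FILTER");
 */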
int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		rcu_assign_pointer(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
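/*
 * Illustrative userspace sketch only (not part of this file): detaching
 * goes through setsockopt(SO_DETACH_FILTER); the option value is unused
 * but a valid int must still be supplied:
 *
 *	int dummy = 0;
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, &dummy, sizeof(dummy));
 */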