/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *     Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *     - The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
#include <linux/ratelimit.h>

enum {
	BPF_S_RET_K = 1,
	BPF_S_RET_A,
	BPF_S_ALU_ADD_K,
	BPF_S_ALU_ADD_X,
	BPF_S_ALU_SUB_K,
	BPF_S_ALU_SUB_X,
	BPF_S_ALU_MUL_K,
	BPF_S_ALU_MUL_X,
	BPF_S_ALU_DIV_X,
	BPF_S_ALU_AND_K,
	BPF_S_ALU_AND_X,
	BPF_S_ALU_OR_K,
	BPF_S_ALU_OR_X,
	BPF_S_ALU_LSH_K,
	BPF_S_ALU_LSH_X,
	BPF_S_ALU_RSH_K,
	BPF_S_ALU_RSH_X,
	BPF_S_ALU_NEG,
	BPF_S_LD_W_ABS,
	BPF_S_LD_H_ABS,
	BPF_S_LD_B_ABS,
	BPF_S_LD_W_LEN,
	BPF_S_LD_W_IND,
	BPF_S_LD_H_IND,
	BPF_S_LD_B_IND,
	BPF_S_LD_IMM,
	BPF_S_LDX_W_LEN,
	BPF_S_LDX_B_MSH,
	BPF_S_LDX_IMM,
	BPF_S_MISC_TAX,
	BPF_S_MISC_TXA,
	BPF_S_ALU_DIV_K,
	BPF_S_LD_MEM,
	BPF_S_LDX_MEM,
	BPF_S_ST,
	BPF_S_STX,
	BPF_S_JMP_JA,
	BPF_S_JMP_JEQ_K,
	BPF_S_JMP_JEQ_X,
	BPF_S_JMP_JGE_K,
	BPF_S_JMP_JGE_X,
	BPF_S_JMP_JGT_K,
	BPF_S_JMP_JGT_X,
	BPF_S_JMP_JSET_K,
	BPF_S_JMP_JSET_X,
	/* Ancillary data */
	BPF_S_ANC_PROTOCOL,
	BPF_S_ANC_PKTTYPE,
	BPF_S_ANC_IFINDEX,
	BPF_S_ANC_NLATTR,
	BPF_S_ANC_NLATTR_NEST,
	BPF_S_ANC_MARK,
	BPF_S_ANC_QUEUE,
	BPF_S_ANC_HATYPE,
	BPF_S_ANC_RXHASH,
	BPF_S_ANC_CPU,
};

/* No hurry in this branch */
static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;
	return NULL;
}

static inline void *load_pointer(const struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	return __load_pointer(skb, k, size);
}
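
/*
 * Example (a sketch, not part of the original file): a BPF load with
 * k = SKF_NET_OFF + 9 takes the negative-offset path in __load_pointer()
 * and fetches the IPv4 protocol byte relative to the network header,
 * no matter how much link-level header precedes it.
 */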

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = sk_run_filter(skb, filter->insns);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter);
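
/*
 * sk_filter() is run from the receive path; for example,
 * sock_queue_rcv_skb() calls it before queueing an skb, so a filter
 * returning 0 drops the packet before it reaches the receive queue.
 */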

/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. @skb is the data we are
 * filtering, @fentry is the array of filter instructions.
 * Because all jumps are guaranteed to be before the last instruction,
 * and the last instruction is guaranteed to be a RET, we don't need to
 * check flen. (We used to pass the length of the filter to this function.)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
			   const struct sock_filter *fentry)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;
	int k;

	/*
	 * Process array of filter instructions.
	 */
	for (;; fentry++) {
#if defined(CONFIG_X86_32)
#define	K (fentry->k)
#else
		const u32 K = fentry->k;
#endif

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += K;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= K;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= K;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			/* K was pre-computed as reciprocal_value(k) in sk_chk_filter() */
			A = reciprocal_divide(A, K);
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= K;
			continue;
		case BPF_S_ALU_OR_X:
			A |= X;
			continue;
		case BPF_S_ALU_OR_K:
			A |= K;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= K;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= K;
			continue;
		case BPF_S_ALU_NEG:
			A = -A;
			continue;
		case BPF_S_JMP_JA:
			fentry += K;
			continue;
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = K;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_H_ABS:
			k = K;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_B_ABS:
			k = K;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			return 0;
		case BPF_S_LD_W_LEN:
			A = skb->len;
			continue;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LD_W_IND:
			k = X + K;
			goto load_w;
		case BPF_S_LD_H_IND:
			k = X + K;
			goto load_h;
		case BPF_S_LD_B_IND:
			k = X + K;
			goto load_b;
		case BPF_S_LDX_B_MSH:
			/* X = 4 * IP header length, the classic tcpdump idiom */
			ptr = load_pointer(skb, K, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_LD_IMM:
			A = K;
			continue;
		case BPF_S_LDX_IMM:
			X = K;
			continue;
		case BPF_S_LD_MEM:
			A = mem[K];
			continue;
		case BPF_S_LDX_MEM:
			X = mem[K];
			continue;
		case BPF_S_MISC_TAX:
			X = A;
			continue;
		case BPF_S_MISC_TXA:
			A = X;
			continue;
		case BPF_S_RET_K:
			return K;
		case BPF_S_RET_A:
			return A;
		case BPF_S_ST:
			mem[K] = A;
			continue;
		case BPF_S_STX:
			mem[K] = X;
			continue;
		case BPF_S_ANC_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case BPF_S_ANC_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case BPF_S_ANC_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case BPF_S_ANC_MARK:
			A = skb->mark;
			continue;
		case BPF_S_ANC_QUEUE:
			A = skb->queue_mapping;
			continue;
		case BPF_S_ANC_HATYPE:
			if (!skb->dev)
				return 0;
			A = skb->dev->type;
			continue;
		case BPF_S_ANC_RXHASH:
			A = skb->rxhash;
			continue;
		case BPF_S_ANC_CPU:
			A = raw_smp_processor_id();
			continue;
		case BPF_S_ANC_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case BPF_S_ANC_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			/* the attribute must fit within the message */
			if (nla->nla_len > skb->len - A)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		default:
			WARN_RATELIMIT(1, "Unknown code:%u jt:%u jf:%u k:%u\n",
				       fentry->code, fentry->jt,
				       fentry->jf, fentry->k);
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sk_run_filter);
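
/*
 * Example (a sketch, not part of the original file): in the user-visible
 * wire format that sk_chk_filter() translates, a program that keeps ARP
 * frames and drops everything else looks like
 *
 *	struct sock_filter code[] = {
 *		{ 0x28, 0, 0, 0x0000000c },	ldh [12]        A = ethertype
 *		{ 0x15, 0, 1, 0x00000806 },	jeq #ETH_P_ARP  jt pc+1, jf pc+2
 *		{ 0x06, 0, 0, 0x0000ffff },	ret #0xffff     accept frame
 *		{ 0x06, 0, 0, 0x00000000 },	ret #0          drop frame
 *	};
 */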

/*
 * Security:
 * A BPF program is able to use 16 cells of memory to store intermediate
 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
 * As we don't want to clear the mem[] array for each packet going through
 * sk_run_filter(), we check that a filter loaded by a user never tries to
 * read a cell it has not previously written, and we check all branches to
 * be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);
	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;
	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_S_ST:
		case BPF_S_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_S_JMP_JA:
			/* a jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* a jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
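
/*
 * Example (a sketch): a two-instruction program in wire format such as
 *
 *	BPF_STMT(BPF_LD|BPF_MEM, 3),
 *	BPF_STMT(BPF_RET|BPF_A, 0),
 *
 * reads mem[3] without any prior store, so the pass above fails it
 * and sk_chk_filter() returns -EINVAL.
 */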

/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
		[BPF_RET|BPF_K]          = BPF_S_RET_K,
		[BPF_RET|BPF_A]          = BPF_S_RET_A,
		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
		[BPF_ST]                 = BPF_S_ST,
		[BPF_STX]                = BPF_S_STX,
		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
	};
	int pc;

	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		code = codes[code];
		if (!code)
			return -EINVAL;
		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			ftest->k = reciprocal_value(ftest->k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both branch targets must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:	\
				code = BPF_S_ANC_##CODE;	\
				break
			switch (ftest->k) {
			ANCILLARY(PROTOCOL);
			ANCILLARY(PKTTYPE);
			ANCILLARY(IFINDEX);
			ANCILLARY(NLATTR);
			ANCILLARY(NLATTR_NEST);
			ANCILLARY(MARK);
			ANCILLARY(QUEUE);
			ANCILLARY(HATYPE);
			ANCILLARY(RXHASH);
			ANCILLARY(CPU);
			}
		}
		ftest->code = code;
	}

	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return check_load_and_stores(filter, flen);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
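
/*
 * Example (a sketch): ancillary loads are recognized above by their magic
 * offset rather than bounds-checked as packet reads, so
 *
 *	BPF_STMT(BPF_LD|BPF_H|BPF_ABS, SKF_AD_OFF + SKF_AD_PROTOCOL),
 *
 * is accepted and rewritten to the internal BPF_S_ANC_PROTOCOL code,
 * which sk_run_filter() serves from skb metadata instead of packet data.
 */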

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	kfree(fp);
}
EXPORT_SYMBOL(sk_filter_release_rcu);

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize + sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize + sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);
	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
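
/*
 * Userspace reaches sk_attach_filter() through setsockopt(). A minimal
 * sketch of the caller side (error handling elided; "code" is a valid
 * struct sock_filter array such as the ARP example above):
 *
 *	struct sock_fprog prog = {
 *		.len	= sizeof(code) / sizeof(code[0]),
 *		.filter	= code,
 *	};
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */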

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		rcu_assign_pointer(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
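
/*
 * The userspace counterpart is a sketch like
 *
 *	int dummy = 0;
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, &dummy, sizeof(dummy));
 *
 * (a dummy option value, since setsockopt() wants a readable argument);
 * the -ENOENT default above is what the caller sees when no filter is
 * attached.
 */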