/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *	Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *	- The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
/* No hurry in this branch */
static void *__load_pointer(struct sk_buff *skb, int k)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr < skb_tail_pointer(skb))
		return ptr;
	return NULL;
}
static inline void *load_pointer(struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	else {
		if (k >= SKF_AD_OFF)
			return NULL;
		return __load_pointer(skb, k);
	}
}
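
/*
 * Illustrative sketch (not part of the original file): the negative
 * offsets accepted above let a filter instruction address packet data
 * relative to the link-layer or network header rather than skb->data.
 * Assuming an IPv4 packet, a load of the IP protocol byte through the
 * network-header base could be written as:
 *
 *	struct sock_filter ld_ip_proto = {
 *		.code = BPF_LD | BPF_B | BPF_ABS,
 *		.k    = SKF_NET_OFF + 9,
 *	};
 *
 * where 9 is offsetof(struct iphdr, protocol).  At run time
 * sk_run_filter() hands the negative k to load_pointer(), which
 * resolves it against skb_network_header() in __load_pointer().
 */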
/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to correct size returned by
 * sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock_bh();
	filter = rcu_dereference_bh(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = sk_run_filter(skb, filter->insns);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(sk_filter);
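
/*
 * Illustrative usage sketch (not part of the original file): a protocol
 * receive path typically runs sk_filter() before queueing an skb and
 * frees the buffer when the filter (or the LSM hook) rejects it.  The
 * helper names here are hypothetical; compare sock_queue_rcv_skb() in
 * net/core/sock.c for a real caller.
 *
 *	int example_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int err = sk_filter(sk, skb);
 *
 *		if (err) {
 *			kfree_skb(skb);
 *			return err;
 *		}
 *		return __example_enqueue(sk, skb);
 *	}
 */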
/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. @skb is the data we are
 * filtering, @fentry is the array of filter instructions.
 * Because all jumps are guaranteed to be before the last instruction,
 * and the last instruction is guaranteed to be a RET, we don't need
 * to check flen. (We used to pass the length of the filter to this
 * function.)
 */
unsigned int sk_run_filter(struct sk_buff *skb, const struct sock_filter *fentry)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	unsigned long memvalid = 0;
	u32 tmp;
	int k;

	/* memvalid tracks initialized scratch words, one bit each */
	BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
	/*
	 * Process the array of filter instructions.
	 */
	for (;; fentry++) {
#if defined(CONFIG_X86_32)
#define	K (fentry->k)
#else
		const u32 K = fentry->k;
#endif

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += K;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= K;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= K;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			/* K is pre-converted by sk_chk_filter() */
			A = reciprocal_divide(A, K);
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= K;
			continue;
		case BPF_S_ALU_OR_X:
			A |= X;
			continue;
		case BPF_S_ALU_OR_K:
			A |= K;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= K;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= K;
			continue;
		case BPF_S_ALU_NEG:
			A = -A;
			continue;
		case BPF_S_JMP_JA:
			fentry += K;
			continue;
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = K;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			break;
		case BPF_S_LD_H_ABS:
			k = K;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			break;
		case BPF_S_LD_B_ABS:
			k = K;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			break;
		case BPF_S_LD_W_LEN:
			A = skb->len;
			continue;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LD_W_IND:
			k = X + K;
			goto load_w;
		case BPF_S_LD_H_IND:
			k = X + K;
			goto load_h;
		case BPF_S_LD_B_IND:
			k = X + K;
			goto load_b;
		case BPF_S_LDX_B_MSH:
			ptr = load_pointer(skb, K, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_LD_IMM:
			A = K;
			continue;
		case BPF_S_LDX_IMM:
			X = K;
			continue;
		case BPF_S_LD_MEM:
			A = (memvalid & (1UL << K)) ?
				mem[K] : 0;
			continue;
		case BPF_S_LDX_MEM:
			X = (memvalid & (1UL << K)) ?
				mem[K] : 0;
			continue;
		case BPF_S_MISC_TAX:
			X = A;
			continue;
		case BPF_S_MISC_TXA:
			A = X;
			continue;
		case BPF_S_RET_K:
			return K;
		case BPF_S_RET_A:
			return A;
		case BPF_S_ST:
			memvalid |= 1UL << K;
			mem[K] = A;
			continue;
		case BPF_S_STX:
			memvalid |= 1UL << K;
			mem[K] = X;
			continue;
		default:
			WARN_ON(1);
			return 0;
		}
		/*
		 * Handle ancillary data, which is impossible
		 * (or very difficult) to get by parsing packet contents.
		 */
		switch (k - SKF_AD_OFF) {
		case SKF_AD_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case SKF_AD_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case SKF_AD_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case SKF_AD_MARK:
			A = skb->mark;
			continue;
		case SKF_AD_QUEUE:
			A = skb->queue_mapping;
			continue;
		case SKF_AD_CPU:
			A = raw_smp_processor_id();
			continue;
		case SKF_AD_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case SKF_AD_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			/* the attribute must fit in the remaining data */
			if (nla->nla_len > skb->len - A)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		default:
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sk_run_filter);
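
/*
 * Illustrative sketch (not part of the original file): a small program
 * of the kind sk_run_filter() executes, here accepting only IPv4/UDP
 * frames on an untagged Ethernet link (offsets 12 and 23 assume no
 * VLAN tag) and truncating them to 96 bytes.  Instructions are
 * { code, jt, jf, k }; sk_chk_filter() translates the classic BPF_*
 * opcodes below into the BPF_S_* values before this function runs.
 *
 *	static struct sock_filter example_udp_insns[] = {
 *		{ BPF_LD  | BPF_H   | BPF_ABS, 0, 0, 12 },
 *		{ BPF_JMP | BPF_JEQ | BPF_K,   0, 2, ETH_P_IP },
 *		{ BPF_LD  | BPF_B   | BPF_ABS, 0, 0, 23 },
 *		{ BPF_JMP | BPF_JEQ | BPF_K,   1, 0, IPPROTO_UDP },
 *		{ BPF_RET | BPF_K,             0, 0, 0 },
 *		{ BPF_RET | BPF_K,             0, 0, 96 },
 *	};
 */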
/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
		[BPF_RET|BPF_K]          = BPF_S_RET_K,
		[BPF_RET|BPF_A]          = BPF_S_RET_A,
		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
		[BPF_ST]                 = BPF_S_ST,
		[BPF_STX]                = BPF_S_STX,
		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
	};
	int pc;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		code = codes[code];
		if (!code)
			return -EINVAL;
		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			ftest->k = reciprocal_value(ftest->k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		}
		ftest->code = code;
	}
	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return 0;
	}
	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
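
/*
 * Illustrative sketch (not part of the original file): instructions
 * that sk_chk_filter() rejects, one per rule above (shown as
 * { code, jt, jf, k } initializers):
 *
 *	{ BPF_ALU | BPF_DIV | BPF_K, 0, 0, 0 }     division by zero
 *	{ BPF_ST, 0, 0, BPF_MEMWORDS }             scratch slot out of range
 *	{ BPF_JMP | BPF_JA, 0, 0, 100 }            jump past the end of a
 *	                                           short program
 *
 * A program whose final instruction is not BPF_RET|BPF_K or
 * BPF_RET|BPF_A fails the last check.  Note the BPF_S_ALU_DIV_K side
 * effect: on success the verifier rewrites ftest->k to
 * reciprocal_value(k) so that sk_run_filter() can divide via
 * reciprocal_divide().
 */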
/**
 *	sk_filter_rcu_release - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_rcu_release(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	sk_filter_release(fp);
}
static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	/* uncharge the memory accounted to the socket's sk_omem_alloc */
	atomic_sub(size, &sk->sk_omem_alloc);
	call_rcu_bh(&fp->rcu, sk_filter_rcu_release);
}
/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter, a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize + sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize + sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_delayed_uncharge(sk, old_fp);
	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
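
/*
 * Illustrative userspace sketch (not part of the original file):
 * sk_attach_filter() is normally reached through setsockopt() with
 * SO_ATTACH_FILTER; sock_setsockopt() copies the struct sock_fprog in
 * and calls this function with the socket lock held, which is what
 * justifies the sock_owned_by_user() condition above.  A minimal
 * caller attaching an accept-everything filter:
 *
 *	struct sock_filter insns[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },
 *	};
 *	struct sock_fprog prog = {
 *		.len    = sizeof(insns) / sizeof(insns[0]),
 *		.filter = insns,
 *	};
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *		       &prog, sizeof(prog)) < 0)
 *		perror("SO_ATTACH_FILTER");
 */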
int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		rcu_assign_pointer(sk->sk_filter, NULL);
		sk_filter_delayed_uncharge(sk, filter);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
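
/*
 * Illustrative sketch (not part of the original file): the matching
 * userspace operation is SO_DETACH_FILTER.  Its option value is
 * ignored, but sock_setsockopt() generally still requires an
 * int-sized buffer, so a dummy value is passed; -ENOENT comes back
 * when no filter was attached:
 *
 *	int dummy = 0;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER,
 *		   &dummy, sizeof(dummy));
 */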