/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *	Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *	- The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>

/* No hurry in this branch */
static void *__load_pointer(struct sk_buff *skb, int k)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr < skb_tail_pointer(skb))
		return ptr;
	return NULL;
}
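
/*
 * Illustrative note (hypothetical instruction, not from this file): the
 * negative offsets let a program address a header regardless of where it
 * sits in the frame. For example,
 *
 *	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9)
 *
 * loads the IPv4 protocol byte relative to the network header.
 */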

static inline void *load_pointer(struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	/*
	 * Non-negative offsets index packet data directly; negative
	 * offsets select one of the special SKF_* bases handled above.
	 */
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	else {
		if (k >= SKF_AD_OFF)
			return NULL;
		return __load_pointer(skb, k);
	}
}

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to correct size returned by
 * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data. This is the socket level
 * wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock_bh();
	filter = rcu_dereference_bh(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = sk_run_filter(skb, filter->insns);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(sk_filter);
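
/*
 * Usage sketch (illustrative, not part of this file): receive paths such
 * as sock_queue_rcv_skb() run every inbound skb through sk_filter() and
 * drop the packet on a non-zero return:
 *
 *	err = sk_filter(sk, skb);
 *	if (err)
 *		goto drop;
 *
 * where the drop label is assumed to free the skb.
 */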

/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. @skb is the data we are
 * filtering, @fentry is the array of filter instructions.
 * Because all jumps are guaranteed to be before last instruction,
 * and last instruction guaranteed to be a RET, we don't need to check
 * flen. (We used to pass to this function the length of filter)
 */
unsigned int sk_run_filter(struct sk_buff *skb, const struct sock_filter *fentry)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	unsigned long memvalid = 0;	/* Bitmap of initialized mem[] slots */
	u32 tmp;
	int k;

	/* One memvalid bit must exist for every scratch word. */
	BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);

	/*
	 * Process array of filter instructions.
	 */
	for (;; fentry++) {
#if defined(CONFIG_X86_32)
		/* Register-starved arch: read fentry->k in place. */
#define	K (fentry->k)
#else
		const u32 K = fentry->k;
#endif

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += K;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= K;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= K;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			A /= K;	/* K != 0 is enforced by sk_chk_filter() */
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= K;
			continue;
		case BPF_S_ALU_OR_X:
			A |= X;
			continue;
		case BPF_S_ALU_OR_K:
			A |= K;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= K;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= K;
			continue;
		case BPF_S_ALU_NEG:
			A = -A;
			continue;
		case BPF_S_JMP_JA:
			fentry += K;
			continue;
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = K;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			break;
		case BPF_S_LD_H_ABS:
			k = K;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			break;
		case BPF_S_LD_B_ABS:
			k = K;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			break;
		case BPF_S_LD_W_LEN:
			A = skb->len;
			continue;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LD_W_IND:
			k = X + K;
			goto load_w;
		case BPF_S_LD_H_IND:
			k = X + K;
			goto load_h;
		case BPF_S_LD_B_IND:
			k = X + K;
			goto load_b;
		case BPF_S_LDX_B_MSH:
			/* X = IP header length: low nibble of byte K, in words */
			ptr = load_pointer(skb, K, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_LD_IMM:
			A = K;
			continue;
		case BPF_S_LDX_IMM:
			X = K;
			continue;
		case BPF_S_LD_MEM:
			/* Reads of never-written scratch slots yield 0. */
			A = (memvalid & (1UL << K)) ?
				mem[K] : 0;
			continue;
		case BPF_S_LDX_MEM:
			X = (memvalid & (1UL << K)) ?
				mem[K] : 0;
			continue;
		case BPF_S_MISC_TAX:
			X = A;
			continue;
		case BPF_S_MISC_TXA:
			A = X;
			continue;
		case BPF_S_RET_K:
			return K;
		case BPF_S_RET_A:
			return A;
		case BPF_S_ST:
			memvalid |= 1UL << K;
			mem[K] = A;
			continue;
		case BPF_S_STX:
			memvalid |= 1UL << K;
			mem[K] = X;
			continue;
		default:
			WARN_ON(1);
			return 0;
		}
		/*
		 * Handle ancillary data, which are impossible
		 * (or very difficult) to get parsing packet contents.
		 */
		switch (k - SKF_AD_OFF) {
		case SKF_AD_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case SKF_AD_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case SKF_AD_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case SKF_AD_MARK:
			A = skb->mark;
			continue;
		case SKF_AD_QUEUE:
			A = skb->queue_mapping;
			continue;
		case SKF_AD_HATYPE:
			if (!skb->dev)
				return 0;
			A = skb->dev->type;
			continue;
		case SKF_AD_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			/* guard the unsigned subtraction below */
			if (skb->len < sizeof(struct nlattr))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case SKF_AD_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			/* guard the unsigned subtractions below */
			if (skb->len < sizeof(struct nlattr))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			/* the attribute must lie inside the linear data */
			if (nla->nla_len > skb->len - A)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		default:
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sk_run_filter);
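
/*
 * Example program (illustrative sketch, not part of the original file):
 * a four-instruction classic BPF filter that keeps ARP frames and drops
 * everything else, using the ancillary protocol load handled above.
 * BPF_STMT()/BPF_JUMP() are the initializer macros from <linux/filter.h>,
 * ETH_P_ARP comes from <linux/if_ether.h>:
 *
 *	static struct sock_filter arp_only[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 SKF_AD_OFF + SKF_AD_PROTOCOL),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_ARP, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
 *		BPF_STMT(BPF_RET | BPF_K, 0),
 *	};
 *
 * sk_run_filter() dispatches on the translated BPF_S_* opcodes, so a raw
 * program like this must pass through sk_chk_filter() (as
 * sk_attach_filter() below does) before it can be run.
 */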

/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K + 1,
		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X + 1,
		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K + 1,
		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X + 1,
		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K + 1,
		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X + 1,
		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X + 1,
		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K + 1,
		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X + 1,
		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K + 1,
		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X + 1,
		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K + 1,
		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X + 1,
		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K + 1,
		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X + 1,
		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG + 1,
		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS + 1,
		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS + 1,
		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS + 1,
		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN + 1,
		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND + 1,
		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND + 1,
		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND + 1,
		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM + 1,
		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN + 1,
		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH + 1,
		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM + 1,
		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX + 1,
		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA + 1,
		[BPF_RET|BPF_K]          = BPF_S_RET_K + 1,
		[BPF_RET|BPF_A]          = BPF_S_RET_A + 1,
		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K + 1,
		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM + 1,
		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM + 1,
		[BPF_ST]                 = BPF_S_ST + 1,
		[BPF_STX]                = BPF_S_STX + 1,
		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA + 1,
		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K + 1,
		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X + 1,
		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K + 1,
		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X + 1,
		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K + 1,
		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X + 1,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K + 1,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X + 1,
	};
	int pc;

	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		code = codes[code];
		/* Undo the '+ 1' in codes[] after validation. */
		if (!code--)
			return -EINVAL;
		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		}
		ftest->code = code;
	}

	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return 0;
	}
	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
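
/*
 * Sketch (illustrative): sk_chk_filter() rewrites ftest->code in place,
 * so a program is validated once and then handed to sk_run_filter()
 * as-is. The two-instruction program below returns the EtherType as the
 * number of bytes to keep:
 *
 *	struct sock_filter prog[] = {
 *		BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
 *		BPF_STMT(BPF_RET | BPF_A, 0),
 *	};
 *	unsigned int len;
 *
 *	if (sk_chk_filter(prog, ARRAY_SIZE(prog)) == 0)
 *		len = sk_run_filter(skb, prog);
 */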

/**
 *	sk_filter_rcu_release - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_rcu_release(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	sk_filter_release(fp);
}

static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	/* Uncharge now; free the filter itself after an RCU grace period. */
	atomic_sub(size, &sk->sk_omem_alloc);
	call_rcu_bh(&fp->rcu, sk_filter_rcu_release);
}

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize + sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize + sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_delayed_uncharge(sk, old_fp);
	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
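
/*
 * Userspace counterpart (illustrative sketch): sk_attach_filter() is
 * reached through setsockopt(SO_ATTACH_FILTER). Error handling is
 * omitted and "fd" is assumed to be an open socket:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
 *	};
 *	struct sock_fprog prog = {
 *		.len	= 1,
 *		.filter	= insns,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */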

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		rcu_assign_pointer(sk->sk_filter, NULL);
		sk_filter_delayed_uncharge(sk, filter);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
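
/*
 * Userspace counterpart (illustrative sketch): the filter is removed via
 * setsockopt(SO_DETACH_FILTER); the option value is not interpreted, but
 * an int-sized buffer must still be supplied:
 *
 *	int dummy = 0;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, &dummy, sizeof(dummy));
 */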