/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *        Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *        - The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
        u8 *ptr = NULL;

        if (k >= SKF_NET_OFF)
                ptr = skb_network_header(skb) + k - SKF_NET_OFF;
        else if (k >= SKF_LL_OFF)
                ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

        if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
                return ptr;
        return NULL;
}
static inline void *load_pointer(const struct sk_buff *skb, int k,
                                 unsigned int size, void *buffer)
{
        if (k >= 0)
                return skb_header_pointer(skb, k, size, buffer);
        return bpf_internal_load_pointer_neg_helper(skb, k, size);
}
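
/*
 * Example (illustrative only, not compiled into this file): a classic BPF
 * instruction can address packet data relative to the network or link-layer
 * header via the special negative offsets handled above. A userspace filter
 * built with the macros from <linux/filter.h> might contain:
 *
 *        BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9)
 *
 * which loads the byte at network-header offset 9 (the IPv4 protocol field)
 * into the accumulator, independent of link-layer framing. load_pointer()
 * sees the negative k and hands it to
 * bpf_internal_load_pointer_neg_helper() above.
 */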
/**
 *        sk_filter - run a packet through a socket filter
 *        @sk: sock associated with &sk_buff
 *        @skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
        int err;
        struct sk_filter *filter;

        /*
         * If the skb was allocated from pfmemalloc reserves, only
         * allow SOCK_MEMALLOC sockets to use it as this socket is
         * helping free memory
         */
        if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
                return -ENOMEM;

        err = security_sock_rcv_skb(sk, skb);
        if (err)
                return err;

        rcu_read_lock();
        filter = rcu_dereference(sk->sk_filter);
        if (filter) {
                unsigned int pkt_len = SK_RUN_FILTER(filter, skb);

                err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
        }
        rcu_read_unlock();

        return err;
}
EXPORT_SYMBOL(sk_filter);
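
/*
 * Usage sketch (illustrative, simplified from real callers such as the
 * UDP receive path): a protocol's receive routine typically does
 *
 *        if (sk_filter(sk, skb)) {
 *                kfree_skb(skb);
 *                return -1;
 *        }
 *
 * dropping the skb when the filter returns 0 (or the trim fails), and
 * otherwise continuing with an skb possibly trimmed to the filter's
 * return value. No extra locking is needed around the call; sk_filter()
 * takes the RCU read lock itself while dereferencing sk->sk_filter.
 */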
/**
 *        sk_run_filter - run a filter on a socket
 *        @skb: buffer to run the filter on
 *        @fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data. Return the
 * length to keep, 0 for none. @skb is the data we are filtering,
 * @fentry is the array of filter instructions. Because all jumps are
 * guaranteed to be before the last instruction, and the last instruction
 * is guaranteed to be a RET, we don't need to check flen. (We used to
 * pass the length of the filter to this function.)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
                           const struct sock_filter *fentry)
{
        void *ptr;
        u32 A = 0;                        /* Accumulator */
        u32 X = 0;                        /* Index Register */
        u32 mem[BPF_MEMWORDS];            /* Scratch Memory Store */
        u32 tmp;
        int k;
        /*
         * Process the array of filter instructions.
         */
        for (;; fentry++) {
#if defined(CONFIG_X86_32)
#define K (fentry->k)
#else
                const u32 K = fentry->k;
#endif

                switch (fentry->code) {
                case BPF_S_ALU_ADD_X:
                        A += X;
                        continue;
                case BPF_S_ALU_ADD_K:
                        A += K;
                        continue;
                case BPF_S_ALU_SUB_X:
                        A -= X;
                        continue;
                case BPF_S_ALU_SUB_K:
                        A -= K;
                        continue;
                case BPF_S_ALU_MUL_X:
                        A *= X;
                        continue;
                case BPF_S_ALU_MUL_K:
                        A *= K;
                        continue;
                case BPF_S_ALU_DIV_X:
                        if (X == 0)
                                return 0;
                        A /= X;
                        continue;
                case BPF_S_ALU_DIV_K:
                        /* K was converted by sk_chk_filter() into a
                         * multiplicative inverse, so the divide is a
                         * cheap multiply-and-shift here.
                         */
                        A = reciprocal_divide(A, K);
                        continue;
                case BPF_S_ALU_MOD_X:
                        if (X == 0)
                                return 0;
                        A %= X;
                        continue;
                case BPF_S_ALU_MOD_K:
                        A %= K;
                        continue;
                case BPF_S_ALU_AND_X:
                        A &= X;
                        continue;
                case BPF_S_ALU_AND_K:
                        A &= K;
                        continue;
                case BPF_S_ALU_OR_X:
                        A |= X;
                        continue;
                case BPF_S_ALU_OR_K:
                        A |= K;
                        continue;
                case BPF_S_ANC_ALU_XOR_X:
                case BPF_S_ALU_XOR_X:
                        A ^= X;
                        continue;
                case BPF_S_ALU_XOR_K:
                        A ^= K;
                        continue;
                case BPF_S_ALU_LSH_X:
                        A <<= X;
                        continue;
                case BPF_S_ALU_LSH_K:
                        A <<= K;
                        continue;
                case BPF_S_ALU_RSH_X:
                        A >>= X;
                        continue;
                case BPF_S_ALU_RSH_K:
                        A >>= K;
                        continue;
                case BPF_S_ALU_NEG:
                        A = -A;
                        continue;
                case BPF_S_JMP_JA:
                        fentry += K;
                        continue;
                case BPF_S_JMP_JGT_K:
                        fentry += (A > K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JGE_K:
                        fentry += (A >= K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JEQ_K:
                        fentry += (A == K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JSET_K:
                        fentry += (A & K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JGT_X:
                        fentry += (A > X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JGE_X:
                        fentry += (A >= X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JEQ_X:
                        fentry += (A == X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JSET_X:
                        fentry += (A & X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_LD_W_ABS:
                        k = K;
load_w:
                        ptr = load_pointer(skb, k, 4, &tmp);
                        if (ptr != NULL) {
                                A = get_unaligned_be32(ptr);
                                continue;
                        }
                        return 0;
                case BPF_S_LD_H_ABS:
                        k = K;
load_h:
                        ptr = load_pointer(skb, k, 2, &tmp);
                        if (ptr != NULL) {
                                A = get_unaligned_be16(ptr);
                                continue;
                        }
                        return 0;
                case BPF_S_LD_B_ABS:
                        k = K;
load_b:
                        ptr = load_pointer(skb, k, 1, &tmp);
                        if (ptr != NULL) {
                                A = *(u8 *)ptr;
                                continue;
                        }
                        return 0;
                case BPF_S_LD_W_LEN:
                        A = skb->len;
                        continue;
                case BPF_S_LDX_W_LEN:
                        X = skb->len;
                        continue;
                case BPF_S_LD_W_IND:
                        k = X + K;
                        goto load_w;
                case BPF_S_LD_H_IND:
                        k = X + K;
                        goto load_h;
                case BPF_S_LD_B_IND:
                        k = X + K;
                        goto load_b;
                case BPF_S_LDX_B_MSH:
                        /* X = 4 * (P[K:1] & 0xf), the classic "load
                         * IP header length" instruction */
                        ptr = load_pointer(skb, K, 1, &tmp);
                        if (ptr != NULL) {
                                X = (*(u8 *)ptr & 0xf) << 2;
                                continue;
                        }
                        return 0;
                case BPF_S_LD_IMM:
                        A = K;
                        continue;
                case BPF_S_LDX_IMM:
                        X = K;
                        continue;
                case BPF_S_LD_MEM:
                        A = mem[K];
                        continue;
                case BPF_S_LDX_MEM:
                        X = mem[K];
                        continue;
                case BPF_S_MISC_TAX:
                        X = A;
                        continue;
                case BPF_S_MISC_TXA:
                        A = X;
                        continue;
                case BPF_S_RET_K:
                        return K;
                case BPF_S_RET_A:
                        return A;
                case BPF_S_ST:
                        mem[K] = A;
                        continue;
                case BPF_S_STX:
                        mem[K] = X;
                        continue;
                case BPF_S_ANC_PROTOCOL:
                        A = ntohs(skb->protocol);
                        continue;
                case BPF_S_ANC_PKTTYPE:
                        A = skb->pkt_type;
                        continue;
                case BPF_S_ANC_IFINDEX:
                        if (!skb->dev)
                                return 0;
                        A = skb->dev->ifindex;
                        continue;
                case BPF_S_ANC_MARK:
                        A = skb->mark;
                        continue;
                case BPF_S_ANC_QUEUE:
                        A = skb->queue_mapping;
                        continue;
                case BPF_S_ANC_HATYPE:
                        if (!skb->dev)
                                return 0;
                        A = skb->dev->type;
                        continue;
                case BPF_S_ANC_RXHASH:
                        A = skb->rxhash;
                        continue;
                case BPF_S_ANC_CPU:
                        A = raw_smp_processor_id();
                        continue;
                case BPF_S_ANC_VLAN_TAG:
                        A = vlan_tx_tag_get(skb);
                        continue;
                case BPF_S_ANC_VLAN_TAG_PRESENT:
                        A = !!vlan_tx_tag_present(skb);
                        continue;
                case BPF_S_ANC_NLATTR: {
                        struct nlattr *nla;

                        if (skb_is_nonlinear(skb))
                                return 0;
                        if (A > skb->len - sizeof(struct nlattr))
                                return 0;

                        nla = nla_find((struct nlattr *)&skb->data[A],
                                       skb->len - A, X);
                        if (nla)
                                A = (void *)nla - (void *)skb->data;
                        else
                                A = 0;
                        continue;
                }
                case BPF_S_ANC_NLATTR_NEST: {
                        struct nlattr *nla;

                        if (skb_is_nonlinear(skb))
                                return 0;
                        if (A > skb->len - sizeof(struct nlattr))
                                return 0;

                        nla = (struct nlattr *)&skb->data[A];
                        /* the attribute must fit within the message */
                        if (nla->nla_len > skb->len - A)
                                return 0;

                        nla = nla_find_nested(nla, X);
                        if (nla)
                                A = (void *)nla - (void *)skb->data;
                        else
                                A = 0;
                        continue;
                }
#ifdef CONFIG_SECCOMP_FILTER
                case BPF_S_ANC_SECCOMP_LD_W:
                        A = seccomp_bpf_load(fentry->k);
                        continue;
#endif
                default:
                        WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
                                       fentry->code, fentry->jt,
                                       fentry->jf, fentry->k);
                        return 0;
                }
        }

        return 0;
}
EXPORT_SYMBOL(sk_run_filter);
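
/*
 * Example (illustrative sketch, not part of this file's build): a minimal
 * classic BPF program this interpreter can run. It keeps IPv4 frames on an
 * Ethernet device and drops everything else:
 *
 *        struct sock_filter insns[] = {
 *                BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),              (A = ethertype)
 *                BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 1),   (IPv4?)
 *                BPF_STMT(BPF_RET | BPF_K, 0xffff),                   (accept)
 *                BPF_STMT(BPF_RET | BPF_K, 0),                        (drop)
 *        };
 *
 * sk_chk_filter() rewrites these raw opcodes into the BPF_S_* values used
 * by the switch above before the filter is ever run.
 */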
/*
 * Security:
 * A BPF program is able to use 16 cells of memory to store intermediate
 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
 * As we don't want to clear the mem[] array for each packet going through
 * sk_run_filter(), we check that a filter loaded by a user never tries to
 * read a cell that was not previously written, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
        u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
        int pc, ret = 0;

        BUILD_BUG_ON(BPF_MEMWORDS > 16);
        masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
        if (!masks)
                return -ENOMEM;

        memset(masks, 0xff, flen * sizeof(*masks));

        for (pc = 0; pc < flen; pc++) {
                memvalid &= masks[pc];

                switch (filter[pc].code) {
                case BPF_S_ST:
                case BPF_S_STX:
                        memvalid |= (1 << filter[pc].k);
                        break;
                case BPF_S_LD_MEM:
                case BPF_S_LDX_MEM:
                        if (!(memvalid & (1 << filter[pc].k))) {
                                ret = -EINVAL;
                                goto error;
                        }
                        break;
                case BPF_S_JMP_JA:
                        /* a jump must set masks on target */
                        masks[pc + 1 + filter[pc].k] &= memvalid;
                        memvalid = ~0;
                        break;
                case BPF_S_JMP_JEQ_K:
                case BPF_S_JMP_JEQ_X:
                case BPF_S_JMP_JGE_K:
                case BPF_S_JMP_JGE_X:
                case BPF_S_JMP_JGT_K:
                case BPF_S_JMP_JGT_X:
                case BPF_S_JMP_JSET_X:
                case BPF_S_JMP_JSET_K:
                        /* a jump must set masks on targets */
                        masks[pc + 1 + filter[pc].jt] &= memvalid;
                        masks[pc + 1 + filter[pc].jf] &= memvalid;
                        memvalid = ~0;
                        break;
                }
        }
error:
        kfree(masks);
        return ret;
}
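
/*
 * Worked example (hypothetical program, for illustration): the filter
 *
 *        0: BPF_STMT(BPF_LD | BPF_MEM, 3)        (A = mem[3])
 *        1: BPF_STMT(BPF_RET | BPF_A, 0)
 *
 * is rejected here: at pc 0 no store has executed yet, so bit 3 of
 * memvalid is clear and the BPF_S_LD_MEM check returns -EINVAL.
 * Inserting BPF_STMT(BPF_ST, 3) before the load marks cell 3 as
 * written and lets the program pass.
 */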
/**
 *        sk_chk_filter - verify socket filter code
 *        @filter: filter to verify
 *        @flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
{
        /*
         * Valid instructions are initialized to non-0.
         * Invalid instructions are initialized to 0.
         */
        static const u8 codes[] = {
                [BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
                [BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
                [BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
                [BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
                [BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
                [BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
                [BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
                [BPF_ALU|BPF_MOD|BPF_K]  = BPF_S_ALU_MOD_K,
                [BPF_ALU|BPF_MOD|BPF_X]  = BPF_S_ALU_MOD_X,
                [BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
                [BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
                [BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
                [BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
                [BPF_ALU|BPF_XOR|BPF_K]  = BPF_S_ALU_XOR_K,
                [BPF_ALU|BPF_XOR|BPF_X]  = BPF_S_ALU_XOR_X,
                [BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
                [BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
                [BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
                [BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
                [BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
                [BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
                [BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
                [BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
                [BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
                [BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
                [BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
                [BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
                [BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
                [BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
                [BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
                [BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
                [BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
                [BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
                [BPF_RET|BPF_K]          = BPF_S_RET_K,
                [BPF_RET|BPF_A]          = BPF_S_RET_A,
                [BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
                [BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
                [BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
                [BPF_ST]                 = BPF_S_ST,
                [BPF_STX]                = BPF_S_STX,
                [BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
                [BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
                [BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
                [BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
                [BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
                [BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
                [BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
                [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
                [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
        };
        int pc;
        bool anc_found;
        if (flen == 0 || flen > BPF_MAXINSNS)
                return -EINVAL;

        /* check the filter code now */
        for (pc = 0; pc < flen; pc++) {
                struct sock_filter *ftest = &filter[pc];
                u16 code = ftest->code;

                if (code >= ARRAY_SIZE(codes))
                        return -EINVAL;
                code = codes[code];
                if (!code)
                        return -EINVAL;
                /* Some instructions need special checks */
                switch (code) {
                case BPF_S_ALU_DIV_K:
                        /* check for division by zero */
                        if (ftest->k == 0)
                                return -EINVAL;
                        ftest->k = reciprocal_value(ftest->k);
                        break;
                case BPF_S_ALU_MOD_K:
                        /* check for division by zero */
                        if (ftest->k == 0)
                                return -EINVAL;
                        break;
                case BPF_S_LD_MEM:
                case BPF_S_LDX_MEM:
                case BPF_S_ST:
                case BPF_S_STX:
                        /* check for invalid memory addresses */
                        if (ftest->k >= BPF_MEMWORDS)
                                return -EINVAL;
                        break;
                case BPF_S_JMP_JA:
                        /*
                         * Note, the large ftest->k might cause loops.
                         * Compare this with conditional jumps below,
                         * where offsets are limited. --ANK (981016)
                         */
                        if (ftest->k >= (unsigned int)(flen - pc - 1))
                                return -EINVAL;
                        break;
                case BPF_S_JMP_JEQ_K:
                case BPF_S_JMP_JEQ_X:
                case BPF_S_JMP_JGE_K:
                case BPF_S_JMP_JGE_X:
                case BPF_S_JMP_JGT_K:
                case BPF_S_JMP_JGT_X:
                case BPF_S_JMP_JSET_X:
                case BPF_S_JMP_JSET_K:
                        /* for conditionals both must be safe */
                        if (pc + ftest->jt + 1 >= flen ||
                            pc + ftest->jf + 1 >= flen)
                                return -EINVAL;
                        break;
                case BPF_S_LD_W_ABS:
                case BPF_S_LD_H_ABS:
                case BPF_S_LD_B_ABS:
                        anc_found = false;
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:        \
                                code = BPF_S_ANC_##CODE;        \
                                anc_found = true;               \
                                break
                        switch (ftest->k) {
                        ANCILLARY(PROTOCOL);
                        ANCILLARY(PKTTYPE);
                        ANCILLARY(IFINDEX);
                        ANCILLARY(NLATTR);
                        ANCILLARY(NLATTR_NEST);
                        ANCILLARY(MARK);
                        ANCILLARY(QUEUE);
                        ANCILLARY(HATYPE);
                        ANCILLARY(RXHASH);
                        ANCILLARY(CPU);
                        ANCILLARY(ALU_XOR_X);
                        ANCILLARY(VLAN_TAG);
                        ANCILLARY(VLAN_TAG_PRESENT);
                        }
                        /* ancillary operation unknown or unsupported */
                        if (anc_found == false && ftest->k >= SKF_AD_OFF)
                                return -EINVAL;
                }
                ftest->code = code;
        }

        /* last instruction must be a RET code */
        switch (filter[flen - 1].code) {
        case BPF_S_RET_K:
        case BPF_S_RET_A:
                return check_load_and_stores(filter, flen);
        }
        return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
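
/*
 * Example (illustrative): how an ancillary load reaches the ANCILLARY()
 * rewrite above. A userspace program asks for the receiving device with
 *
 *        BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_IFINDEX)
 *
 * It arrives here encoded as an ordinary absolute load; the switch on
 * ftest->k rewrites its code to BPF_S_ANC_IFINDEX, so sk_run_filter()
 * loads skb->dev->ifindex instead of packet bytes.
 */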
/**
 *        sk_filter_release_rcu - Release a socket filter by rcu_head
 *        @rcu: rcu_head that contains the sk_filter to free
 */
void sk_filter_release_rcu(struct rcu_head *rcu)
{
        struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

        bpf_jit_free(fp);
        kfree(fp);
}
EXPORT_SYMBOL(sk_filter_release_rcu);
static int __sk_prepare_filter(struct sk_filter *fp)
{
        int err;

        fp->bpf_func = sk_run_filter;

        err = sk_chk_filter(fp->insns, fp->len);
        if (err)
                return err;

        bpf_jit_compile(fp);
        return 0;
}
/**
 *        sk_unattached_filter_create - create an unattached filter
 *        @fprog: the filter program
 *        @pfp: the unattached filter that is created
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int sk_unattached_filter_create(struct sk_filter **pfp,
                                struct sock_fprog *fprog)
{
        struct sk_filter *fp;
        unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
        int err;

        /* Make sure new filter is there and in the right amounts. */
        if (fprog->filter == NULL)
                return -EINVAL;

        fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL);
        if (!fp)
                return -ENOMEM;
        memcpy(fp->insns, fprog->filter, fsize);

        atomic_set(&fp->refcnt, 1);
        fp->len = fprog->len;

        err = __sk_prepare_filter(fp);
        if (err)
                goto free_mem;

        *pfp = fp;
        return 0;
free_mem:
        kfree(fp);
        return err;
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
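
/*
 * Usage sketch (illustrative; in-kernel users such as the team driver's
 * load-balancing mode follow this pattern). Note that fprog->filter here
 * points at kernel memory, unlike the copy_from_user() path below:
 *
 *        struct sock_fprog fprog = {
 *                .len    = ARRAY_SIZE(insns),
 *                .filter = insns,
 *        };
 *        struct sk_filter *fp;
 *
 *        if (!sk_unattached_filter_create(&fp, &fprog)) {
 *                u32 keep = SK_RUN_FILTER(fp, skb);
 *                ...
 *                sk_unattached_filter_destroy(fp);
 *        }
 */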
void sk_unattached_filter_destroy(struct sk_filter *fp)
{
        sk_filter_release(fp);
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
/**
 *        sk_attach_filter - attach a socket filter
 *        @fprog: the filter program
 *        @sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
        struct sk_filter *fp, *old_fp;
        unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
        int err;

        /* Make sure new filter is there and in the right amounts. */
        if (fprog->filter == NULL)
                return -EINVAL;

        fp = sock_kmalloc(sk, fsize + sizeof(*fp), GFP_KERNEL);
        if (!fp)
                return -ENOMEM;
        if (copy_from_user(fp->insns, fprog->filter, fsize)) {
                sock_kfree_s(sk, fp, fsize + sizeof(*fp));
                return -EFAULT;
        }

        atomic_set(&fp->refcnt, 1);
        fp->len = fprog->len;

        err = __sk_prepare_filter(fp);
        if (err) {
                sk_filter_uncharge(sk, fp);
                return err;
        }

        old_fp = rcu_dereference_protected(sk->sk_filter,
                                           sock_owned_by_user(sk));
        rcu_assign_pointer(sk->sk_filter, fp);

        if (old_fp)
                sk_filter_uncharge(sk, old_fp);

        return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
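
/*
 * Userspace view (sketch): sk_attach_filter() is reached through
 * setsockopt(SO_ATTACH_FILTER). A minimal caller looks like:
 *
 *        struct sock_fprog prog = {
 *                .len    = sizeof(insns) / sizeof(insns[0]),
 *                .filter = insns,
 *        };
 *
 *        if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *                       &prog, sizeof(prog)) < 0)
 *                perror("SO_ATTACH_FILTER");
 *
 * From then on every packet queued to the socket passes through
 * sk_filter() first; SO_DETACH_FILTER ends up in sk_detach_filter()
 * below.
 */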
int sk_detach_filter(struct sock *sk)
{
        int ret = -ENOENT;
        struct sk_filter *filter;

        filter = rcu_dereference_protected(sk->sk_filter,
                                           sock_owned_by_user(sk));
        if (filter) {
                RCU_INIT_POINTER(sk->sk_filter, NULL);
                sk_filter_uncharge(sk, filter);
                ret = 0;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
static void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
{
        static const u16 decodes[] = {
                [BPF_S_ALU_ADD_K]       = BPF_ALU|BPF_ADD|BPF_K,
                [BPF_S_ALU_ADD_X]       = BPF_ALU|BPF_ADD|BPF_X,
                [BPF_S_ALU_SUB_K]       = BPF_ALU|BPF_SUB|BPF_K,
                [BPF_S_ALU_SUB_X]       = BPF_ALU|BPF_SUB|BPF_X,
                [BPF_S_ALU_MUL_K]       = BPF_ALU|BPF_MUL|BPF_K,
                [BPF_S_ALU_MUL_X]       = BPF_ALU|BPF_MUL|BPF_X,
                [BPF_S_ALU_DIV_X]       = BPF_ALU|BPF_DIV|BPF_X,
                [BPF_S_ALU_MOD_K]       = BPF_ALU|BPF_MOD|BPF_K,
                [BPF_S_ALU_MOD_X]       = BPF_ALU|BPF_MOD|BPF_X,
                [BPF_S_ALU_AND_K]       = BPF_ALU|BPF_AND|BPF_K,
                [BPF_S_ALU_AND_X]       = BPF_ALU|BPF_AND|BPF_X,
                [BPF_S_ALU_OR_K]        = BPF_ALU|BPF_OR|BPF_K,
                [BPF_S_ALU_OR_X]        = BPF_ALU|BPF_OR|BPF_X,
                [BPF_S_ALU_XOR_K]       = BPF_ALU|BPF_XOR|BPF_K,
                [BPF_S_ALU_XOR_X]       = BPF_ALU|BPF_XOR|BPF_X,
                [BPF_S_ALU_LSH_K]       = BPF_ALU|BPF_LSH|BPF_K,
                [BPF_S_ALU_LSH_X]       = BPF_ALU|BPF_LSH|BPF_X,
                [BPF_S_ALU_RSH_K]       = BPF_ALU|BPF_RSH|BPF_K,
                [BPF_S_ALU_RSH_X]       = BPF_ALU|BPF_RSH|BPF_X,
                [BPF_S_ALU_NEG]         = BPF_ALU|BPF_NEG,
                [BPF_S_LD_W_ABS]        = BPF_LD|BPF_W|BPF_ABS,
                [BPF_S_LD_H_ABS]        = BPF_LD|BPF_H|BPF_ABS,
                [BPF_S_LD_B_ABS]        = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_PROTOCOL]    = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_PKTTYPE]     = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_IFINDEX]     = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_NLATTR]      = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_MARK]        = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_QUEUE]       = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_HATYPE]      = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_RXHASH]      = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_CPU]         = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_ALU_XOR_X]   = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_VLAN_TAG]    = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_LD_W_LEN]        = BPF_LD|BPF_W|BPF_LEN,
                [BPF_S_LD_W_IND]        = BPF_LD|BPF_W|BPF_IND,
                [BPF_S_LD_H_IND]        = BPF_LD|BPF_H|BPF_IND,
                [BPF_S_LD_B_IND]        = BPF_LD|BPF_B|BPF_IND,
                [BPF_S_LD_IMM]          = BPF_LD|BPF_IMM,
                [BPF_S_LDX_W_LEN]       = BPF_LDX|BPF_W|BPF_LEN,
                [BPF_S_LDX_B_MSH]       = BPF_LDX|BPF_B|BPF_MSH,
                [BPF_S_LDX_IMM]         = BPF_LDX|BPF_IMM,
                [BPF_S_MISC_TAX]        = BPF_MISC|BPF_TAX,
                [BPF_S_MISC_TXA]        = BPF_MISC|BPF_TXA,
                [BPF_S_RET_K]           = BPF_RET|BPF_K,
                [BPF_S_RET_A]           = BPF_RET|BPF_A,
                [BPF_S_ALU_DIV_K]       = BPF_ALU|BPF_DIV|BPF_K,
                [BPF_S_LD_MEM]          = BPF_LD|BPF_MEM,
                [BPF_S_LDX_MEM]         = BPF_LDX|BPF_MEM,
                [BPF_S_ST]              = BPF_ST,
                [BPF_S_STX]             = BPF_STX,
                [BPF_S_JMP_JA]          = BPF_JMP|BPF_JA,
                [BPF_S_JMP_JEQ_K]       = BPF_JMP|BPF_JEQ|BPF_K,
                [BPF_S_JMP_JEQ_X]       = BPF_JMP|BPF_JEQ|BPF_X,
                [BPF_S_JMP_JGE_K]       = BPF_JMP|BPF_JGE|BPF_K,
                [BPF_S_JMP_JGE_X]       = BPF_JMP|BPF_JGE|BPF_X,
                [BPF_S_JMP_JGT_K]       = BPF_JMP|BPF_JGT|BPF_K,
                [BPF_S_JMP_JGT_X]       = BPF_JMP|BPF_JGT|BPF_X,
                [BPF_S_JMP_JSET_K]      = BPF_JMP|BPF_JSET|BPF_K,
                [BPF_S_JMP_JSET_X]      = BPF_JMP|BPF_JSET|BPF_X,
        };
        u16 code = filt->code;
        to->code = decodes[code];
        to->jt = filt->jt;
        to->jf = filt->jf;

        if (code == BPF_S_ALU_DIV_K) {
                /*
                 * When this rule was loaded, the user gave us X, which
                 * was translated into R = r(X). Now we calculate
                 * RR = r(R) and report it back. If next time this value
                 * is loaded and RRR = r(RR) is calculated, then
                 * R == RRR will hold.
                 *
                 * One exception: X == 1 translates into R == 0, and we
                 * can't calculate RR out of it with r().
                 */
                if (filt->k == 0)
                        to->k = 1;
                else
                        to->k = reciprocal_value(filt->k);

                BUG_ON(reciprocal_value(to->k) != filt->k);
        } else
                to->k = filt->k;
}
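
/*
 * Worked example (illustrative): reciprocal_value() computes
 * r(X) = ceil(2^32 / X). For a divisor X == 7 the encode step in
 * sk_chk_filter() stores R = r(7) = 0x24924925. Decoding here computes
 * RR = r(R) = 7, so the user reads back the divisor originally given,
 * and re-encoding yields r(7) == R again, i.e. R == RRR as the comment
 * above states.
 */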
int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
{
        struct sk_filter *filter;
        int i, ret;

        lock_sock(sk);
        filter = rcu_dereference_protected(sk->sk_filter,
                                           sock_owned_by_user(sk));
        ret = 0;
        if (!filter)
                goto out;
        ret = filter->len;
        if (!len)
                goto out;
        ret = -EINVAL;
        if (len < filter->len)
                goto out;

        ret = -EFAULT;
        for (i = 0; i < filter->len; i++) {
                struct sock_filter fb;

                sk_decode_filter(&filter->insns[i], &fb);
                if (copy_to_user(&ubuf[i], &fb, sizeof(fb)))
                        goto out;
        }

        ret = filter->len;
out:
        release_sock(sk);
        return ret;
}