2 * Copyright (C) 2016 Netronome Systems, Inc.
4 * This software is dual licensed under the GNU General Public License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
9 * The BSD 2-Clause License:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #define pr_fmt(fmt) "NFP net bpf: " fmt
36 #include <linux/bpf.h>
37 #include <linux/bpf_verifier.h>
38 #include <linux/kernel.h>
39 #include <linux/pkt_cls.h>
/* Analyzer/verifier definitions */

/* Private state handed to the BPF verifier's analyzer callback via
 * env->analyzer_priv: the offload program being built and a cursor into
 * its instruction-metadata list (advanced as instructions are visited).
 */
struct nfp_bpf_analyzer_priv {
	struct nfp_prog *prog;		/* program under translation */
	struct nfp_insn_meta *meta;	/* last meta visited; walk cursor */
};
49 static struct nfp_insn_meta
*
50 nfp_bpf_goto_meta(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
,
51 unsigned int insn_idx
, unsigned int n_insns
)
53 unsigned int forward
, backward
, i
;
55 backward
= meta
->n
- insn_idx
;
56 forward
= insn_idx
- meta
->n
;
58 if (min(forward
, backward
) > n_insns
- insn_idx
- 1) {
59 backward
= n_insns
- insn_idx
- 1;
60 meta
= nfp_prog_last_meta(nfp_prog
);
62 if (min(forward
, backward
) > insn_idx
&& backward
> insn_idx
) {
64 meta
= nfp_prog_first_meta(nfp_prog
);
67 if (forward
< backward
)
68 for (i
= 0; i
< forward
; i
++)
69 meta
= nfp_meta_next(meta
);
71 for (i
= 0; i
< backward
; i
++)
72 meta
= nfp_meta_prev(meta
);
78 nfp_bpf_check_exit(struct nfp_prog
*nfp_prog
,
79 struct bpf_verifier_env
*env
)
81 const struct bpf_reg_state
*reg0
= cur_regs(env
) + BPF_REG_0
;
84 if (nfp_prog
->type
== BPF_PROG_TYPE_XDP
)
87 if (!(reg0
->type
== SCALAR_VALUE
&& tnum_is_const(reg0
->var_off
))) {
90 tnum_strn(tn_buf
, sizeof(tn_buf
), reg0
->var_off
);
91 pr_info("unsupported exit state: %d, var_off: %s\n",
96 imm
= reg0
->var_off
.value
;
97 if (nfp_prog
->type
== BPF_PROG_TYPE_SCHED_CLS
&&
98 imm
<= TC_ACT_REDIRECT
&&
99 imm
!= TC_ACT_SHOT
&& imm
!= TC_ACT_STOLEN
&&
100 imm
!= TC_ACT_QUEUED
) {
101 pr_info("unsupported exit state: %d, imm: %llx\n",
110 nfp_bpf_check_stack_access(struct nfp_prog
*nfp_prog
,
111 struct nfp_insn_meta
*meta
,
112 const struct bpf_reg_state
*reg
)
114 s32 old_off
, new_off
;
116 if (!tnum_is_const(reg
->var_off
)) {
117 pr_info("variable ptr stack access\n");
121 if (meta
->ptr
.type
== NOT_INIT
)
124 old_off
= meta
->ptr
.off
+ meta
->ptr
.var_off
.value
;
125 new_off
= reg
->off
+ reg
->var_off
.value
;
127 meta
->ptr_not_const
|= old_off
!= new_off
;
129 if (!meta
->ptr_not_const
)
132 if (old_off
% 4 == new_off
% 4)
135 pr_info("stack access changed location was:%d is:%d\n",
141 nfp_bpf_check_ptr(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
,
142 struct bpf_verifier_env
*env
, u8 reg_no
)
144 const struct bpf_reg_state
*reg
= cur_regs(env
) + reg_no
;
147 if (reg
->type
!= PTR_TO_CTX
&&
148 reg
->type
!= PTR_TO_STACK
&&
149 reg
->type
!= PTR_TO_PACKET
) {
150 pr_info("unsupported ptr type: %d\n", reg
->type
);
154 if (reg
->type
== PTR_TO_STACK
) {
155 err
= nfp_bpf_check_stack_access(nfp_prog
, meta
, reg
);
160 if (meta
->ptr
.type
!= NOT_INIT
&& meta
->ptr
.type
!= reg
->type
) {
161 pr_info("ptr type changed for instruction %d -> %d\n",
162 meta
->ptr
.type
, reg
->type
);
172 nfp_verify_insn(struct bpf_verifier_env
*env
, int insn_idx
, int prev_insn_idx
)
174 struct nfp_bpf_analyzer_priv
*priv
= env
->analyzer_priv
;
175 struct nfp_insn_meta
*meta
= priv
->meta
;
177 meta
= nfp_bpf_goto_meta(priv
->prog
, meta
, insn_idx
, env
->prog
->len
);
180 if (meta
->insn
.src_reg
>= MAX_BPF_REG
||
181 meta
->insn
.dst_reg
>= MAX_BPF_REG
) {
182 pr_err("program uses extended registers - jit hardening?\n");
186 if (meta
->insn
.code
== (BPF_JMP
| BPF_EXIT
))
187 return nfp_bpf_check_exit(priv
->prog
, env
);
189 if ((meta
->insn
.code
& ~BPF_SIZE_MASK
) == (BPF_LDX
| BPF_MEM
))
190 return nfp_bpf_check_ptr(priv
->prog
, meta
, env
,
192 if ((meta
->insn
.code
& ~BPF_SIZE_MASK
) == (BPF_STX
| BPF_MEM
))
193 return nfp_bpf_check_ptr(priv
->prog
, meta
, env
,
199 static const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops
= {
200 .insn_hook
= nfp_verify_insn
,
203 int nfp_prog_verify(struct nfp_prog
*nfp_prog
, struct bpf_prog
*prog
)
205 struct nfp_bpf_analyzer_priv
*priv
;
208 nfp_prog
->stack_depth
= prog
->aux
->stack_depth
;
210 priv
= kzalloc(sizeof(*priv
), GFP_KERNEL
);
214 priv
->prog
= nfp_prog
;
215 priv
->meta
= nfp_prog_first_meta(nfp_prog
);
217 ret
= bpf_analyzer(prog
, &nfp_bpf_analyzer_ops
, priv
);