/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/pkt_cls.h>

#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"
#include "fw.h"
#include "main.h"

const struct rhashtable_params nfp_bpf_maps_neutral_params = {
	.nelem_hint		= 4,
	.key_len		= FIELD_SIZEOF(struct bpf_map, id),
	.key_offset		= offsetof(struct nfp_bpf_neutral_map, map_id),
	.head_offset		= offsetof(struct nfp_bpf_neutral_map, l),
	.automatic_shrinking	= true,
};

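/* BPF offload is only advertised when the vNIC sets the BPF capability bit
 * and reports the ABI version this driver expects; the translator also
 * assumes a little-endian host.
 */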
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
{
#ifdef __LITTLE_ENDIAN
	if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
	    nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
		return true;
#endif
	return false;
}

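/* Install, replace or remove an offloaded XDP program.  If the BPF datapath
 * is already in use but not by XDP (i.e. a TC cls_bpf program owns it), a
 * new XDP program cannot be attached.
 */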
static int
nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
		    struct bpf_prog *prog, struct netlink_ext_ack *extack)
{
	bool running, xdp_running;

	if (!nfp_net_ebpf_capable(nn))
		return -EINVAL;

	running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
	xdp_running = running && nn->xdp_hw.prog;

	if (!prog && !xdp_running)
		return 0;
	if (prog && running && !xdp_running)
		return -EBUSY;

	return nfp_net_bpf_offload(nn, prog, running, extack);
}

static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	return nfp_net_ebpf_capable(nn) ? "BPF" : "";
}

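/* Allocate per-vNIC BPF state and cache the program start offset and "done"
 * target address the firmware exposes in the control BAR.
 */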
static int
nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
{
	struct nfp_pf *pf = app->pf;
	struct nfp_bpf_vnic *bv;
	int err;

	if (!pf->eth_tbl) {
		nfp_err(pf->cpp, "No ETH table\n");
		return -EINVAL;
	}
	if (pf->max_data_vnics != pf->eth_tbl->count) {
		nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n",
			pf->max_data_vnics, pf->eth_tbl->count);
		return -EINVAL;
	}

	bv = kzalloc(sizeof(*bv), GFP_KERNEL);
	if (!bv)
		return -ENOMEM;
	nn->app_priv = bv;

	err = nfp_app_nic_vnic_alloc(app, nn, id);
	if (err)
		goto err_free_priv;

	bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
	bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

	return 0;
err_free_priv:
	kfree(nn->app_priv);
	return err;
}

static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_bpf_vnic *bv = nn->app_priv;

	WARN_ON(bv->tc_prog);
	kfree(bv);
}

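/* Block callback for TC cls_bpf offload.  Only chain 0, ETH_P_ALL,
 * direct-action classifiers without legacy actions are accepted; everything
 * else is rejected with an extack message.
 */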
static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
				     void *type_data, void *cb_priv)
{
	struct tc_cls_bpf_offload *cls_bpf = type_data;
	struct nfp_net *nn = cb_priv;
	struct bpf_prog *oldprog;
	struct nfp_bpf_vnic *bv;
	int err;

	if (type != TC_SETUP_CLSBPF) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only offload of BPF classifiers supported");
		return -EOPNOTSUPP;
	}
	if (!tc_cls_can_offload_and_chain0(nn->dp.netdev, &cls_bpf->common))
		return -EOPNOTSUPP;
	if (!nfp_net_ebpf_capable(nn)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "NFP firmware does not support eBPF offload");
		return -EOPNOTSUPP;
	}
	if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only ETH_P_ALL supported as filter protocol");
		return -EOPNOTSUPP;
	}

	/* Only support TC direct action */
	if (!cls_bpf->exts_integrated ||
	    tcf_exts_has_actions(cls_bpf->exts)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only direct action with no legacy actions supported");
		return -EOPNOTSUPP;
	}

	if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
		return -EOPNOTSUPP;

	bv = nn->app_priv;
	oldprog = cls_bpf->oldprog;

	/* Don't remove if oldprog doesn't match driver's state */
	if (bv->tc_prog != oldprog) {
		oldprog = NULL;
		if (!cls_bpf->prog)
			return 0;
	}

	err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog,
				  cls_bpf->common.extack);
	if (err)
		return err;

	bv->tc_prog = cls_bpf->prog;
	nn->port->tc_offload_cnt = !!bv->tc_prog;
	return 0;
}

static int nfp_bpf_setup_tc_block(struct net_device *netdev,
				  struct tc_block_offload *f)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block,
					     nfp_bpf_setup_tc_block_cb,
					     nn, nn, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block,
					nfp_bpf_setup_tc_block_cb,
					nn);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
			    enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_bpf_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

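/* While BPF offload is active the firmware advertises the largest supported
 * MTU in NFP_NET_CFG_BPF_INL_MTU (in 64 byte units); reject MTU changes
 * beyond that limit.
 */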
static int
nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int max_mtu;

	if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return 0;

	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	if (new_mtu > max_mtu) {
		nn_info(nn, "BPF offload active, MTU over %u not supported\n",
			max_mtu);
		return -EBUSY;
	}
	return 0;
}

static int
nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
			      u32 length)
{
	struct nfp_bpf_cap_tlv_adjust_head __iomem *cap = value;
	struct nfp_cpp *cpp = bpf->app->pf->cpp;

	if (length < sizeof(*cap)) {
		nfp_err(cpp, "truncated adjust_head TLV: %d\n", length);
		return -EINVAL;
	}

	bpf->adjust_head.flags = readl(&cap->flags);
	bpf->adjust_head.off_min = readl(&cap->off_min);
	bpf->adjust_head.off_max = readl(&cap->off_max);
	bpf->adjust_head.guaranteed_sub = readl(&cap->guaranteed_sub);
	bpf->adjust_head.guaranteed_add = readl(&cap->guaranteed_add);

	if (bpf->adjust_head.off_min > bpf->adjust_head.off_max) {
		nfp_err(cpp, "invalid adjust_head TLV: min > max\n");
		return -EINVAL;
	}
	if (!FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_min) ||
	    !FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_max)) {
		nfp_warn(cpp, "disabling adjust_head - driver expects min/max to fit in as immediates\n");
		memset(&bpf->adjust_head, 0, sizeof(bpf->adjust_head));
		return 0;
	}

	return 0;
}

static int
nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	struct nfp_bpf_cap_tlv_func __iomem *cap = value;

	if (length < sizeof(*cap)) {
		nfp_err(bpf->app->cpp, "truncated function TLV: %d\n", length);
		return -EINVAL;
	}

	switch (readl(&cap->func_id)) {
	case BPF_FUNC_map_lookup_elem:
		bpf->helpers.map_lookup = readl(&cap->func_addr);
		break;
	case BPF_FUNC_map_update_elem:
		bpf->helpers.map_update = readl(&cap->func_addr);
		break;
	case BPF_FUNC_map_delete_elem:
		bpf->helpers.map_delete = readl(&cap->func_addr);
		break;
	case BPF_FUNC_perf_event_output:
		bpf->helpers.perf_event_output = readl(&cap->func_addr);
		break;
	}

	return 0;
}

static int
nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	struct nfp_bpf_cap_tlv_maps __iomem *cap = value;

	if (length < sizeof(*cap)) {
		nfp_err(bpf->app->cpp, "truncated maps TLV: %d\n", length);
		return -EINVAL;
	}

	bpf->maps.types = readl(&cap->types);
	bpf->maps.max_maps = readl(&cap->max_maps);
	bpf->maps.max_elems = readl(&cap->max_elems);
	bpf->maps.max_key_sz = readl(&cap->max_key_sz);
	bpf->maps.max_val_sz = readl(&cap->max_val_sz);
	bpf->maps.max_elem_sz = readl(&cap->max_elem_sz);

	return 0;
}

static int
nfp_bpf_parse_cap_random(struct nfp_app_bpf *bpf, void __iomem *value,
			 u32 length)
{
	bpf->pseudo_random = true;

	return 0;
}

static int
nfp_bpf_parse_cap_qsel(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	bpf->queue_select = true;

	return 0;
}

static int
nfp_bpf_parse_cap_adjust_tail(struct nfp_app_bpf *bpf, void __iomem *value,
			      u32 length)
{
	bpf->adjust_tail = true;

	return 0;
}

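/* Walk the firmware's "_abi_bpf_capabilities" symbol: a sequence of TLVs,
 * each with an 8 byte header (32-bit type, 32-bit length) followed by the
 * value.  Unknown capability types are logged and skipped.
 */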
static int nfp_bpf_parse_capabilities(struct nfp_app *app)
{
	struct nfp_cpp *cpp = app->pf->cpp;
	struct nfp_cpp_area *area;
	u8 __iomem *mem, *start;

	mem = nfp_rtsym_map(app->pf->rtbl, "_abi_bpf_capabilities", "bpf.cap",
			    8, &area);
	if (IS_ERR(mem))
		return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem);

	start = mem;
	while (mem - start + 8 <= nfp_cpp_area_size(area)) {
		u8 __iomem *value;
		u32 type, length;

		type = readl(mem);
		length = readl(mem + 4);
		value = mem + 8;

		mem += 8 + length;
		if (mem - start > nfp_cpp_area_size(area))
			goto err_release_free;

		switch (type) {
		case NFP_BPF_CAP_TYPE_FUNC:
			if (nfp_bpf_parse_cap_func(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_ADJUST_HEAD:
			if (nfp_bpf_parse_cap_adjust_head(app->priv, value,
							  length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_MAPS:
			if (nfp_bpf_parse_cap_maps(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_RANDOM:
			if (nfp_bpf_parse_cap_random(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_QUEUE_SELECT:
			if (nfp_bpf_parse_cap_qsel(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_ADJUST_TAIL:
			if (nfp_bpf_parse_cap_adjust_tail(app->priv, value,
							  length))
				goto err_release_free;
			break;
		default:
			nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
			break;
		}
	}
	if (mem - start != nfp_cpp_area_size(area)) {
		nfp_err(cpp, "BPF capabilities left after parsing, parsed:%zd total length:%zu\n",
			mem - start, nfp_cpp_area_size(area));
		goto err_release_free;
	}

	nfp_cpp_area_release_free(area);

	return 0;

err_release_free:
	nfp_err(cpp, "invalid BPF capabilities at offset:%zd\n", mem - start);
	nfp_cpp_area_release_free(area);
	return -EINVAL;
}

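/* Register/unregister vNIC netdevs with the BPF offload device so that
 * programs loaded for offload can be matched against this app's devices.
 */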
static int nfp_bpf_ndo_init(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_app_bpf *bpf = app->priv;

	return bpf_offload_dev_netdev_register(bpf->bpf_dev, netdev);
}

static void nfp_bpf_ndo_uninit(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_app_bpf *bpf = app->priv;

	bpf_offload_dev_netdev_unregister(bpf->bpf_dev, netdev);
}

static int nfp_bpf_init(struct nfp_app *app)
{
	struct nfp_app_bpf *bpf;
	int err;

	bpf = kzalloc(sizeof(*bpf), GFP_KERNEL);
	if (!bpf)
		return -ENOMEM;
	bpf->app = app;
	app->priv = bpf;

	skb_queue_head_init(&bpf->cmsg_replies);
	init_waitqueue_head(&bpf->cmsg_wq);
	INIT_LIST_HEAD(&bpf->map_list);

	err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
	if (err)
		goto err_free_bpf;

	err = nfp_bpf_parse_capabilities(app);
	if (err)
		goto err_free_neutral_maps;

	bpf->bpf_dev = bpf_offload_dev_create();
	err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
	if (err)
		goto err_free_neutral_maps;

	return 0;

err_free_neutral_maps:
	rhashtable_destroy(&bpf->maps_neutral);
err_free_bpf:
	kfree(bpf);
	return err;
}

static void nfp_check_rhashtable_empty(void *ptr, void *arg)
{
	WARN_ON(1);
}

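/* Tear down app state.  All offloaded maps and outstanding control message
 * replies should be gone by now; anything left behind indicates a bug.
 */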
static void nfp_bpf_clean(struct nfp_app *app)
{
	struct nfp_app_bpf *bpf = app->priv;

	bpf_offload_dev_destroy(bpf->bpf_dev);
	WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
	WARN_ON(!list_empty(&bpf->map_list));
	WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
	rhashtable_free_and_destroy(&bpf->maps_neutral,
				    nfp_check_rhashtable_empty, NULL);
	kfree(bpf);
}

const struct nfp_app_type app_bpf = {
	.id		= NFP_APP_BPF_NIC,
	.name		= "ebpf",

	.ctrl_cap_mask	= 0,

	.init		= nfp_bpf_init,
	.clean		= nfp_bpf_clean,

	.check_mtu	= nfp_bpf_check_mtu,

	.extra_cap	= nfp_bpf_extra_cap,

	.ndo_init	= nfp_bpf_ndo_init,
	.ndo_uninit	= nfp_bpf_ndo_uninit,

	.vnic_alloc	= nfp_bpf_vnic_alloc,
	.vnic_free	= nfp_bpf_vnic_free,

	.ctrl_msg_rx	= nfp_bpf_ctrl_msg_rx,
	.ctrl_msg_rx_raw	= nfp_bpf_ctrl_msg_rx_raw,

	.setup_tc	= nfp_bpf_setup_tc,

	.xdp_offload	= nfp_bpf_xdp_offload,
};