/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/tc_act/tc_gact.h>
#include <linux/crash_dump.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <linux/bpf.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"
struct mlx5e_rq_param {
	u32			rqc[MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param	wq;
	bool			am_enabled;
};

struct mlx5e_sq_param {
	u32			sqc[MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param	wq;
	u16			max_inline;
	u8			min_inline_mode;
};

struct mlx5e_cq_param {
	u32			cqc[MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param	wq;
	u16			eq_ix;
	u8			cq_period_mode;
};

struct mlx5e_channel_param {
	struct mlx5e_rq_param	rq;
	struct mlx5e_sq_param	sq;
	struct mlx5e_sq_param	xdp_sq;
	struct mlx5e_sq_param	icosq;
	struct mlx5e_cq_param	rx_cq;
	struct mlx5e_cq_param	tx_cq;
	struct mlx5e_cq_param	icosq_cq;
};
static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, striding_rq) &&
		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
		MLX5_CAP_ETH(mdev, reg_umr_sq);
}
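/* Set the RQ type and the size/stride parameters derived from it.
 * Striding RQ (MPWQE) spreads several packets over one multi-packet
 * WQE; its stride size depends on whether RX CQE compression is on.
 * kdump kernels get the minimal ring sizes to save memory.  For the
 * plain linked-list RQ, the LRO WQE size must leave room for the
 * headroom and skb_shared_info that build_skb() needs.
 */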
void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
{
	priv->params.rq_wq_type = rq_type;
	priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		priv->params.log_rq_size = is_kdump_kernel() ?
			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
		priv->params.mpwqe_log_stride_sz =
			MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
			MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(priv->mdev) :
			MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(priv->mdev);
		priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
			priv->params.mpwqe_log_stride_sz;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		priv->params.log_rq_size = is_kdump_kernel() ?
			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;

		/* Extra room needed for build_skb */
		priv->params.lro_wqe_sz -= MLX5_RX_HEADROOM +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}

	priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
					       BIT(priv->params.log_rq_size));

	mlx5_core_info(priv->mdev,
		       "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       BIT(priv->params.log_rq_size),
		       BIT(priv->params.mpwqe_log_stride_sz),
		       MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS));
}
static void mlx5e_set_rq_priv_params(struct mlx5e_priv *priv)
{
	u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(priv->mdev) &&
		    !priv->xdp_prog ?
		    MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		    MLX5_WQ_TYPE_LINKED_LIST;

	mlx5e_set_rq_type_params(priv, rq_type);
}
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
					    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
					    0);

	if (port_state == VPORT_STATE_UP) {
		netdev_info(priv->netdev, "Link up\n");
		netif_carrier_on(priv->netdev);
	} else {
		netdev_info(priv->netdev, "Link down\n");
		netif_carrier_off(priv->netdev);
	}
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}
static void mlx5e_tx_timeout_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       tx_timeout_work);
	int err;

	rtnl_lock();
	mutex_lock(&priv->state_lock);
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;
	mlx5e_close_locked(priv->netdev);
	err = mlx5e_open_locked(priv->netdev);
	if (err)
		netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
			   err);
unlock:
	mutex_unlock(&priv->state_lock);
	rtnl_unlock();
}
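/* Fold the per-ring RQ/SQ software counters into one device-wide
 * snapshot.  tx_csum_partial and rx_csum_unnecessary are not counted
 * directly in the datapath; they are derived below from the packet
 * totals and the remaining checksum counters.
 */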
static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	u64 tx_offload_none = 0;
	int i, j;

	memset(s, 0, sizeof(*s));
	for (i = 0; i < priv->params.num_channels; i++) {
		rq_stats = &priv->channel[i]->rq.stats;

		s->rx_packets	+= rq_stats->packets;
		s->rx_bytes	+= rq_stats->bytes;
		s->rx_lro_packets += rq_stats->lro_packets;
		s->rx_lro_bytes	+= rq_stats->lro_bytes;
		s->rx_csum_none	+= rq_stats->csum_none;
		s->rx_csum_complete += rq_stats->csum_complete;
		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_xdp_drop += rq_stats->xdp_drop;
		s->rx_xdp_tx += rq_stats->xdp_tx;
		s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
		s->rx_wqe_err   += rq_stats->wqe_err;
		s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
		s->rx_cache_reuse += rq_stats->cache_reuse;
		s->rx_cache_full += rq_stats->cache_full;
		s->rx_cache_empty += rq_stats->cache_empty;
		s->rx_cache_busy += rq_stats->cache_busy;

		for (j = 0; j < priv->params.num_tc; j++) {
			sq_stats = &priv->channel[i]->sq[j].stats;

			s->tx_packets		+= sq_stats->packets;
			s->tx_bytes		+= sq_stats->bytes;
			s->tx_tso_packets	+= sq_stats->tso_packets;
			s->tx_tso_bytes		+= sq_stats->tso_bytes;
			s->tx_tso_inner_packets	+= sq_stats->tso_inner_packets;
			s->tx_tso_inner_bytes	+= sq_stats->tso_inner_bytes;
			s->tx_queue_stopped	+= sq_stats->stopped;
			s->tx_queue_wake	+= sq_stats->wake;
			s->tx_queue_dropped	+= sq_stats->dropped;
			s->tx_xmit_more		+= sq_stats->xmit_more;
			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
			tx_offload_none		+= sq_stats->csum_none;
		}
	}

	/* Update calculated offload counters */
	s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
	s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;

	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
				priv->stats.pport.phy_counters,
				counter_set.phys_layer_cntrs.link_down_events);
}
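/* The counter groups below come from firmware/hardware rather than the
 * datapath: vport counters via the QUERY_VPORT_COUNTER command, port
 * (PPCNT) and PCIe (MPCNT) counters via access-register reads, and the
 * out-of-buffer drop count via the RQ queue counter.
 */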
static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
{
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);
	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;
	u32 *in;

	in = mlx5_vzalloc(sz);
	if (!in)
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
		out = pstats->phy_statistical_counters;
		MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}

	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}

	kvfree(in);
}
static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;

	if (!priv->q_counter)
		return;

	mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
				      &qcnt->rx_out_of_buffer);
}

static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;
	u32 *in;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	in = mlx5_vzalloc(sz);
	if (!in)
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);

	kvfree(in);
}
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
	mlx5e_update_pcie_counters(priv);
	mlx5e_update_pport_counters(priv);
	mlx5e_update_vport_counters(priv);
	mlx5e_update_q_counter(priv);
	mlx5e_update_sw_counters(priv);
}

void mlx5e_update_stats_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
					       update_stats_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->profile->update_stats(priv);
		queue_delayed_work(priv->wq, dwork,
				   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
	}
	mutex_unlock(&priv->state_lock);
}
static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
			      enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;
	struct ptp_clock_event ptp_event;
	struct mlx5_eqe *eqe = NULL;

	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
		return;

	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;
	case MLX5_DEV_EVENT_PPS:
		eqe = (struct mlx5_eqe *)param;
		ptp_event.type = PTP_CLOCK_EXTTS;
		ptp_event.index = eqe->data.pps.pin;
		ptp_event.timestamp =
			timecounter_cyc2time(&priv->tstamp.clock,
					     be64_to_cpu(eqe->data.pps.time_stamp));
		mlx5e_pps_event_handler(vpriv, &ptp_event);
		break;
	default:
		break;
	}
}
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
	synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}
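/* Striding RQ relies on UMR (user-mode memory registration): before a
 * multi-packet WQE can be used, a UMR WQE posted on the channel's
 * internal control SQ (ICOSQ) maps the WQE's pages through an MTT
 * array.  The helpers below size the MTT arrays, pre-build one UMR WQE
 * per RQ entry and DMA-map the MTTs.
 */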
static inline int mlx5e_get_wqe_mtt_sz(void)
{
	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the mtt array, we allocate
	 * a little more.
	 */
	return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
		     MLX5_UMR_MTT_ALIGNMENT);
}
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
				       struct mlx5e_icosq *sq,
				       struct mlx5e_umr_wqe *wqe,
				       u16 ix)
{
	struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
	struct mlx5_wqe_data_seg      *dseg = &wqe->data;
	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
	u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);

	cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
				      ds_cnt);
	cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->imm       = rq->mkey_be;

	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
	ucseg->xlt_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
	ucseg->bsf_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
	ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);

	dseg->lkey = sq->mkey_be;
	dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
}
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
				     struct mlx5e_channel *c)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	int mtt_sz = mlx5e_get_wqe_mtt_sz();
	int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
	int i;

	rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
				      GFP_KERNEL, cpu_to_node(c->cpu));
	if (!rq->mpwqe.info)
		goto err_out;

	/* We allocate more than mtt_sz as we will align the pointer */
	rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
					      cpu_to_node(c->cpu));
	if (unlikely(!rq->mpwqe.mtt_no_align))
		goto err_free_wqe_info;

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
					MLX5_UMR_ALIGN);
		wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
						  PCI_DMA_TODEVICE);
		if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
			goto err_unmap_mtts;

		mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
	}

	return 0;

err_unmap_mtts:
	while (--i >= 0) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
				 PCI_DMA_TODEVICE);
	}
	kfree(rq->mpwqe.mtt_no_align);
err_free_wqe_info:
	kfree(rq->mpwqe.info);

err_out:
	return -ENOMEM;
}
static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	int mtt_sz = mlx5e_get_wqe_mtt_sz();
	int i;

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
				 PCI_DMA_TODEVICE);
	}
	kfree(rq->mpwqe.mtt_no_align);
	kfree(rq->mpwqe.info);
}
static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv,
				 u64 npages, u8 page_shift,
				 struct mlx5_core_mkey *umr_mkey)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	if (!MLX5E_VALID_NUM_MTTS(npages))
		return -EINVAL;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);

	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
	MLX5_SET64(mkc, mkc, len, npages << page_shift);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 MLX5_MTT_OCTW(npages));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);

	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);

	kvfree(in);
	return err;
}
static int mlx5e_create_rq_umr_mkey(struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = rq->priv;
	u64 num_mtts = MLX5E_REQUIRED_MTTS(BIT(priv->params.log_rq_size));

	return mlx5e_create_umr_mkey(priv, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}
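/* Allocate the software side of an RQ: create the work queue, take a
 * reference on the attached XDP program if any (XDP needs
 * bidirectional DMA mapping and extra headroom), and select the
 * per-WQE-type receive handlers and buffer geometry.
 */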
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
			  struct mlx5e_rq_param *param,
			  struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 byte_count;
	u32 frag_sz;
	int npages;
	int wq_sz;
	int err;
	int i;

	param->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	rq->wq_type = priv->params.rq_wq_type;
	rq->pdev    = c->pdev;
	rq->netdev  = c->netdev;
	rq->tstamp  = &priv->tstamp;
	rq->channel = c;
	rq->priv    = c->priv;

	rq->xdp_prog = priv->xdp_prog ? bpf_prog_inc(priv->xdp_prog) : NULL;
	if (IS_ERR(rq->xdp_prog)) {
		err = PTR_ERR(rq->xdp_prog);
		rq->xdp_prog = NULL;
		goto err_rq_wq_destroy;
	}

	if (rq->xdp_prog) {
		rq->buff.map_dir = DMA_BIDIRECTIONAL;
		rq->rx_headroom = XDP_PACKET_HEADROOM;
	} else {
		rq->buff.map_dir = DMA_FROM_DEVICE;
		rq->rx_headroom = MLX5_RX_HEADROOM;
	}

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		if (mlx5e_is_vf_vport_rep(priv)) {
			err = -EINVAL;
			goto err_rq_wq_destroy;
		}

		rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
		rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
		rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);

		rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
		byte_count = rq->buff.wqe_sz;

		err = mlx5e_create_rq_umr_mkey(rq);
		if (err)
			goto err_rq_wq_destroy;
		rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);

		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
		if (err)
			goto err_destroy_umr_mkey;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		rq->dma_info = kzalloc_node(wq_sz * sizeof(*rq->dma_info),
					    GFP_KERNEL, cpu_to_node(c->cpu));
		if (!rq->dma_info) {
			err = -ENOMEM;
			goto err_rq_wq_destroy;
		}

		if (mlx5e_is_vf_vport_rep(priv))
			rq->handle_rx_cqe = mlx5e_handle_rx_cqe_rep;
		else
			rq->handle_rx_cqe = mlx5e_handle_rx_cqe;

		rq->alloc_wqe = mlx5e_alloc_rx_wqe;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

		rq->buff.wqe_sz = (priv->params.lro_en) ?
				priv->params.lro_wqe_sz :
				MLX5E_SW2HW_MTU(priv->netdev->mtu);
		byte_count = rq->buff.wqe_sz;

		/* calc the required page order */
		frag_sz = rq->rx_headroom +
			  byte_count /* packet data */ +
			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		frag_sz = SKB_DATA_ALIGN(frag_sz);

		npages = DIV_ROUND_UP(frag_sz, PAGE_SIZE);
		rq->buff.page_order = order_base_2(npages);

		byte_count |= MLX5_HW_START_PADDING;
		rq->mkey_be = c->mkey_be;
	}

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);

		wqe->data.byte_count = cpu_to_be32(byte_count);
		wqe->data.lkey = rq->mkey_be;
	}

	INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
	rq->am.mode = priv->params.rx_cq_period_mode;

	rq->page_cache.head = 0;
	rq->page_cache.tail = 0;

	return 0;

err_destroy_umr_mkey:
	mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);

err_rq_wq_destroy:
	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}
static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
	int i;

	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		mlx5e_rq_free_mpwqe_info(rq);
		mlx5_core_destroy_mkey(rq->priv->mdev, &rq->umr_mkey);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		kfree(rq->dma_info);
	}

	for (i = rq->page_cache.head; i != rq->page_cache.tail;
	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];

		mlx5e_page_release(rq, dma_info, false);
	}
	mlx5_wq_destroy(&rq->wq_ctrl);
}
static int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_priv *priv = rq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq  = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
	MLX5_SET(rqc,  rqc, vsd, priv->params.vlan_strip_disable);
	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
						MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}
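/* RQ state changes go through the MODIFY_RQ command: RST -> RDY when
 * opening, RDY -> ERR/RST on teardown.  VLAN stripping (vsd) is
 * toggled at runtime the same way, via the VSD modify bitmask.
 */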
static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
				 int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}
static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
	MLX5_SET(rqc, rqc, vsd, vsd);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}
static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}

static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_wq_ll *wq = &rq->wq;

	while (time_before(jiffies, exp_time)) {
		if (wq->cur_sz >= priv->params.min_rx_wqes)
			return 0;

		msleep(20);
	}

	return -ETIMEDOUT;
}
static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->wq;
	struct mlx5e_rx_wqe *wqe;
	__be16 wqe_ix_be;
	u16 wqe_ix;

	/* UMR WQE (if in progress) is always at wq->head */
	if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
		mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);

	while (!mlx5_wq_ll_is_empty(wq)) {
		wqe_ix_be = *wq->tail_next;
		wqe_ix    = be16_to_cpu(wqe_ix_be);
		wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
		rq->dealloc_wqe(rq, wqe_ix);
		mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
			       &wqe->next.next_wqe_index);
	}
}
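/* Opening an RQ: allocate software state, create the hardware object,
 * move it RST -> RDY, then ring the ICOSQ doorbell with a NOP so NAPI
 * runs and starts posting receive WQEs.
 */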
static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
{
	struct mlx5e_icosq *sq = &c->icosq;
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *nopwqe;
	int err;

	err = mlx5e_alloc_rq(c, param, rq);
	if (err)
		return err;

	err = mlx5e_create_rq(rq, param);
	if (err)
		goto err_free_rq;

	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_destroy_rq;

	if (param->am_enabled)
		set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
	sq->db.ico_wqe[pi].num_wqebbs = 1;
	nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);

	return 0;

err_destroy_rq:
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	mlx5e_destroy_rq(rq);
err_free_rq:
	mlx5e_free_rq(rq);

	return err;
}
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
	cancel_work_sync(&rq->am.work);

	mlx5e_destroy_rq(rq);
	mlx5e_free_rx_descs(rq);
	mlx5e_free_rq(rq);
}
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
	kfree(sq->db.di);
}

static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.di = kzalloc_node(sizeof(*sq->db.di) * wq_sz,
				 GFP_KERNEL, numa);
	if (!sq->db.di) {
		mlx5e_free_xdpsq_db(sq);
		return -ENOMEM;
	}

	return 0;
}

static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_xdpsq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = param->min_inline_mode;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
{
	mlx5e_free_xdpsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}
static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
	kfree(sq->db.ico_wqe);
}

static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
	u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
				      GFP_KERNEL, numa);
	if (!sq->db.ico_wqe)
		return -ENOMEM;

	return 0;
}

static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
			     int tc,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_icosq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
	mlx5e_free_icosq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}
static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
	kfree(sq->db.wqe_info);
	kfree(sq->db.dma_fifo);
	kfree(sq->db.skb);
}

static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->db.skb = kzalloc_node(wq_sz * sizeof(*sq->db.skb),
				  GFP_KERNEL, numa);
	sq->db.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.dma_fifo),
				       GFP_KERNEL, numa);
	sq->db.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.wqe_info),
				       GFP_KERNEL, numa);
	if (!sq->db.skb || !sq->db.dma_fifo || !sq->db.wqe_info) {
		mlx5e_free_txqsq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}

static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
			     int tc,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_txqsq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	int txq_ix;
	int err;

	sq->tstamp    = &priv->tstamp;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->tc        = tc;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->max_inline      = param->max_inline;
	sq->min_inline_mode = param->min_inline_mode;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	txq_ix = c->ix + tc * priv->params.num_channels;
	sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
	priv->txq_to_sq_map[txq_ix] = sq;

	sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
	mlx5e_free_txqsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}
struct mlx5e_create_sq_param {
	struct mlx5_wq_ctrl	*wq_ctrl;
	u32			 cqn;
	u32			 tisn;
	u8			 tis_lst_sz;
	u8			 min_inline_mode;
};
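/* Creating a hardware SQ is a two-step sequence: CREATE_SQ with the
 * parameters collected in mlx5e_create_sq_param, then MODIFY_SQ from
 * RST to RDY (see mlx5e_create_sq_rdy()).  The same MODIFY_SQ path
 * also carries rate-limit (packet pacing) index updates.
 */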
static int mlx5e_create_sq(struct mlx5e_priv *priv,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_create_sq_param *csp,
			   u32 *sqn)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * csp->wq_ctrl->buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));
	MLX5_SET(sqc,  sqc, tis_lst_sz, csp->tis_lst_sz);
	MLX5_SET(sqc,  sqc, tis_num_0, csp->tisn);
	MLX5_SET(sqc,  sqc, cqn, csp->cqn);

	if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);

	MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      priv->mdev->mlx5e_res.bfreg.index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  csp->wq_ctrl->buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      csp->wq_ctrl->db.dma);

	mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, sqn);

	kvfree(in);

	return err;
}
struct mlx5e_modify_sq_param {
	int	curr_state;
	int	next_state;
	bool	rl_update;
	int	rl_index;
};
static int mlx5e_modify_sq(struct mlx5e_priv *priv,
			   u32 sqn,
			   struct mlx5e_modify_sq_param *p)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
	MLX5_SET(sqc, sqc, state, p->next_state);
	if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
	}

	err = mlx5_core_modify_sq(mdev, sqn, in, inlen);

	kvfree(in);

	return err;
}
static void mlx5e_destroy_sq(struct mlx5e_priv *priv, u32 sqn)
{
	mlx5_core_destroy_sq(priv->mdev, sqn);
}

static int mlx5e_create_sq_rdy(struct mlx5e_priv *priv,
			       struct mlx5e_sq_param *param,
			       struct mlx5e_create_sq_param *csp,
			       u32 *sqn)
{
	struct mlx5e_modify_sq_param msp = {0};
	int err;

	err = mlx5e_create_sq(priv, param, csp, sqn);
	if (err)
		return err;

	msp.curr_state = MLX5_SQC_STATE_RST;
	msp.next_state = MLX5_SQC_STATE_RDY;
	err = mlx5e_modify_sq(priv, *sqn, &msp);
	if (err)
		mlx5e_destroy_sq(priv, *sqn);

	return err;
}

static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate);
static int mlx5e_open_txqsq(struct mlx5e_channel *c,
			    int tc,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_txqsq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	struct mlx5e_priv *priv = c->priv;
	int txq_ix;
	u32 tx_rate;
	int err;

	err = mlx5e_alloc_txqsq(c, tc, param, sq);
	if (err)
		return err;

	csp.tisn            = priv->tisn[sq->tc];
	csp.tis_lst_sz      = 1;
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->priv, param, &csp, &sq->sqn);
	if (err)
		goto err_free_txqsq;

	txq_ix = c->ix + tc * priv->params.num_channels;
	tx_rate = priv->tx_rates[txq_ix];
	if (tx_rate)
		mlx5e_set_sq_maxrate(priv->netdev, sq, tx_rate);

	netdev_tx_reset_queue(sq->txq);
	netif_tx_start_queue(sq->txq);

	return 0;

err_free_txqsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_txqsq(sq);

	return err;
}
static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}
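/* Closing a TXQ SQ: mark it disabled, wait out any in-flight NAPI
 * poll, stop the netdev queue, and post one last NOP doorbell so
 * hardware flushes outstanding completions before the SQ is destroyed
 * and its descriptors freed.
 */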
static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	/* prevent netif_tx_wake_queue */
	napi_synchronize(&c->napi);

	netif_tx_disable_queue(sq->txq);

	/* last doorbell out, godspeed .. */
	if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) {
		struct mlx5e_tx_wqe *nop;

		sq->db.skb[(sq->pc & sq->wq.sz_m1)] = NULL;
		nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
	}

	mlx5e_destroy_sq(priv, sq->sqn);
	if (sq->rate_limit)
		mlx5_rl_remove_rate(mdev, sq->rate_limit);
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}
static int mlx5e_open_icosq(struct mlx5e_channel *c,
			    int tc,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_icosq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_icosq(c, tc, param, sq);
	if (err)
		return err;

	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = param->min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->priv, param, &csp, &sq->sqn);
	if (err)
		goto err_free_icosq;

	return 0;

err_free_icosq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_icosq(sq);

	return err;
}
static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->priv, sq->sqn);
	mlx5e_free_icosq(sq);
}
static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_xdpsq *sq)
{
	unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
	struct mlx5e_create_sq_param csp = {};
	struct mlx5e_priv *priv = c->priv;
	unsigned int inline_hdr_sz = 0;
	int err;
	int i;

	err = mlx5e_alloc_xdpsq(c, param, sq);
	if (err)
		return err;

	csp.tis_lst_sz      = 1;
	csp.tisn            = priv->tisn[0]; /* tc = 0 */
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->priv, param, &csp, &sq->sqn);
	if (err)
		goto err_free_xdpsq;

	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
		inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
		ds_cnt++;
	}

	/* Pre initialize fixed WQE fields */
	for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
		struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
		struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
		struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
		struct mlx5_wqe_data_seg *dseg;

		cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
		eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

		dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
		dseg->lkey = sq->mkey_be;
	}

	return 0;

err_free_xdpsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_xdpsq(sq);

	return err;
}
static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->priv, sq->sqn);
	mlx5e_free_xdpsq_descs(sq);
	mlx5e_free_xdpsq(sq);
}
static int mlx5e_alloc_cq(struct mlx5e_channel *c,
			  struct mlx5e_cq_param *param,
			  struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node  = cpu_to_node(c->cpu);
	param->eq_ix = c->ix;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	cq->napi        = &c->napi;

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->channel = c;
	cq->priv = priv;

	return 0;
}
static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
	mlx5_cqwq_destroy(&cq->wq_ctrl);
}
static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5e_priv *priv = cq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}
static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = cq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_cq(mdev, &cq->mcq);
}
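/* Open a CQ: allocate and create it, then program completion
 * moderation (interrupt coalescing) if the device supports it.
 */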
static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq,
			 struct mlx5e_cq_moder moderation)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5e_alloc_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, param);
	if (err)
		goto err_free_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
					       moderation.usec,
					       moderation.pkts);
	return 0;

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}
static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_destroy_cq(cq);
	mlx5e_free_cq(cq);
}

static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_channel_param *cparam)
{
	struct mlx5e_priv *priv = c->priv;
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
				    priv->params.tx_cq_moderation);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}
static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_txqsq(c, tc, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_txqsq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_txqsq(&c->sq[tc]);
}
static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
	int i;

	for (i = 0; i < priv->profile->max_tc; i++)
		priv->channeltc_to_txq_map[ix][i] =
			ix + i * priv->params.num_channels;
}
static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_modify_sq_param msp = {0};
	u16 rl_index = 0;
	int err;

	if (rate == sq->rate_limit)
		/* nothing to do */
		return 0;

	if (sq->rate_limit)
		/* remove current rl index to free space to next ones */
		mlx5_rl_remove_rate(mdev, sq->rate_limit);

	sq->rate_limit = 0;

	if (rate) {
		err = mlx5_rl_add_rate(mdev, rate, &rl_index);
		if (err) {
			netdev_err(dev, "Failed configuring rate %u: %d\n",
				   rate, err);
			return err;
		}
	}

	msp.curr_state = MLX5_SQC_STATE_RDY;
	msp.next_state = MLX5_SQC_STATE_RDY;
	msp.rl_index   = rl_index;
	msp.rl_update  = true;
	err = mlx5e_modify_sq(priv, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed configuring rate %u: %d\n",
			   rate, err);
		/* remove the rate from the table */
		if (rate)
			mlx5_rl_remove_rate(mdev, rate);
		return err;
	}

	sq->rate_limit = rate;
	return 0;
}
static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_txqsq *sq = priv->txq_to_sq_map[index];
	int err = 0;

	if (!mlx5_rl_is_supported(mdev)) {
		netdev_err(dev, "Rate limiting is not supported on this device\n");
		return -EINVAL;
	}

	/* rate is given in Mb/sec, HW config is in Kb/sec */
	rate = rate << 10;

	/* Check whether rate in valid range, 0 is always valid */
	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
		netdev_err(dev, "TX rate %u, is not in range\n", rate);
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		err = mlx5e_set_sq_maxrate(dev, sq, rate);
	if (!err)
		priv->tx_rates[index] = rate;
	mutex_unlock(&priv->state_lock);

	return err;
}
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
	return is_kdump_kernel() ?
		MLX5E_MIN_NUM_CHANNELS :
		min_t(int, mdev->priv.eq_table.num_comp_vectors,
		      MLX5E_MAX_NUM_CHANNELS);
}
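/* Channel open order matters: CQs first (ICOSQ CQ, TX CQs, RX CQ, and
 * the XDP SQ CQ when an XDP program is attached), then NAPI is
 * enabled, then the SQs, and the RQ last.  mlx5e_close_channel() tears
 * everything down in exactly the reverse order.
 */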
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
{
	struct mlx5e_cq_moder icosq_cq_moder = {0, 0};
	struct net_device *netdev = priv->netdev;
	struct mlx5e_cq_moder rx_cq_profile;
	int cpu = mlx5e_get_cpu(priv, ix);
	struct mlx5e_channel *c;
	int err;

	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv     = priv;
	c->ix       = ix;
	c->cpu      = cpu;
	c->pdev     = &priv->mdev->pdev->dev;
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
	c->num_tc   = priv->params.num_tc;
	c->xdp      = !!priv->xdp_prog;

	if (priv->params.rx_am_enabled)
		rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
	else
		rx_cq_profile = priv->params.rx_cq_moderation;

	mlx5e_build_channeltc_to_txq_map(priv, ix);

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder);
	if (err)
		goto err_napi_del;

	err = mlx5e_open_tx_cqs(c, cparam);
	if (err)
		goto err_close_icosq_cq;

	err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
			    rx_cq_profile);
	if (err)
		goto err_close_tx_cqs;

	/* XDP SQ CQ params are same as normal TXQ sq CQ params */
	err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->rq.xdpsq.cq,
				     priv->params.tx_cq_moderation) : 0;
	if (err)
		goto err_close_rx_cq;

	napi_enable(&c->napi);

	err = mlx5e_open_icosq(c, 0, &cparam->icosq, &c->icosq);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_sqs(c, cparam);
	if (err)
		goto err_close_icosq;

	err = c->xdp ? mlx5e_open_xdpsq(c, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
	if (err)
		goto err_close_sqs;

	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
	if (err)
		goto err_close_xdp_sq;

	netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
	*cp = c;

	return 0;

err_close_xdp_sq:
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq.xdpsq);

err_close_sqs:
	mlx5e_close_sqs(c);

err_close_icosq:
	mlx5e_close_icosq(&c->icosq);

err_disable_napi:
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->rq.xdpsq.cq);

err_close_rx_cq:
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_close_icosq_cq:
	mlx5e_close_cq(&c->icosq.cq);

err_napi_del:
	netif_napi_del(&c->napi);
	kfree(c);

	return err;
}
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq.xdpsq);
	mlx5e_close_sqs(c);
	mlx5e_close_icosq(&c->icosq);
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->rq.xdpsq.cq);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
	netif_napi_del(&c->napi);

	kfree(c);
}
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 priv->params.mpwqe_log_num_strides - 9);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 priv->params.mpwqe_log_stride_sz - 6);
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	}

	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
	MLX5_SET(wq, wq, pd,               priv->mdev->mlx5e_res.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->wq.linear = 1;

	param->am_enabled = priv->params.rx_am_enabled;
}

static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
}
static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
					struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd,            priv->mdev->mlx5e_res.pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);

	param->max_inline = priv->params.tx_max_inline;
	param->min_inline_mode = priv->params.tx_min_inline_mode;
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = priv->params.log_rq_size +
			priv->params.mpwqe_log_num_strides;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		log_cq_size = priv->params.log_rq_size;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = priv->params.rx_cq_period_mode;
}
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
				     struct mlx5e_cq_param *param,
				     u8 log_wq_size)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
				    struct mlx5e_sq_param *param,
				    u8 log_wq_size)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
}

static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);

	param->max_inline = priv->params.tx_max_inline;
	param->min_inline_mode = priv->params.tx_min_inline_mode;
}
static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	mlx5e_build_rq_param(priv, &cparam->rq);
	mlx5e_build_sq_param(priv, &cparam->sq);
	mlx5e_build_xdpsq_param(priv, &cparam->xdp_sq);
	mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
}
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
	struct mlx5e_channel_param *cparam;
	int nch = priv->params.num_channels;
	int err = -ENOMEM;
	int i;
	int j;

	priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
				GFP_KERNEL);

	priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
				      sizeof(struct mlx5e_sq *), GFP_KERNEL);

	cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);

	if (!priv->channel || !priv->txq_to_sq_map || !cparam)
		goto err_free_txq_to_sq_map;

	mlx5e_build_channel_param(priv, cparam);

	for (i = 0; i < nch; i++) {
		err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]);
		if (err)
			goto err_close_channels;
	}

	for (j = 0; j < nch; j++) {
		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
		if (err)
			goto err_close_channels;
	}

	/* FIXME: This is a W/A for tx timeout watch dog false alarm when
	 * polling for inactive tx queues.
	 */
	netif_tx_start_all_queues(priv->netdev);

	kfree(cparam);
	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(priv->channel[i]);

err_free_txq_to_sq_map:
	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);
	kfree(cparam);

	return err;
}
static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
	int i;

	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
	 * polling for inactive tx queues.
	 */
	netif_tx_stop_all_queues(priv->netdev);
	netif_tx_disable(priv->netdev);

	for (i = 0; i < priv->params.num_channels; i++)
		mlx5e_close_channel(priv->channel[i]);

	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);
}
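/* RSS plumbing: an indirection table (RQT) of MLX5E_INDIR_RQT_SIZE
 * entries spreads flows across the channel RQs; with the XOR8 hash
 * function the table index is bit-inverted first.  Per-channel
 * "direct" RQTs of size 1 back the direct TIRs.  While the netdev is
 * closed, entries point at the drop RQ.
 */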
static int mlx5e_rx_hash_fn(int hfunc)
{
	return (hfunc == ETH_RSS_HASH_TOP) ?
	       MLX5_RX_HASH_FN_TOEPLITZ :
	       MLX5_RX_HASH_FN_INVERTED_XOR8;
}

static int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}
static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
{
	int i;

	for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
		int ix = i;
		u32 rqn;

		if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);

		ix = priv->params.indirection_rqt[ix];
		rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
				priv->channel[ix]->rq.rqn :
				priv->drop_rq.rqn;
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}
static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
				      int ix)
{
	u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
			priv->channel[ix]->rq.rqn :
			priv->drop_rq.rqn;

	MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
}
static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
			    int ix, struct mlx5e_rqt *rqt)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	int err;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	if (sz > 1) /* RSS */
		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
	else
		mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);

	err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
	if (!err)
		rqt->enabled = true;

	kvfree(in);
	return err;
}

void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
{
	rqt->enabled = false;
	mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}
static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt = &priv->indir_rqt;

	return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt);
}

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt;
	int err;
	int ix;

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		rqt = &priv->direct_tir[ix].rqt;
		err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqt);
		if (err)
			goto err_destroy_rqts;
	}

	return 0;

err_destroy_rqts:
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);

	return err;
}
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	if (sz > 1) /* RSS */
		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
	else
		mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);

	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);

	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);

	kvfree(in);

	return err;
}
static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{
	u32 rqtn;
	int ix;

	if (priv->indir_rqt.enabled) {
		rqtn = priv->indir_rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
	}

	for (ix = 0; ix < priv->params.num_channels; ix++) {
		if (!priv->direct_tir[ix].rqt.enabled)
			continue;
		rqtn = priv->direct_tir[ix].rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, 1, ix);
	}
}
static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
{
	if (!priv->params.lro_en)
		return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

	MLX5_SET(tirc, tirc, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
		 (priv->params.lro_wqe_sz -
		  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
}
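/* Program a TIR's RX hash: select the hash function (Toeplitz with a
 * symmetric key, or inverted XOR8) and, per traffic type, the header
 * fields that feed it (IPs only, IPs + L4 ports, or IPs + IPsec SPI).
 */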
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
				    enum mlx5e_traffic_types tt)
{
	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
				 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	MLX5_SET(tirc, tirc, rx_hash_fn,
		 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
	if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
					     rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc,
					       rx_hash_toeplitz_key);

		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		memcpy(rss_key, priv->params.toeplitz_hash_key, len);
	}

	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	default:
		WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
	}
}
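
/* Example (illustrative only): for MLX5E_TT_IPV4_TCP the selector above
 * hashes over {src IP, dst IP, src port, dst port}, so two TCP flows
 * between the same pair of hosts can still land on different RQs; plain
 * MLX5E_TT_IPV4 traffic falls back to the 2-tuple MLX5_HASH_IP.
 */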
static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *in;
	void *tirc;
	int inlen;
	int err;
	int tt;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

	mlx5e_build_tir_ctx_lro(tirc, priv);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
					   inlen);
		if (err)
			goto free_in;
	}

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
					   in, inlen);
		if (err)
			goto free_in;
	}

free_in:
	kvfree(in);

	return err;
}
static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
	int err;

	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
	if (err)
		return err;

	/* Update vport context MTU */
	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);

	return 0;
}

static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = 0;
	int err;

	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
	if (err || !hw_mtu) /* fallback to port oper mtu */
		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	*mtu = MLX5E_HW2SW_MTU(hw_mtu);
}

static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	u16 mtu;
	int err;

	err = mlx5e_set_mtu(priv, netdev->mtu);
	if (err)
		return err;

	mlx5e_query_mtu(priv, &mtu);
	if (mtu != netdev->mtu)
		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
			    __func__, mtu, netdev->mtu);

	return 0;
}
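
/* MLX5E_SW2HW_MTU/MLX5E_HW2SW_MTU translate between the stack's L3 MTU and
 * the device frame size by adding/removing L2 overhead (Ethernet header,
 * VLAN tag and FCS, per the definitions in en.h). Example (illustrative):
 * a netdev MTU of 1500 becomes a hardware MTU of 1500 + 14 + 4 + 4 = 1522.
 */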
static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int nch = priv->params.num_channels;
	int ntc = priv->params.num_tc;
	int tc;

	netdev_reset_tc(netdev);

	if (ntc == 1)
		return;

	netdev_set_num_tc(netdev, ntc);

	/* Map netdev TCs to offset 0
	 * We have our own UP to TXQ mapping for QoS
	 */
	for (tc = 0; tc < ntc; tc++)
		netdev_set_tc_queue(netdev, tc, nch, 0);
}
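
/* Example (illustrative only): with ntc = 4 and nch = 8, every TC is given
 * the same queue range [0, 8) ("offset 0" above); the driver then resolves
 * the real TXQ as roughly channel + tc * nch in its own select_queue
 * logic, rather than relying on the stack's per-TC queue offsets.
 */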
int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_txqs;
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_netdev_set_tcs(netdev);

	num_txqs = priv->params.num_channels * priv->params.num_tc;
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

	err = mlx5e_open_channels(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
			   __func__, err);
		goto err_clear_state_opened_flag;
	}

	err = mlx5e_refresh_tirs_self_loopback(priv->mdev, false);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
			   __func__, err);
		goto err_close_channels;
	}

	mlx5e_redirect_rqts(priv);
	mlx5e_update_carrier(priv);
	mlx5e_timestamp_init(priv);

	if (priv->profile->update_stats)
		queue_delayed_work(priv->wq, &priv->update_stats_work, 0);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		err = mlx5e_add_sqs_fwd_rules(priv);
		if (err)
			goto err_close_channels;
	}

	return 0;

err_close_channels:
	mlx5e_close_channels(priv);
err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}
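
/* The order above matters: channels must exist before the RQTs are
 * redirected at their RQs, and the OPENED bit is set up front so that
 * concurrent configuration paths that test it under priv->state_lock see
 * a consistent picture. All callers hold priv->state_lock.
 */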
int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g. an RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		mlx5e_remove_sqs_fwd_rules(priv);

	mlx5e_timestamp_cleanup(priv);
	netif_carrier_off(priv->netdev);
	mlx5e_redirect_rqts(priv);
	mlx5e_close_channels(priv);

	return 0;
}

int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (!netif_device_present(netdev))
		return -ENODEV;

	mutex_lock(&priv->state_lock);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}
static int mlx5e_alloc_drop_rq(struct mlx5e_priv *priv,
			       struct mlx5e_rq *rq,
			       struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->priv = priv;

	return 0;
}

static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
			       struct mlx5e_cq *cq,
			       struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;

	cq->priv = priv;

	return 0;
}
static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
{
	struct mlx5e_cq_param cq_param;
	struct mlx5e_rq_param rq_param;
	struct mlx5e_rq *rq = &priv->drop_rq;
	struct mlx5e_cq *cq = &priv->drop_rq.cq;
	int err;

	memset(&cq_param, 0, sizeof(cq_param));
	memset(&rq_param, 0, sizeof(rq_param));
	mlx5e_build_drop_rq_param(&rq_param);

	err = mlx5e_alloc_drop_cq(priv, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, &cq_param);
	if (err)
		goto err_free_cq;

	err = mlx5e_alloc_drop_rq(priv, rq, &rq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_rq(rq, &rq_param);
	if (err)
		goto err_free_rq;

	return 0;

err_free_rq:
	mlx5e_free_rq(&priv->drop_rq);

err_destroy_cq:
	mlx5e_destroy_cq(&priv->drop_rq.cq);

err_free_cq:
	mlx5e_free_cq(&priv->drop_rq.cq);

	return err;
}

static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
{
	mlx5e_destroy_rq(&priv->drop_rq);
	mlx5e_free_rq(&priv->drop_rq);
	mlx5e_destroy_cq(&priv->drop_rq.cq);
	mlx5e_free_cq(&priv->drop_rq.cq);
}
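
/* The drop RQ is a minimal, never-polled RQ used as a default destination
 * while the real channels are down (e.g. between detach and attach):
 * traffic steered to it is silently dropped by the device, so the TIRs
 * always point at a valid RQ.
 */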
static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, prio, tc << 1);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);

	if (mlx5_lag_is_lacp_owner(mdev))
		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);

	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}

static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
{
	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}
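
/* Note on "tc << 1" above: the TC value is placed in the upper bits of the
 * TIS prio field, with the least significant bit left clear (reserved in
 * the hardware layout, as far as this driver is concerned). Illustrative
 * example: tc = 3 yields prio = 6. One TIS is created per TC, and TX
 * queues later attach to its tisn.
 */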
int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++) {
		err = mlx5e_create_tis(priv, tc);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_destroy_tis(priv, tc);

	return err;
}

void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv, tc);
}
static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
				      enum mlx5e_traffic_types tt)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(tirc, priv);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
}

static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
				       u32 rqtn)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(tirc, priv);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
{
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int tt;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, inlen);
		tir = &priv->indir_tir[tt];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_indir_tir_ctx(priv, tirc, tt);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_tirs:
	for (tt--; tt >= 0; tt--)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);

	kvfree(in);

	return err;
}

int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	for (ix = 0; ix < nch; ix++) {
		memset(in, 0, inlen);
		tir = &priv->direct_tir[ix];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_direct_tir_ctx(priv, tirc,
					   priv->direct_tir[ix].rqt.rqtn);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_ch_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_ch_tirs:
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);

	kvfree(in);

	return err;
}
static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
}

void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	int i;

	for (i = 0; i < nch; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
}
int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
{
	int err = 0;
	int i;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	for (i = 0; i < priv->params.num_channels; i++) {
		err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd);
		if (err)
			return err;
	}

	return 0;
}
static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	bool was_opened;
	int err = 0;

	if (tc && tc != MLX5E_MAX_NUM_TC)
		return -EINVAL;

	mutex_lock(&priv->state_lock);

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened)
		mlx5e_close_locked(priv->netdev);

	priv->params.num_tc = tc ? tc : 1;

	if (was_opened)
		err = mlx5e_open_locked(priv->netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}
static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
			      __be16 proto, struct tc_to_netdev *tc)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
		goto mqprio;

	switch (tc->type) {
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlx5e_configure_flower(priv, proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			return mlx5e_delete_flower(priv, tc->cls_flower);
		case TC_CLSFLOWER_STATS:
			return mlx5e_stats_flower(priv, tc->cls_flower);
		}
	default:
		return -EOPNOTSUPP;
	}

mqprio:
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return mlx5e_setup_tc(dev, tc->mqprio->num_tc);
}
static void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;

	if (mlx5e_is_uplink_rep(priv)) {
		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
		stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
		stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
	} else {
		stats->rx_packets = sstats->rx_packets;
		stats->rx_bytes   = sstats->rx_bytes;
		stats->tx_packets = sstats->tx_packets;
		stats->tx_bytes   = sstats->tx_bytes;
		stats->tx_dropped = sstats->tx_queue_dropped;
	}

	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;

	stats->rx_length_errors =
		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
	stats->rx_crc_errors =
		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
	stats->tx_carrier_errors =
		PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
			   stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;

	/* vport multicast also counts packets that are dropped due to steering
	 * or rx out of buffer
	 */
	stats->multicast =
		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
}
static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	queue_work(priv->wq, &priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	queue_work(priv->wq, &priv->set_rx_mode_work);

	return 0;
}
#define MLX5E_SET_FEATURE(netdev, feature, enable)	\
	do {						\
		if (enable)				\
			netdev->features |= feature;	\
		else					\
			netdev->features &= ~feature;	\
	} while (0)

typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
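
/* Minimal usage sketch (illustrative only): a handler flips one capability
 * and reports failure, and MLX5E_SET_FEATURE() mirrors the result into
 * netdev->features only on success:
 *
 *	err = set_feature_lro(netdev, true);
 *	if (!err)
 *		MLX5E_SET_FEATURE(netdev, NETIF_F_LRO, true);
 *
 * mlx5e_handle_feature() below wraps exactly this pattern.
 */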
static int set_feature_lro(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	int err;

	mutex_lock(&priv->state_lock);

	if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
		mlx5e_close_locked(priv->netdev);

	priv->params.lro_en = enable;
	err = mlx5e_modify_tirs_lro(priv);
	if (err) {
		netdev_err(netdev, "lro modify failed, %d\n", err);
		priv->params.lro_en = !enable;
	}

	if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
		mlx5e_open_locked(priv->netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}
static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (enable)
		mlx5e_enable_vlan_filter(priv);
	else
		mlx5e_disable_vlan_filter(priv);

	return 0;
}

static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!enable && mlx5e_tc_num_filters(priv)) {
		netdev_err(netdev,
			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
		return -EINVAL;
	}

	return 0;
}

static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_set_port_fcs(mdev, !enable);
}

static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);

	priv->params.vlan_strip_disable = !enable;
	err = mlx5e_modify_rqs_vsd(priv, !enable);
	if (err)
		priv->params.vlan_strip_disable = enable;

	mutex_unlock(&priv->state_lock);

	return err;
}

#ifdef CONFIG_RFS_ACCEL
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (enable)
		err = mlx5e_arfs_enable(priv);
	else
		err = mlx5e_arfs_disable(priv);

	return err;
}
#endif
static int mlx5e_handle_feature(struct net_device *netdev,
				netdev_features_t wanted_features,
				netdev_features_t feature,
				mlx5e_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ netdev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(netdev, enable);
	if (err) {
		netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
			   enable ? "Enable" : "Disable", feature, err);
		return err;
	}

	MLX5E_SET_FEATURE(netdev, feature, enable);
	return 0;
}
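
/* The "wanted ^ current" XOR above means a handler runs only for bits that
 * actually change. Example (illustrative): if only NETIF_F_LRO differs
 * between the requested and current feature sets, only set_feature_lro()
 * does real work and every other handler returns immediately.
 */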
static int mlx5e_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	int err;

	err  = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
				    set_feature_lro);
	err |= mlx5e_handle_feature(netdev, features,
				    NETIF_F_HW_VLAN_CTAG_FILTER,
				    set_feature_vlan_filter);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
				    set_feature_tc_num_filters);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
				    set_feature_rx_all);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
				    set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
				    set_feature_arfs);
#endif

	return err ? -EINVAL : 0;
}
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	bool was_opened;
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	reset = !priv->params.lro_en &&
		(priv->params.rq_wq_type !=
		 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened && reset)
		mlx5e_close_locked(netdev);

	netdev->mtu = new_mtu;
	mlx5e_set_dev_port_mtu(netdev);

	if (was_opened && reset)
		err = mlx5e_open_locked(netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}
static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(dev, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(dev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}

static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
			     __be16 vlan_proto)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
					   vlan, qos);
}

static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
			     int max_tx_rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
					   max_tx_rate, min_tx_rate);
}
static int mlx5_vport_link2ifla(u8 esw_link)
{
	switch (esw_link) {
	case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_ESW_VPORT_ADMIN_STATE_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	}
	return IFLA_VF_LINK_STATE_AUTO;
}

static int mlx5_ifla_link2vport(u8 ifla_link)
{
	switch (ifla_link) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_UP;
	}
	return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
}

static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
				   int link_state)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
					    mlx5_ifla_link2vport(link_state));
}
static int mlx5e_get_vf_config(struct net_device *dev,
			       int vf, struct ifla_vf_info *ivi)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
	if (err)
		return err;
	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
	return 0;
}

static int mlx5e_get_vf_stats(struct net_device *dev,
			      int vf, struct ifla_vf_stats *vf_stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
					    vf_stats);
}
static void mlx5e_add_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
}

static void mlx5e_del_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
}
static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	struct udphdr *udph;
	u16 proto;
	u16 port = 0;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		proto = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		goto out;
	}

	if (proto == IPPROTO_UDP) {
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);
	}

	/* Verify if UDP port is being offloaded by HW */
	if (port && mlx5e_vxlan_lookup_port(priv, port))
		return features;

out:
	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
					      struct net_device *netdev,
					      netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	/* Validate if the tunneled packet is being offloaded by HW */
	if (skb->encapsulation &&
	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
		return mlx5e_vxlan_features_check(priv, skb, features);

	return features;
}
static void mlx5e_tx_timeout(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	bool sched_work = false;
	int i;

	netdev_err(dev, "TX timeout detected\n");

	for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
		struct mlx5e_txqsq *sq = priv->txq_to_sq_map[i];

		if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		sched_work = true;
		clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
		netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
	}

	if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
		schedule_work(&priv->tx_timeout_work);
}
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct bpf_prog *old_prog;
	int err = 0;
	bool reset, was_opened;
	int i;

	mutex_lock(&priv->state_lock);

	if ((netdev->features & NETIF_F_LRO) && prog) {
		netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
		err = -EINVAL;
		goto unlock;
	}

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	/* no need for full reset when exchanging programs */
	reset = (!priv->xdp_prog || !prog);

	if (was_opened && reset)
		mlx5e_close_locked(netdev);
	if (was_opened && !reset) {
		/* num_channels is invariant here, so we can take the
		 * batched reference right upfront.
		 */
		prog = bpf_prog_add(prog, priv->params.num_channels);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto unlock;
		}
	}

	/* exchange programs, extra prog reference we got from caller
	 * as long as we don't fail from this point onwards.
	 */
	old_prog = xchg(&priv->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (reset) /* change RQ type according to priv->xdp_prog */
		mlx5e_set_rq_priv_params(priv);

	if (was_opened && reset)
		mlx5e_open_locked(netdev);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
		goto unlock;

	/* exchanging programs w/o reset, we update ref counts on behalf
	 * of the channels RQs here.
	 */
	for (i = 0; i < priv->params.num_channels; i++) {
		struct mlx5e_channel *c = priv->channel[i];

		clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		napi_synchronize(&c->napi);
		/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */

		old_prog = xchg(&c->rq.xdp_prog, prog);

		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		/* napi_schedule in case we have missed anything */
		set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
		napi_schedule(&c->napi);

		if (old_prog)
			bpf_prog_put(old_prog);
	}

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}
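
/* Two paths above: installing or removing a program changes the RQ memory
 * scheme, so the channels are fully closed and reopened; swapping one
 * program for another keeps the channels up and only quiesces each RQ
 * (clear ENABLED + napi_synchronize) around the xchg() of rq->xdp_prog.
 */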
static bool mlx5e_xdp_attached(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return !!priv->xdp_prog;
}

static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mlx5e_xdp_set(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_attached = mlx5e_xdp_attached(dev);
		return 0;
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without
 * reenabling interrupts.
 */
static void mlx5e_netpoll(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < priv->params.num_channels; i++)
		napi_schedule(&priv->channel[i]->napi);
}
#endif
static const struct net_device_ops mlx5e_netdev_ops_basic = {
	.ndo_open		 = mlx5e_open,
	.ndo_stop		 = mlx5e_close,
	.ndo_start_xmit		 = mlx5e_xmit,
	.ndo_setup_tc		 = mlx5e_ndo_setup_tc,
	.ndo_select_queue	 = mlx5e_select_queue,
	.ndo_get_stats64	 = mlx5e_get_stats,
	.ndo_set_rx_mode	 = mlx5e_set_rx_mode,
	.ndo_set_mac_address	 = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid	 = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	 = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features	 = mlx5e_set_features,
	.ndo_change_mtu		 = mlx5e_change_mtu,
	.ndo_do_ioctl		 = mlx5e_ioctl,
	.ndo_set_tx_maxrate	 = mlx5e_set_tx_maxrate,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	 = mlx5e_rx_flow_steer,
#endif
	.ndo_tx_timeout		 = mlx5e_tx_timeout,
	.ndo_xdp		 = mlx5e_xdp,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = mlx5e_netpoll,
#endif
};

static const struct net_device_ops mlx5e_netdev_ops_sriov = {
	.ndo_open		 = mlx5e_open,
	.ndo_stop		 = mlx5e_close,
	.ndo_start_xmit		 = mlx5e_xmit,
	.ndo_setup_tc		 = mlx5e_ndo_setup_tc,
	.ndo_select_queue	 = mlx5e_select_queue,
	.ndo_get_stats64	 = mlx5e_get_stats,
	.ndo_set_rx_mode	 = mlx5e_set_rx_mode,
	.ndo_set_mac_address	 = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid	 = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	 = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features	 = mlx5e_set_features,
	.ndo_change_mtu		 = mlx5e_change_mtu,
	.ndo_do_ioctl		 = mlx5e_ioctl,
	.ndo_udp_tunnel_add	 = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del	 = mlx5e_del_vxlan_port,
	.ndo_set_tx_maxrate	 = mlx5e_set_tx_maxrate,
	.ndo_features_check	 = mlx5e_features_check,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	 = mlx5e_rx_flow_steer,
#endif
	.ndo_set_vf_mac		 = mlx5e_set_vf_mac,
	.ndo_set_vf_vlan	 = mlx5e_set_vf_vlan,
	.ndo_set_vf_spoofchk	 = mlx5e_set_vf_spoofchk,
	.ndo_set_vf_trust	 = mlx5e_set_vf_trust,
	.ndo_set_vf_rate	 = mlx5e_set_vf_rate,
	.ndo_get_vf_config	 = mlx5e_get_vf_config,
	.ndo_set_vf_link_state	 = mlx5e_set_vf_link_state,
	.ndo_get_vf_stats	 = mlx5e_get_vf_stats,
	.ndo_tx_timeout		 = mlx5e_tx_timeout,
	.ndo_xdp		 = mlx5e_xdp,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = mlx5e_netpoll,
#endif
	.ndo_has_offload_stats	 = mlx5e_has_offload_stats,
	.ndo_get_offload_stats	 = mlx5e_get_offload_stats,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;
	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -EOPNOTSUPP;
	}
	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
	if (!MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_warn(mdev, "CQ moderation is not supported\n");

	return 0;
}

u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	return bf_buf_size -
	       sizeof(struct mlx5e_tx_wqe) +
	       2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}
void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
				   u32 *indirection_rqt, int len,
				   int num_channels)
{
	int node = mdev->priv.numa_node;
	int node_num_of_cores;
	int i;

	if (node == -1)
		node = first_online_node;

	node_num_of_cores = cpumask_weight(cpumask_of_node(node));

	if (node_num_of_cores)
		num_channels = min_t(int, num_channels, node_num_of_cores);

	for (i = 0; i < len; i++)
		indirection_rqt[i] = i % num_channels;
}
static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
{
	enum pcie_link_width width;
	enum pci_bus_speed speed;
	int err = 0;

	err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
	if (err)
		return err;

	if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
		return -EINVAL;

	switch (speed) {
	case PCIE_SPEED_2_5GT:
		*pci_bw = 2500 * width;
		break;
	case PCIE_SPEED_5_0GT:
		*pci_bw = 5000 * width;
		break;
	case PCIE_SPEED_8_0GT:
		*pci_bw = 8000 * width;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
{
	return (link_speed && pci_bw &&
		(pci_bw < 40000) && (pci_bw < link_speed));
}
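
/* Worked example (illustrative only): a Gen3 x8 slot gives
 * pci_bw = 8000 * 8 = 64000, so a 100G link (link_speed = 100000) does
 * not enable CQE compression since pci_bw >= 40000; a Gen3 x4 slot
 * (32000) on the same link would, as PCI is then the bottleneck.
 */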
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	params->rx_cq_period_mode = cq_period_mode;

	params->rx_cq_moderation.pkts =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	params->rx_cq_moderation.usec =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;

	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		params->rx_cq_moderation.usec =
			MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
}

u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
	int i;

	/* The supported periods are organized in ascending order */
	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
			break;

	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
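
/* Example (illustrative only): if the device advertises supported periods
 * of {8, 16, 32, 1024} usecs, a wanted timeout of 20 rounds up to 32
 * (first supported period >= wanted), and anything above 1024 falls back
 * to the last entry.
 */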
static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
					struct net_device *netdev,
					const struct mlx5e_profile *profile,
					void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	u32 link_speed = 0;
	u32 pci_bw = 0;
	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
			    MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
			    MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	priv->mdev                         = mdev;
	priv->netdev                       = netdev;
	priv->params.num_channels          = profile->max_nch(mdev);
	priv->profile                      = profile;
	priv->ppriv                        = ppriv;

	priv->params.lro_timeout =
		mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);

	priv->params.log_sq_size = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	/* set CQE compression */
	priv->params.rx_cqe_compress_def = false;
	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
	    MLX5_CAP_GEN(mdev, vport_group_manager)) {
		mlx5e_get_max_linkspeed(mdev, &link_speed);
		mlx5e_get_pci_bw(mdev, &pci_bw);
		mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
			      link_speed, pci_bw);
		priv->params.rx_cqe_compress_def =
			cqe_compress_heuristic(link_speed, pci_bw);
	}

	MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS,
			priv->params.rx_cqe_compress_def);

	mlx5e_set_rq_priv_params(priv);
	if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		priv->params.lro_en = true;

	priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);

	priv->params.tx_cq_moderation.usec =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	priv->params.tx_cq_moderation.pkts =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
	mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
	if (priv->params.tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
	    !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		priv->params.tx_min_inline_mode = MLX5_INLINE_MODE_L2;

	priv->params.num_tc                = 1;
	priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;

	netdev_rss_key_fill(priv->params.toeplitz_hash_key,
			    sizeof(priv->params.toeplitz_hash_key));

	mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
				      MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));

	/* Initialize pflags */
	MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);

	mutex_init(&priv->state_lock);

	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
	if (is_zero_ether_addr(netdev->dev_addr) &&
	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
		eth_hw_addr_random(netdev);
		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
	}
}

static const struct switchdev_ops mlx5e_switchdev_ops = {
	.switchdev_port_attr_get	= mlx5e_attr_get,
};
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool fcs_supported;
	bool fcs_enabled;

	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
#ifdef CONFIG_MLX5_CORE_EN_DCB
		if (MLX5_CAP_GEN(mdev, qos))
			netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
	} else {
		netdev->netdev_ops = &mlx5e_netdev_ops_basic;
	}

	netdev->watchdog_timeo    = 15 * HZ;

	netdev->ethtool_ops	  = &mlx5e_ethtool_ops;

	netdev->vlan_features    |= NETIF_F_SG;
	netdev->vlan_features    |= NETIF_F_IP_CSUM;
	netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features    |= NETIF_F_GRO;
	netdev->vlan_features    |= NETIF_F_TSO;
	netdev->vlan_features    |= NETIF_F_TSO6;
	netdev->vlan_features    |= NETIF_F_RXCSUM;
	netdev->vlan_features    |= NETIF_F_RXHASH;

	if (!!MLX5_CAP_ETH(mdev, lro_cap))
		netdev->vlan_features    |= NETIF_F_LRO;

	netdev->hw_features       = netdev->vlan_features;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;

	if (mlx5e_vxlan_allowed(mdev)) {
		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM |
					   NETIF_F_GSO_PARTIAL;
		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
		netdev->hw_enc_features |= NETIF_F_TSO6;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
					   NETIF_F_GSO_PARTIAL;
		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);

	if (fcs_supported)
		netdev->hw_features |= NETIF_F_RXALL;

	netdev->features          = netdev->hw_features;
	if (!priv->params.lro_en)
		netdev->features  &= ~NETIF_F_LRO;

	if (fcs_enabled)
		netdev->features  &= ~NETIF_F_RXALL;

#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
	if (FT_CAP(flow_modify_en) &&
	    FT_CAP(modify_root) &&
	    FT_CAP(identified_miss_table_mode) &&
	    FT_CAP(flow_table_modify)) {
		netdev->hw_features      |= NETIF_F_HW_TC;
#ifdef CONFIG_RFS_ACCEL
		netdev->hw_features	 |= NETIF_F_NTUPLE;
#endif
	}

	netdev->features         |= NETIF_F_HIGHDMA;

	netdev->priv_flags       |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);

#ifdef CONFIG_NET_SWITCHDEV
	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif
}
static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
		priv->q_counter = 0;
	}
}

static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
{
	if (!priv->q_counter)
		return;

	mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
}
static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
			   struct net_device *netdev,
			   const struct mlx5e_profile *profile,
			   void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
	mlx5e_build_nic_netdev(netdev);
	mlx5e_vxlan_init(priv);
}

static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_vxlan_cleanup(priv);

	if (priv->xdp_prog)
		bpf_prog_put(priv->xdp_prog);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;
	int i;

	err = mlx5e_create_indirect_rqts(priv);
	if (err) {
		mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err);
		return err;
	}

	err = mlx5e_create_direct_rqts(priv);
	if (err) {
		mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
		goto err_destroy_indirect_rqts;
	}

	err = mlx5e_create_indirect_tirs(priv);
	if (err) {
		mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err);
		goto err_destroy_direct_rqts;
	}

	err = mlx5e_create_direct_tirs(priv);
	if (err) {
		mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
		goto err_destroy_indirect_tirs;
	}

	err = mlx5e_create_flow_steering(priv);
	if (err) {
		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
		goto err_destroy_direct_tirs;
	}

	err = mlx5e_tc_init(priv);
	if (err)
		goto err_destroy_flow_steering;

	return 0;

err_destroy_flow_steering:
	mlx5e_destroy_flow_steering(priv);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
	for (i = 0; i < priv->profile->max_nch(mdev); i++)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	return err;
}

static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_tc_cleanup(priv);
	mlx5e_destroy_flow_steering(priv);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_indirect_tirs(priv);
	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
}
static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_initialize(priv);
#endif
	return 0;
}
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5_eswitch_rep rep;

	mlx5_lag_add(mdev, netdev);

	mlx5e_enable_async_events(priv);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
		rep.load = mlx5e_nic_rep_load;
		rep.unload = mlx5e_nic_rep_unload;
		rep.vport = FDB_UPLINK_VPORT;
		rep.netdev = netdev;
		mlx5_eswitch_register_vport_rep(esw, 0, &rep);
	}

	if (netdev->reg_state != NETREG_REGISTERED)
		return;

	/* Device already registered: sync netdev system state */
	if (mlx5e_vxlan_allowed(mdev)) {
		rtnl_lock();
		udp_tunnel_get_rx_info(netdev);
		rtnl_unlock();
	}

	queue_work(priv->wq, &priv->set_rx_mode_work);
}

static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	queue_work(priv->wq, &priv->set_rx_mode_work);
	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		mlx5_eswitch_unregister_vport_rep(esw, 0);
	mlx5e_disable_async_events(priv);
	mlx5_lag_remove(mdev);
}
static const struct mlx5e_profile mlx5e_nic_profile = {
	.init		   = mlx5e_nic_init,
	.cleanup	   = mlx5e_nic_cleanup,
	.init_rx	   = mlx5e_init_nic_rx,
	.cleanup_rx	   = mlx5e_cleanup_nic_rx,
	.init_tx	   = mlx5e_init_nic_tx,
	.cleanup_tx	   = mlx5e_cleanup_nic_tx,
	.enable		   = mlx5e_nic_enable,
	.disable	   = mlx5e_nic_disable,
	.update_stats	   = mlx5e_update_stats,
	.max_nch	   = mlx5e_get_max_num_channels,
	.max_tc		   = MLX5E_MAX_NUM_TC,
};
struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
				       const struct mlx5e_profile *profile,
				       void *ppriv)
{
	int nch = profile->max_nch(mdev);
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
				    nch * profile->max_tc,
				    nch);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

#ifdef CONFIG_RFS_ACCEL
	netdev->rx_cpu_rmap = mdev->rmap;
#endif

	profile->init(mdev, netdev, profile, ppriv);

	netif_carrier_off(netdev);

	priv = netdev_priv(netdev);

	priv->wq = create_singlethread_workqueue("mlx5e");
	if (!priv->wq)
		goto err_cleanup_nic;

	return netdev;

err_cleanup_nic:
	profile->cleanup(priv);
	free_netdev(netdev);

	return NULL;
}
int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
{
	const struct mlx5e_profile *profile;
	struct mlx5e_priv *priv;
	u16 max_mtu;
	int err;

	priv = netdev_priv(netdev);
	profile = priv->profile;
	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);

	err = profile->init_tx(priv);
	if (err)
		goto out;

	err = mlx5e_open_drop_rq(priv);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_cleanup_tx;
	}

	err = profile->init_rx(priv);
	if (err)
		goto err_close_drop_rq;

	mlx5e_create_q_counter(priv);

	mlx5e_init_l2_addr(priv);

	/* MTU range: 68 - hw-specific max */
	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu);

	mlx5e_set_dev_port_mtu(netdev);

	if (profile->enable)
		profile->enable(priv);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	netif_device_attach(netdev);
	rtnl_unlock();

	return 0;

err_close_drop_rq:
	mlx5e_close_drop_rq(priv);

err_cleanup_tx:
	profile->cleanup_tx(priv);

out:
	return err;
}
static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	int vport;
	u8 mac[ETH_ALEN];

	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return;

	mlx5_query_nic_vport_mac_address(mdev, 0, mac);

	for (vport = 1; vport < total_vfs; vport++) {
		struct mlx5_eswitch_rep rep;

		rep.load = mlx5e_vport_rep_load;
		rep.unload = mlx5e_vport_rep_unload;
		rep.vport = vport;
		ether_addr_copy(rep.hw_id, mac);
		mlx5_eswitch_register_vport_rep(esw, vport, &rep);
	}
}

static void mlx5e_unregister_vport_rep(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	int vport;

	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return;

	for (vport = 1; vport < total_vfs; vport++)
		mlx5_eswitch_unregister_vport_rep(esw, vport);
}
void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	const struct mlx5e_profile *profile = priv->profile;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_close(netdev);
	netif_device_detach(netdev);
	rtnl_unlock();

	if (profile->disable)
		profile->disable(priv);
	flush_workqueue(priv->wq);

	mlx5e_destroy_q_counter(priv);
	profile->cleanup_rx(priv);
	mlx5e_close_drop_rq(priv);
	profile->cleanup_tx(priv);
	cancel_delayed_work_sync(&priv->update_stats_work);
}
/* mlx5e_attach and mlx5e_detach scope should be limited to creating and
 * destroying hardware contexts and connecting them to the current netdev.
 */
static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;
	int err;

	if (netif_device_present(netdev))
		return 0;

	err = mlx5e_create_mdev_resources(mdev);
	if (err)
		return err;

	err = mlx5e_attach_netdev(mdev, netdev);
	if (err) {
		mlx5e_destroy_mdev_resources(mdev);
		return err;
	}

	mlx5e_register_vport_rep(mdev);
	return 0;
}

static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;

	if (!netif_device_present(netdev))
		return;

	mlx5e_unregister_vport_rep(mdev);
	mlx5e_detach_netdev(mdev, netdev);
	mlx5e_destroy_mdev_resources(mdev);
}
static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	void *ppriv = NULL;
	struct mlx5e_priv *priv;
	int vport;
	int err;
	struct net_device *netdev;

	err = mlx5e_check_required_hca_cap(mdev);
	if (err)
		return NULL;

	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		ppriv = &esw->offloads.vport_reps[0];

	netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
	if (!netdev) {
		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
		goto err_unregister_reps;
	}

	priv = netdev_priv(netdev);

	err = mlx5e_attach(mdev, priv);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_detach;
	}

	return priv;

err_detach:
	mlx5e_detach(mdev, priv);

err_destroy_netdev:
	mlx5e_destroy_netdev(mdev, priv);

err_unregister_reps:
	for (vport = 1; vport < total_vfs; vport++)
		mlx5_eswitch_unregister_vport_rep(esw, vport);

	return NULL;
}
void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;
	struct net_device *netdev = priv->netdev;

	destroy_workqueue(priv->wq);
	if (profile->cleanup)
		profile->cleanup(priv);
	free_netdev(netdev);
}

static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	unregister_netdev(priv->netdev);
	mlx5e_detach(mdev, vpriv);
	mlx5e_destroy_netdev(mdev, priv);
}
static void *mlx5e_get_netdev(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return priv->netdev;
}

static struct mlx5_interface mlx5e_interface = {
	.add       = mlx5e_add,
	.remove    = mlx5e_remove,
	.attach    = mlx5e_attach,
	.detach    = mlx5e_detach,
	.event     = mlx5e_async_event,
	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev   = mlx5e_get_netdev,
};

void mlx5e_init(void)
{
	mlx5e_build_ptys2ethtool_map();
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}