net/mlx5e: IPoIB, Xmit flow
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"

#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)

static inline void mlx5e_tx_dma_unmap(struct device *pdev,
				      struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
				  dma_addr_t addr,
				  u32 size,
				  enum mlx5e_dma_map_type map_type)
{
	u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;

	sq->db.dma_fifo[i].addr = addr;
	sq->db.dma_fifo[i].size = size;
	sq->db.dma_fifo[i].type = map_type;
	sq->dma_fifo_pc++;
}

static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}

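/* Select the TX queue for an skb: hash to a channel via the stack's fallback,
 * clamp to the number of active channels, then pick the per-TC queue from the
 * VLAN priority when multiple traffic classes are configured.
 */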
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int channel_ix = fallback(dev, skb);
	u16 num_channels;
	int up = 0;

	if (!netdev_get_num_tc(dev))
		return channel_ix;

	if (skb_vlan_tag_present(skb))
		up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

	/* channel_ix can be larger than num_channels since
	 * dev->num_real_tx_queues = num_channels * num_tc
	 */
	num_channels = priv->channels.params.num_channels;
	if (channel_ix >= num_channels)
		channel_ix = reciprocal_scale(channel_ix, num_channels);

	return priv->channel_tc2txq[channel_ix][up];
}

static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	struct flow_keys keys;

	if (skb_transport_header_was_set(skb))
		return skb_transport_offset(skb);
	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
		return keys.control.thoff;
	else
		return mlx5e_skb_l2_header_offset(skb);
}

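/* Compute how many header bytes must be copied inline into the WQE's eth
 * segment, according to the SQ's minimum inline mode (none, L2, IP or
 * TCP/UDP).
 */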
static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
						 struct sk_buff *skb)
{
	int hlen;

	switch (mode) {
	case MLX5_INLINE_MODE_NONE:
		return 0;
	case MLX5_INLINE_MODE_TCP_UDP:
		hlen = eth_get_headlen(skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		return hlen;
	case MLX5_INLINE_MODE_IP:
		/* When transport header is set to zero, it means no transport
		 * header. When transport header is set to 0xff's, it means
		 * transport header wasn't set.
		 */
		if (skb_transport_offset(skb))
			return mlx5e_skb_l3_header_offset(skb);
		/* fall through */
	case MLX5_INLINE_MODE_L2:
	default:
		return mlx5e_skb_l2_header_offset(skb);
	}
}

static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
					    unsigned int *skb_len,
					    unsigned int len)
{
	*skb_len -= len;
	*skb_data += len;
}

static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
				     unsigned char **skb_data,
				     unsigned int *skb_len)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(vhdr, *skb_data, cpy1_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
}

static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats.csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
		}
	} else
		sq->stats.csum_none++;
}

static inline u16
mlx5e_txwqe_build_eseg_gso(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			   struct mlx5_wqe_eth_seg *eseg, unsigned int *num_bytes)
{
	u16 ihs;

	eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);

	if (skb->encapsulation) {
		ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
		sq->stats.tso_inner_packets++;
		sq->stats.tso_inner_bytes += skb->len - ihs;
	} else {
		ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
		sq->stats.tso_packets++;
		sq->stats.tso_bytes += skb->len - ihs;
	}

	*num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
	return ihs;
}

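/* DMA-map the linear part (headlen bytes) and every page fragment of the skb,
 * writing one data segment per mapping. Returns the number of data segments
 * built, or -ENOMEM on a mapping failure.
 */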
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			unsigned char *skb_data, u16 headlen,
			struct mlx5_wqe_data_seg *dseg)
{
	dma_addr_t dma_addr = 0;
	u8 num_dma = 0;
	int i;

	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			return -ENOMEM;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		num_dma++;
		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			return -ENOMEM;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		num_dma++;
		dseg++;
	}

	return num_dma;
}

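/* Finalize a send WQE: record accounting info in the wqe_info, fill the
 * control segment, advance the producer counter, stop the queue when fewer
 * than MLX5E_SQ_STOP_ROOM WQEBBs remain, ring the doorbell, and pad the end
 * of the SQ with NOPs so a WQE never wraps around the work queue.
 */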
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     u8 opcode, u16 ds_cnt, u32 num_bytes, u8 num_dma,
		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi;

	wi->num_bytes = num_bytes;
	wi->num_dma = num_dma;
	wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	wi->skb = skb;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	netdev_tx_sent_queue(sq->txq, num_bytes);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	sq->pc += wi->num_wqebbs;
	if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats.stopped++;
	}

	if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);

	/* fill sq edge with nops to avoid wqe wrap around */
	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
		sq->db.wqe_info[pi].skb = NULL;
		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		sq->stats.nop++;
	}
}

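/* Build and post a single Ethernet send WQE: checksum offload flags, LSO or
 * min-inline headers, optional VLAN insertion, data segments for the skb,
 * and finally completion bookkeeping via mlx5e_txwqe_complete().
 */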
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
	struct mlx5_wq_cyc *wq = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg *eseg = &wqe->eth;

	unsigned char *skb_data = skb->data;
	unsigned int skb_len = skb->len;
	u8 opcode = MLX5_OPCODE_SEND;
	unsigned int num_bytes;
	int num_dma;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;

	memset(wqe, 0, sizeof(*wqe));

	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	if (skb_is_gso(skb)) {
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
		sq->stats.packets += skb_shinfo(skb)->gso_segs;
	} else {
		ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		sq->stats.packets++;
	}
	sq->stats.bytes += num_bytes;
	sq->stats.xmit_more += skb->xmit_more;

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs) {
		if (skb_vlan_tag_present(skb)) {
			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
			ihs += VLAN_HLEN;
		} else {
			memcpy(eseg->inline_hdr.start, skb_data, ihs);
			mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
		}
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
	} else if (skb_vlan_tag_present(skb)) {
		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
	}

	headlen = skb_len - skb->data_len;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
					  (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
	if (unlikely(num_dma < 0))
		goto dma_unmap_wqe_err;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
			     num_bytes, num_dma, wi, cseg);

	return NETDEV_TX_OK;

dma_unmap_wqe_err:
	sq->stats.dropped++;
	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];

	return mlx5e_sq_xmit(sq, skb);
}

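/* Process TX completions for one CQ within the NAPI budget: release DMA
 * mappings, report hardware timestamps, free skbs, update BQL accounting,
 * and wake the queue once enough room is available again.
 */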
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_txqsq *sq;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_txqsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
		struct mlx5_cqe64 *cqe;
		u16 wqe_counter;
		bool last_wqe;

		cqe = mlx5e_get_cqe(cq);
		if (!cqe)
			break;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			struct mlx5e_tx_wqe_info *wi;
			struct sk_buff *skb;
			u16 ci;
			int j;

			last_wqe = (sqcc == wqe_counter);

			ci = sqcc & sq->wq.sz_m1;
			wi = &sq->db.wqe_info[ci];
			skb = wi->skb;

			if (unlikely(!skb)) { /* nop */
				sqcc++;
				continue;
			}

			if (unlikely(skb_shinfo(skb)->tx_flags &
				     SKBTX_HW_TSTAMP)) {
				struct skb_shared_hwtstamps hwts = {};

				mlx5e_fill_hwstamp(sq->tstamp,
						   get_cqe_ts(cqe), &hwts);
				skb_tstamp_tx(skb, &hwts);
			}

			for (j = 0; j < wi->num_dma; j++) {
				struct mlx5e_sq_dma *dma =
					mlx5e_dma_get(sq, dma_fifo_cc++);

				mlx5e_tx_dma_unmap(sq->pdev, dma);
			}

			npkts++;
			nbytes += wi->num_bytes;
			sqcc += wi->num_wqebbs;
			napi_consume_skb(skb, napi_budget);
		} while (!last_wqe);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM)) {
		netif_tx_wake_queue(sq->txq);
		sq->stats.wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

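/* Drain all outstanding WQEs of an SQ being torn down: unmap their DMA
 * buffers and free the attached skbs without processing completions.
 */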
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	struct sk_buff *skb;
	u16 ci;
	int i;

	while (sq->cc != sq->pc) {
		ci = sq->cc & sq->wq.sz_m1;
		wi = &sq->db.wqe_info[ci];
		skb = wi->skb;

		if (!skb) { /* nop */
			sq->cc++;
			continue;
		}

		for (i = 0; i < wi->num_dma; i++) {
			struct mlx5e_sq_dma *dma =
				mlx5e_dma_get(sq, sq->dma_fifo_cc++);

			mlx5e_tx_dma_unmap(sq->pdev, dma);
		}

		dev_kfree_skb_any(skb);
		sq->cc += wi->num_wqebbs;
	}
}

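/* IPoIB transmit path over a UD QP. The WQE layout differs from Ethernet:
 * a datagram segment carrying the address vector (and qkey) sits between the
 * control and eth segments, so mlx5i_sq_xmit builds its own WQE while reusing
 * the common csum/GSO/dseg/complete helpers above.
 */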
#ifdef CONFIG_MLX5_CORE_IPOIB

struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};

struct mlx5i_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_datagram_seg datagram;
	struct mlx5_wqe_eth_pad pad;
	struct mlx5_wqe_eth_seg eth;
};

static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
			   struct mlx5_wqe_datagram_seg *dseg)
{
	memcpy(&dseg->av, av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			  struct mlx5_av *av, u32 dqpn, u32 dqkey)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5i_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_datagram_seg *datagram = &wqe->datagram;
	struct mlx5_wqe_eth_seg *eseg = &wqe->eth;

	unsigned char *skb_data = skb->data;
	unsigned int skb_len = skb->len;
	u8 opcode = MLX5_OPCODE_SEND;
	unsigned int num_bytes;
	int num_dma;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;

	memset(wqe, 0, sizeof(*wqe));

	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	if (skb_is_gso(skb)) {
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
	} else {
		ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
	}

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs) {
		memcpy(eseg->inline_hdr.start, skb_data, ihs);
		mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
	}

	headlen = skb_len - skb->data_len;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
					  (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
	if (unlikely(num_dma < 0))
		goto dma_unmap_wqe_err;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
			     num_bytes, num_dma, wi, cseg);

	return NETDEV_TX_OK;

dma_unmap_wqe_err:
	sq->stats.dropped++;
	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

#endif