/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/* Copyright © 2003-2011 Emulex. All rights reserved. */

/*
 * Source file containing the implementation of the Transmit
 * Path
 */

#include <oce_impl.h>
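/*
 * Transmit path overview:
 *
 * oce_send_packet() builds one NIC header WQE followed by a fragment WQE for
 * each data segment.  Segments smaller than dev->tx_bcopy_limit are bcopy'd
 * into preallocated DMA buffers drawn from the per-WQ buffer cache; larger
 * segments are DMA-bound on the fly with handles from the per-WQ mapping
 * handle cache.  The WQEs are then copied onto the WQ ring and the Tx
 * doorbell is rung.  Completions are reaped in oce_process_tx_compl(), which
 * returns buffers and mapping handles to their caches and credits wq_free.
 */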
static void oce_free_wqed(struct oce_wq *wq, oce_wqe_desc_t *wqed);
static int oce_map_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed,
    mblk_t *mp, uint32_t pkt_len);
static int oce_bcopy_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
    uint32_t pkt_len);
static void oce_wqb_dtor(struct oce_wq *wq, oce_wq_bdesc_t *wqbd);
static int oce_wqb_ctor(oce_wq_bdesc_t *wqbd, struct oce_wq *wq,
    size_t size, int flags);
static inline oce_wq_bdesc_t *oce_wqb_alloc(struct oce_wq *wq);
static void oce_wqb_free(struct oce_wq *wq, oce_wq_bdesc_t *wqbd);

static void oce_wqmd_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd);
static void oce_wqm_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd);
static oce_wq_mdesc_t *oce_wqm_alloc(struct oce_wq *wq);
static int oce_wqm_ctor(oce_wq_mdesc_t *wqmd, struct oce_wq *wq);
static void oce_wqm_dtor(struct oce_wq *wq, oce_wq_mdesc_t *wqmd);
static void oce_fill_ring_descs(struct oce_wq *wq, oce_wqe_desc_t *wqed);
static void oce_remove_vtag(mblk_t *mp);
static void oce_insert_vtag(mblk_t *mp, uint16_t vlan_tag);
static inline int oce_process_tx_compl(struct oce_wq *wq, boolean_t rearm);
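/*
 * Two DMA attribute sets are used on the transmit side: tx_map_dma_attr is
 * used by oce_wqm_ctor() for the handles that bind mblk fragments on the
 * fly (scatter/gather of up to OCE_MAX_TXDMA_COOKIES cookies), while
 * oce_tx_dma_buf_attr describes the preallocated, single-cookie buffers
 * that the bcopy path copies small packets into.
 */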
static ddi_dma_attr_t tx_map_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0x0000000000000000ull,	/* low address */
	0xFFFFFFFFFFFFFFFFull,	/* high address */
	0x0000000000010000ull,	/* dma counter max */
	OCE_TXMAP_ALIGN,	/* alignment */
	0x7FF,			/* burst sizes */
	0x00000001,		/* minimum transfer size */
	0x00000000FFFFFFFFull,	/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,	/* maximum segment size */
	OCE_MAX_TXDMA_COOKIES,	/* scatter/gather list length */
	0x00000001,		/* granularity */
	DDI_DMA_FLAGERR		/* dma_attr_flags */
};

ddi_dma_attr_t oce_tx_dma_buf_attr = {
	DMA_ATTR_V0,		/* version number */
	0x0000000000000000ull,	/* low address */
	0xFFFFFFFFFFFFFFFFull,	/* high address */
	0x00000000FFFFFFFFull,	/* dma counter max */
	OCE_DMA_ALIGNMENT,	/* alignment */
	0x000007FF,		/* burst sizes */
	0x00000001,		/* minimum transfer size */
	0x00000000FFFFFFFFull,	/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,	/* maximum segment size */
	1,			/* scatter/gather list length */
	0x00000001,		/* granularity */
	DDI_DMA_FLAGERR		/* dma_attr_flags */
};
/*
 * WQ map handle destructor
 *
 * wq - Pointer to WQ structure
 * wqmd - pointer to WQE mapping handle descriptor
 *
 * return none
 */
static void
oce_wqm_dtor(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
{
	_NOTE(ARGUNUSED(wq));
	/* Free the DMA handle */
	if (wqmd->dma_handle != NULL)
		(void) ddi_dma_free_handle(&(wqmd->dma_handle));
	wqmd->dma_handle = NULL;
} /* oce_wqm_dtor */
/*
 * WQ map handle constructor
 *
 * wqmd - pointer to WQE mapping handle descriptor
 * wq - Pointer to WQ structure
 *
 * return DDI_SUCCESS=>success, DDI_FAILURE=>error
 */
static int
oce_wqm_ctor(oce_wq_mdesc_t *wqmd, struct oce_wq *wq)
{
	struct oce_dev *dev;
	int ret;

	dev = wq->parent;
	/* Allocate DMA handle */
	ret = ddi_dma_alloc_handle(dev->dip, &tx_map_dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &wqmd->dma_handle);

	return (ret);
} /* oce_wqm_ctor */
/*
 * function to create WQ mapping handles cache
 *
 * wq - pointer to WQ structure
 *
 * return DDI_SUCCESS=>success, DDI_FAILURE=>error
 */
int
oce_wqm_cache_create(struct oce_wq *wq)
{
	struct oce_dev *dev = wq->parent;
	int size;
	int cnt;
	int ret;

	size = wq->cfg.nhdl * sizeof (oce_wq_mdesc_t);
	wq->wq_mdesc_array = kmem_zalloc(size, KM_NOSLEEP);
	if (wq->wq_mdesc_array == NULL) {
		return (DDI_FAILURE);
	}

	/* Create the free buffer list */
	OCE_LIST_CREATE(&wq->wq_mdesc_list, DDI_INTR_PRI(dev->intr_pri));

	for (cnt = 0; cnt < wq->cfg.nhdl; cnt++) {
		ret = oce_wqm_ctor(&wq->wq_mdesc_array[cnt], wq);
		if (ret != DDI_SUCCESS) {
			goto wqm_fail;
		}
		OCE_LIST_INSERT_TAIL(&wq->wq_mdesc_list,
		    &wq->wq_mdesc_array[cnt]);
	}
	return (DDI_SUCCESS);

wqm_fail:
	oce_wqm_cache_destroy(wq);
	return (DDI_FAILURE);
} /* oce_wqm_cache_create */
/*
 * function to destroy WQ mapping handles cache
 *
 * wq - pointer to WQ structure
 *
 * return none
 */
void
oce_wqm_cache_destroy(struct oce_wq *wq)
{
	oce_wq_mdesc_t *wqmd;

	while ((wqmd = OCE_LIST_REM_HEAD(&wq->wq_mdesc_list)) != NULL) {
		oce_wqm_dtor(wq, wqmd);
	}

	kmem_free(wq->wq_mdesc_array,
	    wq->cfg.nhdl * sizeof (oce_wq_mdesc_t));

	OCE_LIST_DESTROY(&wq->wq_mdesc_list);
} /* oce_wqm_cache_destroy */
/*
 * function to create WQ buffer cache
 *
 * wq - pointer to WQ structure
 * buf_size - size of the buffer
 *
 * return DDI_SUCCESS=>success, DDI_FAILURE=>error
 */
int
oce_wqb_cache_create(struct oce_wq *wq, size_t buf_size)
{
	struct oce_dev *dev = wq->parent;
	int size;
	int cnt;
	int ret;

	size = wq->cfg.nbufs * sizeof (oce_wq_bdesc_t);
	wq->wq_bdesc_array = kmem_zalloc(size, KM_NOSLEEP);
	if (wq->wq_bdesc_array == NULL) {
		return (DDI_FAILURE);
	}

	/* Create the free buffer list */
	OCE_LIST_CREATE(&wq->wq_buf_list, DDI_INTR_PRI(dev->intr_pri));

	for (cnt = 0; cnt < wq->cfg.nbufs; cnt++) {
		ret = oce_wqb_ctor(&wq->wq_bdesc_array[cnt],
		    wq, buf_size, DDI_DMA_STREAMING);
		if (ret != DDI_SUCCESS) {
			goto wqb_fail;
		}
		OCE_LIST_INSERT_TAIL(&wq->wq_buf_list,
		    &wq->wq_bdesc_array[cnt]);
	}
	return (DDI_SUCCESS);

wqb_fail:
	oce_wqb_cache_destroy(wq);
	return (DDI_FAILURE);
} /* oce_wqb_cache_create */
/*
 * function to destroy WQ buffer cache
 *
 * wq - pointer to WQ structure
 *
 * return none
 */
void
oce_wqb_cache_destroy(struct oce_wq *wq)
{
	oce_wq_bdesc_t *wqbd;

	while ((wqbd = OCE_LIST_REM_HEAD(&wq->wq_buf_list)) != NULL) {
		oce_wqb_dtor(wq, wqbd);
	}
	kmem_free(wq->wq_bdesc_array,
	    wq->cfg.nbufs * sizeof (oce_wq_bdesc_t));
	OCE_LIST_DESTROY(&wq->wq_buf_list);
} /* oce_wqb_cache_destroy */
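/*
 * Illustrative pairing of the cache setup/teardown routines above; the
 * actual call sites and the buffer size argument live in the WQ init/fini
 * code elsewhere in the driver, so treat this as a sketch only:
 *
 *	if (oce_wqm_cache_create(wq) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	if (oce_wqb_cache_create(wq, tx_buf_size) != DDI_SUCCESS) {
 *		oce_wqm_cache_destroy(wq);
 *		return (DDI_FAILURE);
 *	}
 *	...
 *	oce_wqb_cache_destroy(wq);
 *	oce_wqm_cache_destroy(wq);
 */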
/*
 * WQ buffer constructor
 *
 * wqbd - pointer to WQ buffer descriptor
 * wq - pointer to WQ structure
 * size - size of the buffer
 * flags - KM_SLEEP or KM_NOSLEEP
 *
 * return DDI_SUCCESS=>success, DDI_FAILURE=>error
 */
static int
oce_wqb_ctor(oce_wq_bdesc_t *wqbd, struct oce_wq *wq, size_t size, int flags)
{
	struct oce_dev *dev;
	dev = wq->parent;

	wqbd->wqb = oce_alloc_dma_buffer(dev, size, &oce_tx_dma_buf_attr,
	    flags);
	if (wqbd->wqb == NULL) {
		return (DDI_FAILURE);
	}
	wqbd->frag_addr.dw.addr_lo = ADDR_LO(wqbd->wqb->addr);
	wqbd->frag_addr.dw.addr_hi = ADDR_HI(wqbd->wqb->addr);
	return (DDI_SUCCESS);
} /* oce_wqb_ctor */
/*
 * WQ buffer destructor
 *
 * wq - pointer to WQ structure
 * wqbd - pointer to WQ buffer descriptor
 *
 * return none
 */
static void
oce_wqb_dtor(struct oce_wq *wq, oce_wq_bdesc_t *wqbd)
{
	oce_free_dma_buffer(wq->parent, wqbd->wqb);
} /* oce_wqb_dtor */
/*
 * function to alloc WQE buffer descriptor
 *
 * wq - pointer to WQ structure
 *
 * return pointer to WQE buffer descriptor
 */
static inline oce_wq_bdesc_t *
oce_wqb_alloc(struct oce_wq *wq)
{
	return (OCE_LIST_REM_HEAD(&wq->wq_buf_list));
} /* oce_wqb_alloc */
/*
 * function to free WQE buffer descriptor
 *
 * wq - pointer to WQ structure
 * wqbd - pointer to WQ buffer descriptor
 *
 * return none
 */
static inline void
oce_wqb_free(struct oce_wq *wq, oce_wq_bdesc_t *wqbd)
{
	OCE_LIST_INSERT_TAIL(&wq->wq_buf_list, wqbd);
} /* oce_wqb_free */
/*
 * function to allocate WQE mapping descriptor
 *
 * wq - pointer to WQ structure
 *
 * return pointer to WQE mapping descriptor
 */
static inline oce_wq_mdesc_t *
oce_wqm_alloc(struct oce_wq *wq)
{
	return (OCE_LIST_REM_HEAD(&wq->wq_mdesc_list));
} /* oce_wqm_alloc */
/*
 * function to insert WQE mapping descriptor to the list
 *
 * wq - pointer to WQ structure
 * wqmd - Pointer to WQ mapping descriptor
 *
 * return none
 */
static inline void
oce_wqm_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
{
	OCE_LIST_INSERT_TAIL(&wq->wq_mdesc_list, wqmd);
} /* oce_wqm_free */
/*
 * function to free WQE mapping descriptor
 *
 * wq - pointer to WQ structure
 * wqmd - Pointer to WQ mapping descriptor
 *
 * return none
 */
static void
oce_wqmd_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
{
	if (wqmd == NULL) {
		return;
	}
	(void) ddi_dma_unbind_handle(wqmd->dma_handle);
	oce_wqm_free(wq, wqmd);
} /* oce_wqmd_free */
/*
 * WQED kmem_cache constructor
 *
 * buf - pointer to WQE descriptor
 *
 * return DDI_SUCCESS
 */
int
oce_wqe_desc_ctor(void *buf, void *arg, int kmflags)
{
	_NOTE(ARGUNUSED(buf));
	_NOTE(ARGUNUSED(arg));
	_NOTE(ARGUNUSED(kmflags));

	return (DDI_SUCCESS);
} /* oce_wqe_desc_ctor */
/*
 * WQED kmem_cache destructor
 *
 * buf - pointer to WQE descriptor
 *
 * return none
 */
void
oce_wqe_desc_dtor(void *buf, void *arg)
{
	_NOTE(ARGUNUSED(buf));
	_NOTE(ARGUNUSED(arg));
} /* oce_wqe_desc_dtor */
/*
 * function to choose a WQ given a mblk depending on priority, flowID etc.
 *
 * dev - software handle to device
 * mp - the mblk to send
 *
 * return pointer to the WQ selected
 */
static uint8_t oce_tx_hash_policy = 0x4;
struct oce_wq *
oce_get_wq(struct oce_dev *dev, mblk_t *mp)
{
	struct oce_wq *wq;
	int qidx = 0;

	if (dev->nwqs > 1) {
		qidx = mac_pkt_hash(DL_ETHER, mp, oce_tx_hash_policy, B_TRUE);
		qidx = qidx % dev->nwqs;
	} else {
		qidx = 0;
	}

	wq = dev->wq[qidx];

	return (wq);
} /* oce_get_wq */
/*
 * function to populate the WQE descriptors onto the WQ ring
 *
 * wq - pointer to wq
 * wqed - pointer to WQ entry descriptor
 *
 * return none
 */
#pragma inline(oce_fill_ring_descs)
static void
oce_fill_ring_descs(struct oce_wq *wq, oce_wqe_desc_t *wqed)
{
	struct oce_nic_frag_wqe *wqe;
	int i;

	/* Copy the precreated WQE descs to the ring desc */
	for (i = 0; i < wqed->wqe_cnt; i++) {
		wqe = RING_GET_PRODUCER_ITEM_VA(wq->ring,
		    struct oce_nic_frag_wqe);

		bcopy(&wqed->frag[i], wqe, NIC_WQE_SIZE);
		RING_PUT(wq->ring, 1);
	}
} /* oce_fill_ring_descs */
/*
 * function to copy the packet to a preallocated Tx buffer
 *
 * wq - pointer to WQ
 * wqed - Pointer to WQE descriptor
 * mp - Pointer to packet chain
 * pkt_len - Size of the packet
 *
 * return 0=>success, error code otherwise
 */
static int
oce_bcopy_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
    uint32_t pkt_len)
{
	oce_wq_bdesc_t *wqbd;
	caddr_t buf_va;
	struct oce_dev *dev = wq->parent;
	int len = 0;

	wqbd = oce_wqb_alloc(wq);
	if (wqbd == NULL) {
		atomic_inc_32(&dev->tx_noxmtbuf);
		oce_log(dev, CE_WARN, MOD_TX, "%s",
		    "wqb pool empty");
		return (ENOMEM);
	}

	/* create a fragment wqe for the packet */
	wqed->frag[wqed->frag_idx].u0.s.frag_pa_hi = wqbd->frag_addr.dw.addr_hi;
	wqed->frag[wqed->frag_idx].u0.s.frag_pa_lo = wqbd->frag_addr.dw.addr_lo;
	buf_va = DBUF_VA(wqbd->wqb);

	/* copy pkt into buffer */
	for (len = 0; mp != NULL && len < pkt_len; mp = mp->b_cont) {
		bcopy(mp->b_rptr, buf_va, MBLKL(mp));
		buf_va += MBLKL(mp);
		len += MBLKL(mp);
	}

	(void) ddi_dma_sync(DBUF_DHDL(wqbd->wqb), 0, pkt_len,
	    DDI_DMA_SYNC_FORDEV);

	if (oce_fm_check_dma_handle(dev, DBUF_DHDL(wqbd->wqb))) {
		ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
		/* Free the buffer */
		oce_wqb_free(wq, wqbd);
		return (EIO);
	}
	wqed->frag[wqed->frag_idx].u0.s.frag_len = pkt_len;
	wqed->hdesc[wqed->nhdl].hdl = (void *)(wqbd);
	wqed->hdesc[wqed->nhdl].type = COPY_WQE;
	wqed->frag_cnt++;
	wqed->frag_idx++;
	wqed->nhdl++;
	return (0);
} /* oce_bcopy_wqe */
/*
 * function to DMA map a packet fragment on the fly for transmit
 *
 * wq - pointer to WQ
 * wqed - Pointer to WQE descriptor
 * mp - Pointer to packet chain
 * pkt_len - length of the fragment to map
 *
 * return 0=>success, error code otherwise
 */
static int
oce_map_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
    uint32_t pkt_len)
{
	ddi_dma_cookie_t cookie;
	oce_wq_mdesc_t *wqmd;
	uint32_t ncookies;
	int ret;
	struct oce_dev *dev = wq->parent;

	wqmd = oce_wqm_alloc(wq);
	if (wqmd == NULL) {
		oce_log(dev, CE_WARN, MOD_TX, "%s",
		    "wqm pool empty");
		return (ENOMEM);
	}

	ret = ddi_dma_addr_bind_handle(wqmd->dma_handle,
	    (struct as *)0, (caddr_t)mp->b_rptr,
	    pkt_len, DDI_DMA_WRITE | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		oce_log(dev, CE_WARN, MOD_TX, "MAP FAILED %d",
		    ret);
		/* free the last one */
		oce_wqm_free(wq, wqmd);
		return (ENOMEM);
	}
	do {
		wqed->frag[wqed->frag_idx].u0.s.frag_pa_hi =
		    ADDR_HI(cookie.dmac_laddress);
		wqed->frag[wqed->frag_idx].u0.s.frag_pa_lo =
		    ADDR_LO(cookie.dmac_laddress);
		wqed->frag[wqed->frag_idx].u0.s.frag_len =
		    (uint32_t)cookie.dmac_size;
		wqed->frag_cnt++;
		wqed->frag_idx++;
		if (--ncookies > 0)
			ddi_dma_nextcookie(wqmd->dma_handle,
			    &cookie);
		else
			break;
	} while (ncookies > 0);

	wqed->hdesc[wqed->nhdl].hdl = (void *)wqmd;
	wqed->hdesc[wqed->nhdl].type = MAPPED_WQE;
	wqed->nhdl++;
	return (0);
} /* oce_map_wqe */
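/*
 * function to process Tx completions and reclaim posted WQEs
 *
 * wq - pointer to WQ
 * rearm - B_TRUE to re-arm the CQ after processing
 *
 * return number of CQEs processed
 */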
static inline int
oce_process_tx_compl(struct oce_wq *wq, boolean_t rearm)
{
	struct oce_nic_tx_cqe *cqe;
	uint16_t num_cqe = 0;
	struct oce_cq *cq;
	oce_wqe_desc_t *wqed;
	int wqe_freed = 0;
	struct oce_dev *dev;

	cq = wq->cq;
	dev = wq->parent;
	(void) ddi_dma_sync(cq->ring->dbuf->dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORKERNEL);

	mutex_enter(&wq->txc_lock);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
	while (WQ_CQE_VALID(cqe)) {

		DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_tx_cqe));

		/* update stats */
		if (cqe->u0.s.status != 0) {
			atomic_inc_32(&dev->tx_errors);
		}

		/* complete the WQEs */
		wqed = OCE_LIST_REM_HEAD(&wq->wqe_desc_list);

		wqe_freed = wqed->wqe_cnt;
		oce_free_wqed(wq, wqed);
		RING_GET(wq->ring, wqe_freed);
		atomic_add_32(&wq->wq_free, wqe_freed);
		/* clear the valid bit and progress cqe */
		WQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct oce_nic_tx_cqe);
		num_cqe++;
	} /* for all valid CQE */
	mutex_exit(&wq->txc_lock);
	if (num_cqe)
		oce_arm_cq(wq->parent, cq->cq_id, num_cqe, rearm);
	return (num_cqe);
} /* oce_process_tx_compl */
/*
 * function to drain a TxCQ and process its CQEs
 *
 * arg - pointer to the WQ whose CQ is to be drained (passed as void *)
 *
 * return the number of CQEs processed
 */
uint16_t
oce_drain_wq_cq(void *arg)
{
	uint16_t num_cqe = 0;
	struct oce_dev *dev;
	struct oce_wq *wq;

	wq = (struct oce_wq *)arg;
	dev = wq->parent;

	/* process CQEs until we reach one that is not valid */
	num_cqe = oce_process_tx_compl(wq, B_FALSE);

	/* check if we need to restart Tx */
	if (wq->resched && num_cqe) {
		wq->resched = B_FALSE;
		mac_tx_update(dev->mac_handle);
	}

	return (num_cqe);
} /* oce_drain_wq_cq */
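/*
 * VLAN tag handling: oce_insert_vtag() and oce_remove_vtag() slide the
 * 12 bytes of destination and source MAC addresses by VTAG_SIZE to open up
 * or close the 4-byte 802.1Q TPID/TCI field that sits between the MAC
 * addresses and the ethertype.
 */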
/*
 * function to insert vtag to packet
 *
 * mp - mblk pointer
 * vlan_tag - tag to be inserted
 *
 * return none
 */
static inline void
oce_insert_vtag(mblk_t *mp, uint16_t vlan_tag)
{
	struct ether_vlan_header *evh;

	(void) memmove(mp->b_rptr - VTAG_SIZE,
	    mp->b_rptr, 2 * ETHERADDRL);
	mp->b_rptr -= VTAG_SIZE;
	evh = (struct ether_vlan_header *)(void *)mp->b_rptr;
	evh->ether_tpid = htons(VLAN_TPID);
	evh->ether_tci = htons(vlan_tag);
} /* oce_insert_vtag */
/*
 * function to strip vtag from packet
 *
 * mp - mblk pointer
 *
 * return none
 */
static inline void
oce_remove_vtag(mblk_t *mp)
{
	(void) memmove(mp->b_rptr + VTAG_SIZE, mp->b_rptr,
	    ETHERADDRL * 2);
	mp->b_rptr += VTAG_SIZE;
} /* oce_remove_vtag */
/*
 * function to xmit a single packet over the wire
 *
 * wq - pointer to WQ
 * mp - Pointer to packet chain
 *
 * return pointer to the packet
 */
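/*
 * Return convention: NULL means mp was consumed (either posted for transmit
 * or dropped); returning mp means the WQ is out of resources and the caller
 * should retry the same mblk once Tx completions have freed up entries.
 */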
mblk_t *
oce_send_packet(struct oce_wq *wq, mblk_t *mp)
{
	struct oce_nic_hdr_wqe *wqeh;
	struct oce_dev *dev;
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	int32_t num_wqes;
	uint16_t etype;
	uint32_t ip_offset;
	uint32_t csum_flags = 0;
	boolean_t use_copy = B_FALSE;
	boolean_t tagged = B_FALSE;
	uint16_t vlan_tag;
	uint32_t reg_value = 0;
	oce_wqe_desc_t *wqed = NULL;
	mblk_t *nmp = NULL;
	mblk_t *tmp = NULL;
	uint32_t pkt_len = 0;
	int num_mblks = 0;
	int ret = 0;
	uint32_t mss = 0;
	uint32_t flags = 0;
	int len = 0;

	/* retrieve the adapter private struct ptr */
	dev = wq->parent;

	/* check if we have enough free slots */
	if (wq->wq_free < dev->tx_reclaim_threshold) {
		(void) oce_process_tx_compl(wq, B_FALSE);
	}
	if (wq->wq_free < OCE_MAX_TX_HDL) {
		return (mp);
	}
	/* check if we should copy */
	for (tmp = mp; tmp != NULL; tmp = tmp->b_cont) {
		pkt_len += MBLKL(tmp);
		num_mblks++;
	}

	if (pkt_len == 0 || num_mblks == 0) {
		freemsg(mp);
		return (NULL);
	}

	/* retrieve LSO information */
	mac_lso_get(mp, &mss, &flags);

	/* get the offload flags */
	mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &csum_flags);

	/* restrict the mapped segment to what we support */
	if (num_mblks > OCE_MAX_TX_HDL) {
		nmp = msgpullup(mp, -1);
		if (nmp == NULL) {
			atomic_inc_32(&wq->pkt_drops);
			freemsg(mp);
			return (NULL);
		}
		/* Reset it to new collapsed mp */
		freemsg(mp);
		mp = nmp;
	}

	/* Get the packet descriptor for Tx */
	wqed = kmem_cache_alloc(wq->wqed_cache, KM_NOSLEEP);
	if (wqed == NULL) {
		atomic_inc_32(&wq->pkt_drops);
		freemsg(mp);
		return (NULL);
	}
	eh = (struct ether_header *)(void *)mp->b_rptr;
	if (ntohs(eh->ether_type) == VLAN_TPID) {
		evh = (struct ether_vlan_header *)(void *)mp->b_rptr;
		tagged = B_TRUE;
		etype = ntohs(evh->ether_type);
		ip_offset = sizeof (struct ether_vlan_header);
		pkt_len -= VTAG_SIZE;
		vlan_tag = ntohs(evh->ether_tci);
		oce_remove_vtag(mp);
	} else {
		etype = ntohs(eh->ether_type);
		ip_offset = sizeof (struct ether_header);
	}

	/* Save the WQ pointer */
	wqed->wq = wq;
	wqed->frag_idx = 1; /* index zero is always header */
	wqed->frag_cnt = 0;
	wqed->nhdl = 0;
	wqed->mp = NULL;
	OCE_LIST_LINK_INIT(&wqed->link);
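	/*
	 * Fragment strategy: packets (or individual fragments) smaller than
	 * dev->tx_bcopy_limit are copied into a preallocated Tx buffer,
	 * which avoids a DMA bind/unbind per send; larger fragments are
	 * DMA-mapped in place.
	 */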
	/* If the entire packet is less than the copy limit just do copy */
	if (pkt_len < dev->tx_bcopy_limit) {
		use_copy = B_TRUE;
		ret = oce_bcopy_wqe(wq, wqed, mp, pkt_len);
	} else {
		/* copy or dma map the individual fragments */
		for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) {
			len = MBLKL(nmp);
			if (len == 0) {
				continue;
			}
			if (len < dev->tx_bcopy_limit) {
				ret = oce_bcopy_wqe(wq, wqed, nmp, len);
			} else {
				ret = oce_map_wqe(wq, wqed, nmp, len);
			}
			if (ret != 0)
				break;
		}
	}

	/*
	 * Any failure other than insufficient Q entries
	 * drops the packet
	 */
	if (ret != 0) {
		oce_free_wqed(wq, wqed);
		atomic_inc_32(&wq->pkt_drops);
		freemsg(mp);
		return (NULL);
	}
	wqeh = (struct oce_nic_hdr_wqe *)&wqed->frag[0];
	bzero(wqeh, sizeof (struct oce_nic_hdr_wqe));

	/* fill rest of wqe header fields based on packet */
	if (flags & HW_LSO) {
		wqeh->u0.s.lso = B_TRUE;
		wqeh->u0.s.lso_mss = mss;
	}
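	/*
	 * For full checksum offload, peek at the IP protocol field to decide
	 * between TCP and UDP checksum insertion: proto points at the start
	 * of the IPv4 header, and byte offset 9 of an IPv4 header is the
	 * protocol number (6 = TCP, 17 = UDP).
	 */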
	if (csum_flags & HCK_FULLCKSUM) {
		uint8_t *proto;

		if (etype == ETHERTYPE_IP) {
			proto = (uint8_t *)(void *)
			    (mp->b_rptr + ip_offset);
			if (proto[9] == 6)
				/* IPPROTO_TCP */
				wqeh->u0.s.tcpcs = B_TRUE;
			else if (proto[9] == 17)
				/* IPPROTO_UDP */
				wqeh->u0.s.udpcs = B_TRUE;
		}
	}

	if (csum_flags & HCK_IPV4_HDRCKSUM)
		wqeh->u0.s.ipcs = B_TRUE;
	if (tagged) {
		wqeh->u0.s.vlan = B_TRUE;
		wqeh->u0.s.vlan_tag = vlan_tag;
	}

	wqeh->u0.s.complete = B_TRUE;
	wqeh->u0.s.event = B_TRUE;
	wqeh->u0.s.crc = B_TRUE;
	wqeh->u0.s.total_length = pkt_len;

	num_wqes = wqed->frag_cnt + 1;

	/* h/w expects even no. of WQEs */
	if (num_wqes & 0x1) {
		bzero(&wqed->frag[num_wqes], sizeof (struct oce_nic_frag_wqe));
		num_wqes++;
	}
	wqed->wqe_cnt = (uint16_t)num_wqes;
	wqeh->u0.s.num_wqe = num_wqes;
	DW_SWAP(u32ptr(&wqed->frag[0]), (wqed->wqe_cnt * NIC_WQE_SIZE));
	mutex_enter(&wq->tx_lock);
	if (num_wqes > wq->wq_free) {
		atomic_inc_32(&wq->tx_deferd);
		mutex_exit(&wq->tx_lock);
		goto wqe_fail;
	}
	atomic_add_32(&wq->wq_free, -num_wqes);

	/* fill the wq for adapter */
	oce_fill_ring_descs(wq, wqed);

	/* Set the mp pointer in the wqe descriptor */
	if (use_copy == B_FALSE) {
		wqed->mp = mp;
	}
	/* Add the packet desc to list to be retrieved during cmpl */
	OCE_LIST_INSERT_TAIL(&wq->wqe_desc_list, wqed);
	(void) ddi_dma_sync(wq->ring->dbuf->dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Ring the Tx doorbell: the number of WQEs posted goes in the
	 * upper 16 bits, the WQ id in the lower 16.
	 */
	reg_value = (num_wqes << 16) | wq->wq_id;
	OCE_DB_WRITE32(dev, PD_TXULP_DB, reg_value);
	mutex_exit(&wq->tx_lock);

	if (oce_fm_check_acc_handle(dev, dev->db_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
	}

	/* free mp if its contents were copied */
	if (use_copy == B_TRUE) {
		freemsg(mp);
	}

	return (NULL);
wqe_fail:

	if (tagged) {
		oce_insert_vtag(mp, vlan_tag);
	}
	oce_free_wqed(wq, wqed);
	return (mp);
} /* oce_send_packet */
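/*
 * Descriptor teardown notes: COPY_WQE handles return their preallocated
 * buffer to the WQ buffer list, while MAPPED_WQE handles are DMA-unbound
 * and returned to the mapping-handle list.  wqed->mp is only set for mapped
 * sends (the bcopy path frees the mblk at send time), so the freemsg() in
 * oce_free_wqed() below completes the mapped case.
 */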
/*
 * function to free the WQE descriptor
 *
 * wq - pointer to WQ
 * wqed - Pointer to WQE descriptor
 *
 * return none
 */
#pragma inline(oce_free_wqed)
static void
oce_free_wqed(struct oce_wq *wq, oce_wqe_desc_t *wqed)
{
	int i = 0;

	if (wqed == NULL) {
		return;
	}

	for (i = 0; i < wqed->nhdl; i++) {
		if (wqed->hdesc[i].type == COPY_WQE) {
			oce_wqb_free(wq, wqed->hdesc[i].hdl);
		} else if (wqed->hdesc[i].type == MAPPED_WQE) {
			oce_wqmd_free(wq, wqed->hdesc[i].hdl);
		}
	}
	if (wqed->mp)
		freemsg(wqed->mp);
	kmem_cache_free(wq->wqed_cache, wqed);
} /* oce_free_wqed */
/*
 * function to start the WQ
 *
 * wq - pointer to WQ
 *
 * return DDI_SUCCESS
 */
int
oce_start_wq(struct oce_wq *wq)
{
	_NOTE(ARGUNUSED(wq));
	return (DDI_SUCCESS);
} /* oce_start_wq */
/*
 * function to stop the WQ and clean up pending Tx descriptors
 *
 * wq - pointer to WQ
 *
 * return none
 */
void
oce_clean_wq(struct oce_wq *wq)
{
	oce_wqe_desc_t *wqed;
	int ti;

	/* Wait for already posted Tx to complete */
	for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {
		(void) oce_process_tx_compl(wq, B_FALSE);
		OCE_MSDELAY(1);
	}

	/* Free the remaining descriptors */
	while ((wqed = OCE_LIST_REM_HEAD(&wq->wqe_desc_list)) != NULL) {
		atomic_add_32(&wq->wq_free, wqed->wqe_cnt);
		oce_free_wqed(wq, wqed);
	}
	oce_drain_eq(wq->cq->eq);
} /* oce_clean_wq */
/*
 * function to set the tx mapping handle fma attr
 *
 * fm_caps - capability flags
 *
 * return none
 */
void
oce_set_tx_map_dma_fma_flags(int fm_caps)
{
	if (fm_caps == DDI_FM_NOT_CAPABLE) {
		return;
	}

	if (DDI_FM_DMA_ERR_CAP(fm_caps)) {
		tx_map_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
	} else {
		tx_map_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
	}
} /* oce_set_tx_map_dma_fma_flags */