/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/* Copyright © 2003-2011 Emulex. All rights reserved. */

/*
 * Source file containing the implementation of the Transmit
 * Path
 */

#include <oce_impl.h>
static void oce_free_wqed(struct oce_wq *wq, oce_wqe_desc_t *wqed);
static int oce_map_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed,
    mblk_t *mp, uint32_t pkt_len);
static int oce_bcopy_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
    uint32_t pkt_len);
static void oce_wqb_dtor(struct oce_wq *wq, oce_wq_bdesc_t *wqbd);
static int oce_wqb_ctor(oce_wq_bdesc_t *wqbd, struct oce_wq *wq,
    size_t size, int flags);
static inline oce_wq_bdesc_t *oce_wqb_alloc(struct oce_wq *wq);
static void oce_wqb_free(struct oce_wq *wq, oce_wq_bdesc_t *wqbd);

static void oce_wqmd_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd);
static void oce_wqm_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd);
static oce_wq_mdesc_t *oce_wqm_alloc(struct oce_wq *wq);
static int oce_wqm_ctor(oce_wq_mdesc_t *wqmd, struct oce_wq *wq);
static void oce_wqm_dtor(struct oce_wq *wq, oce_wq_mdesc_t *wqmd);
static void oce_fill_ring_descs(struct oce_wq *wq, oce_wqe_desc_t *wqed);
static void oce_remove_vtag(mblk_t *mp);
static void oce_insert_vtag(mblk_t *mp, uint16_t vlan_tag);
static inline int oce_process_tx_compl(struct oce_wq *wq, boolean_t rearm);
static ddi_dma_attr_t tx_map_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0x0000000000000000ull,	/* low address */
	0xFFFFFFFFFFFFFFFFull,	/* high address */
	0x0000000000010000ull,	/* dma counter max */
	OCE_TXMAP_ALIGN,	/* alignment */
	0x7FF,			/* burst sizes */
	0x00000001,		/* minimum transfer size */
	0x00000000FFFFFFFFull,	/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,	/* maximum segment size */
	OCE_MAX_TXDMA_COOKIES,	/* scatter/gather list length */
	0x00000001,		/* granularity */
	DDI_DMA_FLAGERR		/* dma_attr_flags */
};
ddi_dma_attr_t oce_tx_dma_buf_attr = {
	DMA_ATTR_V0,		/* version number */
	0x0000000000000000ull,	/* low address */
	0xFFFFFFFFFFFFFFFFull,	/* high address */
	0x00000000FFFFFFFFull,	/* dma counter max */
	OCE_DMA_ALIGNMENT,	/* alignment */
	0x000007FF,		/* burst sizes */
	0x00000001,		/* minimum transfer size */
	0x00000000FFFFFFFFull,	/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,	/* maximum segment size */
	1,			/* scatter/gather list length */
	0x00000001,		/* granularity */
	DDI_DMA_FLAGERR		/* dma_attr_flags */
};
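/*
 * Note on the two attribute sets: tx_map_dma_attr allows up to
 * OCE_MAX_TXDMA_COOKIES scatter/gather cookies so that a large mblk can be
 * bound in place, while oce_tx_dma_buf_attr limits the preallocated bcopy
 * buffers to a single cookie, i.e. one physically contiguous fragment.
 */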
/*
 * WQ map handle destructor
 *
 * wq - Pointer to WQ structure
 * wqmd - pointer to WQE mapping handle descriptor
 *
 * return none
 */
static void
oce_wqm_dtor(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
{
	_NOTE(ARGUNUSED(wq));
	/* Free the DMA handle */
	if (wqmd->dma_handle != NULL)
		(void) ddi_dma_free_handle(&(wqmd->dma_handle));
	wqmd->dma_handle = NULL;
} /* oce_wqm_dtor */
/*
 * WQ map handles constructor
 *
 * wqmd - pointer to WQE mapping handle descriptor
 * wq - Pointer to WQ structure
 *
 * return DDI_SUCCESS=>success, DDI_FAILURE=>error
 */
static int
oce_wqm_ctor(oce_wq_mdesc_t *wqmd, struct oce_wq *wq)
{
	struct oce_dev *dev;
	int ret;

	dev = wq->parent;
	/* Allocate DMA handle */
	ret = ddi_dma_alloc_handle(dev->dip, &tx_map_dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &wqmd->dma_handle);

	return ((ret == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
} /* oce_wqm_ctor */
/*
 * function to create WQ mapping handles cache
 *
 * wq - pointer to WQ structure
 *
 * return DDI_SUCCESS=>success, DDI_FAILURE=>error
 */
int
oce_wqm_cache_create(struct oce_wq *wq)
{
	struct oce_dev *dev = wq->parent;
	int size;
	int cnt;
	int ret;

	size = wq->cfg.nhdl * sizeof (oce_wq_mdesc_t);
	wq->wq_mdesc_array = kmem_zalloc(size, KM_NOSLEEP);
	if (wq->wq_mdesc_array == NULL) {
		return (DDI_FAILURE);
	}

	/* Create the free buffer list */
	OCE_LIST_CREATE(&wq->wq_mdesc_list, DDI_INTR_PRI(dev->intr_pri));

	for (cnt = 0; cnt < wq->cfg.nhdl; cnt++) {
		ret = oce_wqm_ctor(&wq->wq_mdesc_array[cnt], wq);
		if (ret != DDI_SUCCESS) {
			goto wqm_fail;
		}
		OCE_LIST_INSERT_TAIL(&wq->wq_mdesc_list,
		    &wq->wq_mdesc_array[cnt]);
	}
	return (DDI_SUCCESS);

wqm_fail:
	oce_wqm_cache_destroy(wq);
	return (DDI_FAILURE);
} /* oce_wqm_cache_create */
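/*
 * Note: allocating the mapping handles up front at WQ-creation time keeps
 * ddi_dma_alloc_handle() out of the transmit hot path; oce_map_wqe() only
 * has to bind and later unbind a handle taken from wq_mdesc_list.
 */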
/*
 * function to destroy WQ mapping handles cache
 *
 * wq - pointer to WQ structure
 *
 * return none
 */
void
oce_wqm_cache_destroy(struct oce_wq *wq)
{
	oce_wq_mdesc_t *wqmd;

	while ((wqmd = OCE_LIST_REM_HEAD(&wq->wq_mdesc_list)) != NULL) {
		oce_wqm_dtor(wq, wqmd);
	}

	kmem_free(wq->wq_mdesc_array,
	    wq->cfg.nhdl * sizeof (oce_wq_mdesc_t));

	OCE_LIST_DESTROY(&wq->wq_mdesc_list);
} /* oce_wqm_cache_destroy */
/*
 * function to create WQ buffer cache
 *
 * wq - pointer to WQ structure
 * buf_size - size of the buffer
 *
 * return DDI_SUCCESS=>success, DDI_FAILURE=>error
 */
int
oce_wqb_cache_create(struct oce_wq *wq, size_t buf_size)
{
	struct oce_dev *dev = wq->parent;
	int size;
	int cnt;
	int ret;

	size = wq->cfg.nbufs * sizeof (oce_wq_bdesc_t);
	wq->wq_bdesc_array = kmem_zalloc(size, KM_NOSLEEP);
	if (wq->wq_bdesc_array == NULL) {
		return (DDI_FAILURE);
	}

	/* Create the free buffer list */
	OCE_LIST_CREATE(&wq->wq_buf_list, DDI_INTR_PRI(dev->intr_pri));

	for (cnt = 0; cnt < wq->cfg.nbufs; cnt++) {
		ret = oce_wqb_ctor(&wq->wq_bdesc_array[cnt],
		    wq, buf_size, DDI_DMA_STREAMING);
		if (ret != DDI_SUCCESS) {
			goto wqb_fail;
		}
		OCE_LIST_INSERT_TAIL(&wq->wq_buf_list,
		    &wq->wq_bdesc_array[cnt]);
	}
	return (DDI_SUCCESS);

wqb_fail:
	oce_wqb_cache_destroy(wq);
	return (DDI_FAILURE);
} /* oce_wqb_cache_create */
/*
 * function to destroy WQ buffer cache
 *
 * wq - pointer to WQ structure
 *
 * return none
 */
void
oce_wqb_cache_destroy(struct oce_wq *wq)
{
	oce_wq_bdesc_t *wqbd;

	while ((wqbd = OCE_LIST_REM_HEAD(&wq->wq_buf_list)) != NULL) {
		oce_wqb_dtor(wq, wqbd);
	}
	kmem_free(wq->wq_bdesc_array,
	    wq->cfg.nbufs * sizeof (oce_wq_bdesc_t));
	OCE_LIST_DESTROY(&wq->wq_buf_list);
} /* oce_wqb_cache_destroy */
/*
 * WQ buffer constructor
 *
 * wqbd - pointer to WQ buffer descriptor
 * wq - pointer to WQ structure
 * size - size of the buffer
 * flags - DDI_DMA_STREAMING or DDI_DMA_CONSISTENT, passed through to
 *	the DMA buffer allocation
 *
 * return DDI_SUCCESS=>success, DDI_FAILURE=>error
 */
static int
oce_wqb_ctor(oce_wq_bdesc_t *wqbd, struct oce_wq *wq, size_t size, int flags)
{
	struct oce_dev *dev;

	dev = wq->parent;
	wqbd->wqb = oce_alloc_dma_buffer(dev, size, &oce_tx_dma_buf_attr,
	    flags);
	if (wqbd->wqb == NULL) {
		return (DDI_FAILURE);
	}
	wqbd->frag_addr.dw.addr_lo = ADDR_LO(wqbd->wqb->addr);
	wqbd->frag_addr.dw.addr_hi = ADDR_HI(wqbd->wqb->addr);
	return (DDI_SUCCESS);
} /* oce_wqb_ctor */
/*
 * WQ buffer destructor
 *
 * wq - pointer to WQ structure
 * wqbd - pointer to WQ buffer descriptor
 *
 * return none
 */
static void
oce_wqb_dtor(struct oce_wq *wq, oce_wq_bdesc_t *wqbd)
{
	oce_free_dma_buffer(wq->parent, wqbd->wqb);
} /* oce_wqb_dtor */
/*
 * function to alloc WQE buffer descriptor
 *
 * wq - pointer to WQ structure
 *
 * return pointer to WQE buffer descriptor
 */
static inline oce_wq_bdesc_t *
oce_wqb_alloc(struct oce_wq *wq)
{
	return (OCE_LIST_REM_HEAD(&wq->wq_buf_list));
} /* oce_wqb_alloc */
/*
 * function to free WQE buffer descriptor
 *
 * wq - pointer to WQ structure
 * wqbd - pointer to WQ buffer descriptor
 *
 * return none
 */
static void
oce_wqb_free(struct oce_wq *wq, oce_wq_bdesc_t *wqbd)
{
	OCE_LIST_INSERT_TAIL(&wq->wq_buf_list, wqbd);
} /* oce_wqb_free */
/*
 * function to allocate WQE mapping descriptor
 *
 * wq - pointer to WQ structure
 *
 * return pointer to WQE mapping descriptor
 */
static inline oce_wq_mdesc_t *
oce_wqm_alloc(struct oce_wq *wq)
{
	return (OCE_LIST_REM_HEAD(&wq->wq_mdesc_list));
} /* oce_wqm_alloc */
/*
 * function to insert WQE mapping descriptor to the list
 *
 * wq - pointer to WQ structure
 * wqmd - Pointer to WQ mapping descriptor
 *
 * return none
 */
static void
oce_wqm_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
{
	OCE_LIST_INSERT_TAIL(&wq->wq_mdesc_list, wqmd);
} /* oce_wqm_free */
/*
 * function to free WQE mapping descriptor
 *
 * wq - pointer to WQ structure
 * wqmd - Pointer to WQ mapping descriptor
 *
 * return none
 */
static void
oce_wqmd_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
{
	if (wqmd == NULL) {
		return;
	}
	(void) ddi_dma_unbind_handle(wqmd->dma_handle);
	oce_wqm_free(wq, wqmd);
} /* oce_wqmd_free */
/*
 * WQED kmem_cache constructor
 *
 * buf - pointer to WQE descriptor
 *
 * return DDI_SUCCESS
 */
int
oce_wqe_desc_ctor(void *buf, void *arg, int kmflags)
{
	_NOTE(ARGUNUSED(buf));
	_NOTE(ARGUNUSED(arg));
	_NOTE(ARGUNUSED(kmflags));

	return (DDI_SUCCESS);
} /* oce_wqe_desc_ctor */
/*
 * WQED kmem_cache destructor
 *
 * buf - pointer to WQE descriptor
 *
 * return none
 */
void
oce_wqe_desc_dtor(void *buf, void *arg)
{
	_NOTE(ARGUNUSED(buf));
	_NOTE(ARGUNUSED(arg));
} /* oce_wqe_desc_dtor */
/*
 * function to choose a WQ given a mblk depending on priority, flowID etc.
 *
 * dev - software handle to device
 * mp - the mblk to send
 *
 * return pointer to the WQ selected
 */
static uint8_t oce_tx_hash_policy = 0x4;
struct oce_wq *
oce_get_wq(struct oce_dev *dev, mblk_t *mp)
{
	struct oce_wq *wq;
	int qidx = 0;

	if (dev->nwqs > 1) {
		qidx = mac_pkt_hash(DL_ETHER, mp, oce_tx_hash_policy, B_TRUE);
		qidx = qidx % dev->nwqs;
	} else {
		qidx = 0;
	}

	wq = dev->wq[qidx];
	/* for the time being hardcode */
	return (wq);
} /* oce_get_wq */
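/*
 * Note: hash policy 0x4 should correspond to L4 (port based) hashing in
 * mac_pkt_hash(), so all packets of a given flow are steered to the same
 * WQ and per-flow ordering is preserved across the dev->nwqs queues.
 */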
/*
 * function to populate the WQ ring with the WQEs of a descriptor
 *
 * wq - pointer to WQ structure
 * wqed - pointer to WQ entry descriptor
 *
 * return none
 */
#pragma inline(oce_fill_ring_descs)
static void
oce_fill_ring_descs(struct oce_wq *wq, oce_wqe_desc_t *wqed)
{
	struct oce_nic_frag_wqe *wqe;
	int i;

	/* Copy the pre-created WQE descs to the ring descs */
	for (i = 0; i < wqed->wqe_cnt; i++) {
		wqe = RING_GET_PRODUCER_ITEM_VA(wq->ring,
		    struct oce_nic_frag_wqe);

		bcopy(&wqed->frag[i], wqe, NIC_WQE_SIZE);
		RING_PUT(wq->ring, 1);
	}
} /* oce_fill_ring_descs */
/*
 * function to copy the packet to a preallocated Tx buffer
 *
 * wq - pointer to WQ structure
 * wqed - Pointer to WQE descriptor
 * mp - Pointer to packet chain
 * pkt_len - Size of the packet
 *
 * return 0=>success, error code otherwise
 */
static int
oce_bcopy_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
    uint32_t pkt_len)
{
	oce_wq_bdesc_t *wqbd;
	caddr_t buf_va;
	struct oce_dev *dev = wq->parent;
	int len = 0;

	wqbd = oce_wqb_alloc(wq);
	if (wqbd == NULL) {
		atomic_inc_32(&dev->tx_noxmtbuf);
		oce_log(dev, CE_WARN, MOD_TX, "%s",
		    "wqb pool empty");
		return (ENOMEM);
	}

	/* create a fragment wqe for the packet */
	wqed->frag[wqed->frag_idx].u0.s.frag_pa_hi = wqbd->frag_addr.dw.addr_hi;
	wqed->frag[wqed->frag_idx].u0.s.frag_pa_lo = wqbd->frag_addr.dw.addr_lo;
	buf_va = DBUF_VA(wqbd->wqb);

	/* copy pkt into buffer */
	for (len = 0; mp != NULL && len < pkt_len; mp = mp->b_cont) {
		bcopy(mp->b_rptr, buf_va, MBLKL(mp));
		buf_va += MBLKL(mp);
		len += MBLKL(mp);
	}

	(void) ddi_dma_sync(DBUF_DHDL(wqbd->wqb), 0, pkt_len,
	    DDI_DMA_SYNC_FORDEV);

	if (oce_fm_check_dma_handle(dev, DBUF_DHDL(wqbd->wqb))) {
		ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
		/* Free the buffer */
		oce_wqb_free(wq, wqbd);
		return (EIO);
	}
	wqed->frag[wqed->frag_idx].u0.s.frag_len = pkt_len;
	wqed->hdesc[wqed->nhdl].hdl = (void *)(wqbd);
	wqed->hdesc[wqed->nhdl].type = COPY_WQE;

	wqed->frag_cnt++;
	wqed->frag_idx++;
	wqed->nhdl++;
	return (0);
} /* oce_bcopy_wqe */
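/*
 * Note: the bcopy path trades one copy into a preallocated, premapped Tx
 * buffer for the cost of a DMA bind/unbind cycle. oce_send_packet() sends
 * fragments shorter than dev->tx_bcopy_limit through this path and DMA-maps
 * the larger ones via oce_map_wqe().
 */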
/*
 * function to DMA-map a packet fragment on the fly for transmit
 *
 * wq - pointer to WQ structure
 * wqed - Pointer to WQE descriptor
 * mp - Pointer to packet chain
 * pkt_len - Size of the fragment to map
 *
 * return 0=>success, error code otherwise
 */
static int
oce_map_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
    uint32_t pkt_len)
{
	ddi_dma_cookie_t cookie;
	oce_wq_mdesc_t *wqmd;
	uint32_t ncookies;
	int ret;
	struct oce_dev *dev = wq->parent;

	wqmd = oce_wqm_alloc(wq);
	if (wqmd == NULL) {
		oce_log(dev, CE_WARN, MOD_TX, "%s",
		    "wqm pool empty");
		return (ENOMEM);
	}

	ret = ddi_dma_addr_bind_handle(wqmd->dma_handle,
	    (struct as *)0, (caddr_t)mp->b_rptr,
	    pkt_len, DDI_DMA_WRITE | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		oce_log(dev, CE_WARN, MOD_TX, "MAP FAILED %d",
		    ret);
		/* free the last one */
		oce_wqm_free(wq, wqmd);
		return (ENOMEM);
	}
	do {
		wqed->frag[wqed->frag_idx].u0.s.frag_pa_hi =
		    ADDR_HI(cookie.dmac_laddress);
		wqed->frag[wqed->frag_idx].u0.s.frag_pa_lo =
		    ADDR_LO(cookie.dmac_laddress);
		wqed->frag[wqed->frag_idx].u0.s.frag_len =
		    (uint32_t)cookie.dmac_size;
		wqed->frag_cnt++;
		wqed->frag_idx++;
		if (--ncookies > 0)
			ddi_dma_nextcookie(wqmd->dma_handle,
			    &cookie);
		else
			break;
	} while (ncookies > 0);

	wqed->hdesc[wqed->nhdl].hdl = (void *)wqmd;
	wqed->hdesc[wqed->nhdl].type = MAPPED_WQE;
	wqed->nhdl++;
	return (0);
} /* oce_map_wqe */
/*
 * function to process Tx completions and reclaim WQEs
 *
 * wq - pointer to WQ structure
 * rearm - rearm the CQ when arming completions
 *
 * return number of CQEs processed
 */
static inline int
oce_process_tx_compl(struct oce_wq *wq, boolean_t rearm)
{
	struct oce_nic_tx_cqe *cqe;
	uint16_t num_cqe = 0;
	struct oce_cq *cq;
	oce_wqe_desc_t *wqed;
	int wqe_freed = 0;
	struct oce_dev *dev;

	cq = wq->cq;
	dev = wq->parent;
	(void) ddi_dma_sync(cq->ring->dbuf->dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORKERNEL);

	mutex_enter(&wq->txc_lock);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
	while (WQ_CQE_VALID(cqe)) {

		DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_tx_cqe));

		/* update stats */
		if (cqe->u0.s.status != 0) {
			atomic_inc_32(&dev->tx_errors);
		}

		/* complete the WQEs */
		wqed = OCE_LIST_REM_HEAD(&wq->wqe_desc_list);

		wqe_freed = wqed->wqe_cnt;
		oce_free_wqed(wq, wqed);
		RING_GET(wq->ring, wqe_freed);
		atomic_add_32(&wq->wq_free, wqe_freed);
		/* clear the valid bit and progress cqe */
		WQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct oce_nic_tx_cqe);
		num_cqe++;
	} /* for all valid CQEs */
	mutex_exit(&wq->txc_lock);
	if (num_cqe)
		oce_arm_cq(wq->parent, cq->cq_id, num_cqe, rearm);
	return (num_cqe);
} /* oce_process_tx_compl */
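/*
 * Note: a single CQE completes an entire packet, so each loop iteration
 * retires all wqe_cnt ring entries recorded in the corresponding WQE
 * descriptor before invalidating the CQE and advancing the consumer index.
 */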
/*
 * function to drain a TxCQ and process its CQEs
 *
 * arg - pointer to the WQ whose CQ is to be drained
 *
 * return the number of CQEs processed
 */
uint16_t
oce_drain_wq_cq(void *arg)
{
	uint16_t num_cqe = 0;
	struct oce_dev *dev;
	struct oce_wq *wq;

	wq = (struct oce_wq *)arg;
	dev = wq->parent;

	/* process CQEs until we reach one that is no longer valid */
	num_cqe = oce_process_tx_compl(wq, B_FALSE);

	/* check if we need to restart Tx */
	if (wq->resched && num_cqe) {
		wq->resched = B_FALSE;
		mac_tx_update(dev->mac_handle);
	}

	return (num_cqe);
} /* oce_drain_wq_cq */
/*
 * function to insert vtag to packet
 *
 * mp - mblk pointer
 * vlan_tag - tag to be inserted
 *
 * return none
 */
static void
oce_insert_vtag(mblk_t *mp, uint16_t vlan_tag)
{
	struct ether_vlan_header *evh;

	(void) memmove(mp->b_rptr - VTAG_SIZE,
	    mp->b_rptr, 2 * ETHERADDRL);
	mp->b_rptr -= VTAG_SIZE;
	evh = (struct ether_vlan_header *)(void *)mp->b_rptr;
	evh->ether_tpid = htons(VLAN_TPID);
	evh->ether_tci = htons(vlan_tag);
} /* oce_insert_vtag */
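/*
 * Note: the memmove above shifts the destination and source MAC addresses
 * (2 * ETHERADDRL bytes) down by VTAG_SIZE to open a four-byte gap for the
 * TPID/TCI pair. This assumes VTAG_SIZE bytes of headroom in the mblk,
 * which holds on the wqe_fail path since the tag being reinserted was
 * earlier stripped from this same message by oce_remove_vtag().
 */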
/*
 * function to strip vtag from packet
 *
 * mp - mblk pointer
 *
 * return none
 */
static void
oce_remove_vtag(mblk_t *mp)
{
	(void) memmove(mp->b_rptr + VTAG_SIZE, mp->b_rptr,
	    ETHERADDRL * 2);
	mp->b_rptr += VTAG_SIZE;
} /* oce_remove_vtag */
/*
 * function to xmit a single packet over the wire
 *
 * wq - pointer to WQ structure
 * mp - Pointer to packet chain
 *
 * return NULL if the packet was consumed, mp if Tx must be retried
 */
mblk_t *
oce_send_packet(struct oce_wq *wq, mblk_t *mp)
{
	struct oce_nic_hdr_wqe *wqeh;
	struct oce_dev *dev;
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	int32_t num_wqes;
	uint16_t etype;
	uint32_t ip_offset;
	uint32_t csum_flags = 0;
	boolean_t use_copy = B_FALSE;
	boolean_t tagged = B_FALSE;
	uint16_t vlan_tag;
	uint32_t reg_value = 0;
	oce_wqe_desc_t *wqed = NULL;
	mblk_t *nmp = NULL;
	mblk_t *tmp = NULL;
	uint32_t pkt_len = 0;
	int num_mblks = 0;
	int ret = 0;
	uint32_t mss = 0;
	uint32_t flags = 0;
	int len = 0;

	/* retrieve the adap priv struct ptr */
	dev = wq->parent;

	/* check if we have enough free slots */
	if (wq->wq_free < dev->tx_reclaim_threshold) {
		(void) oce_process_tx_compl(wq, B_FALSE);
	}
	if (wq->wq_free < OCE_MAX_TX_HDL) {
		return (mp);
	}

	/* check if we should copy */
	for (tmp = mp; tmp != NULL; tmp = tmp->b_cont) {
		pkt_len += MBLKL(tmp);
		num_mblks++;
	}

	if (pkt_len == 0 || num_mblks == 0) {
		freemsg(mp);
		return (NULL);
	}

	/* retrieve LSO information */
	mac_lso_get(mp, &mss, &flags);

	/* get the offload flags */
	mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &csum_flags);

	/* restrict the mapped segment to what we support */
	if (num_mblks > OCE_MAX_TX_HDL) {
		nmp = msgpullup(mp, -1);
		if (nmp == NULL) {
			atomic_inc_32(&wq->pkt_drops);
			freemsg(mp);
			return (NULL);
		}
		/* Reset it to new collapsed mp */
		freemsg(mp);
		mp = nmp;
	}

	/* Get the packet descriptor for Tx */
	wqed = kmem_cache_alloc(wq->wqed_cache, KM_NOSLEEP);
	if (wqed == NULL) {
		atomic_inc_32(&wq->pkt_drops);
		freemsg(mp);
		return (NULL);
	}
	eh = (struct ether_header *)(void *)mp->b_rptr;
	if (ntohs(eh->ether_type) == VLAN_TPID) {
		evh = (struct ether_vlan_header *)(void *)mp->b_rptr;
		tagged = B_TRUE;
		etype = ntohs(evh->ether_type);
		ip_offset = sizeof (struct ether_vlan_header);
		pkt_len -= VTAG_SIZE;
		vlan_tag = ntohs(evh->ether_tci);
		oce_remove_vtag(mp);
	} else {
		etype = ntohs(eh->ether_type);
		ip_offset = sizeof (struct ether_header);
	}

	/* Save the WQ pointer */
	wqed->wq = wq;
	wqed->frag_idx = 1; /* index zero is always header */
	wqed->frag_cnt = 0;
	wqed->nhdl = 0;
	wqed->mp = NULL;
	OCE_LIST_LINK_INIT(&wqed->link);

	/* If entire packet is less than the copy limit just do copy */
	if (pkt_len < dev->tx_bcopy_limit) {
		use_copy = B_TRUE;
		ret = oce_bcopy_wqe(wq, wqed, mp, pkt_len);
	} else {
		/* copy or dma map the individual fragments */
		for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) {
			len = MBLKL(nmp);
			if (len == 0) {
				continue;
			}
			if (len < dev->tx_bcopy_limit) {
				ret = oce_bcopy_wqe(wq, wqed, nmp, len);
			} else {
				ret = oce_map_wqe(wq, wqed, nmp, len);
			}
			if (ret != 0)
				break;
		}
	}

	/*
	 * Any failure other than insufficient Q entries
	 * drops the packet
	 */
	if (ret != 0) {
		oce_free_wqed(wq, wqed);
		atomic_inc_32(&wq->pkt_drops);
		freemsg(mp);
		return (NULL);
	}

	wqeh = (struct oce_nic_hdr_wqe *)&wqed->frag[0];
	bzero(wqeh, sizeof (struct oce_nic_hdr_wqe));

	/* fill rest of wqe header fields based on packet */
	if (flags & HW_LSO) {
		wqeh->u0.s.lso = B_TRUE;
		wqeh->u0.s.lso_mss = mss;
	}
	if (csum_flags & HCK_FULLCKSUM) {
		uint8_t *proto;

		if (etype == ETHERTYPE_IP) {
			proto = (uint8_t *)(void *)
			    (mp->b_rptr + ip_offset);
			if (proto[9] == 6)
				/* IPPROTO_TCP */
				wqeh->u0.s.tcpcs = B_TRUE;
			else if (proto[9] == 17)
				/* IPPROTO_UDP */
				wqeh->u0.s.udpcs = B_TRUE;
		}
	}

	if (csum_flags & HCK_IPV4_HDRCKSUM)
		wqeh->u0.s.ipcs = B_TRUE;
	if (tagged) {
		wqeh->u0.s.vlan = B_TRUE;
		wqeh->u0.s.vlan_tag = vlan_tag;
	}

	wqeh->u0.s.complete = B_TRUE;
	wqeh->u0.s.event = B_TRUE;
	wqeh->u0.s.crc = B_TRUE;
	wqeh->u0.s.total_length = pkt_len;

	num_wqes = wqed->frag_cnt + 1;

	/* h/w expects even no. of WQEs */
	if (num_wqes & 0x1) {
		bzero(&wqed->frag[num_wqes], sizeof (struct oce_nic_frag_wqe));
		num_wqes++;
	}
	wqed->wqe_cnt = (uint16_t)num_wqes;
	wqeh->u0.s.num_wqe = num_wqes;
	DW_SWAP(u32ptr(&wqed->frag[0]), (wqed->wqe_cnt * NIC_WQE_SIZE));

	mutex_enter(&wq->tx_lock);
	if (num_wqes > wq->wq_free) {
		atomic_inc_32(&wq->tx_deferd);
		mutex_exit(&wq->tx_lock);
		goto wqe_fail;
	}
	atomic_add_32(&wq->wq_free, -num_wqes);

	/* fill the wq for adapter */
	oce_fill_ring_descs(wq, wqed);

	/* Set the mp pointer in the wqe descriptor */
	if (use_copy == B_FALSE) {
		wqed->mp = mp;
	}
	/* Add the packet desc to list to be retrieved during cmpl */
	OCE_LIST_INSERT_TAIL(&wq->wqe_desc_list, wqed);
	(void) ddi_dma_sync(wq->ring->dbuf->dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/* ring tx doorbell: upper half is the WQE count, lower the WQ id */
	reg_value = (num_wqes << 16) | wq->wq_id;
	OCE_DB_WRITE32(dev, PD_TXULP_DB, reg_value);
	mutex_exit(&wq->tx_lock);
	if (oce_fm_check_acc_handle(dev, dev->db_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
	}

	/* free mp if copied or packet chain collapsed */
	if (use_copy == B_TRUE) {
		freemsg(mp);
	}
	return (NULL);

wqe_fail:

	if (tagged) {
		oce_insert_vtag(mp, vlan_tag);
	}
	oce_free_wqed(wq, wqed);
	return (mp);
} /* oce_send_packet */
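/*
 * Note: the return convention follows GLDv3 transmit semantics; returning
 * NULL means the packet was consumed (sent or dropped), while returning mp
 * tells the MAC layer the queue is out of resources and the same message
 * should be retried after a later mac_tx_update().
 */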
/*
 * function to free the WQE descriptor
 *
 * wq - pointer to WQ structure
 * wqed - Pointer to WQE descriptor
 *
 * return none
 */
#pragma inline(oce_free_wqed)
static void
oce_free_wqed(struct oce_wq *wq, oce_wqe_desc_t *wqed)
{
	int i = 0;

	if (wqed == NULL) {
		return;
	}

	for (i = 0; i < wqed->nhdl; i++) {
		if (wqed->hdesc[i].type == COPY_WQE) {
			oce_wqb_free(wq, wqed->hdesc[i].hdl);
		} else if (wqed->hdesc[i].type == MAPPED_WQE) {
			oce_wqmd_free(wq, wqed->hdesc[i].hdl);
		}
	}
	if (wqed->mp)
		freemsg(wqed->mp);
	kmem_cache_free(wq->wqed_cache, wqed);
} /* oce_free_wqed */
/*
 * function to start the WQ
 *
 * wq - pointer to WQ structure
 *
 * return DDI_SUCCESS
 */
int
oce_start_wq(struct oce_wq *wq)
{
	_NOTE(ARGUNUSED(wq));
	return (DDI_SUCCESS);
} /* oce_start_wq */
/*
 * function to stop the WQ
 *
 * wq - pointer to WQ structure
 *
 * return none
 */
void
oce_clean_wq(struct oce_wq *wq)
{
	oce_wqe_desc_t *wqed;
	int ti;

	/* Wait for already posted Tx to complete */
	for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {
		(void) oce_process_tx_compl(wq, B_FALSE);
		OCE_MSDELAY(1);
	}

	/* Free the remaining descriptors */
	while ((wqed = OCE_LIST_REM_HEAD(&wq->wqe_desc_list)) != NULL) {
		atomic_add_32(&wq->wq_free, wqed->wqe_cnt);
		oce_free_wqed(wq, wqed);
	}
	oce_drain_eq(wq->cq->eq);
} /* oce_clean_wq */
/*
 * function to set the tx mapping handle fma attr
 *
 * fm_caps - capability flags
 *
 * return none
 */
void
oce_set_tx_map_dma_fma_flags(int fm_caps)
{
	if (fm_caps == DDI_FM_NOT_CAPABLE) {
		return;
	}

	if (DDI_FM_DMA_ERR_CAP(fm_caps)) {
		tx_map_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
	} else {
		tx_map_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
	}
} /* oce_set_tx_map_dma_fma_flags */