/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2014 QLogic Corporation
 * The contents of this file are subject to the terms of the
 * QLogic End User License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
 * QLogic_End_User_Software_License.txt
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
 */
#include "bnxe.h"

ddi_dma_attr_t bnxeTxDmaAttrib =
{
    DMA_ATTR_V0,         /* dma_attr_version */
    0,                   /* dma_attr_addr_lo */
    0xffffffffffffffff,  /* dma_attr_addr_hi */
    0xffffffffffffffff,  /* dma_attr_count_max */
    BNXE_DMA_ALIGNMENT,  /* dma_attr_align */
    0xffffffff,          /* dma_attr_burstsizes */
    1,                   /* dma_attr_minxfer */
    0xffffffffffffffff,  /* dma_attr_maxxfer */
    0xffffffffffffffff,  /* dma_attr_seg */
    BNXE_MAX_DMA_SGLLEN, /* dma_attr_sgllen */
    1,                   /* dma_attr_granular */
    0,                   /* dma_attr_flags */
};
ddi_dma_attr_t bnxeTxCbDmaAttrib =
{
    DMA_ATTR_V0,         /* dma_attr_version */
    0,                   /* dma_attr_addr_lo */
    0xffffffffffffffff,  /* dma_attr_addr_hi */
    0xffffffffffffffff,  /* dma_attr_count_max */
    BNXE_DMA_ALIGNMENT,  /* dma_attr_align */
    0xffffffff,          /* dma_attr_burstsizes */
    1,                   /* dma_attr_minxfer */
    0xffffffffffffffff,  /* dma_attr_maxxfer */
    0xffffffffffffffff,  /* dma_attr_seg */
    1,                   /* dma_attr_sgllen */
    1,                   /* dma_attr_granular */
    0,                   /* dma_attr_flags */
};
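
/*
 * Note (added for clarity, not in the original source): bnxeTxCbDmaAttrib
 * differs from bnxeTxDmaAttrib above only in dma_attr_sgllen (1 instead of
 * BNXE_MAX_DMA_SGLLEN), so each per-packet copy buffer is bound as a single
 * contiguous DMA cookie while packet mblk data may scatter across many.
 */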
static um_txpacket_t * BnxeTxPktAlloc(um_device_t * pUM, size_t size);
static inline void BnxeTxPktUnmap(um_txpacket_t * pTxPkt)
{
    int i;

    for (i = 0; i < pTxPkt->num_handles; i++)
    {
        ddi_dma_unbind_handle(pTxPkt->dmaHandles[i]);
    }

    pTxPkt->num_handles = 0;
}
static void BnxeTxPktsFree(um_txpacket_t * pTxPkt)
{
    int i;

    if (pTxPkt->num_handles > 0)
    {
        BnxeTxPktUnmap(pTxPkt);
    }

    if (pTxPkt->pMblk != NULL)
    {
        freemsg(pTxPkt->pMblk);
    }

    for (i = 0; i < BNXE_MAX_DMA_HANDLES_PER_PKT; i++)
    {
        ddi_dma_free_handle(&pTxPkt->dmaHandles[i]);
    }

    pTxPkt->pMblk         = NULL;
    pTxPkt->num_handles   = 0;
    pTxPkt->frag_list.cnt = 0;

    ddi_dma_unbind_handle(pTxPkt->cbDmaHandle);
    ddi_dma_mem_free(&pTxPkt->cbDmaAccHandle);
    ddi_dma_free_handle(&pTxPkt->cbDmaHandle);

    kmem_free(pTxPkt, sizeof(um_txpacket_t));
}
static void BnxeTxPktsFreeList(s_list_t * pPktList)
{
    um_txpacket_t * pTxPkt;

    while (!s_list_is_empty(pPktList))
    {
        pTxPkt = (um_txpacket_t *)s_list_pop_head(pPktList);
        BnxeTxPktsFree(pTxPkt);
    }
}
/*
 * Free the mblk and all frag mappings used by each packet in the list
 * and then put the entire list on the free queue for immediate use.
 */
void BnxeTxPktsReclaim(um_device_t * pUM,
                       int           idx,
                       s_list_t *    pPktList)
{
    um_txpacket_t * pTxPkt;

    if (s_list_entry_cnt(pPktList) == 0)
    {
        return;
    }

    for (pTxPkt = (um_txpacket_t *)s_list_peek_head(pPktList);
         pTxPkt != NULL;
         pTxPkt = (um_txpacket_t *)s_list_next_entry(&pTxPkt->lm_pkt.link))
    {
        if (pTxPkt->num_handles > 0)
        {
            BnxeTxPktUnmap(pTxPkt);
        }

        if (pTxPkt->pMblk != NULL)
        {
            freemsg(pTxPkt->pMblk);
            pTxPkt->pMblk = NULL;
        }
    }

    BNXE_LOCK_ENTER_FREETX(pUM, idx);
    s_list_add_tail(&pUM->txq[idx].freeTxDescQ, pPktList);
    BNXE_LOCK_EXIT_FREETX(pUM, idx);
}
/* Must be called with TX lock held!!! */
static int BnxeTxSendWaitingPkt(um_device_t * pUM,
                                int           idx)
{
    TxQueue *       pTxQ = &pUM->txq[idx];
    lm_device_t *   pLM  = &pUM->lm_dev;
    lm_tx_chain_t * pLmTxChain;
    um_txpacket_t * pTxPkt;
    lm_status_t     rc;

    pLmTxChain = &pLM->tx_info.chain[idx];

    while (s_list_entry_cnt(&pTxQ->waitTxDescQ))
    {
        pTxPkt = (um_txpacket_t *)s_list_peek_head(&pTxQ->waitTxDescQ);

        if (pTxPkt->frag_list.cnt + 2 > pLmTxChain->bd_chain.bd_left)
        {
            return BNXE_TX_DEFERPKT;
        }

        pTxPkt = (um_txpacket_t *)s_list_pop_head(&pTxQ->waitTxDescQ);

        rc = lm_send_packet(pLM, idx, &pTxPkt->lm_pkt, &pTxPkt->frag_list);

        if (pUM->fmCapabilities &&
            BnxeCheckAccHandle(pLM->vars.reg_handle[BAR_0]) != DDI_FM_OK)
        {
            ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED);
        }

        if (rc != LM_STATUS_SUCCESS)
        {
            /*
             * Send failed (probably not enough BDs available)...
             * Put the packet back at the head of the wait queue.
             */
            s_list_push_head(&pTxQ->waitTxDescQ, &pTxPkt->lm_pkt.link);
            return BNXE_TX_DEFERPKT;
        }
    }

    return BNXE_TX_GOODXMIT;
}
void BnxeTxRingProcess(um_device_t * pUM,
                       int           idx)
{
    TxQueue *       pTxQ = &pUM->txq[idx];
    lm_device_t *   pLM  = &pUM->lm_dev;
    lm_tx_chain_t * pLmTxChain;
    s_list_t        tmpList;
    u32_t           pktsTxed;
    int             rc;

    s_list_clear(&tmpList);

    BNXE_LOCK_ENTER_TX(pUM, idx);

    pktsTxed = lm_get_packets_sent(&pUM->lm_dev, idx, &tmpList);

    if (pUM->fmCapabilities &&
        BnxeCheckAccHandle(pUM->lm_dev.vars.reg_handle[BAR_0]) != DDI_FM_OK)
    {
        ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED);
    }

    if ((pktsTxed + s_list_entry_cnt(&pTxQ->sentTxQ)) >=
        pUM->devParams.maxTxFree)
    {
        s_list_add_tail(&tmpList, &pTxQ->sentTxQ);
        s_list_clear(&pTxQ->sentTxQ);
    }
    else
    {
        s_list_add_tail(&pTxQ->sentTxQ, &tmpList);
        s_list_clear(&tmpList);
    }

    BNXE_LOCK_EXIT_TX(pUM, idx);

    if (s_list_entry_cnt(&tmpList))
    {
        BnxeTxPktsReclaim(pUM, idx, &tmpList);
    }

    if (pTxQ->noTxCredits == 0)
    {
        /* no need to notify the stack */
        return;
    }

    pLmTxChain = &pUM->lm_dev.tx_info.chain[idx];

    if (pTxQ->noTxCredits & BNXE_TX_RESOURCES_NO_CREDIT)
    {
        BNXE_LOCK_ENTER_TX(pUM, idx);
        rc = BnxeTxSendWaitingPkt(pUM, idx);
        BNXE_LOCK_EXIT_TX(pUM, idx);

        if ((rc == BNXE_TX_GOODXMIT) &&
            (pLmTxChain->bd_chain.bd_left >= BNXE_MAX_DMA_FRAGS_PER_PKT))
        {
            atomic_and_32(&pTxQ->noTxCredits, ~BNXE_TX_RESOURCES_NO_CREDIT);
        }
    }

    if ((pTxQ->noTxCredits & BNXE_TX_RESOURCES_NO_DESC) &&
        (s_list_entry_cnt(&pTxQ->freeTxDescQ) > pTxQ->thresh_pdwm))
    {
        atomic_and_32(&pTxQ->noTxCredits, ~BNXE_TX_RESOURCES_NO_DESC);
    }

    if (pTxQ->noTxCredits == 0)
    {
        if (idx == FCOE_CID(pLM))
        {
            BnxeLogInfo(pUM, "FCoE tx credit ok, no upcall!");
        }
        else
        {
            /* notify the stack that tx resources are now available */
#if defined(BNXE_RINGS) && (defined(__S11) || defined(__S12))
            mac_tx_ring_update(pUM->pMac, pTxQ->ringHandle);
#else
            mac_tx_update(pUM->pMac);
#endif
        }
    }
}
static inline int BnxeTxPktMapFrag(um_device_t *   pUM,
                                   um_txpacket_t * pTxPkt,
                                   mblk_t *        pMblk)
{
    ddi_dma_handle_t dmaHandle;
    ddi_dma_cookie_t cookie;
    lm_frag_t *      pFrag;
    boolean_t        partial;
    u32_t            bindLen;
    u32_t            count;
    int              rc;
    int              i;

    if (pTxPkt->num_handles == BNXE_MAX_DMA_HANDLES_PER_PKT)
    {
        return BNXE_TX_RESOURCES_NO_OS_DMA_RES;
    }

    if (pTxPkt->frag_list.cnt >= BNXE_MAX_DMA_FRAGS_PER_PKT)
    {
        return BNXE_TX_RESOURCES_TOO_MANY_FRAGS;
    }

    dmaHandle = pTxPkt->dmaHandles[pTxPkt->num_handles];

    if ((rc = ddi_dma_addr_bind_handle(dmaHandle,
                                       NULL,
                                       (caddr_t)pMblk->b_rptr,
                                       (pMblk->b_wptr - pMblk->b_rptr),
                                       (DDI_DMA_WRITE | DDI_DMA_STREAMING),
                                       DDI_DMA_DONTWAIT,
                                       NULL,
                                       &cookie,
                                       &count)) != DDI_DMA_MAPPED)
    {
        BnxeLogWarn(pUM, "Failed to bind DMA address for tx packet (%d)", rc);
        return BNXE_TX_RESOURCES_NO_OS_DMA_RES;
    }

    /*
     * ddi_dma_addr_bind_handle() correctly returns an error if the physical
     * fragment count exceeds the maximum fragment count specified in the
     * ddi_dma_attrib structure for the current pMblk. However, a packet can
     * span multiple mblk's. The purpose of the check below is to make sure we
     * do not overflow our fragment count limit based on what has already been
     * mapped from this packet.
     */
    partial = ((pTxPkt->frag_list.cnt + count) >
               (pMblk->b_cont ? BNXE_MAX_DMA_FRAGS_PER_PKT - 1
                              : BNXE_MAX_DMA_FRAGS_PER_PKT));
    if (partial)
    {
        /*
         * Going to try a partial dma so (re)set count to the remaining number
         * of dma fragments that are available leaving one fragment at the end.
         */
        count = (BNXE_MAX_DMA_FRAGS_PER_PKT - 1 - pTxPkt->frag_list.cnt);

        if (count == 0)
        {
            /*
             * No more dma fragments are available. This fragment was not
             * mapped and will be copied into the copy buffer along with the
             * rest of the packet data.
             */
            ddi_dma_unbind_handle(dmaHandle);
            return BNXE_TX_RESOURCES_TOO_MANY_FRAGS;
        }
    }

    pFrag = &pTxPkt->frag_list.frag_arr[pTxPkt->frag_list.cnt];
    pTxPkt->frag_list.cnt += count;

    /* map "count" dma fragments */

    bindLen = 0;

    for (i = 0; i < (count - 1); i++)
    {
        pFrag->addr.as_u64 = cookie.dmac_laddress;
        bindLen += pFrag->size = cookie.dmac_size;

        pFrag++;

        ddi_dma_nextcookie(dmaHandle, &cookie);
    }

    pFrag->addr.as_u64 = cookie.dmac_laddress;
    bindLen += pFrag->size = cookie.dmac_size;

    pTxPkt->num_handles++;

    if (partial)
    {
        /*
         * Move the mblk's read pointer past the data that was bound to a DMA
         * fragment. Any remaining data will get copied into the copy buffer.
         */
        pMblk->b_rptr += bindLen;
        return BNXE_TX_RESOURCES_TOO_MANY_FRAGS;
    }

    return 0;
}
static int BnxeTxPktCopy(um_device_t *   pUM,
                         TxQueue *       pTxQ,
                         um_txpacket_t * pTxPkt)
{
    lm_frag_t * pCopyFrag = NULL;
    size_t      copySize  = 0;
    size_t      pktLen    = 0;
    boolean_t   tryMap    = B_TRUE;
    mblk_t *    pMblk;
    size_t      msgSize;
    caddr_t     pTmp;
    int         rc;

    /* Walk the chain to get the total pkt length... */
    for (pMblk = pTxPkt->pMblk; pMblk; pMblk = pMblk->b_cont)
    {
        pktLen += MBLKL(pMblk);
    }

    /*
     * If the packet length is under the tx copy threshold then copy
     * all the data into the copy buffer.
     */
    if (pktLen < pUM->devParams.txCopyThreshold)
    {
        ASSERT(pktLen <= pTxPkt->cbLength);

        pTmp = pTxPkt->pCbBuf;

        for (pMblk = pTxPkt->pMblk; pMblk; pMblk = pMblk->b_cont)
        {
            if ((msgSize = MBLKL(pMblk)) == 0)
            {
                continue;
            }

            bcopy(pMblk->b_rptr, pTmp, msgSize);
            pTmp += msgSize;
        }

        pCopyFrag = &pTxPkt->frag_list.frag_arr[0];
        pCopyFrag->addr.as_u64 = pTxPkt->cbPhysAddr.as_u64;
        pCopyFrag->size        = pktLen;
        pTxPkt->frag_list.cnt++;

        copySize = pktLen;

        goto _BnxeTxPktCopy_DMA_SYNC_COPY_BUFFER;
    }

    /* Try to DMA map all the blocks... */

    for (pMblk = pTxPkt->pMblk; pMblk; pMblk = pMblk->b_cont)
    {
        if ((msgSize = MBLKL(pMblk)) == 0)
        {
            continue;
        }

        if (tryMap)
        {
            if (BnxeTxPktMapFrag(pUM, pTxPkt, pMblk) == 0)
            {
                /*
                 * The fragment was successfully mapped now move on to the
                 * next one. Here we set pCopyFrag to NULL which represents
                 * a break of continuous data in the copy buffer. If the
                 * packet header was copied the first fragment points to the
                 * beginning of the copy buffer. Since this block was mapped
                 * any future blocks that have to be copied must be handled by
                 * a new fragment even though the fragment is pointed to the
                 * copied data in the copy buffer.
                 */
                pCopyFrag = NULL;
                continue;
            }
            else
            {
                /*
                 * The fragment was not mapped or was partially mapped. In
                 * either case we will no longer try to map the remaining
                 * blocks. All remaining packet data is copied.
                 */
                tryMap  = B_FALSE;
                msgSize = MBLKL(pMblk); /* new msgSize with partial binding */
            }
        }

        if ((copySize + msgSize) > pTxPkt->cbLength)
        {
            /* remaining packet is too large (length more than copy buffer) */
            BnxeTxPktUnmap(pTxPkt);
            pTxQ->txDiscards++;
            return -1;
        }

        ASSERT((copySize + msgSize) <= pTxPkt->cbLength);

        bcopy(pMblk->b_rptr, (pTxPkt->pCbBuf + copySize), msgSize);

        /*
         * If pCopyFrag is already specified then simply update the copy size.
         * If not then set pCopyFrag to the next available fragment.
         */
        if (pCopyFrag)
        {
            pCopyFrag->size += msgSize;
        }
        else
        {
            ASSERT((pTxPkt->frag_list.cnt + 1) <= BNXE_MAX_DMA_FRAGS_PER_PKT);
            pCopyFrag = &pTxPkt->frag_list.frag_arr[pTxPkt->frag_list.cnt++];
            pCopyFrag->size        = msgSize;
            pCopyFrag->addr.as_u64 = pTxPkt->cbPhysAddr.as_u64 + copySize;
        }

        /* update count of bytes in the copy buffer needed for DMA sync */
        copySize += msgSize;
    }

_BnxeTxPktCopy_DMA_SYNC_COPY_BUFFER:

    if (copySize)
    {
        /* DMA sync the copy buffer before sending */

        rc = ddi_dma_sync(pTxPkt->cbDmaHandle, 0, copySize,
                          DDI_DMA_SYNC_FORDEV);

        if (pUM->fmCapabilities &&
            BnxeCheckDmaHandle(pTxPkt->cbDmaHandle) != DDI_FM_OK)
        {
            ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED);
        }

        if (rc != DDI_SUCCESS)
        {
            BnxeLogWarn(pUM, "(%d) Failed to dma sync tx copy (%p / %d)",
                        rc, pTxPkt, copySize);
        }
    }

    if (pTxPkt->num_handles == 0)
    {
        freemsg(pTxPkt->pMblk);
        pTxPkt->pMblk = NULL;
    }

    return 0;
}
/* this code is derived from that shown in RFC 1071 Section 4.1 */
static inline u16_t BnxeCalcCksum(void * start,
                                  u32_t  len,
                                  u32_t  prev_sum)
{
    u16_t * pword;
    u32_t   sum = prev_sum;

    pword = (u16_t *)start;

    for ( ; len > 1; len -= 2, pword++)
    {
        sum += *pword;
    }

    /* add left-over byte, if any */
    if (len)
    {
        sum += (u16_t)(*((u8_t *)pword));
    }

    /* fold 32-bit sum to 16 bits */
    while (sum >> 16)
    {
        sum = ((sum & 0xffff) + (sum >> 16));
    }

    return ((u16_t)sum);
}
/*
 * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP checksums
 * and does not know anything about the UDP header and where the checksum field
 * is located. It only knows about TCP. Therefore we "lie" to the hardware for
 * outgoing UDP packets w/ checksum offload. Since the checksum field offset
 * for TCP is 16 bytes and for UDP it is 6 bytes we pass a pointer to the
 * hardware that is 10 bytes less than the start of the UDP header. This allows
 * the hardware to write the checksum in the correct spot. But the hardware
 * will compute a checksum which includes the last 10 bytes of the IP header.
 * To correct this we tweak the stack computed pseudo checksum by folding in
 * the calculation of the inverse checksum for those final 10 bytes of the IP
 * header. This allows the correct checksum to be computed by the hardware.
 */

#define TCP_CS_OFFSET 16
#define UDP_CS_OFFSET 6
#define UDP_TCP_CS_OFFSET_DIFF (TCP_CS_OFFSET - UDP_CS_OFFSET)
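
/*
 * Worked example (added, not part of the original source): with
 * TCP_CS_OFFSET 16 and UDP_CS_OFFSET 6, UDP_TCP_CS_OFFSET_DIFF is 10, so the
 * "TCP header" pointer handed to the hardware is (UDP header - 10 bytes),
 * i.e. it starts inside the IP header. The checksum the hardware then writes
 * at offset 16 from that pointer lands at (UDP header + 6), which is exactly
 * the real UDP checksum field.
 */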
static inline u16_t BnxeUdpPseudoCsum(um_device_t * pUM,
                                      u8_t *        pUdpHdr,
                                      u8_t *        pIpHdr,
                                      u32_t         ipHdrLen)
{
    u32_t sum32;
    u16_t sum16;
    u16_t pseudo_cs;

    ASSERT(ipHdrLen >= UDP_TCP_CS_OFFSET_DIFF);

    /* calc cksum on last UDP_TCP_CS_OFFSET_DIFF bytes of ip header */
    sum16 = BnxeCalcCksum(&pIpHdr[ipHdrLen - UDP_TCP_CS_OFFSET_DIFF],
                          UDP_TCP_CS_OFFSET_DIFF, 0);

    /* subtract the calculated cksum from the udp pseudo cksum */
    pseudo_cs = (*((u16_t *)&pUdpHdr[6]));

    sum32 = (pseudo_cs + sum16);

    /* fold 32-bit sum to 16 bits */
    while (sum32 >> 16)
    {
        sum32 = ((sum32 & 0xffff) + (sum32 >> 16));
    }

    return ntohs((u16_t)sum32);
}
static inline u16_t BnxeGetVlanTag(mblk_t * pMblk)
{
    ASSERT(MBLKL(pMblk) >= sizeof(struct ether_vlan_header));
    return GLD_VTAG_VID(ntohs(((struct ether_vlan_header *)pMblk->b_rptr)->ether_tci));
}
static inline int BnxeGetHdrInfo(um_device_t *   pUM,
                                 um_txpacket_t * pTxPkt)
{
    mblk_t * pMblk;
    size_t   msgSize;
    u32_t    csStart;
    u32_t    csStuff;
    u32_t    csFlags;
    u32_t    lso;
    u8_t *   pL2Hdr;
    u32_t    l2HdrLen;
    u8_t *   pL3Hdr;
    u32_t    l3HdrLen;
    u8_t *   pL4Hdr;
    u32_t    l4HdrLen;

    pMblk   = pTxPkt->pMblk;
    msgSize = MBLKL(pMblk);

    /* At least the MAC header... */
    if (msgSize < sizeof(struct ether_header))
    {
        BnxeLogWarn(pUM, "Invalid initial segment size in packet!");
        return -1;
    }

    ASSERT(msgSize >= sizeof(struct ether_header));

    mac_hcksum_get(pMblk, &csStart, &csStuff, NULL, NULL, &csFlags);

    lso = DB_LSOFLAGS(pMblk) & HW_LSO;

    /* get the Ethernet header */
    pL2Hdr = (u8_t *)pMblk->b_rptr;

    /* grab the destination mac addr */
    memcpy(pTxPkt->tx_info.dst_mac_addr, pL2Hdr, 6);

    if (lso)
    {
        pTxPkt->tx_info.flags |= LM_TX_FLAG_TCP_LSO_FRAME;

        pTxPkt->tx_info.lso_mss = (u16_t)DB_LSOMSS(pMblk);
    }
    else if (!csFlags)
    {
        /* no offload requested, just check for VLAN */

        if (((struct ether_header *)pMblk->b_rptr)->ether_type ==
            htons(ETHERTYPE_VLAN))
        {
            pTxPkt->tx_info.vlan_tag = BnxeGetVlanTag(pMblk);
            pTxPkt->tx_info.flags |= LM_TX_FLAG_VLAN_TAG_EXISTS;
        }

        return 0;
    }

    if (((struct ether_header *)pL2Hdr)->ether_type == htons(ETHERTYPE_VLAN))
    {
        l2HdrLen = sizeof(struct ether_vlan_header);

        pTxPkt->tx_info.vlan_tag = BnxeGetVlanTag(pMblk);
        pTxPkt->tx_info.flags |= LM_TX_FLAG_VLAN_TAG_EXISTS;
    }
    else
    {
        l2HdrLen = sizeof(struct ether_header);
    }

    if (csFlags & HCK_IPV4_HDRCKSUM)
    {
        pTxPkt->tx_info.flags |= LM_TX_FLAG_COMPUTE_IP_CKSUM;
    }

    if (csFlags & HCK_PARTIALCKSUM)
    {
        pTxPkt->tx_info.flags |= LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM;

        l3HdrLen = (csStart - l2HdrLen);
        l4HdrLen = (l2HdrLen + csStuff + sizeof(u16_t));

        /*
         * For TCP, here we ignore the urgent pointer and size of the
         * options. We'll get that info later.
         */
    }
    else
    {
        /* Solaris doesn't do LSO if there is an option in the IP header. */
        l3HdrLen = sizeof(struct ip);
        l4HdrLen = (l2HdrLen + l3HdrLen + sizeof(struct tcphdr));
    }

    if (msgSize >= l4HdrLen)
    {
        /* the header is in the first block */
        pL3Hdr = (pL2Hdr + l2HdrLen);
    }
    else if ((msgSize <= l2HdrLen) && pMblk->b_cont &&
             ((msgSize + MBLKL(pMblk->b_cont)) >= l4HdrLen))
    {
        /* the header is in the second block */
        pL3Hdr = pMblk->b_cont->b_rptr + (l2HdrLen - msgSize);
    }
    else
    {
        /* do a pullup to make sure headers are in the first block */

        if ((pMblk = msgpullup(pMblk, l4HdrLen)) == NULL)
        {
            return -1;
        }

        freemsg(pTxPkt->pMblk);
        pTxPkt->pMblk = pMblk;

        pL3Hdr = (pMblk->b_rptr + l2HdrLen);
    }

    /* must be IPv4 or IPv6 */
    ASSERT((pL3Hdr[0] & 0xf0) == 0x60 || (pL3Hdr[0] & 0xf0) == 0x40);

    if ((pL3Hdr[0] & 0xf0) == 0x60)
    {
        pTxPkt->tx_info.flags |= LM_TX_FLAG_IPV6_PACKET;
    }

    if (lso || ((csStuff - csStart) == TCP_CS_OFFSET))
    {
        /* get the TCP header */
        pL4Hdr   = (pL3Hdr + l3HdrLen);
        l4HdrLen = ((pL4Hdr[12] & 0xf0) >> 2);

        pTxPkt->tx_info.cs_any_offset     = 0;
        pTxPkt->tx_info.tcp_nonce_sum_bit = (pL4Hdr[12] & 0x1);
        pTxPkt->tx_info.tcp_pseudo_csum   =
            ntohs(*((u16_t *)&pL4Hdr[TCP_CS_OFFSET]));

        if (lso)
        {
            pTxPkt->tx_info.lso_ipid         = ntohs(*((u16_t *)&pL3Hdr[4]));
            pTxPkt->tx_info.lso_tcp_send_seq = ntohl(*((u32_t *)&pL4Hdr[4]));
            pTxPkt->tx_info.lso_tcp_flags    = pL4Hdr[13];
        }
    }
    else
    {
        ASSERT((csStuff - csStart) == UDP_CS_OFFSET);

        /* get the UDP header */
        pL4Hdr = pL3Hdr + l3HdrLen;

        l4HdrLen = sizeof(struct udphdr);

        pTxPkt->tx_info.cs_any_offset     = UDP_TCP_CS_OFFSET_DIFF;
        pTxPkt->tx_info.tcp_nonce_sum_bit = 0;
        pTxPkt->tx_info.tcp_pseudo_csum   =
            CHIP_IS_E1x(((lm_device_t *)pUM)) ?
                BnxeUdpPseudoCsum(pUM, pL4Hdr, pL3Hdr, l3HdrLen) :
                ntohs(*((u16_t *)&pL4Hdr[UDP_CS_OFFSET]));
    }

    pTxPkt->tx_info.lso_ip_hdr_len  = l3HdrLen;
    pTxPkt->tx_info.lso_tcp_hdr_len = l4HdrLen;

    return 0;
}
int BnxeTxSendMblk(um_device_t * pUM,
                   int           idx,
                   mblk_t *      pMblk,
                   u32_t         flags,
                   u16_t         vlan_tag)
{
    lm_device_t *   pLM  = &pUM->lm_dev;
    TxQueue *       pTxQ = &pUM->txq[idx];
    lm_tx_chain_t * pLmTxChain;
    um_txpacket_t * pTxPkt;
    s_list_t        tmpList;
    u32_t           numPkts;
    int             rc;

    BNXE_LOCK_ENTER_FREETX(pUM, idx);

    pTxPkt = (um_txpacket_t *)s_list_pop_head(&pTxQ->freeTxDescQ);

    if (pTxQ->txLowWater > s_list_entry_cnt(&pTxQ->freeTxDescQ))
    {
        pTxQ->txLowWater = s_list_entry_cnt(&pTxQ->freeTxDescQ);
    }

    BNXE_LOCK_EXIT_FREETX(pUM, idx);

    /* try to recycle if no more packet available */
    if (pTxPkt == NULL)
    {
        pTxQ->txRecycle++;

        s_list_clear(&tmpList);

        BNXE_LOCK_ENTER_TX(pUM, idx);
        numPkts = lm_get_packets_sent(pLM, idx, &tmpList);
        BNXE_LOCK_EXIT_TX(pUM, idx);

        if (pUM->fmCapabilities &&
            BnxeCheckAccHandle(pLM->vars.reg_handle[BAR_0]) != DDI_FM_OK)
        {
            ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED);
        }

        if (numPkts == 0)
        {
            atomic_or_32(&pTxQ->noTxCredits, BNXE_TX_RESOURCES_NO_DESC);

            return BNXE_TX_HDWRFULL;
        }

        /* steal the first packet from the list before reclaiming */

        pTxPkt = (um_txpacket_t *)s_list_pop_head(&tmpList);

        if (pTxPkt->num_handles)
        {
            BnxeTxPktUnmap(pTxPkt);
        }

        if (pTxPkt->pMblk != NULL)
        {
            freemsg(pTxPkt->pMblk);
            pTxPkt->pMblk = NULL;
        }

        BnxeTxPktsReclaim(pUM, idx, &tmpList);
    }

    pTxPkt->lm_pkt.link.next = NULL;

    pTxPkt->tx_info.flags    = 0;
    pTxPkt->tx_info.vlan_tag = 0;
    pTxPkt->frag_list.cnt    = 0;
    pTxPkt->pMblk            = pMblk;

    BnxeDumpPkt(pUM,
                (BNXE_FCOE(pUM) && (idx == FCOE_CID(&pUM->lm_dev))) ?
                    "-> FCoE L2 TX ->" : "-> L2 TX ->",
                pMblk, B_TRUE);

    if (idx == FCOE_CID(pLM))
    {
        if (flags & PRV_TX_VLAN_TAG)
        {
            pTxPkt->tx_info.vlan_tag = vlan_tag;
            pTxPkt->tx_info.flags |= LM_TX_FLAG_INSERT_VLAN_TAG;
        }
    }
    else if (BnxeGetHdrInfo(pUM, pTxPkt))
    {
        goto BnxeTxSendMblk_fail;
    }

    if (BnxeTxPktCopy(pUM, pTxQ, pTxPkt))
    {
        goto BnxeTxSendMblk_fail;
    }

    /* Now try to send the packet... */

    pLmTxChain = &pLM->tx_info.chain[idx];

    BNXE_LOCK_ENTER_TX(pUM, idx);

    /* Try to reclaim sent packets if available BDs is lower than threshold */
    if (pLmTxChain->bd_chain.bd_left < BNXE_MAX_DMA_FRAGS_PER_PKT + 2)
    {
        s_list_clear(&tmpList);

        numPkts = lm_get_packets_sent(pLM, idx, &tmpList);

        if (pUM->fmCapabilities &&
            BnxeCheckAccHandle(pLM->vars.reg_handle[BAR_0]) != DDI_FM_OK)
        {
            ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED);
        }

        if (numPkts)
        {
            BnxeTxPktsReclaim(pUM, idx, &tmpList);
        }
    }

    /*
     * If there are no packets currently waiting to be sent and there are
     * enough BDs available to satisfy this packet then send it now.
     */
    if (s_list_is_empty(&pTxQ->waitTxDescQ) &&
        (pLmTxChain->bd_chain.bd_left >= pTxPkt->frag_list.cnt + 2))
    {
        rc = lm_send_packet(pLM, idx, &pTxPkt->lm_pkt, &pTxPkt->frag_list);

        if (pUM->fmCapabilities &&
            BnxeCheckAccHandle(pLM->vars.reg_handle[BAR_0]) != DDI_FM_OK)
        {
            ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED);
        }

        if (pUM->fmCapabilities &&
            BnxeCheckAccHandle(pLM->vars.reg_handle[BAR_1]) != DDI_FM_OK)
        {
            ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED);
        }

        if (rc == LM_STATUS_SUCCESS)
        {
            /* send completed successfully */
            BNXE_LOCK_EXIT_TX(pUM, idx);
            return BNXE_TX_GOODXMIT;
        }

        /*
         * Send failed (probably not enough BDs available)...
         * Continue on with putting this packet on the wait queue.
         */
        pTxQ->txFailed++;
    }
    else
    {
        BnxeLogWarn(pUM, "WAIT TX DESCQ %lu %d %d",
                    s_list_entry_cnt(&pTxQ->waitTxDescQ),
                    pLmTxChain->bd_chain.bd_left, pTxPkt->frag_list.cnt);
    }

    /*
     * If we got here then there are other packets waiting to be sent or there
     * aren't enough BDs available. In either case put this packet at the end
     * of the waiting queue.
     */
    s_list_push_tail(&pTxQ->waitTxDescQ, &pTxPkt->lm_pkt.link);

    /*
     * If there appears to be a sufficient number of BDs available then make a
     * quick attempt to send as many waiting packets as possible.
     */
    if ((pLmTxChain->bd_chain.bd_left >= BNXE_MAX_DMA_FRAGS_PER_PKT) &&
        (BnxeTxSendWaitingPkt(pUM, idx) == BNXE_TX_GOODXMIT))
    {
        BNXE_LOCK_EXIT_TX(pUM, idx);
        return BNXE_TX_GOODXMIT;
    }

    /* Couldn't send anything! */
    atomic_or_32(&pTxQ->noTxCredits, BNXE_TX_RESOURCES_NO_CREDIT);

    BNXE_LOCK_EXIT_TX(pUM, idx);

    return BNXE_TX_DEFERPKT;

BnxeTxSendMblk_fail:

    pTxQ->txDiscards++;

    ASSERT(pTxPkt != NULL);

    if (pTxPkt->pMblk != NULL)
    {
        freemsg(pTxPkt->pMblk);
        pTxPkt->pMblk = NULL;
    }

    BNXE_LOCK_ENTER_FREETX(pUM, idx);
    s_list_push_tail(&pTxQ->freeTxDescQ, &pTxPkt->lm_pkt.link);
    BNXE_LOCK_EXIT_FREETX(pUM, idx);

    /*
     * Yes GOODXMIT since the mblk was freed here and this triggers the caller
     * to try and send the next packet in its chain.
     */
    return BNXE_TX_GOODXMIT;
}
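
/*
 * Summary note (added for clarity, not in the original source): BnxeTxSendMblk
 * returns BNXE_TX_GOODXMIT when the mblk was consumed (sent immediately,
 * drained from the wait queue, or dropped in the failure path) so the caller
 * should proceed to its next packet, BNXE_TX_DEFERPKT when the packet was
 * left on the wait queue for lack of BDs, and BNXE_TX_HDWRFULL when no free
 * descriptor could be obtained or recycled.
 */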
static void BnxeTxPktsAbortIdx(um_device_t * pUM,
                               int           idx)
{
    s_list_t tmpList;

    BNXE_LOCK_ENTER_TX(pUM, idx);
    lm_abort(&pUM->lm_dev, ABORT_OP_INDICATE_TX_CHAIN, idx);
    tmpList = pUM->txq[idx].waitTxDescQ;
    s_list_clear(&pUM->txq[idx].waitTxDescQ);
    BNXE_LOCK_EXIT_TX(pUM, idx);

    BnxeTxPktsReclaim(pUM, idx, &tmpList);
}


void BnxeTxPktsAbort(um_device_t * pUM,
                     int           cliIdx)
{
    int idx;

    switch (cliIdx)
    {
    case LM_CLI_IDX_FCOE:

        BnxeTxPktsAbortIdx(pUM, FCOE_CID(&pUM->lm_dev));
        break;

    case LM_CLI_IDX_NDIS:

        LM_FOREACH_TSS_IDX(&pUM->lm_dev, idx)
        {
            BnxeTxPktsAbortIdx(pUM, idx);
        }

        break;

    default:

        BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeTxPktsAbort (%d)", cliIdx);
        break;
    }
}
static um_txpacket_t * BnxeTxPktAlloc(um_device_t * pUM,
                                      size_t        size)
{
    um_txpacket_t *  pTxPkt;
    ddi_dma_cookie_t cookie;
    u32_t            count;
    size_t           length;
    int              rc;
    int              j;

    if ((pTxPkt = kmem_zalloc(sizeof(um_txpacket_t), KM_NOSLEEP)) == NULL)
    {
        return NULL;
    }

    pTxPkt->lm_pkt.l2pkt_tx_info = &pTxPkt->tx_info;

    if ((rc = ddi_dma_alloc_handle(pUM->pDev,
                                   &bnxeTxCbDmaAttrib,
                                   DDI_DMA_DONTWAIT,
                                   NULL,
                                   &pTxPkt->cbDmaHandle)) != DDI_SUCCESS)
    {
        BnxeLogWarn(pUM, "Failed to alloc DMA handle for Tx Desc (%d)", rc);
        kmem_free(pTxPkt, sizeof(um_txpacket_t));
        return NULL;
    }

    if ((rc = ddi_dma_mem_alloc(pTxPkt->cbDmaHandle,
                                size,
                                &bnxeAccessAttribBUF,
                                DDI_DMA_STREAMING,
                                DDI_DMA_DONTWAIT,
                                NULL,
                                &pTxPkt->pCbBuf,
                                &length,
                                &pTxPkt->cbDmaAccHandle)) != DDI_SUCCESS)
    {
        BnxeLogWarn(pUM, "Failed to alloc DMA memory for Tx Desc (%d)", rc);
        ddi_dma_free_handle(&pTxPkt->cbDmaHandle);
        kmem_free(pTxPkt, sizeof(um_txpacket_t));
        return NULL;
    }

    if ((rc = ddi_dma_addr_bind_handle(pTxPkt->cbDmaHandle,
                                       NULL,
                                       pTxPkt->pCbBuf,
                                       length,
                                       DDI_DMA_WRITE | DDI_DMA_STREAMING,
                                       DDI_DMA_DONTWAIT,
                                       NULL,
                                       &cookie,
                                       &count)) != DDI_DMA_MAPPED)
    {
        BnxeLogWarn(pUM, "Failed to bind DMA address for Tx Desc (%d)", rc);
        ddi_dma_mem_free(&pTxPkt->cbDmaAccHandle);
        ddi_dma_free_handle(&pTxPkt->cbDmaHandle);
        kmem_free(pTxPkt, sizeof(um_txpacket_t));
        return NULL;
    }

    pTxPkt->cbPhysAddr.as_u64 = cookie.dmac_laddress;

    for (j = 0; j < BNXE_MAX_DMA_HANDLES_PER_PKT; j++)
    {
        if ((rc = ddi_dma_alloc_handle(pUM->pDev,
                                       &bnxeTxDmaAttrib,
                                       DDI_DMA_DONTWAIT,
                                       NULL,
                                       &pTxPkt->dmaHandles[j])) !=
            DDI_SUCCESS)
        {
            BnxeLogWarn(pUM, "Failed to alloc DMA handles for Tx Pkt %d (%d)",
                        j, rc);

            for (--j; j >= 0; j--) /* unwind */
            {
                ddi_dma_free_handle(&pTxPkt->dmaHandles[j]);
            }

            ddi_dma_unbind_handle(pTxPkt->cbDmaHandle);
            ddi_dma_mem_free(&pTxPkt->cbDmaAccHandle);
            ddi_dma_free_handle(&pTxPkt->cbDmaHandle);
            kmem_free(pTxPkt, sizeof(um_txpacket_t));
            return NULL;
        }
    }

    ASSERT(pTxPkt->pMblk == NULL);
    ASSERT(pTxPkt->num_handles == 0);
    ASSERT(pTxPkt->frag_list.cnt == 0);
    pTxPkt->cbLength = size;

    return pTxPkt;
}
static int BnxeTxPktsInitIdx(um_device_t * pUM,
                             int           idx)
{
    lm_device_t *   pLM = &pUM->lm_dev;
    TxQueue *       pTxQ;
    um_txpacket_t * pTxPkt;
    s_list_t        tmpList;
    int             i;

    pTxQ = &pUM->txq[idx];

    s_list_clear(&pTxQ->sentTxQ);
    s_list_clear(&pTxQ->freeTxDescQ);
    s_list_clear(&pTxQ->waitTxDescQ);

    pTxQ->desc_cnt    = pUM->devParams.numTxDesc[LM_CHAIN_IDX_CLI(pLM, idx)];
    pTxQ->txLowWater  = pUM->devParams.numTxDesc[LM_CHAIN_IDX_CLI(pLM, idx)];
    pTxQ->thresh_pdwm = BNXE_PDWM_THRESHOLD;
    pTxQ->txFailed    = 0;
    pTxQ->txDiscards  = 0;
    pTxQ->txRecycle   = 0;
    pTxQ->txBlocked   = 0;

    if (pUM->devParams.lsoEnable)
    {
        for (i = 0; i < pTxQ->desc_cnt; i++)
        {
            pTxPkt = BnxeTxPktAlloc(pUM,
                                    (BNXE_LSO_MAXLEN +
                                     sizeof(struct ether_vlan_header)));
            if (pTxPkt == NULL)
            {
                BnxeLogWarn(pUM, "Failed to allocate all Tx Descs for LSO (%d/%d allocated), LSO is disabled",
                            i, pTxQ->desc_cnt);

                /* free existing in freeTxDescQ... */

                BNXE_LOCK_ENTER_FREETX(pUM, idx);
                tmpList = pTxQ->freeTxDescQ;
                s_list_clear(&pTxQ->freeTxDescQ);
                BNXE_LOCK_EXIT_FREETX(pUM, idx);

                BnxeTxPktsFreeList(&tmpList);

                pUM->devParams.lsoEnable = 0; /* Disabling LSO! */

                break;
            }

            BNXE_LOCK_ENTER_FREETX(pUM, idx);
            s_list_push_tail(&pTxQ->freeTxDescQ, &pTxPkt->lm_pkt.link);
            BNXE_LOCK_EXIT_FREETX(pUM, idx);
        }
    }

    if (!pUM->devParams.lsoEnable)
    {
        for (i = 0; i < pTxQ->desc_cnt; i++)
        {
            pTxPkt = BnxeTxPktAlloc(pUM,
                                    (pUM->devParams.mtu[LM_CHAIN_IDX_CLI(pLM, idx)] +
                                     sizeof(struct ether_vlan_header)));
            if (pTxPkt == NULL)
            {
                BnxeLogWarn(pUM, "Failed to allocate all Tx Descs (%d/%d allocated)",
                            i, pTxQ->desc_cnt);

                /* free existing in freeTxDescQ... */

                BNXE_LOCK_ENTER_FREETX(pUM, idx);
                tmpList = pTxQ->freeTxDescQ;
                s_list_clear(&pTxQ->freeTxDescQ);
                BNXE_LOCK_EXIT_FREETX(pUM, idx);

                BnxeTxPktsFreeList(&tmpList);

                return -1;
            }

            BNXE_LOCK_ENTER_FREETX(pUM, idx);
            s_list_push_tail(&pTxQ->freeTxDescQ, &pTxPkt->lm_pkt.link);
            BNXE_LOCK_EXIT_FREETX(pUM, idx);
        }
    }

    return 0;
}
int BnxeTxPktsInit(um_device_t * pUM,
                   int           cliIdx)
{
    int idx;
    int rc = 0;

    switch (cliIdx)
    {
    case LM_CLI_IDX_FCOE:

        rc = BnxeTxPktsInitIdx(pUM, FCOE_CID(&pUM->lm_dev));
        break;

    case LM_CLI_IDX_NDIS:

        LM_FOREACH_TSS_IDX(&pUM->lm_dev, idx)
        {
            if ((rc = BnxeTxPktsInitIdx(pUM, idx)) < 0)
            {
                break;
            }
        }

        break;

    default:

        BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeTxPktsInit (%d)", cliIdx);
        rc = -1;
        break;
    }

    return rc;
}
static void BnxeTxPktsFiniIdx(um_device_t * pUM,
                              int           idx)
{
    lm_device_t * pLM = &pUM->lm_dev;
    TxQueue *     pTxQ;
    s_list_t      tmpList;

    pTxQ = &pUM->txq[idx];

    BNXE_LOCK_ENTER_FREETX(pUM, idx);
    tmpList = pTxQ->freeTxDescQ;
    s_list_clear(&pTxQ->freeTxDescQ);
    BNXE_LOCK_EXIT_FREETX(pUM, idx);

    BNXE_LOCK_ENTER_TX(pUM, idx);
    s_list_add_tail(&tmpList, &pTxQ->sentTxQ);
    s_list_clear(&pTxQ->sentTxQ);
    BNXE_LOCK_EXIT_TX(pUM, idx);

    /* there could be more than originally allocated but less is bad */
    if (s_list_entry_cnt(&tmpList) <
        pUM->devParams.numTxDesc[LM_CHAIN_IDX_CLI(pLM, idx)])
    {
        BnxeLogWarn(pUM, "Missing TX descriptors (%lu / %d) (TxFail: %d)",
                    s_list_entry_cnt(&tmpList), pUM->devParams.numTxDesc,
                    pTxQ->txFailed);
    }

    BnxeTxPktsFreeList(&tmpList);
}
void BnxeTxPktsFini(um_device_t * pUM,
                    int           cliIdx)
{
    int idx;

    switch (cliIdx)
    {
    case LM_CLI_IDX_FCOE:

        BnxeTxPktsFiniIdx(pUM, FCOE_CID(&pUM->lm_dev));
        break;

    case LM_CLI_IDX_NDIS:

        LM_FOREACH_TSS_IDX(&pUM->lm_dev, idx)
        {
            BnxeTxPktsFiniIdx(pUM, idx);
        }

        break;

    default:

        BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeTxPktsFini (%d)", cliIdx);
        break;
    }
}