7096 vioif should not log to the console on boot, or ever
[unleashed.git] / usr / src / uts / common / io / bnxe / bnxe_rx.c
blob692842c5cc020362998df168a11e428833d136a7
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright 2014 QLogic Corporation
24 * The contents of this file are subject to the terms of the
25 * QLogic End User License (the "License").
26 * You may not use this file except in compliance with the License.
28 * You can obtain a copy of the License at
29 * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
30 * QLogic_End_User_Software_License.txt
31 * See the License for the specific language governing permissions
32 * and limitations under the License.
36 * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
39 #include "bnxe.h"
42 ddi_dma_attr_t bnxeRxDmaAttrib =
44 DMA_ATTR_V0, /* dma_attr_version */
45 0, /* dma_attr_addr_lo */
46 0xffffffffffffffff, /* dma_attr_addr_hi */
47 0xffffffffffffffff, /* dma_attr_count_max */
48 BNXE_DMA_ALIGNMENT, /* dma_attr_align */
49 0xffffffff, /* dma_attr_burstsizes */
50 1, /* dma_attr_minxfer */
51 0xffffffffffffffff, /* dma_attr_maxxfer */
52 0xffffffffffffffff, /* dma_attr_seg */
53 1, /* dma_attr_sgllen */
54 1, /* dma_attr_granular */
55 0, /* dma_attr_flags */
59 static void BnxeRxPostBuffers(um_device_t * pUM,
60 int idx,
61 s_list_t * pReclaimList)
63 lm_rx_chain_t * pLmRxChain = &LM_RXQ(&pUM->lm_dev, idx);
64 u32_t returnedBytes = 0;
65 lm_packet_t * pLmPkt;
67 /* return bytes from reclaimed list to LM */
68 pLmPkt = (lm_packet_t *)s_list_peek_head(pReclaimList);
69 while (pLmPkt)
71 returnedBytes += pLmPkt->size;
72 pLmPkt = (lm_packet_t *)s_list_next_entry(&pLmPkt->link);
75 BNXE_LOCK_ENTER_RX(pUM, idx);
77 if (pUM->rxq[idx].rxLowWater > s_list_entry_cnt(&pLmRxChain->active_descq))
79 pUM->rxq[idx].rxLowWater = s_list_entry_cnt(&pLmRxChain->active_descq);
82 lm_return_packet_bytes(&pUM->lm_dev, idx, returnedBytes);
84 s_list_add_tail(&pLmRxChain->common.free_descq, pReclaimList);
85 s_list_clear(pReclaimList);
87 #if 0
89 * Don't post buffers if we don't have too many free buffers and there are a
90 * lot of buffers already posted.
92 if (lm_bd_chain_avail_bds(&pLmRxChain->bd_chain) < 32)
94 BNXE_LOCK_EXIT_RX(pUM, idx);
95 return;
99 * Don't post buffers if there aren't really that many to post yet.
101 if (s_list_entry_cnt(&pLmRxChain->common.free_descq) < 32)
103 BNXE_LOCK_EXIT_RX(pUM, idx);
104 return;
106 #endif
108 lm_post_buffers(&pUM->lm_dev, idx, NULL, 0);
110 BNXE_LOCK_EXIT_RX(pUM, idx);
114 static u32_t BnxeRxPktDescrSize(um_device_t * pUM)
116 u32_t descSize;
118 (void)pUM;
120 descSize = sizeof(um_rxpacket_t) + SIZEOF_SIG;
122 return ALIGN_VALUE_TO_WORD_BOUNDARY(descSize);
126 static void BnxeRxPktDescrFree(um_device_t * pUM,
127 um_rxpacket_t * pRxPkt)
129 u32_t descSize;
130 caddr_t pMem;
132 BnxeDbgBreakIfFastPath(pUM, SIG(pRxPkt) != L2PACKET_RX_SIG);
134 descSize = BnxeRxPktDescrSize(pUM);
135 pMem = (caddr_t)pRxPkt - SIZEOF_SIG;
137 kmem_free(pMem, descSize);
141 static void BnxeRxPktFree(char * free_arg)
143 um_rxpacket_t * pRxPkt = (um_rxpacket_t *)free_arg;
144 um_device_t * pUM = (um_device_t *)pRxPkt->pUM;
145 int idx = pRxPkt->idx;
146 s_list_t doneRxQ;
148 if (pUM->magic != BNXE_MAGIC)
151 * Oh my! The free_arg data got corrupted. Log a message and leak this
152 * packet. We don't decrement the 'up in the stack count' since we
153 * can't be sure this packet really was a packet we previously sent up.
155 BnxeLogWarn(NULL, "ERROR freeing packet - UM is invalid! (%p)", pRxPkt);
156 return;
159 if (pUM->rxBufSignature[LM_CHAIN_IDX_CLI(&pUM->lm_dev, idx)] !=
160 pRxPkt->signature)
163 * The stack is freeing a packet that was from a previous plumb of
164 * the interface.
166 pRxPkt->lm_pkt.u1.rx.mem_phys[0].as_u64 = 0;
167 pRxPkt->rx_info.mem_virt = NULL;
168 pRxPkt->rx_info.mem_size = 0;
170 ddi_dma_unbind_handle(pRxPkt->dmaHandle);
171 ddi_dma_mem_free(&pRxPkt->dmaAccHandle);
172 ddi_dma_free_handle(&pRxPkt->dmaHandle);
174 BnxeRxPktDescrFree(pUM, pRxPkt);
176 else
178 s_list_clear(&doneRxQ);
180 BNXE_LOCK_ENTER_DONERX(pUM, idx);
182 s_list_push_tail(&pUM->rxq[idx].doneRxQ,
183 &((lm_packet_t *)pRxPkt)->link);
185 /* post packets when a bunch are ready */
186 if (s_list_entry_cnt(&pUM->rxq[idx].doneRxQ) >= pUM->devParams.maxRxFree)
188 doneRxQ = pUM->rxq[idx].doneRxQ;
189 s_list_clear(&pUM->rxq[idx].doneRxQ);
192 BNXE_LOCK_EXIT_DONERX(pUM, idx);
194 if (s_list_entry_cnt(&doneRxQ))
196 BnxeRxPostBuffers(pUM, idx, &doneRxQ);
200 atomic_dec_32(&pUM->rxq[idx].rxBufUpInStack);
204 boolean_t BnxeWaitForPacketsFromClient(um_device_t * pUM,
205 int cliIdx)
207 int i, idx, cnt=0, tot=0;
209 switch (cliIdx)
211 case LM_CLI_IDX_FCOE:
213 for (i = 0; i < 5; i++)
215 if ((cnt = pUM->rxq[FCOE_CID(&pUM->lm_dev)].rxBufUpInStack) == 0)
217 break;
220 /* twiddle our thumbs for one second */
221 delay(drv_usectohz(1000000));
224 if (cnt)
226 BnxeLogWarn(pUM, "%d packets still held by FCoE (chain %d)!",
227 cnt, FCOE_CID(&pUM->lm_dev));
228 return B_FALSE;
231 break;
233 case LM_CLI_IDX_NDIS:
235 tot = 0;
237 LM_FOREACH_RSS_IDX(&pUM->lm_dev, idx)
239 for (i = 0; i < 5; i++)
241 if ((cnt = pUM->rxq[idx].rxBufUpInStack) == 0)
243 break;
246 /* twiddle our thumbs for one second */
247 delay(drv_usectohz(1000000));
250 tot += cnt;
253 if (tot)
255 BnxeLogWarn(pUM, "%d packets still held by the stack (chain %d)!",
256 tot, idx);
257 return B_FALSE;
260 break;
262 default:
264 BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeWaitForPacketsFromClient (%d)", cliIdx);
265 break;
268 return B_TRUE;
/*
 * Harvest received packets from rx ring 'idx' and deliver them up the
 * stack.
 *
 * numBytes is only valid when polling is TRUE: it caps how many packet
 * bytes are processed in one call, with the remainder pushed back onto
 * the ring's waitRxQ.  When polling, the assembled mblk chain is returned
 * to the caller; otherwise it is delivered via mac_rx*()/FCoE and NULL is
 * returned.
 *
 * Small packets (below rxCopyThreshold), or all packets when the ring is
 * nearly out of active descriptors (forceCopy), are copied into fresh
 * mblks so their descriptors can be reclaimed immediately.  Larger
 * packets are loaned up zero-copy via desballoc() and come back through
 * BnxeRxPktFree().
 */
mblk_t * BnxeRxRingProcess(um_device_t * pUM,
                           int           idx,
                           boolean_t     polling,
                           int           numBytes)
{
    RxQueue *       pRxQ;
    lm_rx_chain_t * pLmRxChain;
    u32_t           activeDescqCount;
    boolean_t       forceCopy;
    um_rxpacket_t * pRxPkt;
    lm_packet_t *   pLmPkt;
    u32_t           pktLen;
    boolean_t       dataCopied;
    u32_t           notCopiedCount;
    mblk_t *        pMblk;
    int             ofldFlags;
    mblk_t *        head = NULL;
    mblk_t *        tail = NULL;
    s_list_t        rxList;
    s_list_t        reclaimList;
    int             procBytes = 0;
    s_list_t        tmpList;
    sp_cqes_info    sp_cqes;
    u32_t           pktsRxed;

    pRxQ = &pUM->rxq[idx];

    s_list_clear(&tmpList);

    /* get the list of packets received */
    BNXE_LOCK_ENTER_RX(pUM, idx);

    /* NOTE(review): pktsRxed is assigned but never used below */
    pktsRxed = lm_get_packets_rcvd(&pUM->lm_dev, idx, &tmpList, &sp_cqes);

    /* grab any waiting packets (left over from a prior polled call) */
    rxList = pRxQ->waitRxQ;
    s_list_clear(&pRxQ->waitRxQ);

    /* put any new packets at the end of the queue */
    s_list_add_tail(&rxList, &tmpList);

    BNXE_LOCK_EXIT_RX(pUM, idx);

    /* now complete the ramrods (slow-path completions), outside the lock */
    lm_complete_ramrods(&pUM->lm_dev, &sp_cqes);

    if (s_list_entry_cnt(&rxList) == 0)
    {
        return NULL;
    }

    s_list_clear(&reclaimList);
    notCopiedCount = 0;

    pLmRxChain = &LM_RXQ(&pUM->lm_dev, idx);

    activeDescqCount = s_list_entry_cnt(&pLmRxChain->active_descq);

    /* force-copy everything once fewer than 1/8 of the descriptors remain */
    forceCopy = (activeDescqCount <
                 (pUM->lm_dev.params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(&pUM->lm_dev, idx)] >> 3));

    /* send the packets up the stack */
    while (1)
    {
        pRxPkt = (um_rxpacket_t *)s_list_pop_head(&rxList);
        if (pRxPkt == NULL)
        {
            break;
        }

        pLmPkt = &(pRxPkt->lm_pkt);

        if (pLmPkt->status != LM_STATUS_SUCCESS)
        {
            /* XXX increment error stat? */
            s_list_push_tail(&reclaimList, &pLmPkt->link);
            continue;
        }

        pktLen = pLmPkt->size;

        if (polling == TRUE)
        {
            /* When polling an rx ring we can only process up to numBytes */
            if ((procBytes + pktLen) <= numBytes)
            {
                /* continue to process this packet */
                procBytes += pktLen;
            }
            else
            {
                /* put this packet not processed back on the list (front) */
                s_list_push_head(&rxList, &pRxPkt->lm_pkt.link);
                break;
            }
        }

        /* make the DMA'd packet data visible to the CPU */
        (void)ddi_dma_sync(pRxPkt->dmaHandle,
                           0,
                           pktLen,
                           DDI_DMA_SYNC_FORKERNEL);

        if (pUM->fmCapabilities &&
            BnxeCheckDmaHandle(pRxPkt->dmaHandle) != DDI_FM_OK)
        {
            ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED);
        }

        dataCopied = B_FALSE;

        /* copy path: small packets or descriptor pressure */
        if (forceCopy ||
            (pUM->devParams.rxCopyThreshold &&
             (pktLen < pUM->devParams.rxCopyThreshold)))
        {
            if ((pMblk = allocb(pktLen, BPRI_MED)) == NULL)
            {
                pRxQ->rxDiscards++;
                s_list_push_tail(&reclaimList, &pLmPkt->link);
                continue;
            }

            /* copy the packet into the new mblk */
            bcopy((pRxPkt->rx_info.mem_virt + BNXE_DMA_RX_OFFSET),
                  pMblk->b_rptr, pktLen);
            pMblk->b_wptr = (pMblk->b_rptr + pktLen);
            dataCopied = B_TRUE;

            pRxQ->rxCopied++;

            goto BnxeRxRingProcess_sendup;
        }

        if ((activeDescqCount == 0) && (s_list_entry_cnt(&rxList) == 0))
        {
            /*
             * If the hardware is out of receive buffers and we are on the last
             * receive packet then drop the packet.  We do this because we might
             * not be able to allocate any new receive buffers before the ISR
             * completes.  If this happens, the driver will enter an infinite
             * interrupt loop where the hardware is requesting rx buffers the
             * driver cannot allocate.  To prevent a system livelock we leave
             * one buffer perpetually available.  Note that we do this after
             * giving the double copy code a chance to claim the packet.
             */

            /* FIXME
             * Make sure to add one more to the rx packet descriptor count
             * before allocating them.
             */

            pRxQ->rxDiscards++;
            s_list_push_tail(&reclaimList, &pLmPkt->link);
            continue;
        }

        /*
         * If we got here then the packet wasn't copied so we need to create a
         * new mblk_t which references the lm_packet_t buffer (zero-copy loan).
         */

        pRxPkt->freeRtn.free_func = BnxeRxPktFree;
        pRxPkt->freeRtn.free_arg  = (char *)pRxPkt;
        pRxPkt->pUM               = (void *)pUM;
        pRxPkt->idx               = idx;

        if ((pMblk = desballoc((pRxPkt->rx_info.mem_virt + BNXE_DMA_RX_OFFSET),
                               pktLen,
                               BPRI_MED,
                               &pRxPkt->freeRtn)) == NULL)
        {
            pRxQ->rxDiscards++;
            s_list_push_tail(&reclaimList, &pLmPkt->link);
            continue;
        }

        pMblk->b_wptr = (pMblk->b_rptr + pktLen);

BnxeRxRingProcess_sendup:

        /*
         * Check if the checksum was offloaded so we can pass the result to
         * the stack.
         */
        ofldFlags = 0;

        if ((pUM->devParams.enabled_oflds & LM_OFFLOAD_RX_IP_CKSUM) &&
            (pRxPkt->rx_info.flags & LM_RX_FLAG_IP_CKSUM_IS_GOOD))
        {
            ofldFlags |= HCK_IPV4_HDRCKSUM_OK;
        }

        if (((pUM->devParams.enabled_oflds & LM_OFFLOAD_RX_TCP_CKSUM) &&
             (pRxPkt->rx_info.flags & LM_RX_FLAG_TCP_CKSUM_IS_GOOD)) ||
            ((pUM->devParams.enabled_oflds & LM_OFFLOAD_RX_UDP_CKSUM) &&
             (pRxPkt->rx_info.flags & LM_RX_FLAG_UDP_CKSUM_IS_GOOD)))
        {
            ofldFlags |= HCK_FULLCKSUM_OK;
        }

        if (ofldFlags != 0)
        {
            mac_hcksum_set(pMblk, 0, 0, 0, 0, ofldFlags);
        }

        /*
         * If the packet data was copied into a new receive buffer then put this
         * descriptor in a list to be reclaimed later.  If not, then increment a
         * counter so we can track how many of our descriptors are held by the
         * stack.
         */
        if (dataCopied == B_TRUE)
        {
            s_list_push_tail(&reclaimList, &pLmPkt->link);
        }
        else
        {
            notCopiedCount++;
        }

        /* append the mblk to the chain being built */
        if (head == NULL)
        {
            head = pMblk;
        }
        else
        {
            tail->b_next = pMblk;
        }

        tail = pMblk;
        tail->b_next = NULL;

#if 0
        BnxeDumpPkt(pUM,
                    (BNXE_FCOE(pUM) && (idx == FCOE_CID(&pUM->lm_dev))) ?
                        "<- FCoE L2 RX <-" : "<- L2 RX <-",
                    pMblk, B_TRUE);
#endif
    }

    if (head)
    {
        if (notCopiedCount)
        {
            /* track all non-copied packets that will be held by the stack */
            atomic_add_32(&pUM->rxq[idx].rxBufUpInStack, notCopiedCount);
        }

        /* pass the mblk chain up the stack (unless the caller is polling) */
        if (polling == FALSE)
        {
            /* XXX NEED TO ADD STATS FOR RX PATH UPCALLS */

            if (BNXE_FCOE(pUM) && (idx == FCOE_CID(&pUM->lm_dev)))
            {
                /* XXX verify fcoe frees all packets on success or error */
                if (pUM->fcoe.pDev && pUM->fcoe.bind.cliIndicateRx)
                {
                    pUM->fcoe.bind.cliIndicateRx(pUM->fcoe.pDev, head);
                }
                else
                {
                    /* FCoE isn't bound?  Reclaim the chain... */
                    freemsgchain(head);
                    head = NULL;
                }
            }
            else
            {
#if defined(BNXE_RINGS) && (defined(__S11) || defined(__S12))
                mac_rx_ring(pUM->pMac,
                            pUM->rxq[idx].ringHandle,
                            head,
                            pUM->rxq[idx].genNumber);
#else
                mac_rx(pUM->pMac,
                       pUM->macRxResourceHandles[idx],
                       head);
#endif
            }
        }
    }

    if ((polling == TRUE) && s_list_entry_cnt(&rxList))
    {
        /* put the packets not processed back on the list (front) */
        BNXE_LOCK_ENTER_RX(pUM, idx);
        s_list_add_head(&pRxQ->waitRxQ, &rxList);
        BNXE_LOCK_EXIT_RX(pUM, idx);
    }

    if (s_list_entry_cnt(&reclaimList))
    {
        BnxeRxPostBuffers(pUM, idx, &reclaimList);
    }

    return (polling == TRUE) ? head : NULL;
}
574 * Dumping packets simply moves all packets from the waiting queue to the free
575 * queue. Note that the packets are not posted back to the LM.
577 static void BnxeRxRingDump(um_device_t * pUM,
578 int idx)
580 s_list_t tmpList;
582 BNXE_LOCK_ENTER_RX(pUM, idx);
584 tmpList = pUM->rxq[idx].waitRxQ;
585 s_list_clear(&pUM->rxq[idx].waitRxQ);
587 s_list_add_tail(&LM_RXQ(&pUM->lm_dev, idx).common.free_descq, &tmpList);
589 BNXE_LOCK_EXIT_RX(pUM, idx);
594 * Aborting packets stops all rx processing by dumping the currently waiting
595 * packets and aborting all the rx descriptors currently posted in the LM.
597 static void BnxeRxPktsAbortIdx(um_device_t * pUM,
598 int idx)
600 BnxeRxRingDump(pUM, idx);
602 BNXE_LOCK_ENTER_RX(pUM, idx);
603 lm_abort(&pUM->lm_dev, ABORT_OP_RX_CHAIN, idx);
604 BNXE_LOCK_EXIT_RX(pUM, idx);
608 void BnxeRxPktsAbort(um_device_t * pUM,
609 int cliIdx)
611 int idx;
613 switch (cliIdx)
615 case LM_CLI_IDX_FCOE:
617 BnxeRxPktsAbortIdx(pUM, FCOE_CID(&pUM->lm_dev));
618 break;
620 case LM_CLI_IDX_NDIS:
622 LM_FOREACH_RSS_IDX(&pUM->lm_dev, idx)
624 BnxeRxPktsAbortIdx(pUM, idx);
627 break;
629 default:
631 BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeRxPktsAbort (%d)", cliIdx);
632 break;
637 static int BnxeRxBufAlloc(um_device_t * pUM,
638 int idx,
639 um_rxpacket_t * pRxPkt)
641 ddi_dma_cookie_t cookie;
642 u32_t count;
643 size_t length;
644 int rc;
646 if ((rc = ddi_dma_alloc_handle(pUM->pDev,
647 &bnxeRxDmaAttrib,
648 DDI_DMA_DONTWAIT,
649 NULL,
650 &pRxPkt->dmaHandle)) != DDI_SUCCESS)
652 BnxeLogWarn(pUM, "Failed to alloc DMA handle for rx buffer");
653 return -1;
656 pRxPkt->rx_info.mem_size = MAX_L2_CLI_BUFFER_SIZE(&pUM->lm_dev, idx);
658 if ((rc = ddi_dma_mem_alloc(pRxPkt->dmaHandle,
659 pRxPkt->rx_info.mem_size,
660 &bnxeAccessAttribBUF,
661 DDI_DMA_STREAMING,
662 DDI_DMA_DONTWAIT,
663 NULL,
664 (caddr_t *)&pRxPkt->rx_info.mem_virt,
665 &length,
666 &pRxPkt->dmaAccHandle)) != DDI_SUCCESS)
668 BnxeLogWarn(pUM, "Failed to alloc DMA memory for rx buffer");
669 ddi_dma_free_handle(&pRxPkt->dmaHandle);
670 return -1;
673 if ((rc = ddi_dma_addr_bind_handle(pRxPkt->dmaHandle,
674 NULL,
675 (caddr_t)pRxPkt->rx_info.mem_virt,
676 pRxPkt->rx_info.mem_size,
677 DDI_DMA_READ | DDI_DMA_STREAMING,
678 DDI_DMA_DONTWAIT,
679 NULL,
680 &cookie,
681 &count)) != DDI_DMA_MAPPED)
683 BnxeLogWarn(pUM, "Failed to bind DMA address for rx buffer");
684 ddi_dma_mem_free(&pRxPkt->dmaAccHandle);
685 ddi_dma_free_handle(&pRxPkt->dmaHandle);
686 return -1;
689 pRxPkt->lm_pkt.u1.rx.mem_phys[0].as_u64 = cookie.dmac_laddress;
691 return 0;
695 static int BnxeRxPktsInitPostBuffersIdx(um_device_t * pUM,
696 int idx)
698 BNXE_LOCK_ENTER_RX(pUM, idx);
699 lm_post_buffers(&pUM->lm_dev, idx, NULL, 0);
700 BNXE_LOCK_EXIT_RX(pUM, idx);
702 return 0;
706 int BnxeRxPktsInitPostBuffers(um_device_t * pUM,
707 int cliIdx)
709 int idx;
711 switch (cliIdx)
713 case LM_CLI_IDX_FCOE:
715 BnxeRxPktsInitPostBuffersIdx(pUM, FCOE_CID(&pUM->lm_dev));
716 break;
718 case LM_CLI_IDX_NDIS:
720 LM_FOREACH_RSS_IDX(&pUM->lm_dev, idx)
722 BnxeRxPktsInitPostBuffersIdx(pUM, idx);
725 break;
727 default:
729 BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeRxPktsInit (%d)", cliIdx);
730 break;
733 return 0;
737 static int BnxeRxPktsInitIdx(um_device_t * pUM,
738 int idx)
740 lm_device_t * pLM = &pUM->lm_dev;
741 lm_rx_chain_t * pLmRxChain;
742 um_rxpacket_t * pRxPkt;
743 lm_packet_t * pLmPkt;
744 u8_t * pTmp;
745 int postCnt, i;
747 BNXE_LOCK_ENTER_RX(pUM, idx);
749 pLmRxChain = &LM_RXQ(pLM, idx);
751 s_list_clear(&pUM->rxq[idx].doneRxQ);
752 pUM->rxq[idx].rxLowWater = pLM->params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(pLM, idx)];
753 pUM->rxq[idx].rxDiscards = 0;
754 pUM->rxq[idx].rxCopied = 0;
756 s_list_clear(&pUM->rxq[idx].waitRxQ);
758 /* allocate the packet descriptors */
759 for (i = 0;
760 i < pLM->params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(pLM, idx)];
761 i++)
763 if ((pTmp = kmem_zalloc(BnxeRxPktDescrSize(pUM),
764 KM_NOSLEEP)) == NULL)
766 BnxeLogWarn(pUM, "Failed to alloc an rx packet descriptor!!!");
767 break; /* continue without error */
770 pRxPkt = (um_rxpacket_t *)(pTmp + SIZEOF_SIG);
771 SIG(pRxPkt) = L2PACKET_RX_SIG;
772 pRxPkt->signature = pUM->rxBufSignature[LM_CHAIN_IDX_CLI(pLM, idx)];
774 pLmPkt = (lm_packet_t *)pRxPkt;
775 pLmPkt->u1.rx.hash_val_ptr = &pRxPkt->hash_value;
776 pLmPkt->l2pkt_rx_info = &pRxPkt->rx_info;
778 if (BnxeRxBufAlloc(pUM, idx, pRxPkt) != 0)
780 BnxeRxPktDescrFree(pUM, pRxPkt);
781 break; /* continue without error */
784 s_list_push_tail(&pLmRxChain->common.free_descq, &pLmPkt->link);
787 postCnt = s_list_entry_cnt(&pLmRxChain->common.free_descq);
789 if (postCnt != pLM->params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(pLM, idx)])
791 BnxeLogWarn(pUM, "%d rx buffers requested and only %d allocated!!!",
792 pLM->params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(pLM, idx)],
793 postCnt);
796 BNXE_LOCK_EXIT_RX(pUM, idx);
798 return 0;
802 int BnxeRxPktsInit(um_device_t * pUM,
803 int cliIdx)
805 int idx;
807 /* set the rx buffer signature for this plumb */
808 atomic_swap_32(&pUM->rxBufSignature[cliIdx], (u32_t)ddi_get_time());
810 switch (cliIdx)
812 case LM_CLI_IDX_FCOE:
814 BnxeRxPktsInitIdx(pUM, FCOE_CID(&pUM->lm_dev));
815 break;
817 case LM_CLI_IDX_NDIS:
819 LM_FOREACH_RSS_IDX(&pUM->lm_dev, idx)
821 BnxeRxPktsInitIdx(pUM, idx);
824 break;
826 default:
828 BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeRxPktsInit (%d)", cliIdx);
829 break;
832 return 0;
836 static void BnxeRxPktsFiniIdx(um_device_t * pUM,
837 int idx)
839 lm_rx_chain_t * pLmRxChain;
840 um_rxpacket_t * pRxPkt;
841 s_list_t tmpList;
843 pLmRxChain = &LM_RXQ(&pUM->lm_dev, idx);
845 s_list_clear(&tmpList);
847 BNXE_LOCK_ENTER_RX(pUM, idx);
848 s_list_add_tail(&tmpList, &pLmRxChain->common.free_descq);
849 s_list_clear(&pLmRxChain->common.free_descq);
850 BNXE_LOCK_EXIT_RX(pUM, idx);
852 BNXE_LOCK_ENTER_DONERX(pUM, idx);
853 s_list_add_tail(&tmpList, &pUM->rxq[idx].doneRxQ);
854 s_list_clear(&pUM->rxq[idx].doneRxQ);
855 BNXE_LOCK_EXIT_DONERX(pUM, idx);
857 if (s_list_entry_cnt(&tmpList) !=
858 pUM->lm_dev.params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(&pUM->lm_dev, idx)])
860 BnxeLogWarn(pUM, "WARNING Missing RX packets (idx:%d) (%lu / %d - %u in stack)",
861 idx, s_list_entry_cnt(&tmpList),
862 pUM->lm_dev.params.l2_rx_desc_cnt[LM_CHAIN_IDX_CLI(&pUM->lm_dev, idx)],
863 pUM->rxq[idx].rxBufUpInStack);
867 * Back out all the packets in the "available for hardware use" queue.
868 * Free the buffers associated with the descriptors as we go.
870 while (1)
872 pRxPkt = (um_rxpacket_t *)s_list_pop_head(&tmpList);
873 if (pRxPkt == NULL)
875 break;
878 pRxPkt->lm_pkt.u1.rx.mem_phys[0].as_u64 = 0;
879 pRxPkt->rx_info.mem_virt = NULL;
880 pRxPkt->rx_info.mem_size = 0;
882 ddi_dma_unbind_handle(pRxPkt->dmaHandle);
883 ddi_dma_mem_free(&pRxPkt->dmaAccHandle);
884 ddi_dma_free_handle(&pRxPkt->dmaHandle);
886 BnxeRxPktDescrFree(pUM, pRxPkt);
891 void BnxeRxPktsFini(um_device_t * pUM,
892 int cliIdx)
894 int idx;
896 /* reset the signature for this unplumb */
897 atomic_swap_32(&pUM->rxBufSignature[cliIdx], 0);
899 switch (cliIdx)
901 case LM_CLI_IDX_FCOE:
903 BnxeRxPktsFiniIdx(pUM, FCOE_CID(&pUM->lm_dev));
904 break;
906 case LM_CLI_IDX_NDIS:
908 LM_FOREACH_RSS_IDX(&pUM->lm_dev, idx)
910 BnxeRxPktsFiniIdx(pUM, idx);
913 break;
915 default:
917 BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeRxPktsFini (%d)", cliIdx);
918 break;