Staging: et131x: clean up DMA10/DMA4 types
drivers/staging/et131x/et1310_rx.c
/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 *------------------------------------------------------------------------------
 *
 * et1310_rx.c - Routines used to perform data reception
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following Disclaimer as comments in the code as
 *   well as in the documentation and/or other materials provided with the
 *   distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following Disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */
#include "et131x_version.h"
#include "et131x_debug.h"
#include "et131x_defs.h"

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>

#include "et1310_phy.h"
#include "et1310_pm.h"
#include "et1310_jagcore.h"

#include "et131x_adapter.h"
#include "et131x_initpci.h"

#include "et1310_rx.h"

/* Data for debugging facilities */
#ifdef CONFIG_ET131X_DEBUG
extern dbg_info_t *et131x_dbginfo;
#endif /* CONFIG_ET131X_DEBUG */

void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd);
/**
 * et131x_rx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h)
 *
 * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
 * and the Packet Status Ring.
 */
int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	uint32_t OuterLoop, InnerLoop;
	uint32_t bufsize;
	uint32_t pktStatRingSize, FBRChunkSize;
	RX_RING_t *rx_ring;

	DBG_ENTER(et131x_dbginfo);

	/* Setup some convenience pointers */
	rx_ring = (RX_RING_t *) &adapter->RxRing;

	/* Alloc memory for the lookup table */
#ifdef USE_FBR0
	rx_ring->Fbr[0] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL);
#endif

	rx_ring->Fbr[1] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL);
	/* The first thing we will do is configure the sizes of the buffer
	 * rings. These will change based on jumbo packet support. Larger
	 * jumbo packets increase the size of each entry in FBR0, and the
	 * number of entries in FBR0, while at the same time decreasing the
	 * number of entries in FBR1.
	 *
	 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
	 * entries are huge in order to accommodate a "jumbo" frame, then it
	 * will have fewer entries. Conversely, FBR0 will now be relied upon
	 * to carry more "normal" frames, thus its entry size also increases
	 * and the number of entries goes up too (since it now carries
	 * "small" + "regular" packets).
	 *
	 * In this scheme, we try to maintain 512 entries between the two
	 * rings. Also, FBR1 remains a constant total size - when its entry
	 * size doubles, the number of entries halves. FBR0 increases in
	 * size, however.
	 */
	if (adapter->RegistryJumboPacket < 2048) {
#ifdef USE_FBR0
		rx_ring->Fbr0BufferSize = 256;
		rx_ring->Fbr0NumEntries = 512;
#endif
		rx_ring->Fbr1BufferSize = 2048;
		rx_ring->Fbr1NumEntries = 512;
	} else if (adapter->RegistryJumboPacket < 4096) {
#ifdef USE_FBR0
		rx_ring->Fbr0BufferSize = 512;
		rx_ring->Fbr0NumEntries = 1024;
#endif
		rx_ring->Fbr1BufferSize = 4096;
		rx_ring->Fbr1NumEntries = 512;
	} else {
#ifdef USE_FBR0
		rx_ring->Fbr0BufferSize = 1024;
		rx_ring->Fbr0NumEntries = 768;
#endif
		rx_ring->Fbr1BufferSize = 16384;
		rx_ring->Fbr1NumEntries = 128;
	}

#ifdef USE_FBR0
	adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr0NumEntries +
	    adapter->RxRing.Fbr1NumEntries;
#else
	adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr1NumEntries;
#endif
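
	/* For reference, the sizing above works out as follows (a summary of
	 * the assignments in this function, not an independent spec; the FBR0
	 * columns apply only when USE_FBR0 is defined):
	 *
	 *   RegistryJumboPacket   FBR0 size/entries   FBR1 size/entries
	 *   < 2048                  256 /  512          2048 / 512
	 *   < 4096                  512 / 1024          4096 / 512
	 *   otherwise              1024 /  768         16384 / 128
	 */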
	/* Allocate an area of memory for Free Buffer Ring 1 */
	bufsize = (sizeof(FBR_DESC_t) * rx_ring->Fbr1NumEntries) + 0xfff;
	rx_ring->pFbr1RingVa = pci_alloc_consistent(adapter->pdev,
						    bufsize,
						    &rx_ring->pFbr1RingPa);
	if (!rx_ring->pFbr1RingVa) {
		DBG_ERROR(et131x_dbginfo,
			  "Cannot alloc memory for Free Buffer Ring 1\n");
		DBG_LEAVE(et131x_dbginfo);
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here
	 * before storing the adjusted address.
	 */
	rx_ring->Fbr1Realpa = rx_ring->pFbr1RingPa;

	/* Align Free Buffer Ring 1 on a 4K boundary */
	et131x_align_allocated_memory(adapter,
				      &rx_ring->Fbr1Realpa,
				      &rx_ring->Fbr1offset, 0x0FFF);

	rx_ring->pFbr1RingVa = (void *)((uint8_t *) rx_ring->pFbr1RingVa +
					rx_ring->Fbr1offset);
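
	/* A compiled-out sketch of the rounding that
	 * et131x_align_allocated_memory() is expected to perform here (the
	 * real helper lives in et131x_initpci.c; this inline copy is
	 * illustrative only and is never built):
	 */
#if 0
	uint64_t aligned = rx_ring->Fbr1Realpa & ~0x0FFFull; /* round down */

	if (aligned != rx_ring->Fbr1Realpa) {
		aligned += 0x1000;	/* bump to the next 4K boundary */
		rx_ring->Fbr1offset = aligned - rx_ring->Fbr1Realpa;
		rx_ring->Fbr1Realpa = aligned;
	}
#endif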
#ifdef USE_FBR0
	/* Allocate an area of memory for Free Buffer Ring 0 */
	bufsize = (sizeof(FBR_DESC_t) * rx_ring->Fbr0NumEntries) + 0xfff;
	rx_ring->pFbr0RingVa = pci_alloc_consistent(adapter->pdev,
						    bufsize,
						    &rx_ring->pFbr0RingPa);
	if (!rx_ring->pFbr0RingVa) {
		DBG_ERROR(et131x_dbginfo,
			  "Cannot alloc memory for Free Buffer Ring 0\n");
		DBG_LEAVE(et131x_dbginfo);
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	rx_ring->Fbr0Realpa = rx_ring->pFbr0RingPa;

	/* Align Free Buffer Ring 0 on a 4K boundary */
	et131x_align_allocated_memory(adapter,
				      &rx_ring->Fbr0Realpa,
				      &rx_ring->Fbr0offset, 0x0FFF);

	rx_ring->pFbr0RingVa = (void *)((uint8_t *) rx_ring->pFbr0RingVa +
					rx_ring->Fbr0offset);
#endif
	for (OuterLoop = 0; OuterLoop < (rx_ring->Fbr1NumEntries / FBR_CHUNKS);
	     OuterLoop++) {
		uint64_t Fbr1Offset;
		uint64_t Fbr1TempPa;
		uint32_t Fbr1Align;

		/* This code allocates an area of memory big enough for N
		 * free buffers + (buffer_size - 1) so that the buffers can
		 * be aligned on 4k boundaries. If each buffer were aligned
		 * to a buffer_size boundary, the effect would be to double
		 * the size of FBR0. By allocating N buffers at once, we
		 * reduce this overhead.
		 */
		if (rx_ring->Fbr1BufferSize > 4096)
			Fbr1Align = 4096;
		else
			Fbr1Align = rx_ring->Fbr1BufferSize;

		FBRChunkSize =
		    (FBR_CHUNKS * rx_ring->Fbr1BufferSize) + Fbr1Align - 1;
		rx_ring->Fbr1MemVa[OuterLoop] =
		    pci_alloc_consistent(adapter->pdev, FBRChunkSize,
					 &rx_ring->Fbr1MemPa[OuterLoop]);

		if (!rx_ring->Fbr1MemVa[OuterLoop]) {
			DBG_ERROR(et131x_dbginfo, "Could not alloc memory\n");
			DBG_LEAVE(et131x_dbginfo);
			return -ENOMEM;
		}

		/* See NOTE in "Save Physical Address" comment above */
		Fbr1TempPa = rx_ring->Fbr1MemPa[OuterLoop];

		et131x_align_allocated_memory(adapter,
					      &Fbr1TempPa,
					      &Fbr1Offset, (Fbr1Align - 1));

		for (InnerLoop = 0; InnerLoop < FBR_CHUNKS; InnerLoop++) {
			uint32_t index = (OuterLoop * FBR_CHUNKS) + InnerLoop;

			/* Save the Virtual address of this index for quick
			 * access later
			 */
			rx_ring->Fbr[1]->Va[index] =
			    (uint8_t *) rx_ring->Fbr1MemVa[OuterLoop] +
			    (InnerLoop * rx_ring->Fbr1BufferSize) + Fbr1Offset;

			/* now store the physical address in the descriptor
			 * so the device can access it
			 */
			rx_ring->Fbr[1]->PAHigh[index] =
			    (uint32_t) (Fbr1TempPa >> 32);
			rx_ring->Fbr[1]->PALow[index] = (uint32_t) Fbr1TempPa;

			Fbr1TempPa += rx_ring->Fbr1BufferSize;

			rx_ring->Fbr[1]->Buffer1[index] =
			    rx_ring->Fbr[1]->Va[index];
			rx_ring->Fbr[1]->Buffer2[index] =
			    rx_ring->Fbr[1]->Va[index] - 4;
		}
	}
#ifdef USE_FBR0
	/* Same for FBR0 (if in use) */
	for (OuterLoop = 0; OuterLoop < (rx_ring->Fbr0NumEntries / FBR_CHUNKS);
	     OuterLoop++) {
		uint64_t Fbr0Offset;
		uint64_t Fbr0TempPa;

		FBRChunkSize = ((FBR_CHUNKS + 1) * rx_ring->Fbr0BufferSize) - 1;
		rx_ring->Fbr0MemVa[OuterLoop] =
		    pci_alloc_consistent(adapter->pdev, FBRChunkSize,
					 &rx_ring->Fbr0MemPa[OuterLoop]);

		if (!rx_ring->Fbr0MemVa[OuterLoop]) {
			DBG_ERROR(et131x_dbginfo, "Could not alloc memory\n");
			DBG_LEAVE(et131x_dbginfo);
			return -ENOMEM;
		}

		/* See NOTE in "Save Physical Address" comment above */
		Fbr0TempPa = rx_ring->Fbr0MemPa[OuterLoop];

		et131x_align_allocated_memory(adapter,
					      &Fbr0TempPa,
					      &Fbr0Offset,
					      rx_ring->Fbr0BufferSize - 1);

		for (InnerLoop = 0; InnerLoop < FBR_CHUNKS; InnerLoop++) {
			uint32_t index = (OuterLoop * FBR_CHUNKS) + InnerLoop;

			rx_ring->Fbr[0]->Va[index] =
			    (uint8_t *) rx_ring->Fbr0MemVa[OuterLoop] +
			    (InnerLoop * rx_ring->Fbr0BufferSize) + Fbr0Offset;

			rx_ring->Fbr[0]->PAHigh[index] =
			    (uint32_t) (Fbr0TempPa >> 32);
			rx_ring->Fbr[0]->PALow[index] = (uint32_t) Fbr0TempPa;

			Fbr0TempPa += rx_ring->Fbr0BufferSize;

			rx_ring->Fbr[0]->Buffer1[index] =
			    rx_ring->Fbr[0]->Va[index];
			rx_ring->Fbr[0]->Buffer2[index] =
			    rx_ring->Fbr[0]->Va[index] - 4;
		}
	}
#endif
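
	/* A worked example of the chunk savings described above, using an
	 * assumed FBR_CHUNKS of 4 and 2048-byte buffers (the real value of
	 * FBR_CHUNKS lives in et1310_rx.h): aligning each buffer separately
	 * needs up to 4 * (2048 + 2047) = 16380 bytes, while one chunk needs
	 * at most 4 * 2048 + 2047 = 10239 bytes for the same four buffers.
	 */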
	/* Allocate an area of memory for FIFO of Packet Status ring entries */
	pktStatRingSize =
	    sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries;

	rx_ring->pPSRingVa = pci_alloc_consistent(adapter->pdev,
						  pktStatRingSize + 0x0fff,
						  &rx_ring->pPSRingPa);

	if (!rx_ring->pPSRingVa) {
		DBG_ERROR(et131x_dbginfo,
			  "Cannot alloc memory for Packet Status Ring\n");
		DBG_LEAVE(et131x_dbginfo);
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	rx_ring->pPSRingRealPa = rx_ring->pPSRingPa;

	/* Align Packet Status Ring on a 4K boundary */
	et131x_align_allocated_memory(adapter,
				      &rx_ring->pPSRingRealPa,
				      &rx_ring->pPSRingOffset, 0x0FFF);

	rx_ring->pPSRingVa = (void *)((uint8_t *) rx_ring->pPSRingVa +
				      rx_ring->pPSRingOffset);
	/* Allocate an area of memory for writeback of status information */
	rx_ring->pRxStatusVa = pci_alloc_consistent(adapter->pdev,
						    sizeof(RX_STATUS_BLOCK_t) +
						    0x7, &rx_ring->pRxStatusPa);
	if (!rx_ring->pRxStatusVa) {
		DBG_ERROR(et131x_dbginfo,
			  "Cannot alloc memory for Status Block\n");
		DBG_LEAVE(et131x_dbginfo);
		return -ENOMEM;
	}

	/* Save physical address */
	rx_ring->RxStatusRealPA = rx_ring->pRxStatusPa;

	/* Align write back on an 8 byte boundary */
	et131x_align_allocated_memory(adapter,
				      &rx_ring->RxStatusRealPA,
				      &rx_ring->RxStatusOffset, 0x07);

	rx_ring->pRxStatusVa = (void *)((uint8_t *) rx_ring->pRxStatusVa +
					rx_ring->RxStatusOffset);

	rx_ring->NumRfd = NIC_DEFAULT_NUM_RFD;
	/* Recv
	 * kmem_cache_create() initializes a lookaside list. After successful
	 * creation, nonpaged fixed-size blocks can be allocated from and
	 * freed to the lookaside list.
	 * RFDs will be allocated from this pool.
	 */
	rx_ring->RecvLookaside = kmem_cache_create(adapter->netdev->name,
						   sizeof(MP_RFD),
						   0,
						   SLAB_CACHE_DMA |
						   SLAB_HWCACHE_ALIGN,
						   NULL);
	MP_SET_FLAG(adapter, fMP_ADAPTER_RECV_LOOKASIDE);

	/* The RFDs are going to be put on lists later on, so initialize the
	 * lists now.
	 */
	INIT_LIST_HEAD(&rx_ring->RecvList);
	INIT_LIST_HEAD(&rx_ring->RecvPendingList);

	DBG_LEAVE(et131x_dbginfo);
	return 0;
}
/**
 * et131x_rx_dma_memory_free - Free all memory allocated within this module.
 * @adapter: pointer to our private adapter structure
 */
void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
	uint32_t index;
	uint32_t bufsize;
	uint32_t pktStatRingSize;
	PMP_RFD pMpRfd;
	RX_RING_t *rx_ring;

	DBG_ENTER(et131x_dbginfo);

	/* Setup some convenience pointers */
	rx_ring = (RX_RING_t *) &adapter->RxRing;

	/* Free RFDs and associated packet descriptors */
	DBG_ASSERT(rx_ring->nReadyRecv == rx_ring->NumRfd);
	while (!list_empty(&rx_ring->RecvList)) {
		pMpRfd = (MP_RFD *) list_entry(rx_ring->RecvList.next,
					       MP_RFD, list_node);

		list_del(&pMpRfd->list_node);
		et131x_rfd_resources_free(adapter, pMpRfd);
	}

	while (!list_empty(&rx_ring->RecvPendingList)) {
		pMpRfd = (MP_RFD *) list_entry(rx_ring->RecvPendingList.next,
					       MP_RFD, list_node);
		list_del(&pMpRfd->list_node);
		et131x_rfd_resources_free(adapter, pMpRfd);
	}
	/* Free Free Buffer Ring 1 */
	if (rx_ring->pFbr1RingVa) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->Fbr1NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr1MemVa[index]) {
				uint32_t Fbr1Align;

				if (rx_ring->Fbr1BufferSize > 4096)
					Fbr1Align = 4096;
				else
					Fbr1Align = rx_ring->Fbr1BufferSize;

				bufsize =
				    (rx_ring->Fbr1BufferSize * FBR_CHUNKS) +
				    Fbr1Align - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr1MemVa[index],
						    rx_ring->Fbr1MemPa[index]);

				rx_ring->Fbr1MemVa[index] = NULL;
			}
		}

		/* Now the FIFO itself */
		rx_ring->pFbr1RingVa = (void *)((uint8_t *)
				rx_ring->pFbr1RingVa - rx_ring->Fbr1offset);

		bufsize =
		    (sizeof(FBR_DESC_t) * rx_ring->Fbr1NumEntries) + 0xfff;

		pci_free_consistent(adapter->pdev,
				    bufsize,
				    rx_ring->pFbr1RingVa, rx_ring->pFbr1RingPa);

		rx_ring->pFbr1RingVa = NULL;
	}
#ifdef USE_FBR0
	/* Now the same for Free Buffer Ring 0 */
	if (rx_ring->pFbr0RingVa) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->Fbr0NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr0MemVa[index]) {
				bufsize =
				    (rx_ring->Fbr0BufferSize *
				     (FBR_CHUNKS + 1)) - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr0MemVa[index],
						    rx_ring->Fbr0MemPa[index]);

				rx_ring->Fbr0MemVa[index] = NULL;
			}
		}

		/* Now the FIFO itself */
		rx_ring->pFbr0RingVa = (void *)((uint8_t *)
				rx_ring->pFbr0RingVa - rx_ring->Fbr0offset);

		bufsize =
		    (sizeof(FBR_DESC_t) * rx_ring->Fbr0NumEntries) + 0xfff;

		pci_free_consistent(adapter->pdev,
				    bufsize,
				    rx_ring->pFbr0RingVa, rx_ring->pFbr0RingPa);

		rx_ring->pFbr0RingVa = NULL;
	}
#endif
	/* Free Packet Status Ring */
	if (rx_ring->pPSRingVa) {
		rx_ring->pPSRingVa = (void *)((uint8_t *) rx_ring->pPSRingVa -
					      rx_ring->pPSRingOffset);

		pktStatRingSize =
		    sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries;

		pci_free_consistent(adapter->pdev,
				    pktStatRingSize + 0x0fff,
				    rx_ring->pPSRingVa, rx_ring->pPSRingPa);

		rx_ring->pPSRingVa = NULL;
	}

	/* Free area of memory for the writeback of status information */
	if (rx_ring->pRxStatusVa) {
		rx_ring->pRxStatusVa = (void *)((uint8_t *)
			rx_ring->pRxStatusVa - rx_ring->RxStatusOffset);

		pci_free_consistent(adapter->pdev,
				    sizeof(RX_STATUS_BLOCK_t) + 0x7,
				    rx_ring->pRxStatusVa, rx_ring->pRxStatusPa);

		rx_ring->pRxStatusVa = NULL;
	}
	/* Free receive buffer pool */

	/* Free receive packet pool */

	/* Destroy the lookaside (RFD) pool */
	if (MP_TEST_FLAG(adapter, fMP_ADAPTER_RECV_LOOKASIDE)) {
		kmem_cache_destroy(rx_ring->RecvLookaside);
		MP_CLEAR_FLAG(adapter, fMP_ADAPTER_RECV_LOOKASIDE);
	}

	/* Free the FBR Lookup Table */
#ifdef USE_FBR0
	kfree(rx_ring->Fbr[0]);
#endif

	kfree(rx_ring->Fbr[1]);

	/* Reset Counters */
	rx_ring->nReadyRecv = 0;

	DBG_LEAVE(et131x_dbginfo);
}
/**
 * et131x_init_recv - Initialize receive data structures.
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h)
 */
int et131x_init_recv(struct et131x_adapter *adapter)
{
	int status = -ENOMEM;
	PMP_RFD pMpRfd = NULL;
	uint32_t RfdCount;
	uint32_t TotalNumRfd = 0;
	RX_RING_t *rx_ring = NULL;

	DBG_ENTER(et131x_dbginfo);

	/* Setup some convenience pointers */
	rx_ring = (RX_RING_t *) &adapter->RxRing;
	/* Setup each RFD */
	for (RfdCount = 0; RfdCount < rx_ring->NumRfd; RfdCount++) {
		pMpRfd = (MP_RFD *) kmem_cache_alloc(rx_ring->RecvLookaside,
						     GFP_ATOMIC | GFP_DMA);

		if (!pMpRfd) {
			DBG_ERROR(et131x_dbginfo,
				  "Couldn't alloc RFD out of kmem_cache\n");
			status = -ENOMEM;
			continue;
		}

		status = et131x_rfd_resources_alloc(adapter, pMpRfd);
		if (status != 0) {
			DBG_ERROR(et131x_dbginfo,
				  "Couldn't alloc packet for RFD\n");
			kmem_cache_free(rx_ring->RecvLookaside, pMpRfd);
			continue;
		}

		/* Add this RFD to the RecvList */
		list_add_tail(&pMpRfd->list_node, &rx_ring->RecvList);

		/* Increment both the available RFD's, and the total RFD's. */
		rx_ring->nReadyRecv++;
		TotalNumRfd++;
	}

	if (TotalNumRfd > NIC_MIN_NUM_RFD)
		status = 0;

	rx_ring->NumRfd = TotalNumRfd;

	if (status != 0) {
		kmem_cache_free(rx_ring->RecvLookaside, pMpRfd);
		DBG_ERROR(et131x_dbginfo,
			  "Allocation problems in et131x_init_recv\n");
	}

	DBG_LEAVE(et131x_dbginfo);
	return status;
}
/**
 * et131x_rfd_resources_alloc
 * @adapter: pointer to our private adapter structure
 * @pMpRfd: pointer to a RFD
 *
 * Returns 0 on success and errno on failure (as defined in errno.h)
 */
int et131x_rfd_resources_alloc(struct et131x_adapter *adapter, MP_RFD *pMpRfd)
{
	pMpRfd->Packet = NULL;

	return 0;
}

/**
 * et131x_rfd_resources_free - Free the packet allocated for the given RFD
 * @adapter: pointer to our private adapter structure
 * @pMpRfd: pointer to a RFD
 */
void et131x_rfd_resources_free(struct et131x_adapter *adapter, MP_RFD *pMpRfd)
{
	pMpRfd->Packet = NULL;
	kmem_cache_free(adapter->RxRing.RecvLookaside, pMpRfd);
}
/**
 * ConfigRxDmaRegs - Start of Rx_DMA init sequence
 * @etdev: pointer to our adapter structure
 */
void ConfigRxDmaRegs(struct et131x_adapter *etdev)
{
	struct _RXDMA_t __iomem *rx_dma = &etdev->regs->rxdma;
	struct _rx_ring_t *pRxLocal = &etdev->RxRing;
	PFBR_DESC_t fbr_entry;
	uint32_t entry;
	RXDMA_PSR_NUM_DES_t psr_num_des;
	unsigned long flags;

	DBG_ENTER(et131x_dbginfo);
	/* Halt RXDMA to perform the reconfigure. */
	et131x_rx_dma_disable(etdev);

	/* Load the completion writeback physical address
	 *
	 * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here
	 * before storing the adjusted address.
	 */
	writel((uint32_t) (pRxLocal->RxStatusRealPA >> 32),
	       &rx_dma->dma_wb_base_hi);
	writel((uint32_t) pRxLocal->RxStatusRealPA, &rx_dma->dma_wb_base_lo);

	memset(pRxLocal->pRxStatusVa, 0, sizeof(RX_STATUS_BLOCK_t));

	/* Set the address and parameters of the packet status ring into the
	 * 1310's registers
	 */
	writel((uint32_t) (pRxLocal->pPSRingRealPa >> 32),
	       &rx_dma->psr_base_hi);
	writel((uint32_t) pRxLocal->pPSRingRealPa, &rx_dma->psr_base_lo);
	writel(pRxLocal->PsrNumEntries - 1, &rx_dma->psr_num_des.value);
	writel(0, &rx_dma->psr_full_offset.value);

	psr_num_des.value = readl(&rx_dma->psr_num_des.value);
	writel((psr_num_des.bits.psr_ndes * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des.value);
	spin_lock_irqsave(&etdev->RcvLock, flags);

	/* These local variables track the PSR in the adapter structure */
	pRxLocal->local_psr_full.bits.psr_full = 0;
	pRxLocal->local_psr_full.bits.psr_full_wrap = 0;

	/* Now's the best time to initialize FBR1 contents */
	fbr_entry = (PFBR_DESC_t) pRxLocal->pFbr1RingVa;
	for (entry = 0; entry < pRxLocal->Fbr1NumEntries; entry++) {
		fbr_entry->addr_hi = pRxLocal->Fbr[1]->PAHigh[entry];
		fbr_entry->addr_lo = pRxLocal->Fbr[1]->PALow[entry];
		fbr_entry->word2.bits.bi = entry;
		fbr_entry++;
	}

	/* Set the address and parameters of Free buffer ring 1 (and 0 if
	 * required) into the 1310's registers
	 */
	writel((uint32_t) (pRxLocal->Fbr1Realpa >> 32), &rx_dma->fbr1_base_hi);
	writel((uint32_t) pRxLocal->Fbr1Realpa, &rx_dma->fbr1_base_lo);
	writel(pRxLocal->Fbr1NumEntries - 1, &rx_dma->fbr1_num_des.value);
	writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);

	/* This variable tracks the free buffer ring 1 full position, so it
	 * has to match the above.
	 */
	pRxLocal->local_Fbr1_full = ET_DMA10_WRAP;
	writel(((pRxLocal->Fbr1NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr1_min_des.value);
#ifdef USE_FBR0
	/* Now's the best time to initialize FBR0 contents */
	fbr_entry = (PFBR_DESC_t) pRxLocal->pFbr0RingVa;
	for (entry = 0; entry < pRxLocal->Fbr0NumEntries; entry++) {
		fbr_entry->addr_hi = pRxLocal->Fbr[0]->PAHigh[entry];
		fbr_entry->addr_lo = pRxLocal->Fbr[0]->PALow[entry];
		fbr_entry->word2.bits.bi = entry;
		fbr_entry++;
	}

	writel((uint32_t) (pRxLocal->Fbr0Realpa >> 32), &rx_dma->fbr0_base_hi);
	writel((uint32_t) pRxLocal->Fbr0Realpa, &rx_dma->fbr0_base_lo);
	writel(pRxLocal->Fbr0NumEntries - 1, &rx_dma->fbr0_num_des.value);
	writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);

	/* This variable tracks the free buffer ring 0 full position, so it
	 * has to match the above.
	 */
	pRxLocal->local_Fbr0_full = ET_DMA10_WRAP;
	writel(((pRxLocal->Fbr0NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr0_min_des.value);
#endif
	/* Program the number of packets we will receive before generating an
	 * interrupt.
	 *
	 * For version B silicon, this value gets updated once autoneg is
	 * complete.
	 */
	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done.value);

	/* The "time_done" is not working correctly to coalesce interrupts
	 * after a given time period, but rather is giving us an interrupt
	 * regardless of whether we have received packets.
	 * This value gets updated once autoneg is complete.
	 */
	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time.value);

	spin_unlock_irqrestore(&etdev->RcvLock, flags);

	DBG_LEAVE(et131x_dbginfo);
}
/**
 * SetRxDmaTimer - Set the heartbeat timer according to line rate.
 * @etdev: pointer to our adapter structure
 */
void SetRxDmaTimer(struct et131x_adapter *etdev)
{
	/* For version B silicon, we do not use the RxDMA timer for 10 and 100
	 * Mbits/s line rates. We do not enable any RxDMA interrupt coalescing.
	 */
	if ((etdev->linkspeed == TRUEPHY_SPEED_100MBPS) ||
	    (etdev->linkspeed == TRUEPHY_SPEED_10MBPS)) {
		writel(0, &etdev->regs->rxdma.max_pkt_time.value);
		writel(1, &etdev->regs->rxdma.num_pkt_done.value);
	}
}
/**
 * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
 * @etdev: pointer to our adapter structure
 */
void et131x_rx_dma_disable(struct et131x_adapter *etdev)
{
	RXDMA_CSR_t csr;

	DBG_ENTER(et131x_dbginfo);

	/* Setup the receive dma configuration register */
	writel(0x00002001, &etdev->regs->rxdma.csr.value);
	csr.value = readl(&etdev->regs->rxdma.csr.value);
	if (csr.bits.halt_status != 1) {
		udelay(5);
		csr.value = readl(&etdev->regs->rxdma.csr.value);
		if (csr.bits.halt_status != 1)
			DBG_ERROR(et131x_dbginfo,
				  "RX Dma failed to enter halt state. CSR 0x%08x\n",
				  csr.value);
	}

	DBG_LEAVE(et131x_dbginfo);
}
/**
 * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
 * @etdev: pointer to our adapter structure
 */
void et131x_rx_dma_enable(struct et131x_adapter *etdev)
{
	DBG_RX_ENTER(et131x_dbginfo);

	if (etdev->RegistryPhyLoopbk)
		/* RxDMA is disabled for loopback operation. */
		writel(0x1, &etdev->regs->rxdma.csr.value);
	else {
		/* Setup the receive dma configuration register for normal
		 * operation */
		RXDMA_CSR_t csr = { 0 };

		csr.bits.fbr1_enable = 1;
		if (etdev->RxRing.Fbr1BufferSize == 4096)
			csr.bits.fbr1_size = 1;
		else if (etdev->RxRing.Fbr1BufferSize == 8192)
			csr.bits.fbr1_size = 2;
		else if (etdev->RxRing.Fbr1BufferSize == 16384)
			csr.bits.fbr1_size = 3;
#ifdef USE_FBR0
		csr.bits.fbr0_enable = 1;
		if (etdev->RxRing.Fbr0BufferSize == 256)
			csr.bits.fbr0_size = 1;
		else if (etdev->RxRing.Fbr0BufferSize == 512)
			csr.bits.fbr0_size = 2;
		else if (etdev->RxRing.Fbr0BufferSize == 1024)
			csr.bits.fbr0_size = 3;
#endif
		writel(csr.value, &etdev->regs->rxdma.csr.value);

		csr.value = readl(&etdev->regs->rxdma.csr.value);
		if (csr.bits.halt_status != 0) {
			udelay(5);
			csr.value = readl(&etdev->regs->rxdma.csr.value);
			if (csr.bits.halt_status != 0) {
				DBG_ERROR(et131x_dbginfo,
					  "RX Dma failed to exit halt state. CSR 0x%08x\n",
					  csr.value);
			}
		}
	}

	DBG_RX_LEAVE(et131x_dbginfo);
}
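
/* For reference, the CSR size encodings used in et131x_rx_dma_enable() above
 * work out to the table below. The meaning of encoding 0 (the value left in
 * place when no branch matches) is an assumption based on the smallest ring
 * sizes this driver configures, not taken from a hardware datasheet:
 *
 *   fbr1_size: 0 = 2048 bytes (default), 1 = 4096, 2 = 8192, 3 = 16384
 *   fbr0_size: 0 = 128 bytes (assumed),  1 = 256,  2 = 512,  3 = 1024
 */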
/**
 * nic_rx_pkts - Checks the hardware for available packets
 * @etdev: pointer to our adapter
 *
 * Returns pMpRfd, a pointer to our MPRFD.
 *
 * Checks the hardware for available packets, using the completion ring.
 * If packets are available, it gets an RFD from the RecvList, attaches
 * the packet to it, puts the RFD in the RecvPendList, and also returns
 * the pointer to the RFD.
 */
PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
{
	struct _rx_ring_t *pRxLocal = &etdev->RxRing;
	PRX_STATUS_BLOCK_t pRxStatusBlock;
	PPKT_STAT_DESC_t pPSREntry;
	PMP_RFD pMpRfd;
	uint32_t nIndex;
	uint8_t *pBufVa;
	unsigned long flags;
	struct list_head *element;
	uint8_t ringIndex;
	uint16_t bufferIndex;
	uint32_t localLen;
	PKT_STAT_DESC_WORD0_t Word0;

	DBG_RX_ENTER(et131x_dbginfo);
	/* RX Status block is written by the DMA engine prior to every
	 * interrupt. It contains the next to be used entry in the Packet
	 * Status Ring, and also the two Free Buffer rings.
	 */
	pRxStatusBlock = (PRX_STATUS_BLOCK_t) pRxLocal->pRxStatusVa;

	if (pRxStatusBlock->Word1.bits.PSRoffset ==
	    pRxLocal->local_psr_full.bits.psr_full &&
	    pRxStatusBlock->Word1.bits.PSRwrap ==
	    pRxLocal->local_psr_full.bits.psr_full_wrap) {
		/* Looks like this ring is not updated yet */
		DBG_RX(et131x_dbginfo, "(0)\n");
		DBG_RX_LEAVE(et131x_dbginfo);
		return NULL;
	}
	/* The packet status ring indicates that data is available. */
	pPSREntry = (PPKT_STAT_DESC_t) (pRxLocal->pPSRingVa) +
	    pRxLocal->local_psr_full.bits.psr_full;

	/* Grab any information that is required once the PSR is
	 * advanced, since we can no longer rely on the memory being
	 * accurate
	 */
	localLen = pPSREntry->word1.bits.length;
	ringIndex = (uint8_t) pPSREntry->word1.bits.ri;
	bufferIndex = (uint16_t) pPSREntry->word1.bits.bi;
	Word0 = pPSREntry->word0;

	DBG_RX(et131x_dbginfo, "RX PACKET STATUS\n");
	DBG_RX(et131x_dbginfo, "\tlength      : %d\n", localLen);
	DBG_RX(et131x_dbginfo, "\tringIndex   : %d\n", ringIndex);
	DBG_RX(et131x_dbginfo, "\tbufferIndex : %d\n", bufferIndex);
	DBG_RX(et131x_dbginfo, "\tword0       : 0x%08x\n", Word0.value);

#if 0
	/* Check the Status Word that the MAC has appended to the PSR
	 * entry in case the MAC has detected errors.
	 */
	if (Word0.value & ALCATEL_BAD_STATUS) {
		DBG_ERROR(et131x_dbginfo,
			  "NICRxPkts >> Alcatel Status Word error."
			  "Value 0x%08x\n", pPSREntry->word0.value);
	}
#endif
	/* Indicate that we have used this PSR entry. */
	if (++pRxLocal->local_psr_full.bits.psr_full >
	    pRxLocal->PsrNumEntries - 1) {
		pRxLocal->local_psr_full.bits.psr_full = 0;
		pRxLocal->local_psr_full.bits.psr_full_wrap ^= 1;
	}

	writel(pRxLocal->local_psr_full.value,
	       &etdev->regs->rxdma.psr_full_offset.value);
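
	/* A worked example of the (index, wrap) scheme: with PsrNumEntries of
	 * 1024, consuming entry 1023 resets psr_full to 0 and flips
	 * psr_full_wrap. Since the hardware reports its own (offset, wrap)
	 * pair in the status block, "equal index, equal wrap" means the ring
	 * is empty (the early-return check at the top of this function),
	 * while an equal index with opposite wrap would mean a completely
	 * full ring. This reading is inferred from the checks in this file,
	 * not from a hardware spec.
	 */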
#ifndef USE_FBR0
	if (ringIndex != 1) {
		DBG_ERROR(et131x_dbginfo,
			  "NICRxPkts PSR Entry %d indicates "
			  "Buffer Ring 0 in use\n",
			  pRxLocal->local_psr_full.bits.psr_full);
		DBG_RX_LEAVE(et131x_dbginfo);
		return NULL;
	}
#endif

#ifdef USE_FBR0
	if (ringIndex > 1 ||
	    (ringIndex == 0 &&
	     bufferIndex > pRxLocal->Fbr0NumEntries - 1) ||
	    (ringIndex == 1 &&
	     bufferIndex > pRxLocal->Fbr1NumEntries - 1))
#else
	if (ringIndex != 1 ||
	    bufferIndex > pRxLocal->Fbr1NumEntries - 1)
#endif
	{
		/* Illegal buffer or ring index cannot be used by S/W */
		DBG_ERROR(et131x_dbginfo,
			  "NICRxPkts PSR Entry %d indicates "
			  "length of %d and/or bad bi(%d)\n",
			  pRxLocal->local_psr_full.bits.psr_full,
			  localLen, bufferIndex);
		DBG_RX_LEAVE(et131x_dbginfo);
		return NULL;
	}
	/* Get and fill the RFD. */
	spin_lock_irqsave(&etdev->RcvLock, flags);

	pMpRfd = NULL;
	element = pRxLocal->RecvList.next;
	pMpRfd = (PMP_RFD) list_entry(element, MP_RFD, list_node);

	if (pMpRfd == NULL) {
		DBG_RX(et131x_dbginfo,
		       "NULL RFD returned from RecvList via list_entry()\n");
		DBG_RX_LEAVE(et131x_dbginfo);
		spin_unlock_irqrestore(&etdev->RcvLock, flags);
		return NULL;
	}

	list_del(&pMpRfd->list_node);
	pRxLocal->nReadyRecv--;

	spin_unlock_irqrestore(&etdev->RcvLock, flags);

	pMpRfd->bufferindex = bufferIndex;
	pMpRfd->ringindex = ringIndex;
	/* In V1 silicon, there is a bug which screws up filtering of
	 * runt packets. Therefore runt packet filtering is disabled
	 * in the MAC and the packets are dropped here. They are
	 * also counted here.
	 */
	if (localLen < (NIC_MIN_PACKET_SIZE + 4)) {
		etdev->Stats.other_errors++;
		localLen = 0;
	}
	if (localLen) {
		if (etdev->ReplicaPhyLoopbk == 1) {
			pBufVa = pRxLocal->Fbr[ringIndex]->Va[bufferIndex];

			if (memcmp(&pBufVa[6], &etdev->CurrentAddress[0],
				   ETH_ALEN) == 0) {
				if (memcmp(&pBufVa[42], "Replica packet",
					   ETH_HLEN)) {
					etdev->ReplicaPhyLoopbkPF = 1;
				}
			}

			DBG_WARNING(et131x_dbginfo,
				    "pBufVa:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
				    pBufVa[6], pBufVa[7], pBufVa[8],
				    pBufVa[9], pBufVa[10], pBufVa[11]);

			DBG_WARNING(et131x_dbginfo,
				    "CurrentAddr:\t%02x:%02x:%02x:%02x:%02x:%02x\n",
				    etdev->CurrentAddress[0],
				    etdev->CurrentAddress[1],
				    etdev->CurrentAddress[2],
				    etdev->CurrentAddress[3],
				    etdev->CurrentAddress[4],
				    etdev->CurrentAddress[5]);
		}

		/* Determine if this is a multicast packet coming in */
		if ((Word0.value & ALCATEL_MULTICAST_PKT) &&
		    !(Word0.value & ALCATEL_BROADCAST_PKT)) {
			/* Promiscuous mode and Multicast mode are
			 * not mutually exclusive as was first
			 * thought. I guess Promiscuous is just
			 * considered a super-set of the other
			 * filters. Generally filter is 0x2b when in
			 * promiscuous mode.
			 */
			if ((etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST)
			    && !(etdev->PacketFilter & ET131X_PACKET_TYPE_PROMISCUOUS)
			    && !(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
				pBufVa = pRxLocal->Fbr[ringIndex]->
				    Va[bufferIndex];

				/* Loop through our list to see if the
				 * destination address of this packet
				 * matches one in our list.
				 */
				for (nIndex = 0;
				     nIndex < etdev->MCAddressCount;
				     nIndex++) {
					if (pBufVa[0] ==
					    etdev->MCList[nIndex][0]
					    && pBufVa[1] ==
					    etdev->MCList[nIndex][1]
					    && pBufVa[2] ==
					    etdev->MCList[nIndex][2]
					    && pBufVa[3] ==
					    etdev->MCList[nIndex][3]
					    && pBufVa[4] ==
					    etdev->MCList[nIndex][4]
					    && pBufVa[5] ==
					    etdev->MCList[nIndex][5]) {
						break;
					}
				}

				/* If our index is equal to the number
				 * of Multicast addresses we have, then
				 * this means we did not find this
				 * packet's matching address in our
				 * list. Set the PacketSize to zero,
				 * so we free our RFD when we return
				 * from this function.
				 */
				if (nIndex == etdev->MCAddressCount)
					localLen = 0;
			}

			if (localLen > 0)
				etdev->Stats.multircv++;
		} else if (Word0.value & ALCATEL_BROADCAST_PKT)
			etdev->Stats.brdcstrcv++;
		else
			/* Not sure what this counter measures in
			 * promiscuous mode. Perhaps we should check
			 * the MAC address to see if it is directed
			 * to us in promiscuous mode.
			 */
			etdev->Stats.unircv++;
	}
	if (localLen > 0) {
		struct sk_buff *skb = NULL;

		/* pMpRfd->PacketSize = localLen - 4; */
		pMpRfd->PacketSize = localLen;

		skb = dev_alloc_skb(pMpRfd->PacketSize + 2);
		if (!skb) {
			DBG_ERROR(et131x_dbginfo,
				  "Couldn't alloc an SKB for Rx\n");
			DBG_RX_LEAVE(et131x_dbginfo);
			return NULL;
		}

		etdev->net_stats.rx_bytes += pMpRfd->PacketSize;

		memcpy(skb_put(skb, pMpRfd->PacketSize),
		       pRxLocal->Fbr[ringIndex]->Va[bufferIndex],
		       pMpRfd->PacketSize);

		skb->dev = etdev->netdev;
		skb->protocol = eth_type_trans(skb, etdev->netdev);
		skb->ip_summed = CHECKSUM_NONE;

		netif_rx(skb);
	} else {
		pMpRfd->PacketSize = 0;
	}

	nic_return_rfd(etdev, pMpRfd);

	DBG_RX(et131x_dbginfo, "(1)\n");
	DBG_RX_LEAVE(et131x_dbginfo);
	return pMpRfd;
}
/**
 * et131x_reset_recv - Reset the receive list
 * @etdev: pointer to our adapter
 *
 * Assumption, Rcv spinlock has been acquired.
 */
void et131x_reset_recv(struct et131x_adapter *etdev)
{
	PMP_RFD pMpRfd;
	struct list_head *element;

	DBG_ENTER(et131x_dbginfo);

	DBG_ASSERT(!list_empty(&etdev->RxRing.RecvList));

	/* Take all the RFD's from the pending list, and stick them on the
	 * RecvList.
	 */
	while (!list_empty(&etdev->RxRing.RecvPendingList)) {
		element = etdev->RxRing.RecvPendingList.next;

		pMpRfd = (PMP_RFD) list_entry(element, MP_RFD, list_node);

		list_move_tail(&pMpRfd->list_node, &etdev->RxRing.RecvList);
	}

	DBG_LEAVE(et131x_dbginfo);
}
/**
 * et131x_handle_recv_interrupt - Interrupt handler for receive processing
 * @etdev: pointer to our adapter
 *
 * Assumption, Rcv spinlock has been acquired.
 */
void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
{
	PMP_RFD pMpRfd = NULL;
	struct sk_buff *PacketArray[NUM_PACKETS_HANDLED];
	PMP_RFD RFDFreeArray[NUM_PACKETS_HANDLED];
	uint32_t PacketArrayCount = 0;
	uint32_t PacketsToHandle;
	uint32_t PacketFreeCount = 0;
	bool TempUnfinishedRec = false;

	DBG_RX_ENTER(et131x_dbginfo);

	PacketsToHandle = NUM_PACKETS_HANDLED;

	/* Process up to available RFD's */
	while (PacketArrayCount < PacketsToHandle) {
		if (list_empty(&etdev->RxRing.RecvList)) {
			DBG_ASSERT(etdev->RxRing.nReadyRecv == 0);
			DBG_ERROR(et131x_dbginfo, "NO RFD's !!!!!!!!!!!!!\n");
			TempUnfinishedRec = true;
			break;
		}

		pMpRfd = nic_rx_pkts(etdev);

		if (pMpRfd == NULL)
			break;

		/* Do not receive any packets until a filter has been set.
		 * Do not receive any packets until we have link.
		 * If length is zero, return the RFD in order to advance the
		 * Free buffer ring.
		 */
		if ((!etdev->PacketFilter) ||
		    (!MP_LINK_DETECTED(etdev)) ||
		    (pMpRfd->PacketSize == 0)) {
			continue;
		}

		/* Increment the number of packets we received */
		etdev->Stats.ipackets++;

		/* Set the status on the packet, either resources or success */
		if (etdev->RxRing.nReadyRecv >= RFD_LOW_WATER_MARK) {
			/* Put this RFD on the pending list
			 *
			 * NOTE: nic_rx_pkts() above is already returning the
			 * RFD to the RecvList, so don't additionally do that
			 * here.
			 * Besides, we don't really need (at this point) the
			 * pending list anyway.
			 */
		} else {
			RFDFreeArray[PacketFreeCount] = pMpRfd;
			PacketFreeCount++;

			DBG_WARNING(et131x_dbginfo,
				    "RFD's are running out !!!!!!!!!!!!!\n");
		}

		PacketArray[PacketArrayCount] = pMpRfd->Packet;
		PacketArrayCount++;
	}

	if ((PacketArrayCount == NUM_PACKETS_HANDLED) || TempUnfinishedRec) {
		etdev->RxRing.UnfinishedReceives = true;
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &etdev->regs->global.watchdog_timer);
	} else {
		/* Watchdog timer will disable itself if appropriate. */
		etdev->RxRing.UnfinishedReceives = false;
	}

	DBG_RX_LEAVE(et131x_dbginfo);
}
static inline u32 bump_fbr(u32 *fbr, u32 limit)
{
	u32 v = *fbr;

	add_10bit(&v, 1);
	if (v > limit)
		v = (*fbr & ~ET_DMA10_MASK) ^ ET_DMA10_WRAP;
	*fbr = v;
	return v;
}
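
/* A compiled-out sketch of the DMA10 value layout that bump_fbr()
 * manipulates, assuming the definitions from the DMA10/DMA4 type cleanup
 * (ET_DMA10_MASK = 0x3FF for the 10-bit index, ET_DMA10_WRAP = 0x400 for the
 * wrap bit, add_10bit() advancing the index while preserving the wrap bit);
 * consult et1310_address_map.h rather than this comment:
 */
#if 0
static void dma10_layout_sketch(void)
{
	u32 pos = 0;

	add_10bit(&pos, 1);	/* index 0 -> 1; the wrap bit is preserved */

	/* Resetting the index and toggling the wrap bit, as bump_fbr() does
	 * when the index passes its limit, lets the hardware distinguish a
	 * completely full ring from an empty one when the software and
	 * hardware positions carry the same index.
	 */
	pos = (pos & ~ET_DMA10_MASK) ^ ET_DMA10_WRAP;
}
#endif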
/**
 * nic_return_rfd - Recycle a RFD and put it back onto the receive list
 * @etdev: pointer to our adapter
 * @pMpRfd: pointer to the RFD
 */
void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd)
{
	struct _rx_ring_t *rx_local = &etdev->RxRing;
	struct _RXDMA_t __iomem *rx_dma = &etdev->regs->rxdma;
	uint16_t bi = pMpRfd->bufferindex;
	uint8_t ri = pMpRfd->ringindex;
	unsigned long flags;

	DBG_RX_ENTER(et131x_dbginfo);
	/* We don't use any of the OOB data besides status. Otherwise, we
	 * need to clean up OOB data
	 */
	if (
#ifdef USE_FBR0
	    (ri == 0 && bi < rx_local->Fbr0NumEntries) ||
#endif
	    (ri == 1 && bi < rx_local->Fbr1NumEntries)) {
		spin_lock_irqsave(&etdev->FbrLock, flags);

		if (ri == 1) {
			PFBR_DESC_t pNextDesc =
			    (PFBR_DESC_t) (rx_local->pFbr1RingVa) +
			    INDEX10(rx_local->local_Fbr1_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			pNextDesc->addr_hi = rx_local->Fbr[1]->PAHigh[bi];
			pNextDesc->addr_lo = rx_local->Fbr[1]->PALow[bi];
			pNextDesc->word2.value = bi;

			writel(bump_fbr(&rx_local->local_Fbr1_full,
					rx_local->Fbr1NumEntries - 1),
			       &rx_dma->fbr1_full_offset);
		}
#ifdef USE_FBR0
		else {
			PFBR_DESC_t pNextDesc =
			    (PFBR_DESC_t) rx_local->pFbr0RingVa +
			    INDEX10(rx_local->local_Fbr0_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			pNextDesc->addr_hi = rx_local->Fbr[0]->PAHigh[bi];
			pNextDesc->addr_lo = rx_local->Fbr[0]->PALow[bi];
			pNextDesc->word2.value = bi;

			writel(bump_fbr(&rx_local->local_Fbr0_full,
					rx_local->Fbr0NumEntries - 1),
			       &rx_dma->fbr0_full_offset);
		}
#endif
		spin_unlock_irqrestore(&etdev->FbrLock, flags);
	} else {
		DBG_ERROR(et131x_dbginfo,
			  "NICReturnRFD illegal Buffer Index returned\n");
	}
	/* The processing on this RFD is done, so put it back on the tail of
	 * our list
	 */
	spin_lock_irqsave(&etdev->RcvLock, flags);
	list_add_tail(&pMpRfd->list_node, &rx_local->RecvList);
	rx_local->nReadyRecv++;
	spin_unlock_irqrestore(&etdev->RcvLock, flags);

	DBG_ASSERT(rx_local->nReadyRecv <= rx_local->NumRfd);
	DBG_RX_LEAVE(et131x_dbginfo);
}