staging: et131x: Fix stats->rx_packets accounting
drivers/staging/et131x/et1310_rx.c
/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 *------------------------------------------------------------------------------
 *
 * et1310_rx.c - Routines used to perform data reception
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following Disclaimer as comments in the code as
 *    well as in the documentation and/or other materials provided with the
 *    distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following Disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include "et131x_version.h"
#include "et131x_defs.h"

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>

#include "et1310_phy.h"
#include "et131x_adapter.h"
#include "et1310_rx.h"
#include "et131x.h"
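/* Free buffer ring offsets are kept in a 10-bit hardware format: the ring
 * index lives in bits 9:0 (ET_DMA10_MASK) and a wrap bit sits above it
 * (ET_DMA10_WRAP), toggling on every pass so the hardware can tell a full
 * ring from an empty one. For example, with limit = 3 the sequence produced
 * by bump_fbr() below is 0, 1, 2, 3, WRAP|0, WRAP|1, ... (format inferred
 * from the mask arithmetic in bump_fbr(); see the headers for the actual
 * constant values).
 */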
static inline u32 bump_fbr(u32 *fbr, u32 limit)
{
	u32 v = *fbr;

	v++;
	/* This works for all cases where limit < 1024. The 1023 case
	 * works because 1023++ is 1024 which means the if condition is
	 * not taken but the carry of the bit into the wrap bit toggles
	 * the wrap value correctly
	 */
	if ((v & ET_DMA10_MASK) > limit) {
		v &= ~ET_DMA10_MASK;
		v ^= ET_DMA10_WRAP;
	}
	/* For the 1023 case */
	v &= (ET_DMA10_MASK | ET_DMA10_WRAP);
	*fbr = v;
	return v;
}
/**
 * et131x_rx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h)
 *
 * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
 * and the Packet Status Ring.
 */
int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	u32 i, j;
	u32 bufsize;
	u32 pktStatRingSize, FBRChunkSize;
	struct rx_ring *rx_ring;

	/* Setup some convenience pointers */
	rx_ring = &adapter->rx_ring;

	/* Alloc memory for the lookup table */
#ifdef USE_FBR0
	rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
#endif
	rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
	/* The first thing we will do is configure the sizes of the buffer
	 * rings. These will change based on jumbo packet support. Larger
	 * jumbo packets increase the size of each entry in FBR0, and the
	 * number of entries in FBR0, while at the same time decreasing the
	 * number of entries in FBR1.
	 *
	 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
	 * entries are huge in order to accommodate a "jumbo" frame, then it
	 * will have fewer entries. Conversely, FBR0 will now be relied upon
	 * to carry more "normal" frames, thus its entry size also increases
	 * and the number of entries goes up too (since it now carries
	 * "small" + "regular" packets).
	 *
	 * In this scheme, we try to maintain 512 entries between the two
	 * rings. Also, FBR1 remains a constant total size - when its entry
	 * size doubles, the number of entries halves. FBR0 increases in
	 * size, however.
	 */
	if (adapter->RegistryJumboPacket < 2048) {
#ifdef USE_FBR0
		rx_ring->Fbr0BufferSize = 256;
		rx_ring->Fbr0NumEntries = 512;
#endif
		rx_ring->Fbr1BufferSize = 2048;
		rx_ring->Fbr1NumEntries = 512;
	} else if (adapter->RegistryJumboPacket < 4096) {
#ifdef USE_FBR0
		rx_ring->Fbr0BufferSize = 512;
		rx_ring->Fbr0NumEntries = 1024;
#endif
		rx_ring->Fbr1BufferSize = 4096;
		rx_ring->Fbr1NumEntries = 512;
	} else {
#ifdef USE_FBR0
		rx_ring->Fbr0BufferSize = 1024;
		rx_ring->Fbr0NumEntries = 768;
#endif
		rx_ring->Fbr1BufferSize = 16384;
		rx_ring->Fbr1NumEntries = 128;
	}

#ifdef USE_FBR0
	adapter->rx_ring.PsrNumEntries = adapter->rx_ring.Fbr0NumEntries +
	    adapter->rx_ring.Fbr1NumEntries;
#else
	adapter->rx_ring.PsrNumEntries = adapter->rx_ring.Fbr1NumEntries;
#endif
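	/* One packet status entry is consumed per received buffer, so the
	 * PSR is sized to the total number of free buffer entries above
	 * (rationale inferred from the sizing; not stated in the original).
	 */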
	/* Allocate an area of memory for Free Buffer Ring 1 */
	bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries) + 0xfff;
	rx_ring->pFbr1RingVa = pci_alloc_consistent(adapter->pdev,
						    bufsize,
						    &rx_ring->pFbr1RingPa);
	if (!rx_ring->pFbr1RingVa) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Free Buffer Ring 1\n");
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here
	 * before storing the adjusted address.
	 */
	rx_ring->Fbr1Realpa = rx_ring->pFbr1RingPa;

	/* Align Free Buffer Ring 1 on a 4K boundary */
	et131x_align_allocated_memory(adapter,
				      &rx_ring->Fbr1Realpa,
				      &rx_ring->Fbr1offset, 0x0FFF);

	rx_ring->pFbr1RingVa = (void *)((u8 *) rx_ring->pFbr1RingVa +
					rx_ring->Fbr1offset);
#ifdef USE_FBR0
	/* Allocate an area of memory for Free Buffer Ring 0 */
	bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries) + 0xfff;
	rx_ring->pFbr0RingVa = pci_alloc_consistent(adapter->pdev,
						    bufsize,
						    &rx_ring->pFbr0RingPa);
	if (!rx_ring->pFbr0RingVa) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Free Buffer Ring 0\n");
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	rx_ring->Fbr0Realpa = rx_ring->pFbr0RingPa;

	/* Align Free Buffer Ring 0 on a 4K boundary */
	et131x_align_allocated_memory(adapter,
				      &rx_ring->Fbr0Realpa,
				      &rx_ring->Fbr0offset, 0x0FFF);

	rx_ring->pFbr0RingVa = (void *)((u8 *) rx_ring->pFbr0RingVa +
					rx_ring->Fbr0offset);
#endif
	for (i = 0; i < (rx_ring->Fbr1NumEntries / FBR_CHUNKS); i++) {
		u64 Fbr1Offset;
		u64 Fbr1TempPa;
		u32 Fbr1Align;

		/* This code allocates an area of memory big enough for N
		 * free buffers + (buffer_size - 1) so that the buffers can
		 * be aligned on 4k boundaries. If each buffer were aligned
		 * to a buffer_size boundary, the effect would be to double
		 * the size of FBR0. By allocating N buffers at once, we
		 * reduce this overhead.
		 */
		if (rx_ring->Fbr1BufferSize > 4096)
			Fbr1Align = 4096;
		else
			Fbr1Align = rx_ring->Fbr1BufferSize;

		FBRChunkSize =
		    (FBR_CHUNKS * rx_ring->Fbr1BufferSize) + Fbr1Align - 1;
		rx_ring->Fbr1MemVa[i] =
		    pci_alloc_consistent(adapter->pdev, FBRChunkSize,
					 &rx_ring->Fbr1MemPa[i]);

		if (!rx_ring->Fbr1MemVa[i]) {
			dev_err(&adapter->pdev->dev,
				"Could not alloc memory\n");
			return -ENOMEM;
		}

		/* See NOTE in "Save Physical Address" comment above */
		Fbr1TempPa = rx_ring->Fbr1MemPa[i];

		et131x_align_allocated_memory(adapter,
					      &Fbr1TempPa,
					      &Fbr1Offset, (Fbr1Align - 1));

		for (j = 0; j < FBR_CHUNKS; j++) {
			u32 index = (i * FBR_CHUNKS) + j;

			/* Save the Virtual address of this index for quick
			 * access later
			 */
			rx_ring->fbr[1]->virt[index] =
			    (u8 *) rx_ring->Fbr1MemVa[i] +
			    (j * rx_ring->Fbr1BufferSize) + Fbr1Offset;

			/* now store the physical address in the descriptor
			 * so the device can access it
			 */
			rx_ring->fbr[1]->bus_high[index] =
			    (u32) (Fbr1TempPa >> 32);
			rx_ring->fbr[1]->bus_low[index] = (u32) Fbr1TempPa;

			Fbr1TempPa += rx_ring->Fbr1BufferSize;

			rx_ring->fbr[1]->buffer1[index] =
			    rx_ring->fbr[1]->virt[index];
			rx_ring->fbr[1]->buffer2[index] =
			    rx_ring->fbr[1]->virt[index] - 4;
		}
	}
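	/* At this point fbr[1] maps every FBR1 buffer index straight to its
	 * kernel virtual address and split physical address, which is what
	 * nic_rx_pkts() and nic_return_rfd() rely on for O(1) lookups.
	 */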
#ifdef USE_FBR0
	/* Same for FBR0 (if in use) */
	for (i = 0; i < (rx_ring->Fbr0NumEntries / FBR_CHUNKS); i++) {
		u64 Fbr0Offset;
		u64 Fbr0TempPa;

		FBRChunkSize = ((FBR_CHUNKS + 1) * rx_ring->Fbr0BufferSize) - 1;
		rx_ring->Fbr0MemVa[i] =
		    pci_alloc_consistent(adapter->pdev, FBRChunkSize,
					 &rx_ring->Fbr0MemPa[i]);

		if (!rx_ring->Fbr0MemVa[i]) {
			dev_err(&adapter->pdev->dev,
				"Could not alloc memory\n");
			return -ENOMEM;
		}

		/* See NOTE in "Save Physical Address" comment above */
		Fbr0TempPa = rx_ring->Fbr0MemPa[i];

		et131x_align_allocated_memory(adapter,
					      &Fbr0TempPa,
					      &Fbr0Offset,
					      rx_ring->Fbr0BufferSize - 1);

		for (j = 0; j < FBR_CHUNKS; j++) {
			u32 index = (i * FBR_CHUNKS) + j;

			rx_ring->fbr[0]->virt[index] =
			    (u8 *) rx_ring->Fbr0MemVa[i] +
			    (j * rx_ring->Fbr0BufferSize) + Fbr0Offset;

			rx_ring->fbr[0]->bus_high[index] =
			    (u32) (Fbr0TempPa >> 32);
			rx_ring->fbr[0]->bus_low[index] = (u32) Fbr0TempPa;

			Fbr0TempPa += rx_ring->Fbr0BufferSize;

			rx_ring->fbr[0]->buffer1[index] =
			    rx_ring->fbr[0]->virt[index];
			rx_ring->fbr[0]->buffer2[index] =
			    rx_ring->fbr[0]->virt[index] - 4;
		}
	}
#endif
	/* Allocate an area of memory for FIFO of Packet Status ring entries */
	pktStatRingSize =
	    sizeof(struct pkt_stat_desc) * adapter->rx_ring.PsrNumEntries;

	rx_ring->pPSRingVa = pci_alloc_consistent(adapter->pdev,
						  pktStatRingSize,
						  &rx_ring->pPSRingPa);

	if (!rx_ring->pPSRingVa) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Packet Status Ring\n");
		return -ENOMEM;
	}
	printk(KERN_INFO "PSR %lx\n", (unsigned long) rx_ring->pPSRingPa);

	/*
	 * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	/* Allocate an area of memory for writeback of status information */
	rx_ring->rx_status_block = pci_alloc_consistent(adapter->pdev,
					sizeof(struct rx_status_block),
					&rx_ring->rx_status_bus);
	if (!rx_ring->rx_status_block) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Status Block\n");
		return -ENOMEM;
	}
	rx_ring->NumRfd = NIC_DEFAULT_NUM_RFD;
	printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus);
	/* Recv
	 * kmem_cache_create initializes a lookaside list. After successful
	 * creation, nonpaged fixed-size blocks can be allocated from and
	 * freed to the lookaside list.
	 * RFDs will be allocated from this pool.
	 */
	rx_ring->RecvLookaside = kmem_cache_create(adapter->netdev->name,
						   sizeof(struct rfd),
						   0,
						   SLAB_CACHE_DMA |
						   SLAB_HWCACHE_ALIGN,
						   NULL);

	adapter->Flags |= fMP_ADAPTER_RECV_LOOKASIDE;

	/* The RFDs are going to be put on lists later on, so initialize the
	 * lists now.
	 */
	INIT_LIST_HEAD(&rx_ring->RecvList);
	return 0;
}
/**
 * et131x_rx_dma_memory_free - Free all memory allocated within this module.
 * @adapter: pointer to our private adapter structure
 */
void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
	u32 index;
	u32 bufsize;
	u32 pktStatRingSize;
	struct rfd *rfd;
	struct rx_ring *rx_ring;

	/* Setup some convenience pointers */
	rx_ring = &adapter->rx_ring;

	/* Free RFDs and associated packet descriptors */
	WARN_ON(rx_ring->nReadyRecv != rx_ring->NumRfd);

	while (!list_empty(&rx_ring->RecvList)) {
		rfd = (struct rfd *) list_entry(rx_ring->RecvList.next,
						struct rfd, list_node);

		list_del(&rfd->list_node);
		rfd->skb = NULL;
		kmem_cache_free(adapter->rx_ring.RecvLookaside, rfd);
	}
	/* Free Free Buffer Ring 1 */
	if (rx_ring->pFbr1RingVa) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->Fbr1NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr1MemVa[index]) {
				u32 Fbr1Align;

				if (rx_ring->Fbr1BufferSize > 4096)
					Fbr1Align = 4096;
				else
					Fbr1Align = rx_ring->Fbr1BufferSize;

				bufsize =
				    (rx_ring->Fbr1BufferSize * FBR_CHUNKS) +
				    Fbr1Align - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr1MemVa[index],
						    rx_ring->Fbr1MemPa[index]);

				rx_ring->Fbr1MemVa[index] = NULL;
			}
		}

		/* Now the FIFO itself */
		rx_ring->pFbr1RingVa = (void *)((u8 *)
			rx_ring->pFbr1RingVa - rx_ring->Fbr1offset);

		bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr1NumEntries)
								+ 0xfff;

		pci_free_consistent(adapter->pdev, bufsize,
				    rx_ring->pFbr1RingVa, rx_ring->pFbr1RingPa);

		rx_ring->pFbr1RingVa = NULL;
	}
#ifdef USE_FBR0
	/* Now the same for Free Buffer Ring 0 */
	if (rx_ring->pFbr0RingVa) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->Fbr0NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr0MemVa[index]) {
				bufsize =
				    (rx_ring->Fbr0BufferSize *
				     (FBR_CHUNKS + 1)) - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr0MemVa[index],
						    rx_ring->Fbr0MemPa[index]);

				rx_ring->Fbr0MemVa[index] = NULL;
			}
		}

		/* Now the FIFO itself */
		rx_ring->pFbr0RingVa = (void *)((u8 *)
			rx_ring->pFbr0RingVa - rx_ring->Fbr0offset);

		bufsize = (sizeof(struct fbr_desc) * rx_ring->Fbr0NumEntries)
								+ 0xfff;

		pci_free_consistent(adapter->pdev,
				    bufsize,
				    rx_ring->pFbr0RingVa, rx_ring->pFbr0RingPa);

		rx_ring->pFbr0RingVa = NULL;
	}
#endif
	/* Free Packet Status Ring */
	if (rx_ring->pPSRingVa) {
		pktStatRingSize =
		    sizeof(struct pkt_stat_desc) *
		    adapter->rx_ring.PsrNumEntries;

		pci_free_consistent(adapter->pdev, pktStatRingSize,
				    rx_ring->pPSRingVa, rx_ring->pPSRingPa);

		rx_ring->pPSRingVa = NULL;
	}

	/* Free area of memory for the writeback of status information */
	if (rx_ring->rx_status_block) {
		pci_free_consistent(adapter->pdev,
			sizeof(struct rx_status_block),
			rx_ring->rx_status_block, rx_ring->rx_status_bus);
		rx_ring->rx_status_block = NULL;
	}

	/* Free receive buffer pool */

	/* Free receive packet pool */

	/* Destroy the lookaside (RFD) pool */
	if (adapter->Flags & fMP_ADAPTER_RECV_LOOKASIDE) {
		kmem_cache_destroy(rx_ring->RecvLookaside);
		adapter->Flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
	}

	/* Free the FBR Lookup Table */
#ifdef USE_FBR0
	kfree(rx_ring->fbr[0]);
#endif
	kfree(rx_ring->fbr[1]);

	/* Reset Counters */
	rx_ring->nReadyRecv = 0;
}
/**
 * et131x_init_recv - Initialize receive data structures.
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h)
 */
int et131x_init_recv(struct et131x_adapter *adapter)
{
	int status = -ENOMEM;
	struct rfd *rfd = NULL;
	u32 rfdct;
	u32 numrfd = 0;
	struct rx_ring *rx_ring;

	/* Setup some convenience pointers */
	rx_ring = &adapter->rx_ring;

	/* Setup each RFD */
	for (rfdct = 0; rfdct < rx_ring->NumRfd; rfdct++) {
		rfd = kmem_cache_alloc(rx_ring->RecvLookaside,
				       GFP_ATOMIC | GFP_DMA);

		if (!rfd) {
			dev_err(&adapter->pdev->dev,
				"Couldn't alloc RFD out of kmem_cache\n");
			status = -ENOMEM;
			continue;
		}

		rfd->skb = NULL;

		/* Add this RFD to the RecvList */
		list_add_tail(&rfd->list_node, &rx_ring->RecvList);

		/* Increment both the number of ready RFDs and the total
		 * number of RFDs.
		 */
		rx_ring->nReadyRecv++;
		numrfd++;
	}

	if (numrfd > NIC_MIN_NUM_RFD)
		status = 0;

	rx_ring->NumRfd = numrfd;

	if (status != 0) {
		kmem_cache_free(rx_ring->RecvLookaside, rfd);
		dev_err(&adapter->pdev->dev,
			"Allocation problems in et131x_init_recv\n");
	}
	return status;
}
/**
 * ConfigRxDmaRegs - Start of Rx_DMA init sequence
 * @etdev: pointer to our adapter structure
 */
void ConfigRxDmaRegs(struct et131x_adapter *etdev)
{
	struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma;
	struct rx_ring *rx_local = &etdev->rx_ring;
	struct fbr_desc *fbr_entry;
	u32 entry;
	u32 psr_num_des;
	unsigned long flags;

	/* Halt RXDMA to perform the reconfigure. */
	et131x_rx_dma_disable(etdev);

	/* Load the completion writeback physical address
	 *
	 * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here
	 * before storing the adjusted address.
	 */
	writel((u32) ((u64)rx_local->rx_status_bus >> 32),
	       &rx_dma->dma_wb_base_hi);
	writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);

	memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));

	/* Set the address and parameters of the packet status ring into the
	 * 1310's registers
	 */
	writel((u32) ((u64)rx_local->pPSRingPa >> 32),
	       &rx_dma->psr_base_hi);
	writel((u32) rx_local->pPSRingPa, &rx_dma->psr_base_lo);
	writel(rx_local->PsrNumEntries - 1, &rx_dma->psr_num_des);
	writel(0, &rx_dma->psr_full_offset);
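	/* Program the PSR low-water mark at LO_MARK_PERCENT_FOR_PSR percent
	 * of the ring; presumably the device uses it to signal when free PSR
	 * entries run low (the register is otherwise undocumented here).
	 */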
	psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des);

	spin_lock_irqsave(&etdev->rcv_lock, flags);

	/* These local variables track the PSR in the adapter structure */
	rx_local->local_psr_full = 0;

	/* Now's the best time to initialize FBR1 contents */
	fbr_entry = (struct fbr_desc *) rx_local->pFbr1RingVa;
	for (entry = 0; entry < rx_local->Fbr1NumEntries; entry++) {
		fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
		fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
		fbr_entry->word2 = entry;
		fbr_entry++;
	}

	/* Set the address and parameters of Free buffer ring 1 (and 0 if
	 * required) into the 1310's registers
	 */
	writel((u32) (rx_local->Fbr1Realpa >> 32), &rx_dma->fbr1_base_hi);
	writel((u32) rx_local->Fbr1Realpa, &rx_dma->fbr1_base_lo);
	writel(rx_local->Fbr1NumEntries - 1, &rx_dma->fbr1_num_des);
	writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);

	/* This variable tracks the free buffer ring 1 full position, so it
	 * has to match the above.
	 */
	rx_local->local_Fbr1_full = ET_DMA10_WRAP;
	writel(((rx_local->Fbr1NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr1_min_des);

#ifdef USE_FBR0
	/* Now's the best time to initialize FBR0 contents */
	fbr_entry = (struct fbr_desc *) rx_local->pFbr0RingVa;
	for (entry = 0; entry < rx_local->Fbr0NumEntries; entry++) {
		fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
		fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
		fbr_entry->word2 = entry;
		fbr_entry++;
	}

	writel((u32) (rx_local->Fbr0Realpa >> 32), &rx_dma->fbr0_base_hi);
	writel((u32) rx_local->Fbr0Realpa, &rx_dma->fbr0_base_lo);
	writel(rx_local->Fbr0NumEntries - 1, &rx_dma->fbr0_num_des);
	writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);

	/* This variable tracks the free buffer ring 0 full position, so it
	 * has to match the above.
	 */
	rx_local->local_Fbr0_full = ET_DMA10_WRAP;
	writel(((rx_local->Fbr0NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr0_min_des);
#endif

	/* Program the number of packets we will receive before generating an
	 * interrupt.
	 * For version B silicon, this value gets updated once autoneg is
	 * complete.
	 */
	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);

	/* The "time_done" is not working correctly to coalesce interrupts
	 * after a given time period, but rather is giving us an interrupt
	 * regardless of whether we have received packets.
	 * This value gets updated once autoneg is complete.
	 */
	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);

	spin_unlock_irqrestore(&etdev->rcv_lock, flags);
}
/**
 * SetRxDmaTimer - Set the heartbeat timer according to line rate.
 * @etdev: pointer to our adapter structure
 */
void SetRxDmaTimer(struct et131x_adapter *etdev)
{
	/* For version B silicon, we do not use the RxDMA timer for 10 and 100
	 * Mbits/s line rates, and we do not enable RxDMA interrupt coalescing.
	 */
	if ((etdev->linkspeed == TRUEPHY_SPEED_100MBPS) ||
	    (etdev->linkspeed == TRUEPHY_SPEED_10MBPS)) {
		writel(0, &etdev->regs->rxdma.max_pkt_time);
		writel(1, &etdev->regs->rxdma.num_pkt_done);
	}
}
/**
 * nic_return_rfd - Recycle a RFD and put it back onto the receive list
 * @etdev: pointer to our adapter
 * @rfd: pointer to the RFD
 */
void nic_return_rfd(struct et131x_adapter *etdev, struct rfd *rfd)
{
	struct rx_ring *rx_local = &etdev->rx_ring;
	struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma;
	u16 bi = rfd->bufferindex;
	u8 ri = rfd->ringindex;
	unsigned long flags;

	/* We don't use any of the OOB data besides status. Otherwise, we
	 * need to clean up OOB data
	 */
	if (
#ifdef USE_FBR0
	    (ri == 0 && bi < rx_local->Fbr0NumEntries) ||
#endif
	    (ri == 1 && bi < rx_local->Fbr1NumEntries)) {
		spin_lock_irqsave(&etdev->FbrLock, flags);

		if (ri == 1) {
			struct fbr_desc *next =
			    (struct fbr_desc *) (rx_local->pFbr1RingVa) +
			    INDEX10(rx_local->local_Fbr1_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			next->addr_hi = rx_local->fbr[1]->bus_high[bi];
			next->addr_lo = rx_local->fbr[1]->bus_low[bi];
			next->word2 = bi;

			writel(bump_fbr(&rx_local->local_Fbr1_full,
					rx_local->Fbr1NumEntries - 1),
			       &rx_dma->fbr1_full_offset);
		}
#ifdef USE_FBR0
		else {
			struct fbr_desc *next = (struct fbr_desc *)
			    rx_local->pFbr0RingVa +
			    INDEX10(rx_local->local_Fbr0_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			next->addr_hi = rx_local->fbr[0]->bus_high[bi];
			next->addr_lo = rx_local->fbr[0]->bus_low[bi];
			next->word2 = bi;

			writel(bump_fbr(&rx_local->local_Fbr0_full,
					rx_local->Fbr0NumEntries - 1),
			       &rx_dma->fbr0_full_offset);
		}
#endif
		spin_unlock_irqrestore(&etdev->FbrLock, flags);
	} else {
		dev_err(&etdev->pdev->dev,
			"NICReturnRFD illegal Buffer Index returned\n");
	}

	/* The processing on this RFD is done, so put it back on the tail of
	 * our list
	 */
	spin_lock_irqsave(&etdev->rcv_lock, flags);
	list_add_tail(&rfd->list_node, &rx_local->RecvList);
	rx_local->nReadyRecv++;
	spin_unlock_irqrestore(&etdev->rcv_lock, flags);

	WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
}
/**
 * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
 * @etdev: pointer to our adapter structure
 */
void et131x_rx_dma_disable(struct et131x_adapter *etdev)
{
	u32 csr;

	/* Setup the receive dma configuration register */
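	/* 0x00002001 = halt request (bit 0) with the FBR1-enable bit (0x2000)
	 * still set; bit meanings inferred from et131x_rx_dma_enable() below
	 * and the halt-status poll that follows.
	 */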
	writel(0x00002001, &etdev->regs->rxdma.csr);
	csr = readl(&etdev->regs->rxdma.csr);
	if ((csr & 0x00020000) == 0) {	/* Check halt status (bit 17) */
		udelay(5);
		csr = readl(&etdev->regs->rxdma.csr);
		if ((csr & 0x00020000) == 0)
			dev_err(&etdev->pdev->dev,
				"RX Dma failed to enter halt state. CSR 0x%08x\n",
				csr);
	}
}
/**
 * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
 * @etdev: pointer to our adapter structure
 */
void et131x_rx_dma_enable(struct et131x_adapter *etdev)
{
	/* Setup the receive dma configuration register for normal operation */
	u32 csr = 0x2000;	/* FBR1 enable */
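	/* Bits 11:12 appear to encode the FBR1 buffer size (00 = the 2048
	 * default) and bits 8:9 the FBR0 buffer size; encoding inferred from
	 * the values programmed below.
	 */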
	if (etdev->rx_ring.Fbr1BufferSize == 4096)
		csr |= 0x0800;
	else if (etdev->rx_ring.Fbr1BufferSize == 8192)
		csr |= 0x1000;
	else if (etdev->rx_ring.Fbr1BufferSize == 16384)
		csr |= 0x1800;
#ifdef USE_FBR0
	csr |= 0x0400;		/* FBR0 enable */
	if (etdev->rx_ring.Fbr0BufferSize == 256)
		csr |= 0x0100;
	else if (etdev->rx_ring.Fbr0BufferSize == 512)
		csr |= 0x0200;
	else if (etdev->rx_ring.Fbr0BufferSize == 1024)
		csr |= 0x0300;
#endif
	writel(csr, &etdev->regs->rxdma.csr);

	csr = readl(&etdev->regs->rxdma.csr);
	if ((csr & 0x00020000) != 0) {
		udelay(5);
		csr = readl(&etdev->regs->rxdma.csr);
		if ((csr & 0x00020000) != 0) {
			dev_err(&etdev->pdev->dev,
				"RX Dma failed to exit halt state. CSR 0x%08x\n",
				csr);
		}
	}
}
/**
 * nic_rx_pkts - Checks the hardware for available packets
 * @etdev: pointer to our adapter
 *
 * Returns rfd, a pointer to our MPRFD.
 *
 * Checks the hardware for available packets, using the completion ring.
 * If packets are available, it gets an RFD from the RecvList, attaches
 * the packet to it, puts the RFD in the RecvPendList, and also returns
 * the pointer to the RFD.
 */
struct rfd *nic_rx_pkts(struct et131x_adapter *etdev)
{
	struct rx_ring *rx_local = &etdev->rx_ring;
	struct rx_status_block *status;
	struct pkt_stat_desc *psr;
	struct rfd *rfd;
	u32 i;
	u8 *buf;
	unsigned long flags;
	struct list_head *element;
	u8 rindex;
	u16 bindex;
	u32 len;
	u32 word0;
	u32 word1;

	/* RX Status block is written by the DMA engine prior to every
	 * interrupt. It contains the next to be used entry in the Packet
	 * Status Ring, and also the two Free Buffer rings.
	 */
	status = rx_local->rx_status_block;
	word1 = status->Word1 >> 16;	/* Get the useful bits */

	/* Check the PSR and wrap bits do not match */
	if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
		/* Looks like this ring is not updated yet */
		return NULL;
	/* The packet status ring indicates that data is available. */
	psr = (struct pkt_stat_desc *) (rx_local->pPSRingVa) +
	    (rx_local->local_psr_full & 0xFFF);

	/* Grab any information that is required once the PSR is
	 * advanced, since we can no longer rely on the memory being
	 * accurate
	 */
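	/* word1 layout, as implied by the masks below: bits 15:0 frame
	 * length, bits 25:16 buffer index, bits 27:26 ring index.
	 */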
	len = psr->word1 & 0xFFFF;
	rindex = (psr->word1 >> 26) & 0x03;
	bindex = (psr->word1 >> 16) & 0x3FF;
	word0 = psr->word0;

	/* Indicate that we have used this PSR entry. */
	/* FIXME wrap 12 */
	add_12bit(&rx_local->local_psr_full, 1);
	if ((rx_local->local_psr_full & 0xFFF) > rx_local->PsrNumEntries - 1) {
		/* Clear psr full and toggle the wrap bit */
		rx_local->local_psr_full &= ~0xFFF;
		rx_local->local_psr_full ^= 0x1000;
	}

	writel(rx_local->local_psr_full,
	       &etdev->regs->rxdma.psr_full_offset);
#ifndef USE_FBR0
	if (rindex != 1)
		return NULL;
#endif

#ifdef USE_FBR0
	if (rindex > 1 ||
	    (rindex == 0 &&
	     bindex > rx_local->Fbr0NumEntries - 1) ||
	    (rindex == 1 &&
	     bindex > rx_local->Fbr1NumEntries - 1))
#else
	if (rindex != 1 || bindex > rx_local->Fbr1NumEntries - 1)
#endif
	{
		/* Illegal buffer or ring index cannot be used by S/W */
		dev_err(&etdev->pdev->dev,
			"NICRxPkts PSR Entry %d indicates "
			"length of %d and/or bad bi(%d)\n",
			rx_local->local_psr_full & 0xFFF,
			len, bindex);
		return NULL;
	}
	/* Get and fill the RFD. */
	spin_lock_irqsave(&etdev->rcv_lock, flags);

	rfd = NULL;
	element = rx_local->RecvList.next;
	rfd = (struct rfd *) list_entry(element, struct rfd, list_node);

	if (rfd == NULL) {
		spin_unlock_irqrestore(&etdev->rcv_lock, flags);
		return NULL;
	}

	list_del(&rfd->list_node);
	rx_local->nReadyRecv--;

	spin_unlock_irqrestore(&etdev->rcv_lock, flags);

	rfd->bufferindex = bindex;
	rfd->ringindex = rindex;

	/* In V1 silicon, there is a bug which screws up filtering of
	 * runt packets. Therefore runt packet filtering is disabled
	 * in the MAC and the packets are dropped here. They are
	 * also counted here.
	 */
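	/* NIC_MIN_PACKET_SIZE presumably excludes the 4-byte FCS, hence the
	 * +4 to reject anything below a minimum-size Ethernet frame.
	 */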
	if (len < (NIC_MIN_PACKET_SIZE + 4)) {
		etdev->stats.other_errors++;
		len = 0;
	}
	if (len) {
		if (etdev->ReplicaPhyLoopbk == 1) {
			buf = rx_local->fbr[rindex]->virt[bindex];

			if (memcmp(&buf[6], etdev->addr, ETH_ALEN) == 0) {
				if (memcmp(&buf[42], "Replica packet",
					   ETH_HLEN)) {
					etdev->ReplicaPhyLoopbkPF = 1;
				}
			}
		}

		/* Determine if this is a multicast packet coming in */
		if ((word0 & ALCATEL_MULTICAST_PKT) &&
		    !(word0 & ALCATEL_BROADCAST_PKT)) {
			/* Promiscuous mode and Multicast mode are
			 * not mutually exclusive as was first
			 * thought. I guess Promiscuous is just
			 * considered a super-set of the other
			 * filters. Generally filter is 0x2b when in
			 * promiscuous mode.
			 */
			if ((etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST)
			    && !(etdev->PacketFilter & ET131X_PACKET_TYPE_PROMISCUOUS)
			    && !(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
				buf = rx_local->fbr[rindex]->virt[bindex];

				/* Loop through our list to see if the
				 * destination address of this packet
				 * matches one in our list.
				 */
				for (i = 0; i < etdev->MCAddressCount; i++) {
					if (buf[0] == etdev->MCList[i][0] &&
					    buf[1] == etdev->MCList[i][1] &&
					    buf[2] == etdev->MCList[i][2] &&
					    buf[3] == etdev->MCList[i][3] &&
					    buf[4] == etdev->MCList[i][4] &&
					    buf[5] == etdev->MCList[i][5]) {
						break;
					}
				}

				/* If our index is equal to the number
				 * of Multicast address we have, then
				 * this means we did not find this
				 * packet's matching address in our
				 * list. Set the len to zero,
				 * so we free our RFD when we return
				 * from this function.
				 */
				if (i == etdev->MCAddressCount)
					len = 0;
			}

			if (len > 0)
				etdev->stats.multircv++;
		} else if (word0 & ALCATEL_BROADCAST_PKT)
			etdev->stats.brdcstrcv++;
		else
			/* Not sure what this counter measures in
			 * promiscuous mode. Perhaps we should check
			 * the MAC address to see if it is directed
			 * to us in promiscuous mode.
			 */
			etdev->stats.unircv++;
	}
	if (len > 0) {
		struct sk_buff *skb = NULL;

		/* rfd->len = len - 4; */
		rfd->len = len;

		skb = dev_alloc_skb(rfd->len + 2);
		if (!skb) {
			dev_err(&etdev->pdev->dev,
				"Couldn't alloc an SKB for Rx\n");
			return NULL;
		}

		etdev->net_stats.rx_bytes += rfd->len;

		memcpy(skb_put(skb, rfd->len),
		       rx_local->fbr[rindex]->virt[bindex],
		       rfd->len);

		skb->dev = etdev->netdev;
		skb->protocol = eth_type_trans(skb, etdev->netdev);
		skb->ip_summed = CHECKSUM_NONE;

		netif_rx(skb);
	} else {
		rfd->len = 0;
	}

	nic_return_rfd(etdev, rfd);
	return rfd;
}
/**
 * et131x_reset_recv - Reset the receive list
 * @etdev: pointer to our adapter
 *
 * Assumption: Rcv spinlock has been acquired.
 */
void et131x_reset_recv(struct et131x_adapter *etdev)
{
	WARN_ON(list_empty(&etdev->rx_ring.RecvList));
}
/**
 * et131x_handle_recv_interrupt - Interrupt handler for receive processing
 * @etdev: pointer to our adapter
 *
 * Assumption: Rcv spinlock has been acquired.
 */
void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
{
	struct rfd *rfd = NULL;
	u32 count = 0;
	bool done = true;

	/* Process up to available RFDs */
	while (count < NUM_PACKETS_HANDLED) {
		if (list_empty(&etdev->rx_ring.RecvList)) {
			WARN_ON(etdev->rx_ring.nReadyRecv != 0);
			done = false;
			break;
		}

		rfd = nic_rx_pkts(etdev);

		if (rfd == NULL)
			break;

		/* Do not receive any packets until a filter has been set.
		 * Do not receive any packets until we have link.
		 * If length is zero, return the RFD in order to advance the
		 * Free buffer ring.
		 */
		if (!etdev->PacketFilter ||
		    !netif_carrier_ok(etdev->netdev) ||
		    rfd->len == 0)
			continue;

		/* Increment the number of packets we received */
		etdev->stats.ipackets++;

		/* Set the status on the packet, either resources or success */
		if (etdev->rx_ring.nReadyRecv < RFD_LOW_WATER_MARK) {
			dev_warn(&etdev->pdev->dev,
				 "RFD's are running out\n");
		}
		count++;
	}

	if (count == NUM_PACKETS_HANDLED || !done) {
		etdev->rx_ring.UnfinishedReceives = true;
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &etdev->regs->global.watchdog_timer);
	} else
		/* Watchdog timer will disable itself if appropriate. */
		etdev->rx_ring.UnfinishedReceives = false;
}