Staging: et131x: Kill the NoPhyAccess variable
drivers/staging/et131x/et1310_tx.c

/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 *------------------------------------------------------------------------------
 *
 * et1310_tx.c - Routines used to perform data transmission.
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following Disclaimer as comments in the code as
 *   well as in the documentation and/or other materials provided with the
 *   distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following Disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */
#include "et131x_version.h"
#include "et131x_defs.h"

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>

#include "et1310_phy.h"
#include "et1310_pm.h"
#include "et1310_jagcore.h"

#include "et131x_adapter.h"
#include "et131x_initpci.h"
#include "et131x_isr.h"

#include "et1310_tx.h"
static void et131x_update_tcb_list(struct et131x_adapter *etdev);
static void et131x_check_send_wait_list(struct et131x_adapter *etdev);
static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
                                           PMP_TCB pMpTcb);
static int et131x_send_packet(struct sk_buff *skb,
                              struct et131x_adapter *etdev);
static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb);
/**
 * et131x_tx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h).
 *
 * Allocates memory that will be visible both to the device and to the CPU.
 * The OS will pass us packets, pointers to which we will insert in the Tx
 * Descriptor queue. The device will read this queue to find the packets in
 * memory. The device will update the "status" in memory each time it xmits a
 * packet.
 */
int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
        int desc_size = 0;
        TX_RING_t *tx_ring = &adapter->TxRing;

        /* Allocate memory for the TCB's (Transmit Control Block) */
        adapter->TxRing.MpTcbMem = (MP_TCB *)kcalloc(NUM_TCB, sizeof(MP_TCB),
                                                     GFP_ATOMIC | GFP_DMA);
        if (!adapter->TxRing.MpTcbMem) {
                dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
                return -ENOMEM;
        }

        /* Allocate enough memory for the Tx descriptor ring, and allocate
         * some extra so that the ring can be aligned on a 4k boundary.
         */
        desc_size = (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
        tx_ring->pTxDescRingVa =
            (PTX_DESC_ENTRY_t) pci_alloc_consistent(adapter->pdev, desc_size,
                                                    &tx_ring->pTxDescRingPa);
        if (!adapter->TxRing.pTxDescRingVa) {
                dev_err(&adapter->pdev->dev, "Cannot alloc memory for Tx Ring\n");
                return -ENOMEM;
        }

        /* Save physical address
         *
         * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
         * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
         * are ever returned, make sure the high part is retrieved here before
         * storing the adjusted address.
         */
        tx_ring->pTxDescRingAdjustedPa = tx_ring->pTxDescRingPa;

        /* Align Tx Descriptor Ring on a 4k (0x1000) byte boundary */
        et131x_align_allocated_memory(adapter,
                                      &tx_ring->pTxDescRingAdjustedPa,
                                      &tx_ring->TxDescOffset, 0x0FFF);
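        /*
         * et131x_align_allocated_memory() is not defined in this file (it is
         * pulled in via et131x_initpci.h). Presumably it rounds the physical
         * address up to the next boundary implied by the mask and reports how
         * far it moved, roughly:
         *
         *      TxDescOffset          = (0x1000 - (pa & 0x0FFF)) & 0x0FFF;
         *      pTxDescRingAdjustedPa = pa + TxDescOffset;
         *
         * so the virtual address below must be bumped by the same offset to
         * keep the CPU and DMA views of the ring in sync.
         */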
        tx_ring->pTxDescRingVa += tx_ring->TxDescOffset;

        /* Allocate memory for the Tx status block */
        tx_ring->pTxStatusVa = pci_alloc_consistent(adapter->pdev,
                                                    sizeof(TX_STATUS_BLOCK_t),
                                                    &tx_ring->pTxStatusPa);
        if (!adapter->TxRing.pTxStatusPa) {
                dev_err(&adapter->pdev->dev,
                        "Cannot alloc memory for Tx status block\n");
                return -ENOMEM;
        }

        /* Allocate memory for a dummy buffer */
        tx_ring->pTxDummyBlkVa = pci_alloc_consistent(adapter->pdev,
                                                      NIC_MIN_PACKET_SIZE,
                                                      &tx_ring->pTxDummyBlkPa);
        if (!adapter->TxRing.pTxDummyBlkPa) {
                dev_err(&adapter->pdev->dev,
                        "Cannot alloc memory for Tx dummy buffer\n");
                return -ENOMEM;
        }

        return 0;
}
/**
 * et131x_tx_dma_memory_free - Free all memory allocated within this module
 * @adapter: pointer to our private adapter structure
 */
void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
        int desc_size = 0;

        if (adapter->TxRing.pTxDescRingVa) {
                /* Free memory relating to Tx rings here */
                adapter->TxRing.pTxDescRingVa -= adapter->TxRing.TxDescOffset;

                desc_size =
                    (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;

                pci_free_consistent(adapter->pdev,
                                    desc_size,
                                    adapter->TxRing.pTxDescRingVa,
                                    adapter->TxRing.pTxDescRingPa);

                adapter->TxRing.pTxDescRingVa = NULL;
        }

        /* Free memory for the Tx status block */
        if (adapter->TxRing.pTxStatusVa) {
                pci_free_consistent(adapter->pdev,
                                    sizeof(TX_STATUS_BLOCK_t),
                                    adapter->TxRing.pTxStatusVa,
                                    adapter->TxRing.pTxStatusPa);

                adapter->TxRing.pTxStatusVa = NULL;
        }

        /* Free memory for the dummy buffer */
        if (adapter->TxRing.pTxDummyBlkVa) {
                pci_free_consistent(adapter->pdev,
                                    NIC_MIN_PACKET_SIZE,
                                    adapter->TxRing.pTxDummyBlkVa,
                                    adapter->TxRing.pTxDummyBlkPa);

                adapter->TxRing.pTxDummyBlkVa = NULL;
        }

        /* Free the memory for MP_TCB structures */
        kfree(adapter->TxRing.MpTcbMem);
}
/**
 * ConfigTxDmaRegs - Set up the tx dma section of the JAGCore.
 * @etdev: pointer to our private adapter structure
 */
void ConfigTxDmaRegs(struct et131x_adapter *etdev)
{
        struct _TXDMA_t __iomem *txdma = &etdev->regs->txdma;

        /* Load the hardware with the start of the transmit descriptor ring. */
        writel((uint32_t) (etdev->TxRing.pTxDescRingAdjustedPa >> 32),
               &txdma->pr_base_hi);
        writel((uint32_t) etdev->TxRing.pTxDescRingAdjustedPa,
               &txdma->pr_base_lo);

        /* Initialise the transmit DMA engine */
        writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des.value);

        /* Load the completion writeback physical address
         *
         * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
         * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
         * are ever returned, make sure the high part is retrieved here before
         * storing the adjusted address.
         */
        writel(0, &txdma->dma_wb_base_hi);
        writel(etdev->TxRing.pTxStatusPa, &txdma->dma_wb_base_lo);

        memset(etdev->TxRing.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));
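        /*
         * txDmaReadyToSend appears to be the driver's shadow of the tx dma
         * service_request register, i.e. the ring write pointer handed to
         * the hardware: nic_send_packet() advances it and writes it back to
         * service_request, and the hardware reports its progress through
         * NewServiceComplete, which et131x_update_tcb_list() reads.  Both
         * sides are reset to zero here.
         */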
        writel(0, &txdma->service_request);
        etdev->TxRing.txDmaReadyToSend = 0;
}
/**
 * et131x_tx_dma_disable - Stop Tx DMA on the ET1310
 * @etdev: pointer to our adapter structure
 */
void et131x_tx_dma_disable(struct et131x_adapter *etdev)
{
        /* Setup the transmit dma configuration register */
        writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
               &etdev->regs->txdma.csr);
}
/**
 * et131x_tx_dma_enable - Re-start Tx DMA on the ET1310.
 * @etdev: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
void et131x_tx_dma_enable(struct et131x_adapter *etdev)
{
        u32 csr = ET_TXDMA_SNGL_EPKT;
        if (etdev->RegistryPhyLoopbk)
                /* TxDMA is disabled for loopback operation. */
                csr |= ET_TXDMA_CSR_HALT;
        else
                /* Setup the transmit dma configuration register for normal
                 * operation
                 */
                csr |= PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT;
        writel(csr, &etdev->regs->txdma.csr);
}
/**
 * et131x_init_send - Initialize send data structures
 * @adapter: pointer to our private adapter structure
 */
void et131x_init_send(struct et131x_adapter *adapter)
{
        PMP_TCB pMpTcb;
        uint32_t TcbCount;
        TX_RING_t *tx_ring;

        /* Setup some convenience pointers */
        tx_ring = &adapter->TxRing;
        pMpTcb = adapter->TxRing.MpTcbMem;

        tx_ring->TCBReadyQueueHead = pMpTcb;

        /* Go through and set up each TCB */
        for (TcbCount = 0; TcbCount < NUM_TCB; TcbCount++) {
                memset(pMpTcb, 0, sizeof(MP_TCB));

                /* Set the link pointer in HW TCB to the next TCB in the
                 * chain. If this is the last TCB in the chain, also set the
                 * tail pointer.
                 */
                if (TcbCount < NUM_TCB - 1) {
                        pMpTcb->Next = pMpTcb + 1;
                } else {
                        tx_ring->TCBReadyQueueTail = pMpTcb;
                        pMpTcb->Next = (PMP_TCB) NULL;
                }

                pMpTcb++;
        }

        /* Curr send queue should now be empty */
        tx_ring->CurrSendHead = (PMP_TCB) NULL;
        tx_ring->CurrSendTail = (PMP_TCB) NULL;

        INIT_LIST_HEAD(&adapter->TxRing.SendWaitQueue);
}
/**
 * et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only
 */
int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
        int status = 0;
        struct et131x_adapter *etdev = NULL;

        etdev = netdev_priv(netdev);

        /* Send these packets
         *
         * NOTE: The Linux Tx entry point is only given one packet at a time
         * to Tx, so the PacketCount and its array are not used here
         */

        /* Queue is not empty or TCB is not available */
        if (!list_empty(&etdev->TxRing.SendWaitQueue) ||
            MP_TCB_RESOURCES_NOT_AVAILABLE(etdev)) {
                /* NOTE: If there's an error on send, no need to queue the
                 * packet under Linux; if we just send an error up to the
                 * netif layer, it will resend the skb to us.
                 */
                status = -ENOMEM;
        } else {
                /* We need to see if the link is up; if it's not, make the
                 * netif layer think we're good and drop the packet
                 */
                /*
                 * if( MP_SHOULD_FAIL_SEND( etdev ) ||
                 *     etdev->DriverNoPhyAccess )
                 */
                if (MP_SHOULD_FAIL_SEND(etdev) || !netif_carrier_ok(netdev)) {
                        dev_kfree_skb_any(skb);
                        skb = NULL;

                        etdev->net_stats.tx_dropped++;
                } else {
                        status = et131x_send_packet(skb, etdev);

                        if (status == -ENOMEM) {
                                /* NOTE: If there's an error on send, no need
                                 * to queue the packet under Linux; if we just
                                 * send an error up to the netif layer, it
                                 * will resend the skb to us.
                                 */
                        } else if (status != 0) {
                                /* On any other error, make netif think we're
                                 * OK and drop the packet
                                 */
                                dev_kfree_skb_any(skb);
                                skb = NULL;
                                etdev->net_stats.tx_dropped++;
                        }
                }
        }
        return status;
}
/**
 * et131x_send_packet - Do the work to send a packet
 * @skb: the packet(s) to send
 * @etdev: a pointer to the device's private adapter structure
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
 *
 * Assumption: Send spinlock has been acquired
 */
static int et131x_send_packet(struct sk_buff *skb,
                              struct et131x_adapter *etdev)
{
        int status = 0;
        PMP_TCB pMpTcb = NULL;
        uint16_t *shbufva;
        unsigned long flags;

        /* All packets must have at least a MAC address and a protocol type */
        if (skb->len < ETH_HLEN) {
                return -EIO;
        }

        /* Get a TCB for this packet */
        spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

        pMpTcb = etdev->TxRing.TCBReadyQueueHead;

        if (pMpTcb == NULL) {
                spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
                return -ENOMEM;
        }

        etdev->TxRing.TCBReadyQueueHead = pMpTcb->Next;

        if (etdev->TxRing.TCBReadyQueueHead == NULL)
                etdev->TxRing.TCBReadyQueueTail = NULL;

        spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

        pMpTcb->PacketLength = skb->len;
        pMpTcb->Packet = skb;

        if ((skb->data != NULL) && ((skb->len - skb->data_len) >= 6)) {
                shbufva = (uint16_t *) skb->data;

                if ((shbufva[0] == 0xffff) &&
                    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
                        pMpTcb->Flags |= fMP_DEST_BROAD;
                } else if ((shbufva[0] & 0x3) == 0x0001) {
                        pMpTcb->Flags |= fMP_DEST_MULTI;
                }
        }

        pMpTcb->Next = NULL;

        /* Call the NIC specific send handler. */
        if (status == 0)
                status = nic_send_packet(etdev, pMpTcb);

        if (status != 0) {
                spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

                if (etdev->TxRing.TCBReadyQueueTail) {
                        etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
                } else {
                        /* Apparently ready Q is empty. */
                        etdev->TxRing.TCBReadyQueueHead = pMpTcb;
                }

                etdev->TxRing.TCBReadyQueueTail = pMpTcb;
                spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
                return status;
        }
        WARN_ON(etdev->TxRing.nBusySend > NUM_TCB);
        return 0;
}
/**
 * nic_send_packet - NIC specific send handler for version B silicon.
 * @etdev: pointer to our adapter
 * @pMpTcb: pointer to MP_TCB
 *
 * Returns 0 or errno.
 */
static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
{
        uint32_t loopIndex;
        TX_DESC_ENTRY_t CurDesc[24];
        uint32_t FragmentNumber = 0;
        uint32_t thiscopy, remainder;
        struct sk_buff *pPacket = pMpTcb->Packet;
        uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
        struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
        unsigned long flags;

        /* Part of the optimizations of this send routine restrict us to
         * sending 24 fragments at a pass. In practice we should never see
         * more than 5 fragments.
         *
         * NOTE: The older version of this function (below) can handle any
         * number of fragments. If needed, we can call this function,
         * although it is less efficient.
         */
        if (FragListCount > 23) {
                return -EIO;
        }

        memset(CurDesc, 0, sizeof(TX_DESC_ENTRY_t) * (FragListCount + 1));
        for (loopIndex = 0; loopIndex < FragListCount; loopIndex++) {
                /* If there is something in this element, let's get a
                 * descriptor from the ring and get the necessary data
                 */
                if (loopIndex == 0) {
                        /* If the fragments are smaller than a standard MTU,
                         * then map them to a single descriptor in the Tx
                         * Desc ring. However, if they're larger, as is
                         * possible with support for jumbo packets, then
                         * split them each across 2 descriptors.
                         *
                         * This will work until we determine why the hardware
                         * doesn't seem to like large fragments.
                         */
                        if ((pPacket->len - pPacket->data_len) <= 1514) {
                                CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
                                CurDesc[FragmentNumber].word2.bits.
                                    length_in_bytes =
                                    pPacket->len - pPacket->data_len;

                                /* NOTE: Here, the dma_addr_t returned from
                                 * pci_map_single() is implicitly cast as a
                                 * uint32_t. Although dma_addr_t can be
                                 * 64-bit, the address returned by
                                 * pci_map_single() is always 32-bit
                                 * addressable (as defined by the pci/dma
                                 * subsystem)
                                 */
                                CurDesc[FragmentNumber++].DataBufferPtrLow =
                                    pci_map_single(etdev->pdev,
                                                   pPacket->data,
                                                   pPacket->len -
                                                   pPacket->data_len,
                                                   PCI_DMA_TODEVICE);
                        } else {
                                CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
                                CurDesc[FragmentNumber].word2.bits.
                                    length_in_bytes =
                                    ((pPacket->len - pPacket->data_len) / 2);

                                /* NOTE: Here, the dma_addr_t returned from
                                 * pci_map_single() is implicitly cast as a
                                 * uint32_t. Although dma_addr_t can be
                                 * 64-bit, the address returned by
                                 * pci_map_single() is always 32-bit
                                 * addressable (as defined by the pci/dma
                                 * subsystem)
                                 */
                                CurDesc[FragmentNumber++].DataBufferPtrLow =
                                    pci_map_single(etdev->pdev,
                                                   pPacket->data,
                                                   ((pPacket->len -
                                                     pPacket->data_len) / 2),
                                                   PCI_DMA_TODEVICE);
                                CurDesc[FragmentNumber].DataBufferPtrHigh = 0;

                                CurDesc[FragmentNumber].word2.bits.
                                    length_in_bytes =
                                    ((pPacket->len - pPacket->data_len) / 2);

                                /* NOTE: Here, the dma_addr_t returned from
                                 * pci_map_single() is implicitly cast as a
                                 * uint32_t. Although dma_addr_t can be
                                 * 64-bit, the address returned by
                                 * pci_map_single() is always 32-bit
                                 * addressable (as defined by the pci/dma
                                 * subsystem)
                                 */
                                CurDesc[FragmentNumber++].DataBufferPtrLow =
                                    pci_map_single(etdev->pdev,
                                                   pPacket->data +
                                                   ((pPacket->len -
                                                     pPacket->data_len) / 2),
                                                   ((pPacket->len -
                                                     pPacket->data_len) / 2),
                                                   PCI_DMA_TODEVICE);
                        }
                } else {
                        CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
                        CurDesc[FragmentNumber].word2.bits.length_in_bytes =
                            pFragList[loopIndex - 1].size;

                        /* NOTE: Here, the dma_addr_t returned from
                         * pci_map_page() is implicitly cast as a uint32_t.
                         * Although dma_addr_t can be 64-bit, the address
                         * returned by pci_map_page() is always 32-bit
                         * addressable (as defined by the pci/dma subsystem)
                         */
                        CurDesc[FragmentNumber++].DataBufferPtrLow =
                            pci_map_page(etdev->pdev,
                                         pFragList[loopIndex - 1].page,
                                         pFragList[loopIndex - 1].page_offset,
                                         pFragList[loopIndex - 1].size,
                                         PCI_DMA_TODEVICE);
                }
        }
        if (FragmentNumber == 0)
                return -EIO;
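        /*
         * word3 holds the per-descriptor control flags; their exact layout is
         * defined in the driver headers, not in this file.  From the usage
         * here, bit 0 appears to mark the last descriptor of a packet and
         * bit 2 appears to request a completion interrupt, so 0x5 means
         * "last + interrupt" and 0x1 means "last" only.  At gigabit speed
         * only every PARM_TX_NUM_BUFS_DEF-th packet requests an interrupt
         * (software interrupt coalescing); at lower speeds every packet does.
         * The .f bit set on CurDesc[0] below presumably flags the first
         * descriptor of the packet.
         */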
        if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
                if (++etdev->TxRing.TxPacketsSinceLastinterrupt ==
                    PARM_TX_NUM_BUFS_DEF) {
                        CurDesc[FragmentNumber - 1].word3.value = 0x5;
                        etdev->TxRing.TxPacketsSinceLastinterrupt = 0;
                } else {
                        CurDesc[FragmentNumber - 1].word3.value = 0x1;
                }
        } else {
                CurDesc[FragmentNumber - 1].word3.value = 0x5;
        }

        CurDesc[0].word3.bits.f = 1;
        pMpTcb->WrIndexStart = etdev->TxRing.txDmaReadyToSend;
        pMpTcb->PacketStaleCount = 0;

        spin_lock_irqsave(&etdev->SendHWLock, flags);
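        /*
         * Copy the descriptors into the ring in at most two chunks: one up to
         * the end of the ring and, if the packet's descriptors wrap past the
         * end, a second chunk starting again at ring index 0.
         */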
        thiscopy = NUM_DESC_PER_RING_TX -
            INDEX10(etdev->TxRing.txDmaReadyToSend);

        if (thiscopy >= FragmentNumber) {
                remainder = 0;
                thiscopy = FragmentNumber;
        } else {
                remainder = FragmentNumber - thiscopy;
        }

        memcpy(etdev->TxRing.pTxDescRingVa +
               INDEX10(etdev->TxRing.txDmaReadyToSend), CurDesc,
               sizeof(TX_DESC_ENTRY_t) * thiscopy);

        add_10bit(&etdev->TxRing.txDmaReadyToSend, thiscopy);
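        /*
         * txDmaReadyToSend and the TCB WrIndex values use the 10-bit ring
         * index scheme handled by the INDEX10()/add_10bit() helpers and the
         * ET_DMA10_MASK/ET_DMA10_WRAP constants (defined elsewhere in the
         * driver): the low 10 bits are presumably the descriptor index and
         * bit 10 is a "wrap" flag that toggles each time the index rolls
         * over, letting the driver and hardware distinguish an empty ring
         * from a full one.  The fix-up below forces the index back to zero
         * and toggles the wrap bit once the end of the ring is reached.
         */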
        if (INDEX10(etdev->TxRing.txDmaReadyToSend) == 0 ||
            INDEX10(etdev->TxRing.txDmaReadyToSend) == NUM_DESC_PER_RING_TX) {
                etdev->TxRing.txDmaReadyToSend &= ~ET_DMA10_MASK;
                etdev->TxRing.txDmaReadyToSend ^= ET_DMA10_WRAP;
        }

        if (remainder) {
                memcpy(etdev->TxRing.pTxDescRingVa,
                       CurDesc + thiscopy,
                       sizeof(TX_DESC_ENTRY_t) * remainder);

                add_10bit(&etdev->TxRing.txDmaReadyToSend, remainder);
        }

        if (INDEX10(etdev->TxRing.txDmaReadyToSend) == 0) {
                if (etdev->TxRing.txDmaReadyToSend)
                        pMpTcb->WrIndex = NUM_DESC_PER_RING_TX - 1;
                else
                        pMpTcb->WrIndex =
                            ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
        } else
                pMpTcb->WrIndex = etdev->TxRing.txDmaReadyToSend - 1;
        spin_lock(&etdev->TCBSendQLock);

        if (etdev->TxRing.CurrSendTail)
                etdev->TxRing.CurrSendTail->Next = pMpTcb;
        else
                etdev->TxRing.CurrSendHead = pMpTcb;

        etdev->TxRing.CurrSendTail = pMpTcb;

        WARN_ON(pMpTcb->Next != NULL);

        etdev->TxRing.nBusySend++;

        spin_unlock(&etdev->TCBSendQLock);

        /* Write the new write pointer back to the device. */
        writel(etdev->TxRing.txDmaReadyToSend,
               &etdev->regs->txdma.service_request);

        /* For Gig only, we use Tx Interrupt coalescing. Enable the software
         * timer to wake us up if this packet isn't followed by N more.
         */
        if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
                writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
                       &etdev->regs->global.watchdog_timer);
        }
        spin_unlock_irqrestore(&etdev->SendHWLock, flags);

        return 0;
}
/**
 * et131x_free_send_packet - Recycle a MP_TCB, complete the packet if necessary
 * @etdev: pointer to our adapter
 * @pMpTcb: pointer to MP_TCB
 *
 * Assumption - Send spinlock has been acquired
 */
inline void et131x_free_send_packet(struct et131x_adapter *etdev,
                                    PMP_TCB pMpTcb)
{
        unsigned long flags;
        TX_DESC_ENTRY_t *desc = NULL;
        struct net_device_stats *stats = &etdev->net_stats;

        if (pMpTcb->Flags & fMP_DEST_BROAD)
                atomic_inc(&etdev->Stats.brdcstxmt);
        else if (pMpTcb->Flags & fMP_DEST_MULTI)
                atomic_inc(&etdev->Stats.multixmt);
        else
                atomic_inc(&etdev->Stats.unixmt);

        if (pMpTcb->Packet) {
                stats->tx_bytes += pMpTcb->Packet->len;

                /* Iterate through the TX descriptors on the ring
                 * corresponding to this packet and unmap the fragments
                 * they point to
                 */
                do {
                        desc =
                            (TX_DESC_ENTRY_t *) (etdev->TxRing.pTxDescRingVa +
                                                 INDEX10(pMpTcb->WrIndexStart));

                        pci_unmap_single(etdev->pdev,
                                         desc->DataBufferPtrLow,
                                         desc->word2.value, PCI_DMA_TODEVICE);

                        add_10bit(&pMpTcb->WrIndexStart, 1);
                        if (INDEX10(pMpTcb->WrIndexStart) >=
                            NUM_DESC_PER_RING_TX) {
                                pMpTcb->WrIndexStart &= ~ET_DMA10_MASK;
                                pMpTcb->WrIndexStart ^= ET_DMA10_WRAP;
                        }
                } while (desc != (etdev->TxRing.pTxDescRingVa +
                                  INDEX10(pMpTcb->WrIndex)));

                dev_kfree_skb_any(pMpTcb->Packet);
        }

        memset(pMpTcb, 0, sizeof(MP_TCB));

        /* Add the TCB to the Ready Q */
        spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

        etdev->Stats.opackets++;

        if (etdev->TxRing.TCBReadyQueueTail) {
                etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
        } else {
                /* Apparently ready Q is empty. */
                etdev->TxRing.TCBReadyQueueHead = pMpTcb;
        }

        etdev->TxRing.TCBReadyQueueTail = pMpTcb;

        spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
        WARN_ON(etdev->TxRing.nBusySend < 0);
}
/**
 * et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @etdev: pointer to our adapter
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
{
        PMP_TCB pMpTcb;
        struct list_head *entry;
        unsigned long flags;
        uint32_t FreeCounter = 0;

        while (!list_empty(&etdev->TxRing.SendWaitQueue)) {
                spin_lock_irqsave(&etdev->SendWaitLock, flags);

                etdev->TxRing.nWaitSend--;
                spin_unlock_irqrestore(&etdev->SendWaitLock, flags);

                entry = etdev->TxRing.SendWaitQueue.next;
        }

        etdev->TxRing.nWaitSend = 0;

        /* Any packets being sent? Check the first TCB on the send list */
        spin_lock_irqsave(&etdev->TCBSendQLock, flags);

        pMpTcb = etdev->TxRing.CurrSendHead;

        while ((pMpTcb != NULL) && (FreeCounter < NUM_TCB)) {
                PMP_TCB pNext = pMpTcb->Next;

                etdev->TxRing.CurrSendHead = pNext;

                if (pNext == NULL)
                        etdev->TxRing.CurrSendTail = NULL;

                etdev->TxRing.nBusySend--;

                spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

                FreeCounter++;
                et131x_free_send_packet(etdev, pMpTcb);

                spin_lock_irqsave(&etdev->TCBSendQLock, flags);

                pMpTcb = etdev->TxRing.CurrSendHead;
        }

        WARN_ON(FreeCounter == NUM_TCB);

        spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

        etdev->TxRing.nBusySend = 0;
}
/**
 * et131x_handle_send_interrupt - Interrupt handler for sending processing
 * @etdev: pointer to our adapter
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
{
        /* Mark as completed any packets which have been sent by the device. */
        et131x_update_tcb_list(etdev);

        /* If we queued any transmits because we didn't have any TCBs earlier,
         * dequeue and send those packets now, as long as we have free TCBs.
         */
        et131x_check_send_wait_list(etdev);
}
/**
 * et131x_update_tcb_list - Helper routine for Send Interrupt handler
 * @etdev: pointer to our adapter
 *
 * Re-claims the send resources and completes sends. Can also be called as
 * part of the NIC send routine when the "ServiceComplete" indication has
 * wrapped.
 */
static void et131x_update_tcb_list(struct et131x_adapter *etdev)
{
        unsigned long flags;
        u32 ServiceComplete;
        PMP_TCB pMpTcb;
        u32 index;

        ServiceComplete = readl(&etdev->regs->txdma.NewServiceComplete);
        index = INDEX10(ServiceComplete);

        /* Has the ring wrapped? Process any descriptors that do not have
         * the same "wrap" indicator as the current completion indicator
         */
        spin_lock_irqsave(&etdev->TCBSendQLock, flags);

        pMpTcb = etdev->TxRing.CurrSendHead;

        while (pMpTcb &&
               ((ServiceComplete ^ pMpTcb->WrIndex) & ET_DMA10_WRAP) &&
               index < INDEX10(pMpTcb->WrIndex)) {
                etdev->TxRing.nBusySend--;
                etdev->TxRing.CurrSendHead = pMpTcb->Next;
                if (pMpTcb->Next == NULL)
                        etdev->TxRing.CurrSendTail = NULL;

                spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
                et131x_free_send_packet(etdev, pMpTcb);
                spin_lock_irqsave(&etdev->TCBSendQLock, flags);

                /* Go to the next packet */
                pMpTcb = etdev->TxRing.CurrSendHead;
        }
        while (pMpTcb &&
               !((ServiceComplete ^ pMpTcb->WrIndex) & ET_DMA10_WRAP)
               && index > (pMpTcb->WrIndex & ET_DMA10_MASK)) {
                etdev->TxRing.nBusySend--;
                etdev->TxRing.CurrSendHead = pMpTcb->Next;
                if (pMpTcb->Next == NULL)
                        etdev->TxRing.CurrSendTail = NULL;

                spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
                et131x_free_send_packet(etdev, pMpTcb);
                spin_lock_irqsave(&etdev->TCBSendQLock, flags);

                /* Go to the next packet */
                pMpTcb = etdev->TxRing.CurrSendHead;
        }

        /* Wake up the queue when we hit a low-water mark */
        if (etdev->TxRing.nBusySend <= (NUM_TCB / 3))
                netif_wake_queue(etdev->netdev);

        spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
}
/**
 * et131x_check_send_wait_list - Helper routine for the interrupt handler
 * @etdev: pointer to our adapter
 *
 * Takes packets from the send wait queue and posts them to the device (if
 * room available).
 */
static void et131x_check_send_wait_list(struct et131x_adapter *etdev)
{
        unsigned long flags;

        spin_lock_irqsave(&etdev->SendWaitLock, flags);

        while (!list_empty(&etdev->TxRing.SendWaitQueue) &&
               MP_TCB_RESOURCES_AVAILABLE(etdev)) {
                struct list_head *entry;

                entry = etdev->TxRing.SendWaitQueue.next;

                etdev->TxRing.nWaitSend--;
        }

        spin_unlock_irqrestore(&etdev->SendWaitLock, flags);
}