Clean and tidy up files.
[tomato.git] / release / src-rt / linux / linux-2.6 / drivers / net / gt64240eth.c
blob7859202e7533cc63acd7dbe940d7dedf684e8e0a
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
6 * Copyright (C) 2001 Patton Electronics Company
7 * Copyright (C) 2002 Momentum Computer
9 * Copyright 2000 MontaVista Software Inc.
10 * Author: MontaVista Software, Inc.
11 * stevel@mvista.com or support@mvista.com
13 * This program is free software; you can distribute it and/or modify it
14 * under the terms of the GNU General Public License (Version 2) as
15 * published by the Free Software Foundation.
17 * This program is distributed in the hope it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * for more details.
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
26 * Ethernet driver for the MIPS GT96100 Advanced Communication Controller.
28 * Modified for the Gallileo/Marvell GT-64240 Communication Controller.
30 * Support for Rx NAPI, Rx checksum offload, IOCTL and ETHTOOL added
31 * Manish Lachwani (lachwani@pmc-sierra.com) - 09/16/2003
33 * Modified for later version of Linux 2.4 kernel
34 * Manish Lachwani (lachwani@pmc-sierra.com) - 04/29/2004
36 #include <linux/module.h>
37 #include <linux/kernel.h>
38 #include <linux/sched.h>
39 #include <linux/string.h>
40 #include <linux/timer.h>
41 #include <linux/errno.h>
42 #include <linux/in.h>
43 #include <linux/ioport.h>
44 #include <linux/slab.h>
45 #include <linux/interrupt.h>
46 #include <linux/pci.h>
47 #include <linux/init.h>
48 #include <linux/netdevice.h>
49 #include <linux/etherdevice.h>
50 #include <linux/ethtool.h>
51 #include <linux/skbuff.h>
52 #include <linux/delay.h>
53 #include <linux/ctype.h>
54 #include <linux/mii.h>
56 #include <asm/irq.h>
57 #include <asm/bitops.h>
58 #include <asm/io.h>
59 #include <asm/uaccess.h>
61 #define DESC_DATA_BE 1
63 #include "gt64240eth.h"
65 // enable this port (set hash size to 1/2K)
66 //- #define PORT_CONFIG pcrHS
67 #define PORT_CONFIG (pcrHS | pcrHD)
68 //- #define PORT_CONFIG pcrHS |pcrPM |pcrPBF|pcrHDM
69 //- GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG, pcrEN | pcrHS);
70 //- GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG, pcrEN | pcrHS | pcrPM);
71 //- GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG, pcrEN | pcrHS | pcrPM | 1<<pcrLPBKBit);
73 // clear all the MIB ctr regs
74 #define EXT_CONFIG_CLEAR (pcxrFCTL | pcxrFCTLen | pcxrFLP | pcxrDPLXen | pcxrPRIOrxOverride | pcxrRMIIen)
/*
 * gt64240_debug level:
 *   <= 2  none.
 *   >  2  some warnings such as queue full, .....
 *   >  3  lots of change-of-state messages.
 *   >  4  EXTENSIVE data/descriptor dumps.
 */
84 #ifdef GT64240_DEBUG
85 static int gt64240_debug = GT64240_DEBUG;
86 #else
87 static int gt64240_debug = 0;
88 #endif
90 static int debug = -1;
92 #define GT64240_MSG_ENABLE (NETIF_MSG_DRV | \
93 NETIF_MSG_PROBE | \
94 NETIF_MSG_LINK)
97 /********************************************************/
99 // prototypes
100 static void gt64240_delay(int msec);
101 static int gt64240_add_hash_entry(struct net_device *dev,
102 unsigned char *addr);
103 static void read_mib_counters(struct gt64240_private *gp);
104 static void dump_MII(struct net_device *dev);
105 static void dump_tx_desc(struct net_device *dev, int i);
106 static void dump_rx_desc(struct net_device *dev, int i);
107 static void dump_hw_addr(unsigned char *addr_str);
108 static void update_stats(struct gt64240_private *gp);
109 static void abort(struct net_device *dev, u32 abort_bits);
110 static void hard_stop(struct net_device *dev);
111 static void enable_ether_irq(struct net_device *dev);
112 static void disable_ether_irq(struct net_device *dev);
113 static int __init gt64240_probe1(unsigned long ioaddr, int irq, int port_num);
114 static void reset_tx(struct net_device *dev);
115 static void reset_rx(struct net_device *dev);
116 static int gt64240_init(struct net_device *dev);
117 static int gt64240_open(struct net_device *dev);
118 static int gt64240_close(struct net_device *dev);
119 static int gt64240_tx(struct sk_buff *skb, struct net_device *dev);
120 #ifdef GT64240_NAPI
121 static int gt64240_poll(struct net_device *dev, int *budget);
122 static int gt64240_rx(struct net_device *dev, u32 status, int budget);
123 #else
124 static int gt64240_rx(struct net_device *dev, u32 status);
125 #endif
126 static void gt64240_tx_timeout(struct net_device *dev);
127 static void gt64240_set_rx_mode(struct net_device *dev);
128 static struct net_device_stats *gt64240_get_stats(struct net_device *dev);
130 extern char * prom_getcmdline(void);
131 extern int prom_get_mac_addrs(unsigned char
132 station_addr[NUM_INTERFACES][6]);
/* Module identification string, printed once at probe time. */
static char version[] __devinitdata =
	"gt64240eth.o: version 0.1, <www.patton.com>\n";

/* PHY device addresses (MDIO bus address of each port's PHY). */
static u32 gt64240_phy_addr[NUM_INTERFACES] __devinitdata = { 0x8, 0x1, 0xa };

/* Need real Ethernet addresses -- in parse_mac_addr_options(),
 * these will be replaced by prom_get_mac_addrs() and/or prom_getcmdline(). */
static unsigned char gt64240_station_addr[NUM_INTERFACES][6] = {
	{0x00, 0x01, 0x02, 0x03, 0x04, 0x05},
	{0x01, 0x02, 0x03, 0x04, 0x05, 0x06},
	{0x02, 0x03, 0x04, 0x05, 0x06, 0x07}
};

/* Upper bound on events handled in one pass of the interrupt handler. */
static int max_interrupt_work = 32;

/*
 * Base address and interrupt of the GT64240 ethernet controllers
 */
static struct {
	unsigned int port;	/* MMIO base address of the port */
	int irq;		/* interrupt line (shared by all ports) */
} gt64240_iflist[NUM_INTERFACES] = {
	{
	GT64240_ETH0_BASE, 8}, {
	GT64240_ETH1_BASE, 8}, {
	GT64240_ETH2_BASE, 8}
};
163 static void gt64240_delay(int ms)
165 if (in_interrupt())
166 return;
167 else {
168 current->state = TASK_INTERRUPTIBLE;
169 schedule_timeout(ms * HZ / 1000);
173 unsigned char prom_mac_addr_base[6];
175 int prom_get_mac_addrs(unsigned char station_addr[NUM_INTERFACES][6])
177 memcpy(station_addr[0], prom_mac_addr_base, 6);
178 memcpy(station_addr[1], prom_mac_addr_base, 6);
179 memcpy(station_addr[2], prom_mac_addr_base, 6);
181 station_addr[1][5] += 1;
182 station_addr[2][5] += 2;
184 return 0;
187 void parse_mac_addr_options(void)
189 prom_get_mac_addrs(gt64240_station_addr);
192 static int read_MII(struct net_device *dev, int phy, int reg)
194 int timedout = 20;
195 u32 smir = smirOpCode | (phy << smirPhyAdBit) |
196 (reg << smirRegAdBit);
198 // wait for last operation to complete
199 while ((GT64240_READ(GT64240_ETH_SMI_REG)) & smirBusy) {
200 // snooze for 1 msec and check again
201 gt64240_delay(1);
203 if (--timedout == 0) {
204 printk("%s: read_MII busy timeout!!\n", dev->name);
205 return -1;
209 GT64240_WRITE(GT64240_ETH_SMI_REG, smir);
211 timedout = 20;
212 // wait for read to complete
213 while (!
214 ((smir =
215 GT64240_READ(GT64240_ETH_SMI_REG)) & smirReadValid)) {
216 // snooze for 1 msec and check again
217 gt64240_delay(1);
219 if (--timedout == 0) {
220 printk("%s: read_MII timeout!!\n", dev->name);
221 return -1;
225 return (int) (smir & smirDataMask);
228 static void gp_get_drvinfo (struct net_device *dev,
229 struct ethtool_drvinfo *info)
231 strcpy(info->driver, "gt64260");
232 strcpy(info->version, version);
235 static int gp_get_settings(struct net_device *dev,
236 struct ethtool_cmd *cmd)
238 struct gt64240_private *gp = netdev_priv(dev);
239 int rc;
241 spin_lock_irq(&gp->lock);
242 rc = mii_ethtool_gset(&gp->mii_if, cmd);
243 spin_unlock_irq(&gp->lock);
244 return rc;
247 static int gp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
249 struct gt64240_private *gp = netdev_priv(dev);
250 int rc;
252 spin_lock_irq(&gp->lock);
253 rc = mii_ethtool_sset(&gp->mii_if, cmd);
254 spin_unlock_irq(&gp->lock);
255 return rc;
258 static int gp_nway_reset(struct net_device *dev)
260 struct gt64240_private *gp = netdev_priv(dev);
261 return mii_nway_restart(&gp->mii_if);
264 static u32 gp_get_link(struct net_device *dev)
266 struct gt64240_private *gp = netdev_priv(dev);
267 return mii_link_ok(&gp->mii_if);
270 static u32 gp_get_msglevel(struct net_device *dev)
272 struct gt64240_private *gp = netdev_priv(dev);
273 return gp->msg_enable;
276 static void gp_set_msglevel(struct net_device *dev, u32 value)
278 struct gt64240_private *gp = netdev_priv(dev);
279 gp->msg_enable = value;
/* ethtool method table: settings and link state are serviced by the
 * generic MII helpers; the message level lives in the private struct. */
static struct ethtool_ops gp_ethtool_ops = {
	.get_drvinfo = gp_get_drvinfo,
	.get_settings = gp_get_settings,
	.set_settings = gp_set_settings,
	.nway_reset = gp_nway_reset,
	.get_link = gp_get_link,
	.get_msglevel = gp_get_msglevel,
	.set_msglevel = gp_set_msglevel,
};
292 static int gt64240_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
294 struct gt64240_private *gp = netdev_priv(dev);
295 struct mii_ioctl_data *data =
296 (struct mii_ioctl_data *) &rq->ifr_data;
297 int retval;
299 if (!netif_running(dev))
300 return -EINVAL;
302 spin_lock_irq(&gp->lock);
303 retval = generic_mii_ioctl(&gp->mii_if, data, cmd, NULL);
304 spin_unlock_irq(&gp->lock);
306 return retval;
309 static void dump_tx_desc(struct net_device *dev, int i)
311 struct gt64240_private *gp = netdev_priv(dev);
312 gt64240_td_t *td = &gp->tx_ring[i];
314 printk("%s:tx[%d]: self=%p cmd=%08x, cnt=%4d. bufp=%08x, next=%08x\n",
315 dev->name, i, td, td->cmdstat, td->byte_cnt, td->buff_ptr,
316 td->next);
319 static void dump_rx_desc(struct net_device *dev, int i)
321 struct gt64240_private *gp = netdev_priv(dev);
322 gt64240_rd_t *rd = &gp->rx_ring[i];
324 printk("%s:rx_dsc[%d]: self=%p cst=%08x,size=%4d. cnt=%4d. "
325 "bufp=%08x, next=%08x\n",
326 dev->name, i, rd, rd->cmdstat, rd->buff_sz, rd->byte_cnt,
327 rd->buff_ptr, rd->next);
330 // These routines work, just disabled to avoid compile warnings
331 static void write_MII(struct net_device *dev, int phy, int reg, int data)
333 u32 smir = (phy << smirPhyAdBit) | (reg << smirRegAdBit) | data;
334 int timedout = 20;
336 // wait for last operation to complete
337 while (GT64240_READ(GT64240_ETH_SMI_REG) & smirBusy) {
338 // snooze for 1 msec and check again
339 gt64240_delay(1);
341 if (--timedout == 0) {
342 printk("%s: write_MII busy timeout!!\n",
343 dev->name);
344 return;
348 GT64240_WRITE(GT64240_ETH_SMI_REG, smir);
351 static void dump_MII(struct net_device *dev)
353 struct gt64240_private *gp = netdev_priv(dev);
354 int i, val;
356 for (i = 0; i < 7; i++) {
357 if ((val = read_MII(dev, gp->phy_addr, i)) >= 0)
358 printk("%s: MII Reg %d=%x\n", dev->name, i, val);
360 for (i = 16; i < 21; i++) {
361 if ((val = read_MII(dev, gp->phy_addr, i)) >= 0)
362 printk("%s: MII Reg %d=%x\n", dev->name, i, val);
/* Print a 6-byte MAC address as colon-separated hex followed by a
 * newline. */
static void dump_hw_addr(unsigned char *addr_str)
{
	int i;

	for (i = 0; i < 6; i++)
		printk("%2.2x%s", addr_str[i], i < 5 ? ":" : "\n");
}
/*
 * Insert 'addr' into the controller's Rx address hash table so the
 * port will accept frames destined to that MAC address.  The hardware
 * hashes bit-reversed address bytes, so a 256-entry bit-swap lookup
 * table is built lazily on first call.  Collisions are resolved by
 * linear probing, at most HASH_HOP_NUMBER slots.
 * Returns 0 on success, -1 if no free slot was found.
 */
static int gt64240_add_hash_entry(struct net_device *dev,
				  unsigned char *addr)
{
	static unsigned char swapped[256];
	struct gt64240_private *gp;
	u32 value1, value0, *entry;
	unsigned char hash_ea[6];
	static int flag = 0;
	u16 hashResult;
	int i;

	if (flag == 0) {	/* Create table to swap bits in a byte */
		flag = 1;
		for (i = 0; i < 256; i++) {
			swapped[i] = (i & 0x01) << 7;
			swapped[i] |= (i & 0x02) << 5;
			swapped[i] |= (i & 0x04) << 3;
			swapped[i] |= (i & 0x08) << 1;
			swapped[i] |= (i & 0x10) >> 1;
			swapped[i] |= (i & 0x20) >> 3;
			swapped[i] |= (i & 0x40) >> 5;
			swapped[i] |= (i & 0x80) >> 7;
		}
	}

	for (i = 0; i < 6; i++) {	/* swap bits from mac to create hash mac */
		hash_ea[i] = swapped[addr[i]];
	}

	gp = netdev_priv(dev);

	/* create hash entry address */
	hashResult = (((hash_ea[5] >> 2) & 0x3F) << 9) & 0x7E00;
	hashResult |= ((hash_ea[4] & 0x7F) << 2) | (hash_ea[5] & 0x03);
	hashResult ^=
	    ((hash_ea[3] & 0xFF) << 1) | ((hash_ea[4] >> 7) & 0x01);
	hashResult ^= ((hash_ea[1] & 0x01) << 8) | (hash_ea[2] & 0xFF);

	value0 = hteValid | hteRD;	/* Create hash table entry value */
	value0 |= (u32) addr[0] << 3;
	value0 |= (u32) addr[1] << 11;
	value0 |= (u32) addr[2] << 19;
	value0 |= ((u32) addr[3] & 0x1f) << 27;

	value1 = ((u32) addr[3] >> 5) & 0x07;
	value1 |= (u32) addr[4] << 3;
	value1 |= (u32) addr[5] << 11;

	/* Insert entry value into hash table */
	for (i = 0; i < HASH_HOP_NUMBER; i++) {
		entry = (u32 *) ((u32) gp->hash_table +
				 (((u32) hashResult & 0x07ff) << 3));
		if ((*entry & hteValid) && !(*entry & hteSkip)) {
			hashResult += 2;	/* oops, occupied, go to next entry */
		} else {
			/* Word order of the 8-byte entry depends on CPU
			 * endianness. */
#ifdef __LITTLE_ENDIAN
			entry[1] = value1;
			entry[0] = value0;
#else
			entry[0] = value1;
			entry[1] = value0;
#endif
			break;
		}
	}

	if (i >= HASH_HOP_NUMBER) {
		printk("%s: gt64240_add_hash_entry expired!\n", dev->name);
		return (-1);
	}

	return (0);
}
449 static void read_mib_counters(struct gt64240_private *gp)
451 u32 *mib_regs = (u32 *) & gp->mib;
452 int i;
454 for (i = 0; i < sizeof(mib_counters_t) / sizeof(u32); i++)
455 mib_regs[i] =
456 GT64240ETH_READ(gp,
457 GT64240_ETH_MIB_COUNT_BASE +
458 i * sizeof(u32));
462 static void update_stats(struct gt64240_private *gp)
464 mib_counters_t *mib = &gp->mib;
465 struct net_device_stats *stats = &gp->stats;
467 read_mib_counters(gp);
469 stats->rx_packets = mib->totalFramesReceived;
470 stats->tx_packets = mib->framesSent;
471 stats->rx_bytes = mib->totalByteReceived;
472 stats->tx_bytes = mib->byteSent;
473 stats->rx_errors = mib->totalFramesReceived - mib->framesReceived;
474 //the tx error counters are incremented by the ISR
475 //rx_dropped incremented by gt64240_rx
476 //tx_dropped incremented by gt64240_tx
477 stats->multicast = mib->multicastFramesReceived;
478 // collisions incremented by gt64240_tx_complete
479 stats->rx_length_errors = mib->oversizeFrames + mib->fragments;
480 // The RxError condition means the Rx DMA encountered a
481 // CPU owned descriptor, which, if things are working as
482 // they should, means the Rx ring has overflowed.
483 stats->rx_over_errors = mib->macRxError;
484 stats->rx_crc_errors = mib->cRCError;
487 static void abort(struct net_device *dev, u32 abort_bits)
489 struct gt64240_private *gp = netdev_priv(dev);
490 int timedout = 100; // wait up to 100 msec for hard stop to complete
492 if (gt64240_debug > 3)
493 printk("%s: abort\n", dev->name);
495 // Return if neither Rx or Tx abort bits are set
496 if (!(abort_bits & (sdcmrAR | sdcmrAT)))
497 return;
499 // make sure only the Rx/Tx abort bits are set
500 abort_bits &= (sdcmrAR | sdcmrAT);
502 spin_lock(&gp->lock);
504 // abort any Rx/Tx DMA immediately
505 GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM, abort_bits);
507 if (gt64240_debug > 3)
508 printk("%s: abort: SDMA cmd = %x/%x\n",
509 dev->name, abort_bits, GT64240ETH_READ(gp,
510 GT64240_ETH_SDMA_COMM));
512 // wait for abort to complete
513 while ((GT64240ETH_READ(gp, GT64240_ETH_SDMA_COMM)) & abort_bits) {
514 // snooze for 20 msec and check again
515 gt64240_delay(1);
517 if (--timedout == 0) {
518 printk("%s: abort timeout!!\n", dev->name);
519 break;
523 spin_unlock(&gp->lock);
527 static void hard_stop(struct net_device *dev)
529 struct gt64240_private *gp = netdev_priv(dev);
531 if (gt64240_debug > 3)
532 printk("%s: hard stop\n", dev->name);
534 disable_ether_irq(dev);
536 abort(dev, sdcmrAR | sdcmrAT);
538 // disable port
539 GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG, 0);
540 if (gt64240_debug > 3)
541 printk("%s: gt64240_hard_stop: Port Config=%x\n",
542 dev->name, GT64240ETH_READ(gp,
543 GT64240_ETH_PORT_CONFIG));
/*
 * Reclaim completed Tx descriptors: walk from tx_next_out up to the
 * hardware's current descriptor pointer, accumulating error/collision
 * statistics and freeing transmitted skbs.  Called from the interrupt
 * handler with gp->lock held.  If the Tx DMA stopped (icrTxEndLow)
 * while packets are still queued, it is restarted at the end.
 */
static void gt64240_tx_complete(struct net_device *dev, u32 status)
{
	struct gt64240_private *gp = netdev_priv(dev);
	int nextOut, cdp;
	gt64240_td_t *td;
	u32 cmdstat;

	/* Convert the hardware's current-descriptor bus address into a
	 * ring index. */
	cdp = (GT64240ETH_READ(gp, GT64240_ETH_CURR_TX_DESC_PTR0)
	       - gp->tx_ring_dma) / sizeof(gt64240_td_t);

	if (gt64240_debug > 3) {	/*+prk17aug01 */
		nextOut = gp->tx_next_out;
		printk
		    ("%s: tx_complete: TX_PTR0=0x%08x, cdp=%d. nextOut=%d.\n",
		     dev->name, GT64240ETH_READ(gp,
						GT64240_ETH_CURR_TX_DESC_PTR0),
		     cdp, nextOut);
		td = &gp->tx_ring[nextOut];
	}

	/*** NEED to check and CLEAR these errors every time thru here: ***/
	if (gt64240_debug > 2) {
		if (GT64240_READ(COMM_UNIT_INTERRUPT_CAUSE))
			printk
			    ("%s: gt64240_tx_complete: CIU Cause=%08x, Mask=%08x, EAddr=%08x\n",
			     dev->name,
			     GT64240_READ(COMM_UNIT_INTERRUPT_CAUSE),
			     GT64240_READ(COMM_UNIT_INTERRUPT_MASK),
			     GT64240_READ(COMM_UNIT_ERROR_ADDRESS));
		GT64240_WRITE(COMM_UNIT_INTERRUPT_CAUSE, 0);
	}

	// Continue until we reach the current descriptor pointer
	for (nextOut = gp->tx_next_out; nextOut != cdp;
	     nextOut = (nextOut + 1) % TX_RING_SIZE) {

		/* Respect the ISR's overall work budget. */
		if (--gp->intr_work_done == 0)
			break;

		td = &gp->tx_ring[nextOut];
		cmdstat = td->cmdstat;

		if (cmdstat & (u32) txOwn) {
			// DMA is not finished writing descriptor???
			// Leave and come back later to pick-up where we left off.
			break;
		}

		// increment Tx error stats
		if (cmdstat & (u32) txErrorSummary) {
			if (gt64240_debug > 2)
				printk
				    ("%s: tx_complete: Tx error, cmdstat = %x\n",
				     dev->name, cmdstat);
			gp->stats.tx_errors++;
			if (cmdstat & (u32) txReTxLimit)
				gp->stats.tx_aborted_errors++;
			if (cmdstat & (u32) txUnderrun)
				gp->stats.tx_fifo_errors++;
			if (cmdstat & (u32) txLateCollision)
				gp->stats.tx_window_errors++;
		}

		/* Retransmit count field holds the collision count. */
		if (cmdstat & (u32) txCollision)
			gp->stats.collisions +=
			    (unsigned long) ((cmdstat & txReTxCntMask) >>
					     txReTxCntBit);

		// Wake the queue if the ring was full
		if (gp->tx_full) {
			gp->tx_full = 0;
			if (gp->last_psr & psrLink) {
				netif_wake_queue(dev);
			}
		}

		// decrement tx ring buffer count
		if (gp->tx_count)
			gp->tx_count--;

		// free the skb
		if (gp->tx_skbuff[nextOut]) {
			if (gt64240_debug > 3)
				printk
				    ("%s: tx_complete: good Tx, skb=%p\n",
				     dev->name, gp->tx_skbuff[nextOut]);
			dev_kfree_skb_irq(gp->tx_skbuff[nextOut]);
			gp->tx_skbuff[nextOut] = NULL;
		} else {
			printk("%s: tx_complete: no skb!\n", dev->name);
		}
	}

	gp->tx_next_out = nextOut;

	if ((status & icrTxEndLow) && gp->tx_count != 0) {
		// we must restart the DMA
		GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM,
				 sdcmrERD | sdcmrTXDL);
	}
}
/*
 * Main interrupt handler.  Loops reading (and acking) the interrupt
 * cause register until no interesting bits remain or the
 * max_interrupt_work budget is exhausted.  Handles PHY status changes,
 * Tx completion, Rx (inline, or by scheduling NAPI) and Tx error
 * reporting.
 */
static irqreturn_t gt64240_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct gt64240_private *gp = netdev_priv(dev);
	u32 status;

	if (dev == NULL) {
		/* BUG(review): dev->name is dereferenced here even
		 * though dev was just found to be NULL -- this printk
		 * would oops if the branch were ever taken. */
		printk("%s: isr: null dev ptr\n", dev->name);
		return IRQ_NONE;
	}

	spin_lock(&gp->lock);

	if (gt64240_debug > 3)
		printk("%s: isr: entry\n", dev->name);

	gp->intr_work_done = max_interrupt_work;

	while (gp->intr_work_done > 0) {

		status = GT64240ETH_READ(gp, GT64240_ETH_INT_CAUSE);
#ifdef GT64240_NAPI
		/* dont ack Rx interrupts -- NAPI polling will */
		if (!(status & icrRxBuffer))
			GT64240ETH_WRITE(gp, GT64240_ETH_INT_CAUSE, 0);
#else
		// ACK interrupts
		GT64240ETH_WRITE(gp, GT64240_ETH_INT_CAUSE, 0);
#endif

		if (gt64240_debug > 3)
			printk("%s: isr: work=%d., icr=%x\n", dev->name,
			       gp->intr_work_done, status);

		/* Stop looping once no summary or buffer bits remain. */
		if ((status & icrEtherIntSum) == 0) {
			if (!(status &
			      (icrTxBufferLow | icrTxBufferHigh |
			       icrRxBuffer))) {
				/* exit from the while() loop */
				break;
			}
		}

		/* PHY status change: log the new state and start/stop
		 * the Tx queue to follow the link. */
		if (status & icrMIIPhySTC) {
			u32 psr =
			    GT64240ETH_READ(gp, GT64240_ETH_PORT_STATUS);
			if (gp->last_psr != psr) {
				printk("%s: port status: 0x%08x\n",
				       dev->name, psr);
				printk
				    ("%s: %s MBit/s, %s-duplex, flow-control %s, link is %s,\n",
				     dev->name,
				     psr & psrSpeed ? "100" : "10",
				     psr & psrDuplex ? "full" : "half",
				     psr & psrFctl ? "disabled" :
				     "enabled",
				     psr & psrLink ? "up" : "down");
				printk
				    ("%s: TxLowQ is %s, TxHighQ is %s, Transmitter is %s\n",
				     dev->name,
				     psr & psrTxLow ? "running" :
				     "stopped",
				     psr & psrTxHigh ? "running" :
				     "stopped",
				     psr & psrTxInProg ? "on" : "off");
			}

			if ((psr & psrLink) && !gp->tx_full &&
			    netif_queue_stopped(dev)) {
				printk
				    ("%s: isr: Link up, waking queue.\n",
				     dev->name);
				netif_wake_queue(dev);
			} else if (!(psr & psrLink)
				   && !netif_queue_stopped(dev)) {
				printk
				    ("%s: isr: Link down, stopping queue.\n",
				     dev->name);
				netif_stop_queue(dev);
			}

			gp->last_psr = psr;
		}

		if (status & (icrTxBufferLow | icrTxEndLow))
			gt64240_tx_complete(dev, status);

		if (status & icrRxBuffer) {
#ifdef GT64240_NAPI
			/* Hand Rx to the NAPI poll routine. */
			if (netif_rx_schedule_prep(dev)) {
				disable_ether_irq(dev);
				__netif_rx_schedule(dev);
			}
#else
			gt64240_rx(dev, status);
#endif
		}

		// Now check TX errors (RX errors were handled in gt64240_rx)
		if (status & icrTxErrorLow) {
			printk("%s: isr: Tx resource error\n", dev->name);
		}

		if (status & icrTxUdr) {
			printk("%s: isr: Tx underrun error\n", dev->name);
		}

		if (gp->intr_work_done == 0) {
			// ACK any remaining pending interrupts
			GT64240ETH_WRITE(gp, GT64240_ETH_INT_CAUSE, 0);
			if (gt64240_debug > 3)
				printk("%s: isr: hit max work\n", dev->name);
		}
	}

	if (gt64240_debug > 3)
		printk("%s: isr: exit, icr=%x\n",
		       dev->name, GT64240ETH_READ(gp,
						  GT64240_ETH_INT_CAUSE));

	spin_unlock(&gp->lock);

	return IRQ_HANDLED;
}
770 static void enable_ether_irq(struct net_device *dev)
772 struct gt64240_private *gp = netdev_priv(dev);
773 u32 intMask;
775 intMask =
776 icrTxBufferLow | icrTxEndLow | icrTxErrorLow |
777 icrTxBufferHigh | icrTxEndHigh | icrTxErrorHigh | icrTxUdr |
778 icrRxBuffer | icrRxOVR | icrRxError | icrMIIPhySTC |
779 icrEtherIntSum;
782 //- GT64240ETH_WRITE(gp, GT64240_ETH_INT_CAUSE, 0); /* CLEAR existing ints */
783 // unmask device interrupts:
784 GT64240ETH_WRITE(gp, GT64240_ETH_INT_MASK, intMask);
786 // now route ethernet interrupts to GT PCI1 (eth0 and eth1 will be
787 // sharing it).
788 intMask = MV_READ(PCI_1INTERRUPT_CAUSE_MASK_REGISTER_HIGH);
789 intMask |= 1 << gp->port_num;
790 MV_WRITE(PCI_1INTERRUPT_CAUSE_MASK_REGISTER_HIGH, intMask);
793 static void disable_ether_irq(struct net_device *dev)
795 struct gt64240_private *gp = netdev_priv(dev);
796 u32 intMask;
798 intMask = MV_READ(PCI_1INTERRUPT_CAUSE_MASK_REGISTER_HIGH);
799 intMask &= ~(1 << gp->port_num);
800 MV_WRITE(PCI_1INTERRUPT_CAUSE_MASK_REGISTER_HIGH, intMask);
802 // mask all device interrupts:
803 GT64240ETH_WRITE(gp, GT64240_ETH_INT_MASK, 0);
/*
 * Probe for a GT64240 ethernet controller.  Tries to bring up every
 * interface in gt64240_iflist; succeeds if at least one initializes.
 */
static int __init gt64240_probe(void)
{
	int found = 0;
	int i;

	parse_mac_addr_options();

	for (i = 0; i < NUM_INTERFACES; i++) {
		unsigned long base_addr = gt64240_iflist[i].port;

		/* NOTE(review): check_region() is the legacy, racy
		 * reservation check; gt64240_probe1()'s request_region()
		 * is what actually claims the range. */
		if (check_region(base_addr, GT64240_ETH_IO_SIZE)) {
			printk("gt64240_probe: ioaddr 0x%lx taken?\n",
			       base_addr);
			continue;
		}

		if (gt64240_probe1(base_addr, gt64240_iflist[i].irq, i) == 0) {
			/*
			 * Does not seem to be the "traditional" way folks do
			 * this, but I want to init both eth ports if at all
			 * possible!
			 *
			 * So, until I find out the "correct" way to do this:
			 */
			if (++found == NUM_INTERFACES)	/* That's all of them */
				return 0;
		}
	}

	if (found)
		return 0;	/* as long as we found at least one! */

	return -ENODEV;
}

module_init(gt64240_probe);
/*
 * Set up one ethernet port: allocate the net_device, the Rx/Tx
 * descriptor rings, the Rx data buffers and the address hash table,
 * then install the net_device methods.  Returns 0 or a negative errno.
 *
 * NOTE(review), left as-is in this documentation pass:
 *  - 'dev' leaks (no free_netdev) on the irq < 0 error return;
 *  - memcpy of sizeof(dev->dev_addr) may read past the 6-byte source
 *    array gt64240_station_addr[port_num] -- TODO confirm dev_addr's
 *    declared size in this kernel;
 *  - register_netdev() is never called here, yet the error path calls
 *    unregister_netdev() -- verify against the rest of the file;
 *  - rx_buff/hash_table are allocated with dma_alloc_coherent() but
 *    freed with dma_free_noncoherent() -- confirm.
 */
static int __init gt64240_probe1(unsigned long ioaddr, int irq, int port_num)
{
	struct net_device *dev = NULL;
	static unsigned version_printed = 0;
	struct gt64240_private *gp = NULL;
	int retval;
	u32 cpuConfig;

	dev = alloc_etherdev(sizeof(struct gt64240_private));
	if (!dev)
		return -ENOMEM;

	if (irq < 0) {
		printk
		    ("gt64240_probe1: irq unknown - probing not supported\n");
		return -ENODEV;
	}
#if 1				/* KLUDGE Alert: no check on return value: */
	if (!request_region(ioaddr, GT64240_ETH_IO_SIZE, "gt64240eth"))
		printk("*** request_region() failed!\n");
#endif

	cpuConfig = GT64240_READ(CPU_CONFIGURATION);
	printk("gt64240_probe1: cpu in %s-endian mode\n",
	       (cpuConfig & (1 << 12)) ? "little" : "big");

	printk("%s: GT64240 found at ioaddr 0x%lx, irq %d.\n",
	       dev->name, ioaddr, irq);

	if (gt64240_debug && version_printed++ == 0)
		printk("%s: %s", dev->name, version);

	/* private struct aligned and zeroed by init_etherdev */
	/* Fill in the 'dev' fields. */
	dev->base_addr = ioaddr;
	dev->irq = irq;
	memcpy(dev->dev_addr, gt64240_station_addr[port_num],
	       sizeof(dev->dev_addr));

	printk("%s: HW Address ", dev->name);
	dump_hw_addr(dev->dev_addr);

	gp = dev->priv;

	gp->msg_enable = (debug < 0 ? GT64240_MSG_ENABLE : debug);
	gp->port_num = port_num;
	gp->io_size = GT64240_ETH_IO_SIZE;
	gp->port_offset = port_num * GT64240_ETH_IO_SIZE;
	gp->phy_addr = gt64240_phy_addr[port_num];

	printk("%s: GT64240 ethernet port %d\n", dev->name, gp->port_num);

#ifdef GT64240_NAPI
	printk("Rx NAPI supported \n");
#endif

	/* MII Initialization */
	gp->mii_if.dev = dev;
	/* NOTE(review): phy_id is set from base_addr, not gp->phy_addr --
	 * looks suspicious; confirm intent before changing. */
	gp->mii_if.phy_id = dev->base_addr;
	gp->mii_if.mdio_read = read_MII;
	gp->mii_if.mdio_write = write_MII;
	gp->mii_if.advertising = read_MII(dev, gp->phy_addr, MII_ADVERTISE);

	// Allocate Rx and Tx descriptor rings
	if (gp->rx_ring == NULL) {
		// All descriptors in ring must be 16-byte aligned
		gp->rx_ring = dma_alloc_noncoherent(NULL,
			sizeof(gt64240_rd_t) * RX_RING_SIZE +
			sizeof(gt64240_td_t) * TX_RING_SIZE,
			&gp->rx_ring_dma, GFP_KERNEL);
		if (gp->rx_ring == NULL) {
			retval = -ENOMEM;
			goto free_region;
		}

		/* Tx ring shares the same allocation, after the Rx ring. */
		gp->tx_ring = (gt64240_td_t *) (gp->rx_ring + RX_RING_SIZE);
		gp->tx_ring_dma =
		    gp->rx_ring_dma + sizeof(gt64240_rd_t) * RX_RING_SIZE;
	}

	// Allocate the Rx Data Buffers
	if (gp->rx_buff == NULL) {
		gp->rx_buff = dma_alloc_coherent(NULL,
			PKT_BUF_SZ * RX_RING_SIZE, &gp->rx_buff_dma,
			GFP_KERNEL);
		if (gp->rx_buff == NULL) {
			dma_free_noncoherent(NULL,
				sizeof(gt64240_rd_t) * RX_RING_SIZE +
				sizeof(gt64240_td_t) * TX_RING_SIZE,
				gp->rx_ring, gp->rx_ring_dma);
			retval = -ENOMEM;
			goto free_region;
		}
	}

	if (gt64240_debug > 3)
		printk("%s: gt64240_probe1, rx_ring=%p, tx_ring=%p\n",
		       dev->name, gp->rx_ring, gp->tx_ring);

	// Allocate Rx Hash Table
	if (gp->hash_table == NULL) {
		gp->hash_table = dma_alloc_coherent(NULL,
			RX_HASH_TABLE_SIZE, &gp->hash_table_dma,
			GFP_KERNEL);
		if (gp->hash_table == NULL) {
			dma_free_noncoherent(NULL,
				sizeof(gt64240_rd_t) * RX_RING_SIZE +
				sizeof(gt64240_td_t) * TX_RING_SIZE,
				gp->rx_ring, gp->rx_ring_dma);
			dma_free_noncoherent(NULL, PKT_BUF_SZ * RX_RING_SIZE,
					     gp->rx_buff, gp->rx_buff_dma);
			retval = -ENOMEM;
			goto free_region;
		}
	}

	if (gt64240_debug > 3)
		printk("%s: gt64240_probe1, hash=%p\n",
		       dev->name, gp->hash_table);

	spin_lock_init(&gp->lock);

	/* Install the net_device methods. */
	dev->open = gt64240_open;
	dev->hard_start_xmit = gt64240_tx;
	dev->stop = gt64240_close;
	dev->get_stats = gt64240_get_stats;
	dev->do_ioctl = gt64240_ioctl;
	dev->set_multicast_list = gt64240_set_rx_mode;
	dev->tx_timeout = gt64240_tx_timeout;
	dev->watchdog_timeo = GT64240ETH_TX_TIMEOUT;

#ifdef GT64240_NAPI
	dev->poll = gt64240_poll;
	dev->weight = 64;
#endif
	dev->ethtool_ops = &gp_ethtool_ops;

	/* Fill in the fields of the device structure with ethernet values. */
	return 0;

free_region:
	release_region(ioaddr, gp->io_size);
	unregister_netdev(dev);
	free_netdev(dev);
	printk("%s: gt64240_probe1 failed. Returns %d\n",
	       dev->name, retval);
	return retval;
}
/*
 * Abort any Tx DMA in progress, free all queued skbs and rebuild the
 * Tx descriptor ring in a clean state, then point the hardware at the
 * start of the ring.
 */
static void reset_tx(struct net_device *dev)
{
	struct gt64240_private *gp = netdev_priv(dev);
	int i;

	abort(dev, sdcmrAT);

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (gp->tx_skbuff[i]) {
			/* dev_kfree_skb() may sleep; use the irq-safe
			 * variant when in interrupt context. */
			if (in_interrupt())
				dev_kfree_skb_irq(gp->tx_skbuff[i]);
			else
				dev_kfree_skb(gp->tx_skbuff[i]);
			gp->tx_skbuff[i] = NULL;
		}
//-     gp->tx_ring[i].cmdstat = 0; // CPU owns
		gp->tx_ring[i].cmdstat =
		    (u32) (txGenCRC | txEI | txPad | txFirst | txLast);
		gp->tx_ring[i].byte_cnt = 0;
		gp->tx_ring[i].buff_ptr = 0;
		gp->tx_ring[i].next =
		    gp->tx_ring_dma + sizeof(gt64240_td_t) * (i + 1);
		if (gt64240_debug > 4)
			dump_tx_desc(dev, i);
	}

	/* Wrap the ring. */
	gp->tx_ring[i - 1].next = gp->tx_ring_dma;
	if (gt64240_debug > 4)
		dump_tx_desc(dev, i - 1);

	// setup only the lowest priority TxCDP reg
	GT64240ETH_WRITE(gp, GT64240_ETH_CURR_TX_DESC_PTR0,
			 gp->tx_ring_dma);
//- GT64240ETH_WRITE(gp, GT64240_ETH_CURR_TX_DESC_PTR0, 0); /* ROLLINS */
//- GT64240ETH_WRITE(gp, GT64240_ETH_CURR_TX_DESC_PTR0,virt_to_phys(&gp->tx_ring[0])); /* ROLLINS */

	GT64240ETH_WRITE(gp, GT64240_ETH_CURR_TX_DESC_PTR1, 0);

	// init Tx indices and pkt counter
	gp->tx_next_in = gp->tx_next_out = 0;
	gp->tx_count = 0;
}
/*
 * Abort any Rx DMA in progress and rebuild the Rx descriptor ring:
 * every descriptor gets its fixed data buffer back and is handed to
 * the hardware (rxOwn).  Only priority queue 0 is used; the other
 * queues' descriptor pointers are zeroed.
 */
static void reset_rx(struct net_device *dev)
{
	struct gt64240_private *gp = netdev_priv(dev);
	int i;

	abort(dev, sdcmrAR);

	for (i = 0; i < RX_RING_SIZE; i++) {
		gp->rx_ring[i].next =
		    gp->rx_ring_dma + sizeof(gt64240_rd_t) * (i + 1);
		gp->rx_ring[i].buff_ptr = gp->rx_buff_dma + i * PKT_BUF_SZ;
		gp->rx_ring[i].buff_sz = PKT_BUF_SZ;
		gp->rx_ring[i].byte_cnt = 0;	/* just for debug printk's */
		// Give ownership to device, set first and last, enable interrupt
		gp->rx_ring[i].cmdstat =
		    (uint32_t) (rxFirst | rxLast | rxOwn | rxEI);
		if (gt64240_debug > 4)
			dump_rx_desc(dev, i);
	}

	/* Wrap the ring. */
	gp->rx_ring[i - 1].next = gp->rx_ring_dma;
	if (gt64240_debug > 4)
		dump_rx_desc(dev, i - 1);

	// Setup only the lowest priority RxFDP and RxCDP regs
	for (i = 0; i < 4; i++) {
		if (i == 0) {
			GT64240ETH_WRITE(gp, GT64240_ETH_1ST_RX_DESC_PTR0,
					 gp->rx_ring_dma);
			GT64240ETH_WRITE(gp, GT64240_ETH_CURR_RX_DESC_PTR0,
					 gp->rx_ring_dma);
		} else {
			GT64240ETH_WRITE(gp,
					 GT64240_ETH_1ST_RX_DESC_PTR0 +
					 i * 4, 0);
			GT64240ETH_WRITE(gp,
					 GT64240_ETH_CURR_RX_DESC_PTR0 +
					 i * 4, 0);
		}
	}

	// init Rx NextOut index
	gp->rx_next_out = 0;
}
/*
 * One-time port bring-up: stop the port, program the address hash
 * table, reset the Tx and Rx descriptor rings, reset the PHY,
 * configure the SDMA engine and port registers, start Rx DMA and
 * enable the ethernet interrupts.  Always returns 0.
 *
 * NOTE: the register writes below are order-dependent (port must be
 * stopped before ring/hash setup; ownership/enable bits last).
 */
static int gt64240_init(struct net_device *dev)
{
	struct gt64240_private *gp = netdev_priv(dev);

	if (gt64240_debug > 3) {
		printk("%s: gt64240_init: dev=%p\n", dev->name, dev);
		printk("%s: gt64240_init: scs0_lo=%04x, scs0_hi=%04x\n",
		       dev->name, GT64240_READ(0x008), GT64240_READ(0x010));
		printk("%s: gt64240_init: scs1_lo=%04x, scs1_hi=%04x\n",
		       dev->name, GT64240_READ(0x208), GT64240_READ(0x210));
		printk("%s: gt64240_init: scs2_lo=%04x, scs2_hi=%04x\n",
		       dev->name, GT64240_READ(0x018), GT64240_READ(0x020));
		printk("%s: gt64240_init: scs3_lo=%04x, scs3_hi=%04x\n",
		       dev->name, GT64240_READ(0x218), GT64240_READ(0x220));
	}

	// Stop and disable Port
	hard_stop(dev);

	// Mask the communication-unit interrupt sources
	GT64240_WRITE(COMM_UNIT_INTERRUPT_MASK, 0x07070777);	/*+prk21aug01 */
	if (gt64240_debug > 2)
		printk("%s: gt64240_init: CIU Cause=%08x, Mask=%08x, EAddr=%08x\n",
		       dev->name, GT64240_READ(COMM_UNIT_INTERRUPT_CAUSE),
		       GT64240_READ(COMM_UNIT_INTERRUPT_MASK),
		       GT64240_READ(COMM_UNIT_ERROR_ADDRESS));

	// Set-up hash table
	memset(gp->hash_table, 0, RX_HASH_TABLE_SIZE);	// clear it
	gp->hash_mode = 0;
	// Add a single entry to hash table - our ethernet address
	gt64240_add_hash_entry(dev, dev->dev_addr);
	// Set-up DMA ptr to hash table
	GT64240ETH_WRITE(gp, GT64240_ETH_HASH_TBL_PTR, gp->hash_table_dma);
	if (gt64240_debug > 3)
		printk("%s: gt64240_init: Hash Tbl Ptr=%x\n", dev->name,
		       GT64240ETH_READ(gp, GT64240_ETH_HASH_TBL_PTR));

	// Setup Tx
	reset_tx(dev);

	if (gt64240_debug > 3)
		printk("%s: gt64240_init: Curr Tx Desc Ptr0=%x\n",
		       dev->name,
		       GT64240ETH_READ(gp, GT64240_ETH_CURR_TX_DESC_PTR0));

	// Setup Rx
	reset_rx(dev);

	if (gt64240_debug > 3)
		printk("%s: gt64240_init: 1st/Curr Rx Desc Ptr0=%x/%x\n",
		       dev->name,
		       GT64240ETH_READ(gp, GT64240_ETH_1ST_RX_DESC_PTR0),
		       GT64240ETH_READ(gp, GT64240_ETH_CURR_RX_DESC_PTR0));

	if (gt64240_debug > 3)
		dump_MII(dev);

	/* force a PHY reset -- self-clearing! */
	write_MII(dev, gp->phy_addr, 0, 0x8000);

	if (gt64240_debug > 3)
		printk("%s: gt64240_init: PhyAD=%x\n", dev->name,
		       GT64240_READ(GT64240_ETH_PHY_ADDR_REG));

	// setup DMA
	// We want the Rx/Tx DMA to write/read data to/from memory in
	// Big Endian mode. Also set DMA Burst Size to 8 64Bit words.
#ifdef DESC_DATA_BE
	GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_CONFIG,
			 (0xf << sdcrRCBit) | sdcrRIFB | (3 << sdcrBSZBit));
#else
	GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_CONFIG, sdcrBLMR | sdcrBLMT |
			 //- (0xf<<sdcrRCBit) | sdcrRIFB | (3<<sdcrBSZBit));
			 (0xf << sdcrRCBit) | sdcrRIFB | (2 << sdcrBSZBit));
#endif

	if (gt64240_debug > 3)
		printk("%s: gt64240_init: SDMA Config=%x\n", dev->name,
		       GT64240ETH_READ(gp, GT64240_ETH_SDMA_CONFIG));

#if 0
	// start Rx DMA
	GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM, sdcmrERD);
#endif

	if (gt64240_debug > 3)
		printk("%s: gt64240_init: SDMA Cmd =%x\n", dev->name,
		       GT64240ETH_READ(gp, GT64240_ETH_SDMA_COMM));

#if 1
	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG, PORT_CONFIG);
#endif

	if (gt64240_debug > 3)
		printk("%s: gt64240_init: Port Config=%x\n", dev->name,
		       GT64240ETH_READ(gp, GT64240_ETH_PORT_CONFIG));

	/*
	 * Disable all Type-of-Service queueing. All Rx packets will be
	 * treated normally and will be sent to the lowest priority
	 * queue.
	 *
	 * Disable flow-control for now. FIX! support flow control?
	 */
#if 1
	// clear all the MIB ctr regs
	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG_EXT, EXT_CONFIG_CLEAR);
	read_mib_counters(gp);	// reading the counters clears them
	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG_EXT,
			 EXT_CONFIG_CLEAR | pcxrMIBclrMode);
#endif
	if (gt64240_debug > 3)
		printk("%s: gt64240_init: Port Config Ext=%x\n", dev->name,
		       GT64240ETH_READ(gp, GT64240_ETH_PORT_CONFIG_EXT));

	if (gt64240_debug > 3)
		printk("%s: gt64240_init: Port Command=%x\n", dev->name,
		       GT64240ETH_READ(gp, GT64240_ETH_PORT_COMMAND));
	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_COMMAND, 0x0);

	netif_start_queue(dev);

	/* enable the port */
	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG, (PORT_CONFIG | pcrEN));
	if (gt64240_debug > 3)
		printk("%s: gt64240_init: Port Config=%x\n", dev->name,
		       GT64240ETH_READ(gp, GT64240_ETH_PORT_CONFIG));
#if 1
	// start Rx DMA
	GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM, sdcmrERD);
#endif

	// enable interrupts
	enable_ether_irq(dev);

	//--- gp->last_psr |= psrLink;	/* KLUDGE ALERT */

	// we should now be receiving frames
	return 0;
}
1237 static int gt64240_open(struct net_device *dev)
1239 int retval;
1241 if (gt64240_debug > 3)
1242 printk("%s: gt64240_open: dev=%p\n", dev->name, dev);
1244 if ((retval = request_irq(dev->irq, &gt64240_interrupt,
1245 SA_SHIRQ, dev->name, dev))) {
1246 printk("%s: unable to get IRQ %d\n", dev->name, dev->irq);
1248 return retval;
1250 // Initialize and startup the GT-64240 ethernet port
1251 if ((retval = gt64240_init(dev))) {
1252 printk("%s: error in gt64240_open\n", dev->name);
1253 free_irq(dev->irq, dev);
1255 return retval;
1258 if (gt64240_debug > 3)
1259 printk("%s: gt64240_open: Initialization done.\n",
1260 dev->name);
1262 return 0;
1265 static int gt64240_close(struct net_device *dev)
1267 if (gt64240_debug > 3)
1268 printk("%s: gt64240_close: dev=%p\n", dev->name, dev);
1270 // stop the device
1271 if (netif_device_present(dev)) {
1272 netif_stop_queue(dev);
1273 hard_stop(dev);
1276 free_irq(dev->irq, dev);
1278 return 0;
1281 #ifdef GT64240_NAPI
/*
 * Function will release Tx skbs which are now complete.
 *
 * Walks the Tx ring from tx_next_out up to the hardware's current
 * descriptor pointer, freeing the skb of every descriptor the device
 * has given back (txOwn clear).  Bounded by gp->intr_work_done so one
 * interrupt cannot monopolize the CPU.  Caller must hold gp->lock.
 */
static void gt64240_tx_fill(struct net_device *dev, u32 status)
{
	struct gt64240_private *gp = netdev_priv(dev);
	int nextOut, cdp;
	gt64240_td_t *td;
	u32 cmdstat;

	/* Convert the hardware's current-descriptor bus address into a
	 * ring index. */
	cdp = (GT64240ETH_READ(gp, GT64240_ETH_CURR_TX_DESC_PTR0)
	       - gp->tx_ring_dma) / sizeof(gt64240_td_t);

	for (nextOut = gp->tx_next_out; nextOut != cdp;
	     nextOut = (nextOut + 1) % TX_RING_SIZE) {
		if (--gp->intr_work_done == 0)
			break;	// work quota exhausted; resume next time

		td = &gp->tx_ring[nextOut];
		cmdstat = td->cmdstat;

		if (cmdstat & (u32) txOwn)
			break;	// device still owns this one; stop here

		// ring has space again; restart the queue if link is up
		if (gp->tx_full) {
			gp->tx_full = 0;
			if (gp->last_psr & psrLink) {
				netif_wake_queue(dev);
			}
		}
		// decrement tx ring buffer count
		if (gp->tx_count)
			gp->tx_count--;

		// free the skb
		if (gp->tx_skbuff[nextOut]) {
			dev_kfree_skb_irq(gp->tx_skbuff[nextOut]);
			gp->tx_skbuff[nextOut] = NULL;
		}
	}

	gp->tx_next_out = nextOut;

	/* Tx DMA stopped at end of ring with work still pending? */
	if ((status & icrTxEndLow) && gp->tx_count != 0)
		// we must restart the DMA
		GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM,
				 sdcmrERD | sdcmrTXDL);
}
/*
 * Main function for NAPI (old-style dev->poll interface).
 *
 * Reclaims completed Tx descriptors, then receives up to
 * min(*budget, dev->quota) frames.  Returns 0 (and re-enables the
 * device interrupt) when all pending Rx work was finished, 1 when
 * the budget ran out and the core should poll again.
 */
static int gt64240_poll(struct net_device *dev, int *budget)
{
	struct gt64240_private *gp = netdev_priv(dev);
	unsigned long flags;
	int done = 1, orig_budget, work_done;
	u32 status = GT64240ETH_READ(gp, GT64240_ETH_INT_CAUSE);

	spin_lock_irqsave(&gp->lock, flags);
	// release completed Tx skbs first
	gt64240_tx_fill(dev, status);

	// any Rx descriptors pending beyond where we last stopped?
	if (GT64240ETH_READ(gp, GT64240_ETH_CURR_RX_DESC_PTR0) !=
	    gp->rx_next_out) {
		orig_budget = *budget;
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = gt64240_rx(dev, status, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
		if (work_done >= orig_budget)
			done = 0;	// budget exhausted; poll again
		if (done) {
			// all caught up: leave polling mode, re-arm IRQ
			__netif_rx_complete(dev);
			enable_ether_irq(dev);
		}
	}

	spin_unlock_irqrestore(&gp->lock, flags);

	return (done ? 0 : 1);
}
1365 #endif
1367 static int gt64240_tx(struct sk_buff *skb, struct net_device *dev)
1369 struct gt64240_private *gp = netdev_priv(dev);
1370 unsigned long flags;
1371 int nextIn;
1373 spin_lock_irqsave(&gp->lock, flags);
1375 nextIn = gp->tx_next_in;
1377 if (gt64240_debug > 3) {
1378 printk("%s: gt64240_tx: nextIn=%d.\n", dev->name, nextIn);
1381 if (gp->tx_count >= TX_RING_SIZE) {
1382 printk("%s: Tx Ring full, pkt dropped.\n", dev->name);
1383 gp->stats.tx_dropped++;
1384 spin_unlock_irqrestore(&gp->lock, flags);
1385 return 1;
1388 if (!(gp->last_psr & psrLink)) {
1389 printk("%s: gt64240_tx: Link down, pkt dropped.\n",
1390 dev->name);
1391 gp->stats.tx_dropped++;
1392 spin_unlock_irqrestore(&gp->lock, flags);
1393 //--- dump_MII(dev); /* KLUDGE ALERT !!! */
1394 return 1;
1397 if (gp->tx_ring[nextIn].cmdstat & txOwn) {
1398 printk
1399 ("%s: gt64240_tx: device owns descriptor, pkt dropped.\n",
1400 dev->name);
1401 gp->stats.tx_dropped++;
1402 // stop the queue, so Tx timeout can fix it
1403 netif_stop_queue(dev);
1404 spin_unlock_irqrestore(&gp->lock, flags);
1405 return 1;
1407 // Prepare the Descriptor at tx_next_in
1408 gp->tx_skbuff[nextIn] = skb;
1409 gp->tx_ring[nextIn].byte_cnt = skb->len;
1410 gp->tx_ring[nextIn].buff_ptr = virt_to_phys(skb->data);
1412 // make sure packet gets written back to memory
1413 dma_cache_wback_inv((unsigned long) (skb->data), skb->len);
1414 mb();
1416 // Give ownership to device, set first and last desc, enable interrupt
1417 // Setting of ownership bit must be *last*!
1418 gp->tx_ring[nextIn].cmdstat =
1419 txOwn | txGenCRC | txEI | txPad | txFirst | txLast;
1421 if (gt64240_debug > 5) {
1422 dump_tx_desc(dev, nextIn);
1424 // increment tx_next_in with wrap
1425 gp->tx_next_in = (nextIn + 1) % TX_RING_SIZE;
1427 //+prk20aug01:
1428 if (0) { /* ROLLINS */
1429 GT64240ETH_WRITE(gp, GT64240_ETH_CURR_TX_DESC_PTR0,
1430 virt_to_phys(&gp->tx_ring[nextIn]));
1433 if (gt64240_debug > 3) { /*+prk17aug01 */
1434 printk
1435 ("%s: gt64240_tx: TX_PTR0=0x%08x, EthPortStatus=0x%08x\n",
1436 dev->name, GT64240ETH_READ(gp,
1437 GT64240_ETH_CURR_TX_DESC_PTR0),
1438 GT64240ETH_READ(gp, GT64240_ETH_PORT_STATUS));
1440 // If DMA is stopped, restart
1441 if (!((GT64240ETH_READ(gp, GT64240_ETH_PORT_STATUS)) & psrTxLow)) {
1442 GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM,
1443 sdcmrERD | sdcmrTXDL);
1446 if (gt64240_debug > 3) { /*+prk17aug01 */
1447 printk
1448 ("%s: gt64240_tx: TX_PTR0=0x%08x, EthPortStatus=0x%08x\n",
1449 dev->name, GT64240ETH_READ(gp,
1450 GT64240_ETH_CURR_TX_DESC_PTR0),
1451 GT64240ETH_READ(gp, GT64240_ETH_PORT_STATUS));
1453 // increment count and stop queue if full
1454 if (++gp->tx_count >= TX_RING_SIZE) {
1455 gp->tx_full = 1;
1456 netif_stop_queue(dev);
1459 dev->trans_start = jiffies;
1460 spin_unlock_irqrestore(&gp->lock, flags);
1462 return 0;
1466 static int
1467 #ifdef GT64240_NAPI
1468 gt64240_rx(struct net_device *dev, u32 status, int budget)
1469 #else
1470 gt64240_rx(struct net_device *dev, u32 status)
1471 #endif
1473 struct gt64240_private *gp = netdev_priv(dev);
1474 struct sk_buff *skb;
1475 int pkt_len, nextOut, cdp;
1476 gt64240_rd_t *rd;
1477 u32 cmdstat;
1479 if (gt64240_debug > 3)
1480 printk("%s: gt64240_rx: dev=%p, status=%x\n",
1481 dev->name, dev, status);
1483 cdp = (GT64240ETH_READ(gp, GT64240_ETH_CURR_RX_DESC_PTR0)
1484 - gp->rx_ring_dma) / sizeof(gt64240_rd_t);
1486 // Continue until we reach the current descriptor pointer
1487 for (nextOut = gp->rx_next_out; nextOut != cdp;
1488 nextOut = (nextOut + 1) % RX_RING_SIZE) {
1490 #ifdef GT64240_NAPI
1491 if (budget <= 0)
1492 break;
1494 budget--;
1495 #endif
1497 if (--gp->intr_work_done == 0)
1498 break;
1500 if (gt64240_debug > 4)
1501 dump_rx_desc(dev, nextOut);
1503 rd = &gp->rx_ring[nextOut];
1504 cmdstat = rd->cmdstat;
1506 if (gt64240_debug > 3)
1507 printk("%s: isr: Rx desc cmdstat=%x, nextOut=%d\n",
1508 dev->name, cmdstat, nextOut);
1510 if (cmdstat & (u32) rxOwn) {
1511 if (gt64240_debug > 2)
1512 printk
1513 ("%s: gt64240_rx: device owns descriptor!\n",
1514 dev->name);
1515 // DMA is not finished updating descriptor???
1516 // Leave and come back later to pick-up where we left off.
1517 break;
1519 // must be first and last (ie only) buffer of packet
1520 if (!(cmdstat & (u32) rxFirst)
1521 || !(cmdstat & (u32) rxLast)) {
1522 printk
1523 ("%s: gt64240_rx: desc not first and last!\n",
1524 dev->name);
1525 cmdstat |= (u32) rxOwn;
1526 rd->cmdstat = cmdstat;
1527 continue;
1529 // Drop this received pkt if there were any errors
1530 if ((cmdstat & (u32) rxErrorSummary)
1531 || (status & icrRxError)) {
1532 // update the detailed rx error counters that are not covered
1533 // by the MIB counters.
1534 if (cmdstat & (u32) rxOverrun)
1535 gp->stats.rx_fifo_errors++;
1536 cmdstat |= (u32) rxOwn;
1537 rd->cmdstat = cmdstat;
1538 continue;
1541 pkt_len = rd->byte_cnt;
1543 /* Create new skb. */
1544 // skb = dev_alloc_skb(pkt_len+2);
1545 skb = dev_alloc_skb(1538);
1546 if (skb == NULL) {
1547 printk("%s: Memory squeeze, dropping packet.\n",
1548 dev->name);
1549 gp->stats.rx_dropped++;
1550 cmdstat |= (u32) rxOwn;
1551 rd->cmdstat = cmdstat;
1552 continue;
1554 skb->dev = dev;
1555 skb_reserve(skb, 2); /* 16 byte IP header align */
1556 memcpy(skb_put(skb, pkt_len),
1557 &gp->rx_buff[nextOut * PKT_BUF_SZ], pkt_len);
1558 skb->protocol = eth_type_trans(skb, dev);
1560 /* NIC performed some checksum computation */
1561 skb->ip_summed = CHECKSUM_UNNECESSARY;
1562 #ifdef GT64240_NAPI
1563 netif_receive_skb(skb);
1564 #else
1565 netif_rx(skb); /* pass the packet to upper layers */
1566 #endif
1568 // now we can release ownership of this desc back to device
1569 cmdstat |= (u32) rxOwn;
1570 rd->cmdstat = cmdstat;
1572 dev->last_rx = jiffies;
1575 if (gt64240_debug > 3 && nextOut == gp->rx_next_out)
1576 printk("%s: gt64240_rx: RxCDP did not increment?\n",
1577 dev->name);
1579 gp->rx_next_out = nextOut;
1580 return 0;
/*
 * Tx watchdog handler: if the link is up but transmission has stalled,
 * reset the Tx machinery (with the device IRQ masked across the reset)
 * and restart the queue.  A timeout while the link is down is ignored.
 */
static void gt64240_tx_timeout(struct net_device *dev)
{
	struct gt64240_private *gp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&gp->lock, flags);

	if (!(gp->last_psr & psrLink)) {
		// link down: nothing we can reset our way out of
		spin_unlock_irqrestore(&gp->lock, flags);
	} else {
		printk("======------> gt64240_tx_timeout: %d jiffies \n",
		       GT64240ETH_TX_TIMEOUT);

		// mask our IRQ while reset_tx() reworks the ring
		disable_ether_irq(dev);
		spin_unlock_irqrestore(&gp->lock, flags);
		reset_tx(dev);
		enable_ether_irq(dev);

		netif_wake_queue(dev);
	}
}
/*
 * set_multicast_list handler: reprogram promiscuous mode and rebuild
 * the Rx address hash table (our own address plus every multicast
 * address on dev->mc_list).  Rx DMA is aborted while the table is
 * rewritten and restarted at the end.
 */
static void gt64240_set_rx_mode(struct net_device *dev)
{
	struct gt64240_private *gp = netdev_priv(dev);
	unsigned long flags;
	struct dev_mc_list *mcptr;

	if (gt64240_debug > 3)
		printk("%s: gt64240_set_rx_mode: dev=%p, flags=%x\n",
		       dev->name, dev, dev->flags);

	// stop the Receiver DMA
	abort(dev, sdcmrAR);

	spin_lock_irqsave(&gp->lock, flags);

	if (dev->flags & IFF_PROMISC)
		GT64240ETH_SETBIT(gp, GT64240_ETH_PORT_CONFIG, pcrPM);
	else
		GT64240ETH_CLRBIT(gp, GT64240_ETH_PORT_CONFIG, pcrPM);

	/* NOTE(review): this write sets pcrPM unconditionally, which
	 * appears to override the SETBIT/CLRBIT just above and leave the
	 * port in promiscuous mode regardless of IFF_PROMISC — confirm
	 * against the GT-64240 datasheet / original source before fixing. */
	GT64240ETH_WRITE(gp, GT64240_ETH_PORT_CONFIG,
			 (PORT_CONFIG | pcrPM | pcrEN));

	memset(gp->hash_table, 0, RX_HASH_TABLE_SIZE);	// clear hash table
	// Add our ethernet address
	gt64240_add_hash_entry(dev, dev->dev_addr);
	if (dev->mc_count) {
		// add every subscribed multicast address to the hash table
		for (mcptr = dev->mc_list; mcptr; mcptr = mcptr->next) {
			if (gt64240_debug > 2) {
				printk("%s: gt64240_set_rx_mode: addr=\n",
				       dev->name);
				dump_hw_addr(mcptr->dmi_addr);
			}
			gt64240_add_hash_entry(dev, mcptr->dmi_addr);
		}
	}

	if (gt64240_debug > 3)
		printk("%s: gt64240_set_rx: Port Config=%x\n", dev->name,
		       GT64240ETH_READ(gp, GT64240_ETH_PORT_CONFIG));

	// restart Rx DMA
	GT64240ETH_WRITE(gp, GT64240_ETH_SDMA_COMM, sdcmrERD);

	spin_unlock_irqrestore(&gp->lock, flags);
}
1656 static struct net_device_stats *gt64240_get_stats(struct net_device *dev)
1658 struct gt64240_private *gp = netdev_priv(dev);
1659 unsigned long flags;
1661 if (gt64240_debug > 3)
1662 printk("%s: gt64240_get_stats: dev=%p\n", dev->name, dev);
1664 if (netif_device_present(dev)) {
1665 spin_lock_irqsave(&gp->lock, flags);
1666 update_stats(gp);
1667 spin_unlock_irqrestore(&gp->lock, flags);
1670 return &gp->stats;