/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
/*
    Written/copyright 1997-2000 by Donald Becker.

    This software may be used and distributed according to the terms of
    the GNU General Public License (GPL), incorporated herein by reference.
    Drivers based on or derived from this code fall under the GPL and must
    retain the authorship, copyright and license notice.  This file is not
    a complete program and may only be used when the entire operating
    system is licensed under the GPL.

    This driver is for the SMC83c170/175 "EPIC" series, as used on the
    SMC EtherPower II 9432 PCI adapter, and several CardBus cards.

    The author may be reached as becker@scyld.com, or C/O
    Scyld Computing Corporation
    410 Severn Ave., Suite 210
    Annapolis MD 21403

    Information and updates available at
    http://www.scyld.com/network/epic100.html

    ---------------------------------------------------------------------

    Linux kernel-specific changes:

    LK1.1.2 (jgarzik):
    * Merge becker version 1.09 (4/08/2000)

    LK1.1.3:
    * Major bugfix to 1.09 driver (Francis Romieu)

    LK1.1.4 (jgarzik):
    * Merge becker test version 1.09 (5/29/2000)
*/
/* These identify the driver base version and may not be removed. */
static const char version[] =
"epic100.c:v1.09 5/29/2000 Written by Donald Becker <becker@scyld.com>\n";
static const char version2[] =
" http://www.scyld.com/network/epic100.html\n";
static const char version3[] =
" (unofficial 2.4.x kernel port, version 1.1.4, August 10, 2000)\n";
/* The user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;                   /* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 32;

/* Used to pass the full-duplex flag, etc. */
#define MAX_UNITS 8                     /* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
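/* A usage note: when built as a module, these can be given per-card values at
   load time, e.g. "insmod epic100 debug=2 full_duplex=1,1" (the values shown
   are only illustrative); each array element applies to one card in probe
   order. */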
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak = 0;
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for operational efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE    16
#define TX_QUEUE_LEN    10              /* Limit ring entries actually used. */
#define RX_RING_SIZE    32

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (2*HZ)

#define PKT_BUF_SZ      1536            /* Size of each temporary Rx buffer. */

/* Bytes transferred to chip before transmission starts. */
/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
#define TX_FIFO_THRESH  256
#define RX_FIFO_THRESH  1               /* 0-3: 0==32, 1==64, 2==96, 3==128 bytes. */
#if !defined(__OPTIMIZE__)
#warning You must compile this file with the correct options!
#warning See the last lines of the source file.
#error You must compile this driver with "-O".
#endif
#include <linux/version.h>
#include <linux/module.h>
#if LINUX_VERSION_CODE < 0x20300 && defined(MODVERSIONS)
#include <linux/modversions.h>
#endif

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <asm/bitops.h>
#include <asm/io.h>
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
MODULE_PARM(debug, "i");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
/*
                Theory of Operation

I. Board Compatibility

This device driver is designed for the SMC "EPIC/100", the SMC
single-chip Ethernet controllers for PCI.  This chip is used on
the SMC EtherPower II boards.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS will assign the
PCI INTA signal to a (preferably otherwise unused) system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.

III. Driver operation

IIIa. Ring buffers

IVb. References

http://www.smsc.com/main/datasheets/83c171.pdf
http://www.smsc.com/main/datasheets/83c175.pdf
http://scyld.com/expert/NWay.html
http://www.national.com/pf/DP/DP83840A.html

IVc. Errata

*/
enum pci_id_flags_bits {
    /* Set PCI command register bits before calling probe1(). */
    PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
    /* Read and map the single following PCI BAR. */
    PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
    PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
};

enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };

#define EPIC_TOTAL_SIZE 0x100
#ifdef USE_IO_OPS
#define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR0
#else
#define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR1
#endif

#define virt_to_le32desc(addr)  cpu_to_le32(virt_to_bus(addr))
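/* Descriptor fields hold little-endian PCI bus addresses; this helper just
   combines virt_to_bus() with cpu_to_le32(), matching the 2.4-era
   static-mapping DMA model used throughout the driver. */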
typedef enum {
    SMSC_83C170_0,
    SMSC_83C170,
    SMSC_83C175,
} chip_t;

struct epic_chip_info {
    const char *name;
    enum pci_id_flags_bits pci_flags;
    int io_size;                        /* Needed for I/O region check or ioremap(). */
    int drv_flags;                      /* Driver use, intended as capability flags. */
};
/* indexed by chip_t */
static struct epic_chip_info epic_chip_info[] __devinitdata = {
    { "SMSC EPIC/100 83c170",
      EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN },
    { "SMSC EPIC/100 83c170",
      EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR },
    { "SMSC EPIC/C 83c175",
      EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN },
};
static struct pci_device_id epic_pci_tbl[] __devinitdata = {
    { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
    { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
    { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
      PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
    { 0, }
};
MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
#ifndef USE_IO_OPS
#undef inb
#undef inw
#undef inl
#undef outb
#undef outw
#undef outl
#define inb readb
#define inw readw
#define inl readl
#define outb writeb
#define outw writew
#define outl writel
#endif
/* Offsets to registers, using the (ugh) SMC names. */
enum epic_registers {
    COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
    PCIBurstCnt=0x18,
    TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28,   /* Rx error counters. */
    MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
    LAN0=64,                            /* MAC address. */
    MC0=80,                             /* Multicast filter table. */
    RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
    PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
};
/* Interrupt register bits, using my own meaningful names. */
enum IntrStatus {
    TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
    PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
    RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
    TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
    RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
};
enum CommandBits {
    StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
    StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
};
static u16 media2miictl[16] = {
    0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0 };
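/* The index is the low four bits of dev->if_port.  The nonzero entries look
   like MII BMCR values (0x2000 selecting 100 Mbps, 0x0100 full duplex,
   0x2100 both); epic_open() writes the chosen value to PHY register 0 when a
   forced media type is requested.  This reading is inferred from the code
   below, not from chip documentation. */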
/* The EPIC100 Rx and Tx buffer descriptors. */

struct epic_tx_desc {
    u32 txstatus;
    u32 bufaddr;
    u32 buflength;
    u32 next;
};

struct epic_rx_desc {
    u32 rxstatus;
    u32 bufaddr;
    u32 buflength;
    u32 next;
};

enum desc_status_bits {
    DescOwn=0x8000,
};
struct epic_private {
    /* Tx and Rx rings first so that they remain paragraph aligned. */
    struct epic_rx_desc rx_ring[RX_RING_SIZE];
    struct epic_tx_desc tx_ring[TX_RING_SIZE];
    /* The saved address of a sent-in-place packet/buffer, for skfree(). */
    struct sk_buff *tx_skbuff[TX_RING_SIZE];
    /* The addresses of receive-in-place skbuffs. */
    struct sk_buff *rx_skbuff[RX_RING_SIZE];

    /* Ring pointers. */
    spinlock_t lock;                    /* Group with Tx control cache line. */
    unsigned int cur_tx, dirty_tx;
    struct descriptor *last_tx_desc;

    unsigned int cur_rx, dirty_rx;
    unsigned int rx_buf_sz;             /* Based on MTU+slack. */
    struct descriptor *last_rx_desc;
    long last_rx_time;                  /* Last Rx, in jiffies. */

    struct pci_dev *pci_dev;            /* PCI bus location. */
    int chip_flags;

    struct net_device_stats stats;
    struct timer_list timer;            /* Media selection timer. */
    int tx_threshold;
    unsigned char mc_filter[8];
    signed char phys[4];                /* MII device addresses. */
    u16 advertising;                    /* NWay media advertisement */
    int mii_phy_cnt;
    unsigned int tx_full:1;             /* The Tx queue is full. */
    unsigned int full_duplex:1;         /* Current duplex setting. */
    unsigned int force_fd:1;            /* Full-duplex operation requested. */
    unsigned int default_port:4;        /* Last dev->if_port value. */
    unsigned int media2:4;              /* Secondary monitored media port. */
    unsigned int medialock:1;           /* Don't sense media type. */
    unsigned int mediasense:1;          /* Media sensing in progress. */
};
static int epic_open(struct net_device *dev);
static int read_eeprom(long ioaddr, int location);
static int mdio_read(long ioaddr, int phy_id, int location);
static void mdio_write(long ioaddr, int phy_id, int location, int value);
static void epic_restart(struct net_device *dev);
static void epic_timer(unsigned long data);
static void epic_tx_timeout(struct net_device *dev);
static void epic_init_ring(struct net_device *dev);
static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int epic_rx(struct net_device *dev);
static void epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int epic_close(struct net_device *dev);
static struct net_device_stats *epic_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static int __devinit epic_init_one (struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
{
    static int card_idx = -1;
    static int printed_version = 0;
    struct net_device *dev;
    struct epic_private *ep;
    int i, option = 0, duplex = 0;
    struct epic_chip_info *ci = &epic_chip_info[ent->driver_data];
    long ioaddr;
    int chip_idx = (int) ent->driver_data;

    card_idx++;

    if (!printed_version++)
        printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
                version, version2, version3);

    if ((pci_resource_len(pdev, 0) < ci->io_size) ||
        (pci_resource_len(pdev, 1) < ci->io_size)) {
        printk (KERN_ERR "card %d: no PCI region space\n", card_idx);
        return -ENODEV;
    }

    i = pci_enable_device(pdev);
    if (i)
        return i;

    pci_set_master(pdev);

    dev = init_etherdev(NULL, sizeof (*ep));
    if (!dev) {
        printk (KERN_ERR "card %d: no memory for eth device\n", card_idx);
        return -ENOMEM;
    }
    /* request 100% of both regions 0 and 1, just to make
     * sure no one else steals our regions while we are talking
     * to them */
    if (!request_region (pci_resource_start (pdev, 0),
                         pci_resource_len (pdev, 0), dev->name)) {
        printk (KERN_ERR "epic100 %d: I/O region busy\n", card_idx);
        goto err_out_free_netdev;
    }
    if (!request_mem_region (pci_resource_start (pdev, 1),
                             pci_resource_len (pdev, 1), dev->name)) {
        printk (KERN_ERR "epic100 %d: I/O region busy\n", card_idx);
        goto err_out_free_pio;
    }

#ifdef USE_IO_OPS
    ioaddr = pci_resource_start (pdev, 0);
#else
    ioaddr = pci_resource_start (pdev, 1);
    ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
    if (!ioaddr) {
        printk (KERN_ERR "epic100 %d: ioremap failed\n", card_idx);
        goto err_out_free_mmio;
    }
#endif
    if (dev->mem_start) {
        option = dev->mem_start;
        duplex = (dev->mem_start & 16) ? 1 : 0;
    } else if (card_idx >= 0 && card_idx < MAX_UNITS) {
        if (options[card_idx] >= 0)
            option = options[card_idx];
        if (full_duplex[card_idx] >= 0)
            duplex = full_duplex[card_idx];
    }

    pdev->driver_data = dev;

    dev->base_addr = ioaddr;
    dev->irq = pdev->irq;

    ep = dev->priv;
    ep->pci_dev = pdev;
    ep->chip_flags = ci->drv_flags;
    spin_lock_init (&ep->lock);

    printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
           dev->name, ci->name, ioaddr, dev->irq);
    /* Bring the chip out of low-power mode. */
    outl(0x4200, ioaddr + GENCTL);
    /* Magic?!  If we don't set this bit the MII interface won't work. */
    outl(0x0008, ioaddr + TEST1);

    /* Turn on the MII transceiver. */
    outl(0x12, ioaddr + MIICfg);
    if (chip_idx == 1)
        outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
    outl(0x0200, ioaddr + GENCTL);

    /* This could also be read from the EEPROM. */
    for (i = 0; i < 3; i++)
        ((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));

    for (i = 0; i < 5; i++)
        printk("%2.2x:", dev->dev_addr[i]);
    printk("%2.2x.\n", dev->dev_addr[i]);

    if (debug > 2) {
        printk(KERN_DEBUG "%s: EEPROM contents\n", dev->name);
        for (i = 0; i < 64; i++)
            printk(" %4.4x%s", read_eeprom(ioaddr, i),
                   i % 16 == 15 ? "\n" : "");
    }
    /* Find the connected MII xcvrs.
       Doing this in open() would allow detecting external xcvrs later, but
       takes much time and no cards have external MII. */
    {
        int phy, phy_idx = 0;
        for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
            int mii_status = mdio_read(ioaddr, phy, 1);
            if (mii_status != 0xffff && mii_status != 0x0000) {
                ep->phys[phy_idx++] = phy;
                printk(KERN_INFO "%s: MII transceiver #%d control "
                       "%4.4x status %4.4x.\n",
                       dev->name, phy, mdio_read(ioaddr, phy, 0), mii_status);
            }
        }
        ep->mii_phy_cnt = phy_idx;
        if (phy_idx != 0) {
            phy = ep->phys[0];
            ep->advertising = mdio_read(ioaddr, phy, 4);
            printk(KERN_INFO "%s: Autonegotiation advertising %4.4x link "
                   "partner %4.4x.\n",
                   dev->name, ep->advertising, mdio_read(ioaddr, phy, 5));
        } else if ( ! (ep->chip_flags & NO_MII)) {
            printk(KERN_WARNING "%s: ***WARNING***: No MII transceiver found!\n",
                   dev->name);
            /* Use the known PHY address of the EPII. */
            ep->phys[0] = 3;
        }
    }
    /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
    if (ep->chip_flags & MII_PWRDWN)
        outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
    outl(0x0008, ioaddr + GENCTL);

    /* The lower four bits are the media type. */
    ep->force_fd = duplex;
    dev->if_port = ep->default_port = option;
    if (ep->default_port)
        ep->medialock = 1;

    /* The Epic-specific entries in the device structure. */
    dev->open = &epic_open;
    dev->hard_start_xmit = &epic_start_xmit;
    dev->stop = &epic_close;
    dev->get_stats = &epic_get_stats;
    dev->set_multicast_list = &set_rx_mode;
    dev->do_ioctl = &mii_ioctl;
    dev->watchdog_timeo = TX_TIMEOUT;
    dev->tx_timeout = &epic_tx_timeout;

    return 0;
err_out_iounmap:
#ifndef USE_IO_OPS
    iounmap ((void *) ioaddr);
err_out_free_mmio:
#endif
    release_mem_region (pci_resource_start (pdev, 1),
                        pci_resource_len (pdev, 1));
err_out_free_pio:
    release_region (pci_resource_start (pdev, 0),
                    pci_resource_len (pdev, 0));
err_out_free_netdev:
    unregister_netdev(dev);
    kfree(dev);
    return -ENODEV;
}
/* Serial EEPROM section. */

/* EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK    0x04    /* EEPROM shift clock. */
#define EE_CS           0x02    /* EEPROM chip select. */
#define EE_DATA_WRITE   0x08    /* EEPROM chip data in. */
#define EE_WRITE_0      0x01
#define EE_WRITE_1      0x09
#define EE_DATA_READ    0x10    /* EEPROM chip data out. */
#define EE_ENB          (0x0001 | EE_CS)
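/* read_eeprom() below bit-bangs what appears to be a 93C46/93C66-style
   Microwire serial EEPROM through EECTL: a start bit, read opcode and word
   address are shifted out MSB first, then 16 data bits are clocked back in.
   The part numbers are an assumption; the protocol is simply what the code
   implements. */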
/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33MHz PCI, but 66MHz is untested. */

#define eeprom_delay()  inl(ee_addr)

/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD    (5 << 6)
#define EE_READ64_CMD   (6 << 6)
#define EE_READ256_CMD  (6 << 8)
#define EE_ERASE_CMD    (7 << 6)
static int read_eeprom(long ioaddr, int location)
{
    int i;
    int retval = 0;
    long ee_addr = ioaddr + EECTL;
    int read_cmd = location |
        (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);

    outl(EE_ENB & ~EE_CS, ee_addr);
    outl(EE_ENB, ee_addr);

    /* Shift the read command bits out. */
    for (i = 12; i >= 0; i--) {
        short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
        outl(EE_ENB | dataval, ee_addr);
        eeprom_delay();
        outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
        eeprom_delay();
    }
    outl(EE_ENB, ee_addr);

    for (i = 16; i > 0; i--) {
        outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
        eeprom_delay();
        retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
        outl(EE_ENB, ee_addr);
        eeprom_delay();
    }

    /* Terminate the EEPROM access. */
    outl(EE_ENB & ~EE_CS, ee_addr);
    return retval;
}
#define MII_READOP      1
#define MII_WRITEOP     2
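/* As used by mdio_read()/mdio_write() below, MIICtrl takes the PHY address
   in bits 9-13 and the register number in bits 4-8, with the low bits
   selecting the read or write operation; the operation bit clears when the
   management cycle completes.  This summary is derived from the accesses
   below rather than the datasheet. */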
static int mdio_read(long ioaddr, int phy_id, int location)
{
    int i;

    outl((phy_id << 9) | (location << 4) | MII_READOP, ioaddr + MIICtrl);
    /* Typical operation takes < 50 ticks. */
    for (i = 4000; i > 0; i--)
        if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0)
            return inw(ioaddr + MIIData);
    return 0xffff;
}
static void mdio_write(long ioaddr, int phy_id, int location, int value)
{
    int i;

    outw(value, ioaddr + MIIData);
    outl((phy_id << 9) | (location << 4) | MII_WRITEOP, ioaddr + MIICtrl);
    for (i = 10000; i > 0; i--) {
        if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
            break;
    }
    return;
}
static int epic_open(struct net_device *dev)
{
    struct epic_private *ep = (struct epic_private *)dev->priv;
    long ioaddr = dev->base_addr;
    int i;
    int retval;

    ep->full_duplex = ep->force_fd;

    /* Soft reset the chip. */
    outl(0x4001, ioaddr + GENCTL);

    MOD_INC_USE_COUNT;

    if ((retval = request_irq(dev->irq, &epic_interrupt, SA_SHIRQ, dev->name, dev))) {
        MOD_DEC_USE_COUNT;
        return retval;
    }
    epic_init_ring(dev);

    outl(0x4000, ioaddr + GENCTL);
    /* This next magic! line by Ken Yamaguchi.. ?? */
    outl(0x0008, ioaddr + TEST1);

    /* Pull the chip out of low-power mode, enable interrupts, and set for
       PCI read multiple.  The MIIcfg setting and strange write order are
       required by the details of which bits are reset and the transceiver
       wiring on the Ositech CardBus card. */
    outl(0x12, ioaddr + MIICfg);
    if (ep->chip_flags & MII_PWRDWN)
        outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);

#if defined(__powerpc__) || defined(__sparc__)          /* Big endian */
    outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
    inl(ioaddr + GENCTL);
    outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#else
    outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
    inl(ioaddr + GENCTL);
    outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#endif
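    /* Assumption: the extra 0x0020 in the big-endian GENCTL values appears to
       select the chip's byte-swapped DMA data ordering for big-endian hosts.
       This is inferred from the #ifdef above, not verified against the
       83c170 datasheet. */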
    for (i = 0; i < 3; i++)
        outl(cpu_to_le16(((u16 *)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);

    ep->tx_threshold = TX_FIFO_THRESH;
    outl(ep->tx_threshold, ioaddr + TxThresh);

    if (media2miictl[dev->if_port & 15]) {
        if (ep->mii_phy_cnt)
            mdio_write(ioaddr, ep->phys[0], 0, media2miictl[dev->if_port & 15]);
        if (dev->if_port == 1) {
            if (debug > 1)
                printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
                       "status %4.4x.\n",
                       dev->name, mdio_read(ioaddr, ep->phys[0], 1));
            outl(0x13, ioaddr + MIICfg);
        }
    } else {
        int mii_reg5 = mdio_read(ioaddr, ep->phys[0], 5);
        if (mii_reg5 != 0xffff) {
            if ((mii_reg5 & 0x0100) || (mii_reg5 & 0x01C0) == 0x0040)
                ep->full_duplex = 1;
            else if (! (mii_reg5 & 0x4000))
                mdio_write(ioaddr, ep->phys[0], 0, 0x1200);
            if (debug > 1)
                printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
                       " register read of %4.4x.\n", dev->name,
                       ep->full_duplex ? "full" : "half",
                       ep->phys[0], mii_reg5);
        }
    }

    outl(ep->full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
    outl(virt_to_bus(ep->rx_ring), ioaddr + PRxCDAR);
    outl(virt_to_bus(ep->tx_ring), ioaddr + PTxCDAR);
    /* Start the chip's Rx process. */
    set_rx_mode(dev);
    outl(StartRx | RxQueued, ioaddr + COMMAND);

    netif_start_queue(dev);

    /* Enable interrupts by setting the interrupt mask. */
    outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
         | CntFull | TxUnderrun | TxDone | TxEmpty
         | RxError | RxOverflow | RxFull | RxHeader | RxDone,
         ioaddr + INTMASK);

    if (debug > 1)
        printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
               "%s-duplex.\n",
               dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
               ep->full_duplex ? "full" : "half");

    /* Set the timer to switch to check for link beat and perhaps switch
       to an alternate media type. */
    init_timer(&ep->timer);
    ep->timer.expires = jiffies + 3*HZ;
    ep->timer.data = (unsigned long)dev;
    ep->timer.function = &epic_timer;           /* timer handler */
    add_timer(&ep->timer);

    return 0;
}
/* Reset the chip to recover from a PCI transaction error.
   This may occur at interrupt time. */
static void epic_pause(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    struct epic_private *ep = (struct epic_private *)dev->priv;

    netif_stop_queue (dev);

    /* Disable interrupts by clearing the interrupt mask. */
    outl(0x00000000, ioaddr + INTMASK);
    /* Stop the chip's Tx and Rx DMA processes. */
    outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);

    /* Update the error counts. */
    if (inw(ioaddr + COMMAND) != 0xffff) {
        ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
        ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
        ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
    }

    /* Remove the packets on the Rx queue. */
    epic_rx(dev);
}
static void epic_restart(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    struct epic_private *ep = (struct epic_private *)dev->priv;
    int i;

    printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
           dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
    /* Soft reset the chip. */
    outl(0x0001, ioaddr + GENCTL);

    udelay(1);
    /* Duplicate code from epic_open(). */
    outl(0x0008, ioaddr + TEST1);

#if defined(__powerpc__) || defined(__sparc__)          /* Big endian */
    outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#else
    outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#endif
    outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
    if (ep->chip_flags & MII_PWRDWN)
        outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);

    for (i = 0; i < 3; i++)
        outl(cpu_to_le16(((u16 *)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);

    ep->tx_threshold = TX_FIFO_THRESH;
    outl(ep->tx_threshold, ioaddr + TxThresh);
    outl(ep->full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
    outl(virt_to_bus(&ep->rx_ring[ep->cur_rx % RX_RING_SIZE]), ioaddr + PRxCDAR);
    outl(virt_to_bus(&ep->tx_ring[ep->dirty_tx % TX_RING_SIZE]),
         ioaddr + PTxCDAR);

    /* Start the chip's Rx process. */
    set_rx_mode(dev);
    outl(StartRx | RxQueued, ioaddr + COMMAND);

    /* Enable interrupts by setting the interrupt mask. */
    outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
         | CntFull | TxUnderrun | TxDone | TxEmpty
         | RxError | RxOverflow | RxFull | RxHeader | RxDone,
         ioaddr + INTMASK);
    printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
           " interrupt %4.4x.\n",
           dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
           (int)inl(ioaddr + INTSTAT));
    return;
}
static void epic_timer(unsigned long data)
{
    struct net_device *dev = (struct net_device *)data;
    struct epic_private *ep = (struct epic_private *)dev->priv;
    long ioaddr = dev->base_addr;
    int next_tick = 60*HZ;
    int mii_reg5 = ep->mii_phy_cnt ? mdio_read(ioaddr, ep->phys[0], 5) : 0;
    int negotiated = mii_reg5 & ep->advertising;
    int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;

    if (debug > 3) {
        printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
               dev->name, (int)inl(ioaddr + TxSTAT));
        printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
               "IntStatus %4.4x RxStatus %4.4x.\n",
               dev->name, (int)inl(ioaddr + INTMASK),
               (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
    }

    if (! ep->force_fd) {
        if (ep->full_duplex != duplex) {
            ep->full_duplex = duplex;
            printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
                   " partner capability of %4.4x.\n", dev->name,
                   ep->full_duplex ? "full" : "half", ep->phys[0], mii_reg5);
            outl(ep->full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
        }
    }

    ep->timer.expires = jiffies + next_tick;
    add_timer(&ep->timer);
}
static void epic_tx_timeout(struct net_device *dev)
{
    struct epic_private *ep = (struct epic_private *)dev->priv;
    long ioaddr = dev->base_addr;

    if (debug > 0) {
        printk(KERN_WARNING "%s: Transmit timeout using MII device, "
               "Tx status %4.4x.\n",
               dev->name, (int)inw(ioaddr + TxSTAT));
        if (debug > 1) {
            printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
                   dev->name, ep->dirty_tx, ep->cur_tx);
        }
    }
    if (inw(ioaddr + TxSTAT) & 0x10) {          /* Tx FIFO underflow. */
        ep->stats.tx_fifo_errors++;
        outl(RestartTx, ioaddr + COMMAND);
    } else {
        epic_restart(dev);
        outl(TxQueued, dev->base_addr + COMMAND);
    }

    dev->trans_start = jiffies;
    ep->stats.tx_errors++;
    return;
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void epic_init_ring(struct net_device *dev)
{
    struct epic_private *ep = (struct epic_private *)dev->priv;
    int i;

    ep->tx_full = 0;
    ep->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
    ep->dirty_tx = ep->cur_tx = 0;
    ep->cur_rx = ep->dirty_rx = 0;
    ep->last_rx_time = jiffies;
    ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

    /* Initialize all Rx descriptors. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        ep->rx_ring[i].rxstatus = 0;
        ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
        ep->rx_ring[i].next = virt_to_le32desc(&ep->rx_ring[i+1]);
        ep->rx_skbuff[i] = 0;
    }
    /* Mark the last entry as wrapping the ring. */
    ep->rx_ring[i-1].next = virt_to_le32desc(&ep->rx_ring[0]);

    /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
        ep->rx_skbuff[i] = skb;
        if (skb == NULL)
            break;
        skb->dev = dev;                 /* Mark as being used by this device. */
        skb_reserve(skb, 2);            /* 16 byte align the IP header. */
        ep->rx_ring[i].bufaddr = virt_to_le32desc(skb->tail);
        ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
    }
    ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
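    /* If an allocation failed above, dirty_rx ends up "behind" cur_rx (the
       subtraction wraps as an unsigned value), so the refill loop in
       epic_rx() will retry the empty slots; with a fully populated ring it
       simply comes out equal to cur_rx. */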
    /* The Tx buffer descriptor is filled in as needed, but we
       do need to clear the ownership bit. */
    for (i = 0; i < TX_RING_SIZE; i++) {
        ep->tx_skbuff[i] = 0;
        ep->tx_ring[i].txstatus = 0x0000;
        ep->tx_ring[i].next = virt_to_le32desc(&ep->tx_ring[i+1]);
    }
    ep->tx_ring[i-1].next = virt_to_le32desc(&ep->tx_ring[0]);
    return;
}
static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct epic_private *ep = (struct epic_private *)dev->priv;
    int entry, free_count;
    u32 ctrl_word;

    /* Caution: the write order is important here, set the field with the
       "ownership" bit last. */
    spin_lock_irq(&ep->lock);

    /* Calculate the next Tx descriptor entry. */
    free_count = ep->cur_tx - ep->dirty_tx;
    entry = ep->cur_tx % TX_RING_SIZE;

    ep->tx_skbuff[entry] = skb;
    ep->tx_ring[entry].bufaddr = virt_to_le32desc(skb->data);
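    /* Interrupt mitigation: a Tx-done interrupt is requested only when the
       number of packets outstanding reaches TX_QUEUE_LEN/2 or the ring is
       about to fill; other packets are queued without one, so completions
       are reaped in batches by epic_interrupt(). */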
    if (free_count < TX_QUEUE_LEN/2) {          /* Typical path */
        ctrl_word = cpu_to_le32(0x100000);      /* No interrupt */
    } else if (free_count == TX_QUEUE_LEN/2) {
        ctrl_word = cpu_to_le32(0x140000);      /* Tx-done intr. */
    } else if (free_count < TX_QUEUE_LEN - 1) {
        ctrl_word = cpu_to_le32(0x100000);      /* No Tx-done intr. */
    } else {
        /* Leave room for an additional entry. */
        ctrl_word = cpu_to_le32(0x140000);      /* Tx-done intr. */
        ep->tx_full = 1;
    }
    ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
    ep->tx_ring[entry].txstatus =
        ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
        | cpu_to_le32(DescOwn);

    ep->cur_tx++;
    if (ep->tx_full)
        netif_stop_queue(dev);

    spin_unlock_irq(&ep->lock);

    /* Trigger an immediate transmit demand. */
    outl(TxQueued, dev->base_addr + COMMAND);

    dev->trans_start = jiffies;
    if (debug > 4)
        printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
               "flag %2.2x Tx status %8.8x.\n",
               dev->name, (int)skb->len, entry, ctrl_word,
               (int)inl(dev->base_addr + TxSTAT));

    return 0;
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *)dev_instance;
    struct epic_private *ep = (struct epic_private *)dev->priv;
    long ioaddr = dev->base_addr;
    int status, boguscnt = max_interrupt_work;

    spin_lock(&ep->lock);

    do {
        status = inl(ioaddr + INTSTAT);
        /* Acknowledge all of the current interrupt sources ASAP. */
        outl(status & 0x00007fff, ioaddr + INTSTAT);

        if (debug > 4)
            printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
                   "intstat=%#8.8x.\n",
                   dev->name, status, (int)inl(ioaddr + INTSTAT));

        if ((status & IntrSummary) == 0)
            break;

        if (status & (RxDone | RxStarted | RxEarlyWarn | RxOverflow))
            epic_rx(dev);

        if (status & (TxEmpty | TxDone)) {
            unsigned int dirty_tx, cur_tx;

            /* Note: if this lock becomes a problem we can narrow the locked
               region at the cost of occasionally grabbing the lock more
               times. */
            cur_tx = ep->cur_tx;
            dirty_tx = ep->dirty_tx;
            for (; cur_tx - dirty_tx > 0; dirty_tx++) {
                int entry = dirty_tx % TX_RING_SIZE;
                int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);

                if (txstatus & DescOwn)
                    break;              /* It still hasn't been Txed */
                if ( ! (txstatus & 0x0001)) {
                    /* There was a major error, log it. */
#ifndef final_version
                    if (debug > 1)
                        printk("%s: Transmit error, Tx status %8.8x.\n",
                               dev->name, txstatus);
#endif
                    ep->stats.tx_errors++;
                    if (txstatus & 0x1050) ep->stats.tx_aborted_errors++;
                    if (txstatus & 0x0008) ep->stats.tx_carrier_errors++;
                    if (txstatus & 0x0040) ep->stats.tx_window_errors++;
                    if (txstatus & 0x0010) ep->stats.tx_fifo_errors++;
#ifdef ETHER_STATS
                    if (txstatus & 0x1000) ep->stats.collisions16++;
#endif
                } else {
#ifdef ETHER_STATS
                    if ((txstatus & 0x0002) != 0) ep->stats.tx_deferred++;
#endif
                    ep->stats.collisions += (txstatus >> 8) & 15;
                    ep->stats.tx_packets++;
                    ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
                }

                /* Free the original skb. */
                dev_kfree_skb_irq(ep->tx_skbuff[entry]);
                ep->tx_skbuff[entry] = 0;
            }

#ifndef final_version
            if (cur_tx - dirty_tx > TX_RING_SIZE) {
                printk("%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
                       dev->name, dirty_tx, cur_tx, ep->tx_full);
                dirty_tx += TX_RING_SIZE;
            }
#endif
            ep->dirty_tx = dirty_tx;
            if (ep->tx_full
                && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
                /* The ring is no longer full, clear tbusy. */
                ep->tx_full = 0;
                netif_wake_queue(dev);
            }
        }
        /* Check uncommon events all at once. */
        if (status & (CntFull | TxUnderrun | RxOverflow | RxFull |
                      PCIBusErr170 | PCIBusErr175)) {
            if (status == 0xffffffff)   /* Chip failed or removed (CardBus). */
                break;
            /* Always update the error counts to avoid overhead later. */
            ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
            ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
            ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);

            if (status & TxUnderrun) {  /* Tx FIFO underflow. */
                ep->stats.tx_fifo_errors++;
                outl(ep->tx_threshold += 128, ioaddr + TxThresh);
                /* Restart the transmit process. */
                outl(RestartTx, ioaddr + COMMAND);
            }
            if (status & RxOverflow) {  /* Missed a Rx frame. */
                ep->stats.rx_errors++;
            }
            if (status & (RxOverflow | RxFull))
                outw(RxQueued, ioaddr + COMMAND);
            if (status & PCIBusErr170) {
                printk(KERN_ERR "%s: PCI Bus Error!  EPIC status %4.4x.\n",
                       dev->name, status);
                epic_pause(dev);
                epic_restart(dev);
            }
            /* Clear all error sources. */
            outl(status & 0x7f18, ioaddr + INTSTAT);
        }
        if (--boguscnt < 0) {
            printk(KERN_ERR "%s: Too much work at interrupt, "
                   "IntrStatus=0x%8.8x.\n",
                   dev->name, status);
            /* Clear all interrupt sources. */
            outl(0x0001ffff, ioaddr + INTSTAT);
            break;
        }
    } while (1);

    if (debug > 3)
        printk(KERN_DEBUG "%s: exiting interrupt, intr_status=%#4.4x.\n",
               dev->name, status);

    spin_unlock(&ep->lock);
}
static int epic_rx(struct net_device *dev)
{
    struct epic_private *ep = (struct epic_private *)dev->priv;
    int entry = ep->cur_rx % RX_RING_SIZE;
    int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
    int work_done = 0;

    if (debug > 4)
        printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
               ep->rx_ring[entry].rxstatus);
    /* If we own the next entry, it's a new packet. Send it up. */
    while (!(le32_to_cpu(ep->rx_ring[entry].rxstatus) & DescOwn)) {
        int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);

        if (debug > 4)
            printk(KERN_DEBUG "  epic_rx() status was %8.8x.\n", status);
        if (--rx_work_limit < 0)
            break;
        if (status & 0x2006) {
            if (debug > 2)
                printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
                       dev->name, status);
            if (status & 0x2000) {
                printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
                       "multiple buffers, status %4.4x!\n", dev->name, status);
                ep->stats.rx_length_errors++;
            } else if (status & 0x0006)
                /* Rx Frame errors are counted in hardware. */
                ep->stats.rx_errors++;
        } else {
            /* Malloc up new buffer, compatible with net-2e. */
            /* Omit the four octet CRC from the length. */
            short pkt_len = (status >> 16) - 4;
            struct sk_buff *skb;

            if (pkt_len > PKT_BUF_SZ - 4) {
                printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
                       "%d bytes.\n",
                       dev->name, status, pkt_len);
                pkt_len = 1514;
            }
            /* Check if the packet is long enough to accept without copying
               to a minimally-sized skbuff. */
            if (pkt_len < rx_copybreak
                && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                skb->dev = dev;
                skb_reserve(skb, 2);            /* 16 byte align the IP header */
#if 1 /* HAS_IP_COPYSUM */
                eth_copy_and_sum(skb, ep->rx_skbuff[entry]->tail, pkt_len, 0);
                skb_put(skb, pkt_len);
#else
                memcpy(skb_put(skb, pkt_len), ep->rx_skbuff[entry]->tail,
                       pkt_len);
#endif
            } else {
                skb_put(skb = ep->rx_skbuff[entry], pkt_len);
                ep->rx_skbuff[entry] = NULL;
            }
            skb->protocol = eth_type_trans(skb, dev);
            netif_rx(skb);
            ep->stats.rx_packets++;
            ep->stats.rx_bytes += pkt_len;
        }
        work_done++;
        entry = (++ep->cur_rx) % RX_RING_SIZE;
    }
    /* Refill the Rx ring buffers. */
    for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
        entry = ep->dirty_rx % RX_RING_SIZE;
        if (ep->rx_skbuff[entry] == NULL) {
            struct sk_buff *skb;
            skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
            if (skb == NULL)
                break;
            skb->dev = dev;             /* Mark as being used by this device. */
            skb_reserve(skb, 2);        /* Align IP on 16 byte boundaries */
            ep->rx_ring[entry].bufaddr = virt_to_le32desc(skb->tail);
            work_done++;
        }
        ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
    }
    return work_done;
}
static int epic_close(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    struct epic_private *ep = (struct epic_private *)dev->priv;
    int i;

    netif_stop_queue(dev);

    if (debug > 1)
        printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
               dev->name, (int)inl(ioaddr + INTSTAT));

    del_timer_sync(&ep->timer);
    epic_pause(dev);
    free_irq(dev->irq, dev);

    /* Free all the skbuffs in the Rx queue. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = ep->rx_skbuff[i];
        ep->rx_skbuff[i] = 0;
        ep->rx_ring[i].rxstatus = 0;            /* Not owned by Epic chip. */
        ep->rx_ring[i].buflength = 0;
        ep->rx_ring[i].bufaddr = 0xBADF00D0;    /* An invalid address. */
        if (skb) {
            dev_kfree_skb(skb);
        }
    }
    for (i = 0; i < TX_RING_SIZE; i++) {
        if (ep->tx_skbuff[i])
            dev_kfree_skb(ep->tx_skbuff[i]);
        ep->tx_skbuff[i] = 0;
    }

    /* Green!  Leave the chip in low-power mode. */
    outl(0x0008, ioaddr + GENCTL);

    MOD_DEC_USE_COUNT;
    return 0;
}
static struct net_device_stats *epic_get_stats(struct net_device *dev)
{
    struct epic_private *ep = (struct epic_private *)dev->priv;
    long ioaddr = dev->base_addr;

    if (netif_running(dev)) {
        /* Update the error counts. */
        ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
        ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
        ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
    }

    return &ep->stats;
}
/* Set or clear the multicast filter for this adaptor.
   Note that we only use exclusion around actually queueing the
   new frame, not around filling ep->setup_frame.  This is non-deterministic
   when re-entered but still correct. */

/* The little-endian AUTODIN II ethernet CRC calculation.
   N.B. Do not use for bulk data, use a table-based routine instead.
   This is common code and should be moved to net/core/crc.c */
static unsigned const ethernet_polynomial_le = 0xedb88320U;
static inline unsigned ether_crc_le(int length, unsigned char *data)
{
    unsigned int crc = 0xffffffff;      /* Initial value. */
    while (--length >= 0) {
        unsigned char current_octet = *data++;
        int bit;
        for (bit = 8; --bit >= 0; current_octet >>= 1) {
            if ((crc ^ current_octet) & 1) {
                crc >>= 1;
                crc ^= ethernet_polynomial_le;
            } else
                crc >>= 1;
        }
    }
    return crc;
}
static void set_rx_mode(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    struct epic_private *ep = (struct epic_private *)dev->priv;
    unsigned char mc_filter[8];         /* Multicast hash filter */
    int i;

    if (dev->flags & IFF_PROMISC) {     /* Set promiscuous. */
        outl(0x002C, ioaddr + RxCtrl);
        /* Unconditionally log net taps. */
        printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
        memset(mc_filter, 0xff, sizeof(mc_filter));
    } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
        /* There is apparently a chip bug, so the multicast filter
           is never enabled. */
        /* Too many to filter perfectly -- accept all multicasts. */
        memset(mc_filter, 0xff, sizeof(mc_filter));
        outl(0x000C, ioaddr + RxCtrl);
    } else if (dev->mc_count == 0) {
        outl(0x0004, ioaddr + RxCtrl);
        return;
    } else {                            /* Never executed, for now. */
        struct dev_mc_list *mclist;

        memset(mc_filter, 0, sizeof(mc_filter));
        for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
             i++, mclist = mclist->next)
            set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f,
                    mc_filter);
    }
    /* ToDo: perhaps we need to stop the Tx and Rx process here? */
    if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
        for (i = 0; i < 4; i++)
            outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
        memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
    }
    return;
}
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    long ioaddr = dev->base_addr;
    u16 *data = (u16 *)&rq->ifr_data;

    switch (cmd) {
    case SIOCDEVPRIVATE:                /* Get the address of the PHY in use. */
        data[0] = ((struct epic_private *)dev->priv)->phys[0] & 0x1f;
        /* Fall Through */
    case SIOCDEVPRIVATE+1:              /* Read the specified MII register. */
        if (! netif_running(dev)) {
            outl(0x0200, ioaddr + GENCTL);
            outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
        }
        data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
        if (! netif_running(dev)) {
#ifdef notdef                           /* Leave on if the ioctl() is used. */
            outl(0x0008, ioaddr + GENCTL);
            outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
#endif
        }
        return 0;
    case SIOCDEVPRIVATE+2:              /* Write the specified MII register */
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        if (! netif_running(dev)) {
            outl(0x0200, ioaddr + GENCTL);
            outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
        }
        mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
        if (! netif_running(dev)) {
#ifdef notdef                           /* Leave on if the ioctl() is used. */
            outl(0x0008, ioaddr + GENCTL);
            outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
#endif
        }
        return 0;
    default:
        return -EOPNOTSUPP;
    }
}
static void __devexit epic_remove_one (struct pci_dev *pdev)
{
    struct net_device *dev = pdev->driver_data;

    unregister_netdev(dev);
#ifndef USE_IO_OPS
    iounmap ((void *) dev->base_addr);
#endif
    release_mem_region (pci_resource_start (pdev, 1),
                        pci_resource_len (pdev, 1));
    release_region (pci_resource_start (pdev, 0),
                    pci_resource_len (pdev, 0));
    kfree(dev);
}
static void epic_suspend (struct pci_dev *pdev)
{
    struct net_device *dev = pdev->driver_data;
    long ioaddr = dev->base_addr;

    epic_pause(dev);
    /* Put the chip into low-power mode. */
    outl(0x0008, ioaddr + GENCTL);
}

static void epic_resume (struct pci_dev *pdev)
{
    struct net_device *dev = pdev->driver_data;

    epic_restart (dev);
}
static struct pci_driver epic_driver = {
    name:       "epic100",
    id_table:   epic_pci_tbl,
    probe:      epic_init_one,
    remove:     epic_remove_one,
    suspend:    epic_suspend,
    resume:     epic_resume,
};

static int __init epic_init (void)
{
    return pci_module_init (&epic_driver);
}

static void __exit epic_cleanup (void)
{
    pci_unregister_driver (&epic_driver);
}

module_init(epic_init);
module_exit(epic_cleanup);