[netdrvr] Remove long-unused bits from Becker template drivers
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers / net / epic100.c
blobee34a16eb4e24b8feb2667b774d5627e8168a139
1 /* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
2 /*
3 Written/copyright 1997-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 This driver is for the SMC83c170/175 "EPIC" series, as used on the
13 SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
15 The author may be reached as becker@scyld.com, or C/O
16 Scyld Computing Corporation
17 410 Severn Ave., Suite 210
18 Annapolis MD 21403
20 Information and updates available at
21 http://www.scyld.com/network/epic100.html
23 ---------------------------------------------------------------------
25 Linux kernel-specific changes:
27 LK1.1.2 (jgarzik):
28 * Merge becker version 1.09 (4/08/2000)
30 LK1.1.3:
31 * Major bugfix to 1.09 driver (Francis Romieu)
33 LK1.1.4 (jgarzik):
34 * Merge becker test version 1.09 (5/29/2000)
36 LK1.1.5:
37 * Fix locking (jgarzik)
38 * Limit 83c175 probe to ethernet-class PCI devices (rgooch)
40 LK1.1.6:
41 * Merge becker version 1.11
42 * Move pci_enable_device before any PCI BAR len checks
44 LK1.1.7:
45 * { fill me in }
47 LK1.1.8:
48 * ethtool driver info support (jgarzik)
50 LK1.1.9:
51 * ethtool media get/set support (jgarzik)
53 LK1.1.10:
54 * revert MII transceiver init change (jgarzik)
56 LK1.1.11:
57 * implement ETHTOOL_[GS]SET, _NWAY_RST, _[GS]MSGLVL, _GLINK (jgarzik)
58 * replace some MII-related magic numbers with constants
60 LK1.1.12:
61 * fix power-up sequence
63 LK1.1.13:
64 * revert version 1.1.12, power-up sequence "fix"
66 LK1.1.14 (Krzysztof Halasa):
67 * fix spurious bad initializations
68 * pound phy a la SMSC's app note on the subject
70 AC1.1.14ac
71 * fix power up/down for ethtool that broke in 1.11
75 #define DRV_NAME "epic100"
76 #define DRV_VERSION "1.11+LK1.1.14+AC1.1.14"
77 #define DRV_RELDATE "June 2, 2004"
79 /* The user-configurable values.
80 These may be modified when a driver module is loaded.*/
82 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
84 /* Used to pass the full-duplex flag, etc. */
85 #define MAX_UNITS 8 /* More are supported, limit only on options */
86 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
87 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
89 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
90 Setting to > 1518 effectively disables this feature. */
91 static int rx_copybreak;
93 /* Operational parameters that are set at compile time. */
95 /* Keep the ring sizes a power of two for operational efficiency.
96 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
97 Making the Tx ring too large decreases the effectiveness of channel
98 bonding and packet priority.
99 There are no ill effects from too-large receive rings. */
100 #define TX_RING_SIZE 256
101 #define TX_QUEUE_LEN 240 /* Limit ring entries actually used. */
102 #define RX_RING_SIZE 256
103 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc)
104 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct epic_rx_desc)
106 /* Operational parameters that usually are not changed. */
107 /* Time in jiffies before concluding the transmitter is hung. */
108 #define TX_TIMEOUT (2*HZ)
110 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
112 /* Bytes transferred to chip before transmission starts. */
113 /* Initial threshold, increased on underflow, rounded down to 4 byte units. */
114 #define TX_FIFO_THRESH 256
115 #define RX_FIFO_THRESH 1 /* 0-3: 0==32, 1==64, 2==96, 3==128 bytes */
117 #include <linux/config.h>
118 #include <linux/module.h>
119 #include <linux/kernel.h>
120 #include <linux/string.h>
121 #include <linux/timer.h>
122 #include <linux/errno.h>
123 #include <linux/ioport.h>
124 #include <linux/slab.h>
125 #include <linux/interrupt.h>
126 #include <linux/pci.h>
127 #include <linux/delay.h>
128 #include <linux/netdevice.h>
129 #include <linux/etherdevice.h>
130 #include <linux/skbuff.h>
131 #include <linux/init.h>
132 #include <linux/spinlock.h>
133 #include <linux/ethtool.h>
134 #include <linux/mii.h>
135 #include <linux/crc32.h>
136 #include <linux/bitops.h>
137 #include <asm/io.h>
138 #include <asm/uaccess.h>
140 /* These identify the driver base version and may not be removed. */
141 static char version[] __devinitdata =
142 DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
143 static char version2[] __devinitdata =
144 " http://www.scyld.com/network/epic100.html\n";
145 static char version3[] __devinitdata =
146 " (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
148 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
149 MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
150 MODULE_LICENSE("GPL");
152 module_param(debug, int, 0);
153 module_param(rx_copybreak, int, 0);
154 module_param_array(options, int, NULL, 0);
155 module_param_array(full_duplex, int, NULL, 0);
156 MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
157 MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
158 MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
159 MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
162 Theory of Operation
164 I. Board Compatibility
166 This device driver is designed for the SMC "EPIC/100", the SMC
167 single-chip Ethernet controllers for PCI. This chip is used on
168 the SMC EtherPower II boards.
170 II. Board-specific settings
172 PCI bus devices are configured by the system at boot time, so no jumpers
173 need to be set on the board. The system BIOS will assign the
174 PCI INTA signal to a (preferably otherwise unused) system IRQ line.
175 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
176 interrupt lines.
178 III. Driver operation
180 IIIa. Ring buffers
182 IVb. References
184 http://www.smsc.com/main/datasheets/83c171.pdf
185 http://www.smsc.com/main/datasheets/83c175.pdf
186 http://scyld.com/expert/NWay.html
187 http://www.national.com/pf/DP/DP83840A.html
189 IVc. Errata
194 enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
196 #define EPIC_TOTAL_SIZE 0x100
197 #define USE_IO_OPS 1
199 typedef enum {
200 SMSC_83C170_0,
201 SMSC_83C170,
202 SMSC_83C175,
203 } chip_t;
206 struct epic_chip_info {
207 const char *name;
208 int io_size; /* Needed for I/O region check or ioremap(). */
209 int drv_flags; /* Driver use, intended as capability flags. */
213 /* indexed by chip_t */
214 static const struct epic_chip_info pci_id_tbl[] = {
215 { "SMSC EPIC/100 83c170",
216 EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN },
217 { "SMSC EPIC/100 83c170",
218 EPIC_TOTAL_SIZE, TYPE2_INTR },
219 { "SMSC EPIC/C 83c175",
220 EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN },
224 static struct pci_device_id epic_pci_tbl[] = {
225 { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
226 { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
227 { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
228 PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
229 { 0,}
231 MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
234 #ifndef USE_IO_OPS
235 #undef inb
236 #undef inw
237 #undef inl
238 #undef outb
239 #undef outw
240 #undef outl
241 #define inb readb
242 #define inw readw
243 #define inl readl
244 #define outb writeb
245 #define outw writew
246 #define outl writel
247 #endif
249 /* Offsets to registers, using the (ugh) SMC names. */
250 enum epic_registers {
251 COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
252 PCIBurstCnt=0x18,
253 TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */
254 MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
255 LAN0=64, /* MAC address. */
256 MC0=80, /* Multicast filter table. */
257 RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
258 PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
261 /* Interrupt register bits, using my own meaningful names. */
262 enum IntrStatus {
263 TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
264 PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
265 RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
266 TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
267 RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
269 enum CommandBits {
270 StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
271 StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
274 #define EpicRemoved 0xffffffff /* Chip failed or removed (CardBus) */
276 #define EpicNapiEvent (TxEmpty | TxDone | \
277 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
278 #define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent)
280 static const u16 media2miictl[16] = {
281 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
282 0, 0, 0, 0, 0, 0, 0, 0 };
284 /* The EPIC100 Rx and Tx buffer descriptors. */
286 struct epic_tx_desc {
287 u32 txstatus;
288 u32 bufaddr;
289 u32 buflength;
290 u32 next;
293 struct epic_rx_desc {
294 u32 rxstatus;
295 u32 bufaddr;
296 u32 buflength;
297 u32 next;
300 enum desc_status_bits {
301 DescOwn=0x8000,
304 #define PRIV_ALIGN 15 /* Required alignment mask */
/* Per-device driver state, reached via dev->priv.  The Rx/Tx descriptor
   rings live in PCI-consistent memory; their bus addresses are kept in
   the *_ring_dma fields below. */
305 struct epic_private {
306 struct epic_rx_desc *rx_ring;
307 struct epic_tx_desc *tx_ring;
308 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
309 struct sk_buff* tx_skbuff[TX_RING_SIZE];
310 /* The addresses of receive-in-place skbuffs. */
311 struct sk_buff* rx_skbuff[RX_RING_SIZE];
/* Bus (DMA) addresses of the two descriptor rings, for the chip's
   PTxCDAR/PRxCDAR registers. */
313 dma_addr_t tx_ring_dma;
314 dma_addr_t rx_ring_dma;
316 /* Ring pointers. */
317 spinlock_t lock; /* Group with Tx control cache line. */
318 spinlock_t napi_lock;
/* NOTE(review): appears to flag that the NAPI poll must be rescheduled;
   confirm against epic_poll()/epic_interrupt(), which are outside this view. */
319 unsigned int reschedule_in_poll;
/* cur_* advance as entries are produced, dirty_* as they are reclaimed;
   indices are taken modulo the ring size. */
320 unsigned int cur_tx, dirty_tx;
322 unsigned int cur_rx, dirty_rx;
323 u32 irq_mask;
324 unsigned int rx_buf_sz; /* Based on MTU+slack. */
326 struct pci_dev *pci_dev; /* PCI bus location. */
327 int chip_id, chip_flags;
329 struct net_device_stats stats;
330 struct timer_list timer; /* Media selection timer. */
331 int tx_threshold;
332 unsigned char mc_filter[8];
333 signed char phys[4]; /* MII device addresses. */
334 u16 advertising; /* NWay media advertisement */
335 int mii_phy_cnt;
336 struct mii_if_info mii;
337 unsigned int tx_full:1; /* The Tx queue is full. */
338 unsigned int default_port:4; /* Last dev->if_port value. */
341 static int epic_open(struct net_device *dev);
342 static int read_eeprom(long ioaddr, int location);
343 static int mdio_read(struct net_device *dev, int phy_id, int location);
344 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
345 static void epic_restart(struct net_device *dev);
346 static void epic_timer(unsigned long data);
347 static void epic_tx_timeout(struct net_device *dev);
348 static void epic_init_ring(struct net_device *dev);
349 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
350 static int epic_rx(struct net_device *dev, int budget);
351 static int epic_poll(struct net_device *dev, int *budget);
352 static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
353 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
354 static struct ethtool_ops netdev_ethtool_ops;
355 static int epic_close(struct net_device *dev);
356 static struct net_device_stats *epic_get_stats(struct net_device *dev);
357 static void set_rx_mode(struct net_device *dev);
/* Probe/attach one EPIC PCI device: enable it, map its registers, allocate
   the net_device and DMA descriptor rings, read the MAC address and probe
   the MII bus, then register the netdev.  Returns 0 or a negative errno;
   all failure paths unwind through the goto chain at the bottom. */
361 static int __devinit epic_init_one (struct pci_dev *pdev,
362 const struct pci_device_id *ent)
364 static int card_idx = -1;
365 long ioaddr;
366 int chip_idx = (int) ent->driver_data;
367 int irq;
368 struct net_device *dev;
369 struct epic_private *ep;
370 int i, ret, option = 0, duplex = 0;
371 void *ring_space;
372 dma_addr_t ring_dma;
374 /* when built into the kernel, we only print version if device is found */
375 #ifndef MODULE
376 static int printed_version;
377 if (!printed_version++)
378 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
379 version, version2, version3);
380 #endif
382 card_idx++;
384 ret = pci_enable_device(pdev);
385 if (ret)
386 goto out;
387 irq = pdev->irq;
/* BAR 0 must be large enough for the chip's register block. */
389 if (pci_resource_len(pdev, 0) < pci_id_tbl[chip_idx].io_size) {
390 printk (KERN_ERR "card %d: no PCI region space\n", card_idx);
391 ret = -ENODEV;
392 goto err_out_disable;
395 pci_set_master(pdev);
397 ret = pci_request_regions(pdev, DRV_NAME);
398 if (ret < 0)
399 goto err_out_disable;
401 ret = -ENOMEM;
403 dev = alloc_etherdev(sizeof (*ep));
404 if (!dev) {
405 printk (KERN_ERR "card %d: no memory for eth device\n", card_idx);
406 goto err_out_free_res;
408 SET_MODULE_OWNER(dev);
409 SET_NETDEV_DEV(dev, &pdev->dev);
/* Port I/O uses BAR 0 directly; MMIO uses BAR 1 through ioremap(). */
411 #ifdef USE_IO_OPS
412 ioaddr = pci_resource_start (pdev, 0);
413 #else
414 ioaddr = pci_resource_start (pdev, 1);
415 ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
416 if (!ioaddr) {
417 printk (KERN_ERR DRV_NAME " %d: ioremap failed\n", card_idx);
418 goto err_out_free_netdev;
420 #endif
422 pci_set_drvdata(pdev, dev);
423 ep = dev->priv;
424 ep->mii.dev = dev;
425 ep->mii.mdio_read = mdio_read;
426 ep->mii.mdio_write = mdio_write;
427 ep->mii.phy_id_mask = 0x1f;
428 ep->mii.reg_num_mask = 0x1f;
/* Descriptor rings must be in PCI-consistent (coherent) memory. */
430 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
431 if (!ring_space)
432 goto err_out_iounmap;
433 ep->tx_ring = (struct epic_tx_desc *)ring_space;
434 ep->tx_ring_dma = ring_dma;
436 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
437 if (!ring_space)
438 goto err_out_unmap_tx;
439 ep->rx_ring = (struct epic_rx_desc *)ring_space;
440 ep->rx_ring_dma = ring_dma;
/* Media/duplex overrides: dev->mem_start wins, else the module params. */
442 if (dev->mem_start) {
443 option = dev->mem_start;
444 duplex = (dev->mem_start & 16) ? 1 : 0;
445 } else if (card_idx >= 0 && card_idx < MAX_UNITS) {
446 if (options[card_idx] >= 0)
447 option = options[card_idx];
448 if (full_duplex[card_idx] >= 0)
449 duplex = full_duplex[card_idx];
452 dev->base_addr = ioaddr;
453 dev->irq = irq;
455 spin_lock_init(&ep->lock);
456 spin_lock_init(&ep->napi_lock);
457 ep->reschedule_in_poll = 0;
459 /* Bring the chip out of low-power mode. */
460 outl(0x4200, ioaddr + GENCTL);
461 /* Magic?! If we don't set this bit the MII interface won't work. */
462 /* This magic is documented in SMSC app note 7.15 */
463 for (i = 16; i > 0; i--)
464 outl(0x0008, ioaddr + TEST1);
466 /* Turn on the MII transceiver. */
467 outl(0x12, ioaddr + MIICfg);
468 if (chip_idx == 1)
469 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
470 outl(0x0200, ioaddr + GENCTL);
/* The MAC address is latched in the LAN0 registers at power-up. */
472 /* Note: the '175 does not have a serial EEPROM. */
473 for (i = 0; i < 3; i++)
474 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));
476 if (debug > 2) {
477 printk(KERN_DEBUG DRV_NAME "(%s): EEPROM contents\n",
478 pci_name(pdev));
479 for (i = 0; i < 64; i++)
480 printk(" %4.4x%s", read_eeprom(ioaddr, i),
481 i % 16 == 15 ? "\n" : "");
484 ep->pci_dev = pdev;
485 ep->chip_id = chip_idx;
486 ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
487 ep->irq_mask =
488 (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
489 | CntFull | TxUnderrun | EpicNapiEvent;
491 /* Find the connected MII xcvrs.
492 Doing this in open() would allow detecting external xcvrs later, but
493 takes much time and no cards have external MII. */
495 int phy, phy_idx = 0;
496 for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
497 int mii_status = mdio_read(dev, phy, MII_BMSR);
/* A readable BMSR that is neither all-ones nor all-zeros means a PHY. */
498 if (mii_status != 0xffff && mii_status != 0x0000) {
499 ep->phys[phy_idx++] = phy;
500 printk(KERN_INFO DRV_NAME "(%s): MII transceiver #%d control "
501 "%4.4x status %4.4x.\n",
502 pci_name(pdev), phy, mdio_read(dev, phy, 0), mii_status);
505 ep->mii_phy_cnt = phy_idx;
506 if (phy_idx != 0) {
507 phy = ep->phys[0];
508 ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
509 printk(KERN_INFO DRV_NAME "(%s): Autonegotiation advertising %4.4x link "
510 "partner %4.4x.\n",
511 pci_name(pdev), ep->mii.advertising, mdio_read(dev, phy, 5));
512 } else if ( ! (ep->chip_flags & NO_MII)) {
513 printk(KERN_WARNING DRV_NAME "(%s): ***WARNING***: No MII transceiver found!\n",
514 pci_name(pdev));
515 /* Use the known PHY address of the EPII. */
516 ep->phys[0] = 3;
518 ep->mii.phy_id = ep->phys[0];
521 /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
522 if (ep->chip_flags & MII_PWRDWN)
523 outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
524 outl(0x0008, ioaddr + GENCTL);
526 /* The lower four bits are the media type. */
527 if (duplex) {
528 ep->mii.force_media = ep->mii.full_duplex = 1;
529 printk(KERN_INFO DRV_NAME "(%s): Forced full duplex operation requested.\n",
530 pci_name(pdev));
532 dev->if_port = ep->default_port = option;
534 /* The Epic-specific entries in the device structure. */
535 dev->open = &epic_open;
536 dev->hard_start_xmit = &epic_start_xmit;
537 dev->stop = &epic_close;
538 dev->get_stats = &epic_get_stats;
539 dev->set_multicast_list = &set_rx_mode;
540 dev->do_ioctl = &netdev_ioctl;
541 dev->ethtool_ops = &netdev_ethtool_ops;
542 dev->watchdog_timeo = TX_TIMEOUT;
543 dev->tx_timeout = &epic_tx_timeout;
544 dev->poll = epic_poll;
545 dev->weight = 64;
547 ret = register_netdev(dev);
548 if (ret < 0)
549 goto err_out_unmap_rx;
551 printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
552 dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
553 for (i = 0; i < 5; i++)
554 printk("%2.2x:", dev->dev_addr[i]);
555 printk("%2.2x.\n", dev->dev_addr[i]);
557 out:
558 return ret;
/* Error unwind: release resources in reverse order of acquisition. */
560 err_out_unmap_rx:
561 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
562 err_out_unmap_tx:
563 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
564 err_out_iounmap:
565 #ifndef USE_IO_OPS
566 iounmap(ioaddr);
567 err_out_free_netdev:
568 #endif
569 free_netdev(dev);
570 err_out_free_res:
571 pci_release_regions(pdev);
572 err_out_disable:
573 pci_disable_device(pdev);
574 goto out;
577 /* Serial EEPROM section. */
579 /* EEPROM_Ctrl bits. */
580 #define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
581 #define EE_CS 0x02 /* EEPROM chip select. */
582 #define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */
583 #define EE_WRITE_0 0x01
584 #define EE_WRITE_1 0x09
585 #define EE_DATA_READ 0x10 /* EEPROM chip data out. */
586 #define EE_ENB (0x0001 | EE_CS)
588 /* Delay between EEPROM clock transitions.
589 This serves to flush the operation to the PCI bus.
592 #define eeprom_delay() inl(ee_addr)
594 /* The EEPROM commands include the always-set leading bit. */
595 #define EE_WRITE_CMD (5 << 6)
596 #define EE_READ64_CMD (6 << 6)
597 #define EE_READ256_CMD (6 << 8)
598 #define EE_ERASE_CMD (7 << 6)
600 static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
602 long ioaddr = dev->base_addr;
604 outl(0x00000000, ioaddr + INTMASK);
607 static inline void __epic_pci_commit(long ioaddr)
609 #ifndef USE_IO_OPS
610 inl(ioaddr + INTMASK);
611 #endif
614 static inline void epic_napi_irq_off(struct net_device *dev,
615 struct epic_private *ep)
617 long ioaddr = dev->base_addr;
619 outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
620 __epic_pci_commit(ioaddr);
623 static inline void epic_napi_irq_on(struct net_device *dev,
624 struct epic_private *ep)
626 long ioaddr = dev->base_addr;
628 /* No need to commit possible posted write */
629 outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
632 static int __devinit read_eeprom(long ioaddr, int location)
634 int i;
635 int retval = 0;
636 long ee_addr = ioaddr + EECTL;
637 int read_cmd = location |
638 (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
640 outl(EE_ENB & ~EE_CS, ee_addr);
641 outl(EE_ENB, ee_addr);
643 /* Shift the read command bits out. */
644 for (i = 12; i >= 0; i--) {
645 short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
646 outl(EE_ENB | dataval, ee_addr);
647 eeprom_delay();
648 outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
649 eeprom_delay();
651 outl(EE_ENB, ee_addr);
653 for (i = 16; i > 0; i--) {
654 outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
655 eeprom_delay();
656 retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
657 outl(EE_ENB, ee_addr);
658 eeprom_delay();
661 /* Terminate the EEPROM access. */
662 outl(EE_ENB & ~EE_CS, ee_addr);
663 return retval;
666 #define MII_READOP 1
667 #define MII_WRITEOP 2
/* Read one 16-bit MII PHY register through the chip's MIICtrl/MIIData
   window.  Polls for command completion; returns 0xffff if the chip never
   clears the read-op bit. */
668 static int mdio_read(struct net_device *dev, int phy_id, int location)
670 long ioaddr = dev->base_addr;
671 int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
672 int i;
674 outl(read_cmd, ioaddr + MIICtrl);
675 /* Typical operation takes 25 loops. */
676 for (i = 400; i > 0; i--) {
677 barrier();
678 if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
/* Read-op bit cleared: the result is latched in MIIData. */
679 /* Work around read failure bug. */
680 if (phy_id == 1 && location < 6
681 && inw(ioaddr + MIIData) == 0xffff) {
/* An all-ones value from PHY 1, registers 0-5 is treated as a bad
   read: reissue the command and keep polling. */
682 outl(read_cmd, ioaddr + MIICtrl);
683 continue;
685 return inw(ioaddr + MIIData);
688 return 0xffff;
691 static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
693 long ioaddr = dev->base_addr;
694 int i;
696 outw(value, ioaddr + MIIData);
697 outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
698 for (i = 10000; i > 0; i--) {
699 barrier();
700 if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
701 break;
703 return;
/* Bring the interface up: reset the chip, grab the (shared) IRQ, build the
   descriptor rings, program MAC address, duplex and thresholds, start Rx,
   unmask interrupts, and arm the media-watch timer.  Returns 0 or the
   negative errno from request_irq(). */
707 static int epic_open(struct net_device *dev)
709 struct epic_private *ep = dev->priv;
710 long ioaddr = dev->base_addr;
711 int i;
712 int retval;
714 /* Soft reset the chip. */
715 outl(0x4001, ioaddr + GENCTL);
717 if ((retval = request_irq(dev->irq, &epic_interrupt, SA_SHIRQ, dev->name, dev)))
718 return retval;
720 epic_init_ring(dev);
722 outl(0x4000, ioaddr + GENCTL);
723 /* This magic is documented in SMSC app note 7.15 */
724 for (i = 16; i > 0; i--)
725 outl(0x0008, ioaddr + TEST1);
727 /* Pull the chip out of low-power mode, enable interrupts, and set for
728 PCI read multiple. The MIIcfg setting and strange write order are
729 required by the details of which bits are reset and the transceiver
730 wiring on the Ositech CardBus card.
732 #if 0
733 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
734 #endif
735 if (ep->chip_flags & MII_PWRDWN)
736 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
/* GENCTL value differs by endianness (bit 0x20 selects byte-swapped
   descriptors on big-endian machines). */
738 #if defined(__powerpc__) || defined(__sparc__) /* Big endian */
739 outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
740 inl(ioaddr + GENCTL);
741 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
742 #else
743 outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
744 inl(ioaddr + GENCTL);
745 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
746 #endif
748 udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
/* Load the station address into the LAN0 registers, 16 bits at a time. */
750 for (i = 0; i < 3; i++)
751 outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
753 ep->tx_threshold = TX_FIFO_THRESH;
754 outl(ep->tx_threshold, ioaddr + TxThresh);
/* Forced media type requested: program the PHY directly; otherwise read
   the link partner ability and pick duplex / restart autonegotiation. */
756 if (media2miictl[dev->if_port & 15]) {
757 if (ep->mii_phy_cnt)
758 mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
759 if (dev->if_port == 1) {
760 if (debug > 1)
761 printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
762 "status %4.4x.\n",
763 dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
765 } else {
766 int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
767 if (mii_lpa != 0xffff) {
768 if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
769 ep->mii.full_duplex = 1;
770 else if (! (mii_lpa & LPA_LPACK))
771 mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
772 if (debug > 1)
773 printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
774 " register read of %4.4x.\n", dev->name,
775 ep->mii.full_duplex ? "full" : "half",
776 ep->phys[0], mii_lpa);
780 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
/* Hand the descriptor ring base addresses to the chip. */
781 outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
782 outl(ep->tx_ring_dma, ioaddr + PTxCDAR);
784 /* Start the chip's Rx process. */
785 set_rx_mode(dev);
786 outl(StartRx | RxQueued, ioaddr + COMMAND);
788 netif_start_queue(dev);
790 /* Enable interrupts by setting the interrupt mask. */
791 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
792 | CntFull | TxUnderrun
793 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
795 if (debug > 1)
796 printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
797 "%s-duplex.\n",
798 dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
799 ep->mii.full_duplex ? "full" : "half");
801 /* Set the timer to switch to check for link beat and perhaps switch
802 to an alternate media type. */
803 init_timer(&ep->timer);
804 ep->timer.expires = jiffies + 3*HZ;
805 ep->timer.data = (unsigned long)dev;
806 ep->timer.function = &epic_timer; /* timer handler */
807 add_timer(&ep->timer);
809 return 0;
812 /* Reset the chip to recover from a PCI transaction error.
813 This may occur at interrupt time. */
/* Quiesce the chip: stop the queue, mask interrupts, halt Tx/Rx DMA,
   fold the hardware error counters into our stats, and drain any
   packets still sitting on the Rx ring. */
814 static void epic_pause(struct net_device *dev)
816 long ioaddr = dev->base_addr;
817 struct epic_private *ep = dev->priv;
819 netif_stop_queue (dev);
821 /* Disable interrupts by clearing the interrupt mask. */
822 outl(0x00000000, ioaddr + INTMASK);
823 /* Stop the chip's Tx and Rx DMA processes. */
824 outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);
826 /* Update the error counts. */
/* An all-ones COMMAND read means the chip has vanished (e.g. CardBus
   eject); skip the counter registers in that case. */
827 if (inw(ioaddr + COMMAND) != 0xffff) {
828 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
829 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
830 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
833 /* Remove the packets on the Rx queue. */
834 epic_rx(dev, RX_RING_SIZE);
/* Soft-reset and fully re-program the chip, resuming the Rx/Tx rings at
   their current positions (cur_rx/dirty_tx) rather than from entry 0.
   Used for error recovery, e.g. from epic_tx_timeout(). */
837 static void epic_restart(struct net_device *dev)
839 long ioaddr = dev->base_addr;
840 struct epic_private *ep = dev->priv;
841 int i;
843 /* Soft reset the chip. */
844 outl(0x4001, ioaddr + GENCTL);
846 printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
847 dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
848 udelay(1);
850 /* This magic is documented in SMSC app note 7.15 */
851 for (i = 16; i > 0; i--)
852 outl(0x0008, ioaddr + TEST1);
854 #if defined(__powerpc__) || defined(__sparc__) /* Big endian */
855 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
856 #else
857 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
858 #endif
859 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
860 if (ep->chip_flags & MII_PWRDWN)
861 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
/* Re-load the station address. */
863 for (i = 0; i < 3; i++)
864 outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
866 ep->tx_threshold = TX_FIFO_THRESH;
867 outl(ep->tx_threshold, ioaddr + TxThresh);
868 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
/* Point the chip at the next descriptor to process, not the ring base. */
869 outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
870 sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
871 outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
872 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);
874 /* Start the chip's Rx process. */
875 set_rx_mode(dev);
876 outl(StartRx | RxQueued, ioaddr + COMMAND);
878 /* Enable interrupts by setting the interrupt mask. */
879 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
880 | CntFull | TxUnderrun
881 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
883 printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
884 " interrupt %4.4x.\n",
885 dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
886 (int)inl(ioaddr + INTSTAT));
887 return;
/* Periodic link check (called from epic_timer): read the link partner
   ability, derive duplex from the negotiated capabilities, and reprogram
   TxCtrl if the duplex setting changed.  No-op for forced media or a
   bogus (all-ones) MII read. */
890 static void check_media(struct net_device *dev)
892 struct epic_private *ep = dev->priv;
893 long ioaddr = dev->base_addr;
894 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
895 int negotiated = mii_lpa & ep->mii.advertising;
/* Full duplex if 100baseTx-FD negotiated, or 10baseT-FD is the best
   common mode (0x01C0 masks the 100Tx/10T-FD ability bits). */
896 int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
898 if (ep->mii.force_media)
899 return;
900 if (mii_lpa == 0xffff) /* Bogus read */
901 return;
902 if (ep->mii.full_duplex != duplex) {
903 ep->mii.full_duplex = duplex;
904 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
905 " partner capability of %4.4x.\n", dev->name,
906 ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
907 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
911 static void epic_timer(unsigned long data)
913 struct net_device *dev = (struct net_device *)data;
914 struct epic_private *ep = dev->priv;
915 long ioaddr = dev->base_addr;
916 int next_tick = 5*HZ;
918 if (debug > 3) {
919 printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
920 dev->name, (int)inl(ioaddr + TxSTAT));
921 printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
922 "IntStatus %4.4x RxStatus %4.4x.\n",
923 dev->name, (int)inl(ioaddr + INTMASK),
924 (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
927 check_media(dev);
929 ep->timer.expires = jiffies + next_tick;
930 add_timer(&ep->timer);
/* Watchdog callback, invoked when a transmit has stalled for TX_TIMEOUT
   jiffies.  A plain FIFO underflow just restarts Tx; anything else gets
   a full chip restart. */
933 static void epic_tx_timeout(struct net_device *dev)
935 struct epic_private *ep = dev->priv;
936 long ioaddr = dev->base_addr;
938 if (debug > 0) {
939 printk(KERN_WARNING "%s: Transmit timeout using MII device, "
940 "Tx status %4.4x.\n",
941 dev->name, (int)inw(ioaddr + TxSTAT));
942 if (debug > 1) {
943 printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
944 dev->name, ep->dirty_tx, ep->cur_tx);
947 if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */
948 ep->stats.tx_fifo_errors++;
949 outl(RestartTx, ioaddr + COMMAND);
950 } else {
/* Full re-init, then kick the transmitter to resume queued work. */
951 epic_restart(dev);
952 outl(TxQueued, dev->base_addr + COMMAND);
955 dev->trans_start = jiffies;
956 ep->stats.tx_errors++;
957 if (!ep->tx_full)
958 netif_wake_queue(dev);
961 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Reset all ring indices, link both descriptor rings into circles via
   their bus-address 'next' pointers, and pre-fill the Rx ring with
   freshly allocated, DMA-mapped skbuffs. */
962 static void epic_init_ring(struct net_device *dev)
964 struct epic_private *ep = dev->priv;
965 int i;
967 ep->tx_full = 0;
968 ep->dirty_tx = ep->cur_tx = 0;
969 ep->cur_rx = ep->dirty_rx = 0;
970 ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
972 /* Initialize all Rx descriptors. */
973 for (i = 0; i < RX_RING_SIZE; i++) {
974 ep->rx_ring[i].rxstatus = 0;
975 ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
976 ep->rx_ring[i].next = ep->rx_ring_dma +
977 (i+1)*sizeof(struct epic_rx_desc);
978 ep->rx_skbuff[i] = NULL;
980 /* Mark the last entry as wrapping the ring. */
981 ep->rx_ring[i-1].next = ep->rx_ring_dma;
983 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
984 for (i = 0; i < RX_RING_SIZE; i++) {
985 struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
986 ep->rx_skbuff[i] = skb;
987 if (skb == NULL)
988 break;
989 skb->dev = dev; /* Mark as being used by this device. */
990 skb_reserve(skb, 2); /* 16 byte align the IP header. */
991 ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
992 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
/* Setting DescOwn hands the descriptor to the chip. */
993 ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
/* Negative offset records how many Rx buffers failed to allocate. */
995 ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
997 /* The Tx buffer descriptor is filled in as needed, but we
998 do need to clear the ownership bit. */
999 for (i = 0; i < TX_RING_SIZE; i++) {
1000 ep->tx_skbuff[i] = NULL;
1001 ep->tx_ring[i].txstatus = 0x0000;
1002 ep->tx_ring[i].next = ep->tx_ring_dma +
1003 (i+1)*sizeof(struct epic_tx_desc);
1005 ep->tx_ring[i-1].next = ep->tx_ring_dma;
1006 return;
/* Queue one skb for transmission: map it for DMA, fill the next Tx
   descriptor (ownership bit written last), and kick the chip.  Interrupt
   coalescing: only every TX_QUEUE_LEN/2-th or ring-filling packet
   requests a Tx-done interrupt.  Always returns 0 (packet accepted). */
1009 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
1011 struct epic_private *ep = dev->priv;
1012 int entry, free_count;
1013 u32 ctrl_word;
1014 unsigned long flags;
1016 if (skb_padto(skb, ETH_ZLEN))
1017 return 0;
1019 /* Caution: the write order is important here, set the field with the
1020 "ownership" bit last. */
1022 /* Calculate the next Tx descriptor entry. */
1023 spin_lock_irqsave(&ep->lock, flags);
1024 free_count = ep->cur_tx - ep->dirty_tx;
1025 entry = ep->cur_tx % TX_RING_SIZE;
1027 ep->tx_skbuff[entry] = skb;
1028 ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
1029 skb->len, PCI_DMA_TODEVICE);
1030 if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
1031 ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
1032 } else if (free_count == TX_QUEUE_LEN/2) {
1033 ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
1034 } else if (free_count < TX_QUEUE_LEN - 1) {
1035 ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
1036 } else {
1037 /* Leave room for an additional entry. */
1038 ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
1039 ep->tx_full = 1;
1041 ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
/* NOTE(review): the length half-word below is or'ed in without
   cpu_to_le32(), unlike buflength above -- verify on big-endian hosts. */
1042 ep->tx_ring[entry].txstatus =
1043 ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
1044 | cpu_to_le32(DescOwn);
1046 ep->cur_tx++;
1047 if (ep->tx_full)
1048 netif_stop_queue(dev);
1050 spin_unlock_irqrestore(&ep->lock, flags);
1051 /* Trigger an immediate transmit demand. */
1052 outl(TxQueued, dev->base_addr + COMMAND);
1054 dev->trans_start = jiffies;
1055 if (debug > 4)
1056 printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
1057 "flag %2.2x Tx status %8.8x.\n",
1058 dev->name, (int)skb->len, entry, ctrl_word,
1059 (int)inl(dev->base_addr + TxSTAT));
1061 return 0;
1064 static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
1065 int status)
1067 struct net_device_stats *stats = &ep->stats;
1069 #ifndef final_version
1070 /* There was an major error, log it. */
1071 if (debug > 1)
1072 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1073 dev->name, status);
1074 #endif
1075 stats->tx_errors++;
1076 if (status & 0x1050)
1077 stats->tx_aborted_errors++;
1078 if (status & 0x0008)
1079 stats->tx_carrier_errors++;
1080 if (status & 0x0040)
1081 stats->tx_window_errors++;
1082 if (status & 0x0010)
1083 stats->tx_fifo_errors++;
/* Reap completed Tx descriptors: record statistics, unmap DMA buffers and
   free the skbs, then re-wake the queue if it was stopped for a full ring. */
static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
	unsigned int dirty_tx, cur_tx;

	/*
	 * Note: if this lock becomes a problem we can narrow the locked
	 * region at the cost of occasionally grabbing the lock more times.
	 */
	cur_tx = ep->cur_tx;
	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
		struct sk_buff *skb;
		int entry = dirty_tx % TX_RING_SIZE;
		int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);

		if (txstatus & DescOwn)
			break;	/* It still hasn't been Txed */

		if (likely(txstatus & 0x0001)) {
			/* Bit 0 set: transmitted OK.  Bits 8-11 hold the
			   collision count for this frame. */
			ep->stats.collisions += (txstatus >> 8) & 15;
			ep->stats.tx_packets++;
			ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
		} else
			epic_tx_error(dev, ep, txstatus);

		/* Free the original skb. */
		skb = ep->tx_skbuff[entry];
		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(skb);
		ep->tx_skbuff[entry] = NULL;
	}

#ifndef final_version
	if (cur_tx - dirty_tx > TX_RING_SIZE) {
		/* Should never happen: reaper overran the producer index. */
		printk(KERN_WARNING
		       "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
		       dev->name, dirty_tx, cur_tx, ep->tx_full);
		dirty_tx += TX_RING_SIZE;
	}
#endif
	ep->dirty_tx = dirty_tx;
	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, allow new TX entries. */
		ep->tx_full = 0;
		netif_wake_queue(dev);
	}
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;
	unsigned int handled = 0;
	int status;

	status = inl(ioaddr + INTSTAT);
	/* Acknowledge all of the current interrupt sources ASAP. */
	outl(status & EpicNormalEvent, ioaddr + INTSTAT);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
				"intstat=%#8.8x.\n", dev->name, status,
				(int)inl(ioaddr + INTSTAT));
	}

	if ((status & IntrSummary) == 0)
		goto out;

	handled = 1;

	/* Rx/Tx work is deferred to the NAPI poll routine: mask the NAPI
	   interrupt sources and schedule polling, or — if the poll routine
	   is already active — ask it to loop again via reschedule_in_poll. */
	if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
		spin_lock(&ep->napi_lock);
		if (netif_rx_schedule_prep(dev)) {
			epic_napi_irq_off(dev, ep);
			__netif_rx_schedule(dev);
		} else
			ep->reschedule_in_poll++;
		spin_unlock(&ep->napi_lock);
	}
	status &= ~EpicNapiEvent;

	/* Check uncommon events all at once. */
	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
		/* All-ones status means the card was removed (CardBus). */
		if (status == EpicRemoved)
			goto out;

		/* Always update the error counts to avoid overhead later. */
		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);

		if (status & TxUnderrun) { /* Tx FIFO underflow. */
			ep->stats.tx_fifo_errors++;
			/* Raise the Tx threshold to make underrun less likely,
			   then restart the transmitter. */
			outl(ep->tx_threshold += 128, ioaddr + TxThresh);
			/* Restart the transmit process. */
			outl(RestartTx, ioaddr + COMMAND);
		}
		if (status & PCIBusErr170) {
			printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
			       dev->name, status);
			epic_pause(dev);
			epic_restart(dev);
		}
		/* Clear all error sources. */
		outl(status & 0x7f18, ioaddr + INTSTAT);
	}

out:
	if (debug > 3) {
		printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
		       dev->name, status);
	}

	return IRQ_RETVAL(handled);
}
/* NAPI Rx worker: hand up to 'budget' received frames to the stack, then
   refill the Rx ring.  Returns the number of descriptors processed. */
static int epic_rx(struct net_device *dev, int budget)
{
	struct epic_private *ep = dev->priv;
	int entry = ep->cur_rx % RX_RING_SIZE;
	/* Never process more descriptors than are outstanding in the ring. */
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
	int work_done = 0;

	if (debug > 4)
		printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
			   ep->rx_ring[entry].rxstatus);

	if (rx_work_limit > budget)
		rx_work_limit = budget;

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
		int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);

		if (debug > 4)
			printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status);
		if (--rx_work_limit < 0)
			break;
		if (status & 0x2006) {
			/* Error summary bits set: count and drop the frame. */
			if (debug > 2)
				printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
					   dev->name, status);
			if (status & 0x2000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
					   "multiple buffers, status %4.4x!\n", dev->name, status);
				ep->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				ep->stats.rx_errors++;
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			if (pkt_len > PKT_BUF_SZ - 4) {
				printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
					   "%d bytes.\n",
					   dev->name, status, pkt_len);
				pkt_len = 1514;
			}
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				/* Small frame: copy into a fresh skb and leave
				   the ring buffer mapped for reuse. */
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(ep->pci_dev,
							    ep->rx_ring[entry].bufaddr,
							    ep->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(ep->pci_dev,
							       ep->rx_ring[entry].bufaddr,
							       ep->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Large frame: pass the ring buffer itself up
				   and refill the slot later. */
				pci_unmap_single(ep->pci_dev,
					ep->rx_ring[entry].bufaddr,
					ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
				ep->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->last_rx = jiffies;
			ep->stats.rx_packets++;
			ep->stats.rx_bytes += pkt_len;
		}
		work_done++;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
			if (skb == NULL)
				break;
			skb->dev = dev;			/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
				skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			work_done++;
		}
		/* Hand the descriptor back to the chip last. */
		ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
	}
	return work_done;
}
1302 static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1304 long ioaddr = dev->base_addr;
1305 int status;
1307 status = inl(ioaddr + INTSTAT);
1309 if (status == EpicRemoved)
1310 return;
1311 if (status & RxOverflow) /* Missed a Rx frame. */
1312 ep->stats.rx_errors++;
1313 if (status & (RxOverflow | RxFull))
1314 outw(RxQueued, ioaddr + COMMAND);
/* NAPI poll routine: reap Tx completions, receive frames, handle Rx errors,
   and either complete polling (re-enabling NAPI interrupts) or loop again if
   the interrupt handler asked for a re-run via reschedule_in_poll.
   Returns nonzero while more work remains. */
static int epic_poll(struct net_device *dev, int *budget)
{
	struct epic_private *ep = dev->priv;
	int work_done = 0, orig_budget;
	long ioaddr = dev->base_addr;

	orig_budget = (*budget > dev->quota) ? dev->quota : *budget;

rx_action:

	epic_tx(dev, ep);

	work_done += epic_rx(dev, *budget);

	epic_rx_err(dev, ep);

	*budget -= work_done;
	dev->quota -= work_done;

	if (netif_running(dev) && (work_done < orig_budget)) {
		unsigned long flags;
		int more;

		/* A bit baroque but it avoids a (space hungry) spin_unlock */

		spin_lock_irqsave(&ep->napi_lock, flags);

		more = ep->reschedule_in_poll;
		if (!more) {
			/* Done: ack the NAPI events and unmask them again. */
			__netif_rx_complete(dev);
			outl(EpicNapiEvent, ioaddr + INTSTAT);
			epic_napi_irq_on(dev, ep);
		} else
			ep->reschedule_in_poll--;

		spin_unlock_irqrestore(&ep->napi_lock, flags);

		if (more)
			goto rx_action;
	}

	return (work_done >= orig_budget);
}
/* Bring the interface down: stop the queue and timer, quiesce the chip,
   release the IRQ, free all ring buffers, and power the chip down. */
static int epic_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = dev->priv;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
			   dev->name, (int)inl(ioaddr + INTSTAT));

	del_timer_sync(&ep->timer);

	epic_disable_int(dev, ep);

	free_irq(dev->irq, dev);

	epic_pause(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = ep->rx_skbuff[i];
		ep->rx_skbuff[i] = NULL;
		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
		ep->rx_ring[i].buflength = 0;
		if (skb) {
			pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
					 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
		}
		/* Poison the address so a stray DMA is easy to spot. */
		ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = ep->tx_skbuff[i];
		ep->tx_skbuff[i] = NULL;
		if (!skb)
			continue;
		pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb(skb);
	}

	/* Green! Leave the chip in low-power mode. */
	outl(0x0008, ioaddr + GENCTL);

	return 0;
}
1411 static struct net_device_stats *epic_get_stats(struct net_device *dev)
1413 struct epic_private *ep = dev->priv;
1414 long ioaddr = dev->base_addr;
1416 if (netif_running(dev)) {
1417 /* Update the error counts. */
1418 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1419 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1420 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1423 return &ep->stats;
1426 /* Set or clear the multicast filter for this adaptor.
1427 Note that we only use exclusion around actually queueing the
1428 new frame, not around filling ep->setup_frame. This is non-deterministic
1429 when re-entered but still correct. */
1431 static void set_rx_mode(struct net_device *dev)
1433 long ioaddr = dev->base_addr;
1434 struct epic_private *ep = dev->priv;
1435 unsigned char mc_filter[8]; /* Multicast hash filter */
1436 int i;
1438 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1439 outl(0x002C, ioaddr + RxCtrl);
1440 /* Unconditionally log net taps. */
1441 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
1442 memset(mc_filter, 0xff, sizeof(mc_filter));
1443 } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
1444 /* There is apparently a chip bug, so the multicast filter
1445 is never enabled. */
1446 /* Too many to filter perfectly -- accept all multicasts. */
1447 memset(mc_filter, 0xff, sizeof(mc_filter));
1448 outl(0x000C, ioaddr + RxCtrl);
1449 } else if (dev->mc_count == 0) {
1450 outl(0x0004, ioaddr + RxCtrl);
1451 return;
1452 } else { /* Never executed, for now. */
1453 struct dev_mc_list *mclist;
1455 memset(mc_filter, 0, sizeof(mc_filter));
1456 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1457 i++, mclist = mclist->next) {
1458 unsigned int bit_nr =
1459 ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
1460 mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1463 /* ToDo: perhaps we need to stop the Tx and Rx process here? */
1464 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1465 for (i = 0; i < 4; i++)
1466 outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
1467 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1469 return;
1472 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1474 struct epic_private *np = dev->priv;
1476 strcpy (info->driver, DRV_NAME);
1477 strcpy (info->version, DRV_VERSION);
1478 strcpy (info->bus_info, pci_name(np->pci_dev));
1481 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1483 struct epic_private *np = dev->priv;
1484 int rc;
1486 spin_lock_irq(&np->lock);
1487 rc = mii_ethtool_gset(&np->mii, cmd);
1488 spin_unlock_irq(&np->lock);
1490 return rc;
1493 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1495 struct epic_private *np = dev->priv;
1496 int rc;
1498 spin_lock_irq(&np->lock);
1499 rc = mii_ethtool_sset(&np->mii, cmd);
1500 spin_unlock_irq(&np->lock);
1502 return rc;
1505 static int netdev_nway_reset(struct net_device *dev)
1507 struct epic_private *np = dev->priv;
1508 return mii_nway_restart(&np->mii);
1511 static u32 netdev_get_link(struct net_device *dev)
1513 struct epic_private *np = dev->priv;
1514 return mii_link_ok(&np->mii);
/* ethtool: the module-wide 'debug' variable doubles as the message level. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}
/* ethtool: setting the message level updates the module-wide 'debug',
   affecting all devices driven by this module. */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}
1527 static int ethtool_begin(struct net_device *dev)
1529 unsigned long ioaddr = dev->base_addr;
1530 /* power-up, if interface is down */
1531 if (! netif_running(dev)) {
1532 outl(0x0200, ioaddr + GENCTL);
1533 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
1535 return 0;
1538 static void ethtool_complete(struct net_device *dev)
1540 unsigned long ioaddr = dev->base_addr;
1541 /* power-down, if interface is down */
1542 if (! netif_running(dev)) {
1543 outl(0x0008, ioaddr + GENCTL);
1544 outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
/* ethtool operations table.  The begin/complete hooks power the chip
   up/down around register access so ethtool works while the interface
   is down. */
static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
	.get_settings = netdev_get_settings,
	.set_settings = netdev_set_settings,
	.nway_reset = netdev_nway_reset,
	.get_link = netdev_get_link,
	.get_msglevel = netdev_get_msglevel,
	.set_msglevel = netdev_set_msglevel,
	.get_sg = ethtool_op_get_sg,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.begin = ethtool_begin,
	.complete = ethtool_complete
};
/* MII ioctl handler (SIOC[GS]MIIxxx).  Powers the chip up around the MII
   access when the interface is down, mirroring the ethtool begin/complete
   hooks. */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct epic_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	/* power-up, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0200, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	}

	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	/* power-down, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0008, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
	}

	return rc;
}
1589 static void __devexit epic_remove_one (struct pci_dev *pdev)
1591 struct net_device *dev = pci_get_drvdata(pdev);
1592 struct epic_private *ep = dev->priv;
1594 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1595 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1596 unregister_netdev(dev);
1597 #ifndef USE_IO_OPS
1598 iounmap((void*) dev->base_addr);
1599 #endif
1600 pci_release_regions(pdev);
1601 free_netdev(dev);
1602 pci_disable_device(pdev);
1603 pci_set_drvdata(pdev, NULL);
1604 /* pci_power_off(pdev, -1); */
#ifdef CONFIG_PM

/* PM suspend: quiesce a running NIC and put the chip into low-power mode.
   The requested power state argument is currently unused. */
static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	long ioaddr = dev->base_addr;

	if (!netif_running(dev))
		return 0;
	epic_pause(dev);
	/* Put the chip into low-power mode. */
	outl(0x0008, ioaddr + GENCTL);
	/* pci_power_off(pdev, -1); */
	return 0;
}
/* PM resume: re-program and restart a NIC that was running at suspend time. */
static int epic_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!netif_running(dev))
		return 0;
	epic_restart(dev);
	/* pci_power_on(pdev); */
	return 0;
}

#endif /* CONFIG_PM */
/* PCI driver glue: probe/remove plus optional suspend/resume hooks. */
static struct pci_driver epic_driver = {
	.name = DRV_NAME,
	.id_table = epic_pci_tbl,
	.probe = epic_init_one,
	.remove = __devexit_p(epic_remove_one),
#ifdef CONFIG_PM
	.suspend = epic_suspend,
	.resume = epic_resume,
#endif /* CONFIG_PM */
};
/* Module entry point: announce the driver (module builds only) and
   register with the PCI subsystem. */
static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
		version, version2, version3);
#endif

	return pci_module_init (&epic_driver);
}
/* Module exit point: unhook from the PCI subsystem; per-device cleanup
   happens via epic_remove_one. */
static void __exit epic_cleanup (void)
{
	pci_unregister_driver (&epic_driver);
}

module_init(epic_init);
module_exit(epic_cleanup);