More meth updates.
[linux-2.6/linux-mips.git] / drivers / net / epic100.c
blob9c4e6ac14a981ad41499baaafb2d316696a4dbd9
1 /* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
2 /*
3 Written/copyright 1997-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 This driver is for the SMC83c170/175 "EPIC" series, as used on the
13 SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
15 The author may be reached as becker@scyld.com, or C/O
16 Scyld Computing Corporation
17 410 Severn Ave., Suite 210
18 Annapolis MD 21403
20 Information and updates available at
21 http://www.scyld.com/network/epic100.html
23 ---------------------------------------------------------------------
25 Linux kernel-specific changes:
27 LK1.1.2 (jgarzik):
28 * Merge becker version 1.09 (4/08/2000)
30 LK1.1.3:
31 * Major bugfix to 1.09 driver (Francis Romieu)
33 LK1.1.4 (jgarzik):
34 * Merge becker test version 1.09 (5/29/2000)
36 LK1.1.5:
37 * Fix locking (jgarzik)
38 * Limit 83c175 probe to ethernet-class PCI devices (rgooch)
40 LK1.1.6:
41 * Merge becker version 1.11
42 * Move pci_enable_device before any PCI BAR len checks
44 LK1.1.7:
45 * { fill me in }
47 LK1.1.8:
48 * ethtool driver info support (jgarzik)
50 LK1.1.9:
51 * ethtool media get/set support (jgarzik)
53 LK1.1.10:
54 * revert MII transceiver init change (jgarzik)
56 LK1.1.11:
57 * implement ETHTOOL_[GS]SET, _NWAY_RST, _[GS]MSGLVL, _GLINK (jgarzik)
58 * replace some MII-related magic numbers with constants
60 LK1.1.12:
61 * fix power-up sequence
63 LK1.1.13:
64 * revert version 1.1.12, power-up sequence "fix"
66 LK1.1.14 (Krzysztof Halasa):
67 * fix spurious bad initializations
68 * pound phy a la SMSC's app note on the subject
72 #define DRV_NAME "epic100"
73 #define DRV_VERSION "1.11+LK1.1.14"
74 #define DRV_RELDATE "Aug 4, 2002"
76 /* The user-configurable values.
77 These may be modified when a driver module is loaded.*/
79 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
80 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
81 static int max_interrupt_work = 32;
83 /* Used to pass the full-duplex flag, etc. */
84 #define MAX_UNITS 8 /* More are supported, limit only on options */
85 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
86 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
88 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
89 Setting to > 1518 effectively disables this feature. */
90 static int rx_copybreak;
92 /* Operational parameters that are set at compile time. */
94 /* Keep the ring sizes a power of two for operational efficiency.
95 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
96 Making the Tx ring too large decreases the effectiveness of channel
97 bonding and packet priority.
98 There are no ill effects from too-large receive rings. */
99 #define TX_RING_SIZE 16
100 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
101 #define RX_RING_SIZE 32
102 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc)
103 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct epic_rx_desc)
105 /* Operational parameters that usually are not changed. */
106 /* Time in jiffies before concluding the transmitter is hung. */
107 #define TX_TIMEOUT (2*HZ)
109 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
111 /* Bytes transferred to chip before transmission starts. */
112 /* Initial threshold, increased on underflow, rounded down to 4 byte units. */
113 #define TX_FIFO_THRESH 256
114 #define RX_FIFO_THRESH 1 /* 0-3, 0==32, 64,96, or 3==128 bytes */
116 #if !defined(__OPTIMIZE__)
117 #warning You must compile this file with the correct options!
118 #warning See the last lines of the source file.
119 #error You must compile this driver with "-O".
120 #endif
122 #include <linux/config.h>
123 #include <linux/module.h>
124 #include <linux/kernel.h>
125 #include <linux/string.h>
126 #include <linux/timer.h>
127 #include <linux/errno.h>
128 #include <linux/ioport.h>
129 #include <linux/slab.h>
130 #include <linux/interrupt.h>
131 #include <linux/pci.h>
132 #include <linux/delay.h>
133 #include <linux/netdevice.h>
134 #include <linux/etherdevice.h>
135 #include <linux/skbuff.h>
136 #include <linux/init.h>
137 #include <linux/spinlock.h>
138 #include <linux/ethtool.h>
139 #include <linux/mii.h>
140 #include <linux/crc32.h>
141 #include <asm/bitops.h>
142 #include <asm/io.h>
143 #include <asm/uaccess.h>
145 /* These identify the driver base version and may not be removed. */
146 static char version[] __devinitdata =
147 DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
148 static char version2[] __devinitdata =
149 " http://www.scyld.com/network/epic100.html\n";
150 static char version3[] __devinitdata =
151 " (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
153 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
154 MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
155 MODULE_LICENSE("GPL");
157 MODULE_PARM(debug, "i");
158 MODULE_PARM(max_interrupt_work, "i");
159 MODULE_PARM(rx_copybreak, "i");
160 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
161 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
162 MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
163 MODULE_PARM_DESC(max_interrupt_work, "EPIC/100 maximum events handled per interrupt");
164 MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
165 MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
166 MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
169 Theory of Operation
171 I. Board Compatibility
173 This device driver is designed for the SMC "EPIC/100", the SMC
174 single-chip Ethernet controllers for PCI. This chip is used on
175 the SMC EtherPower II boards.
177 II. Board-specific settings
179 PCI bus devices are configured by the system at boot time, so no jumpers
180 need to be set on the board. The system BIOS will assign the
181 PCI INTA signal to a (preferably otherwise unused) system IRQ line.
182 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
183 interrupt lines.
185 III. Driver operation
187 IIIa. Ring buffers
189 IVb. References
191 http://www.smsc.com/main/datasheets/83c171.pdf
192 http://www.smsc.com/main/datasheets/83c175.pdf
193 http://scyld.com/expert/NWay.html
194 http://www.national.com/pf/DP/DP83840A.html
196 IVc. Errata
201 enum pci_id_flags_bits {
202 /* Set PCI command register bits before calling probe1(). */
203 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
204 /* Read and map the single following PCI BAR. */
205 PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
206 PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
209 enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
211 #define EPIC_TOTAL_SIZE 0x100
212 #define USE_IO_OPS 1
213 #ifdef USE_IO_OPS
214 #define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR0
215 #else
216 #define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR1
217 #endif
219 typedef enum {
220 SMSC_83C170_0,
221 SMSC_83C170,
222 SMSC_83C175,
223 } chip_t;
226 struct epic_chip_info {
227 const char *name;
228 enum pci_id_flags_bits pci_flags;
229 int io_size; /* Needed for I/O region check or ioremap(). */
230 int drv_flags; /* Driver use, intended as capability flags. */
234 /* indexed by chip_t */
235 static struct epic_chip_info pci_id_tbl[] = {
236 { "SMSC EPIC/100 83c170",
237 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN },
238 { "SMSC EPIC/100 83c170",
239 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR },
240 { "SMSC EPIC/C 83c175",
241 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN },
245 static struct pci_device_id epic_pci_tbl[] __devinitdata = {
246 { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
247 { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
248 { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
249 PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
250 { 0,}
252 MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
255 #ifndef USE_IO_OPS
256 #undef inb
257 #undef inw
258 #undef inl
259 #undef outb
260 #undef outw
261 #undef outl
262 #define inb readb
263 #define inw readw
264 #define inl readl
265 #define outb writeb
266 #define outw writew
267 #define outl writel
268 #endif
270 /* Offsets to registers, using the (ugh) SMC names. */
271 enum epic_registers {
272 COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
273 PCIBurstCnt=0x18,
274 TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */
275 MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
276 LAN0=64, /* MAC address. */
277 MC0=80, /* Multicast filter table. */
278 RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
279 PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
282 /* Interrupt register bits, using my own meaningful names. */
283 enum IntrStatus {
284 TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
285 PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
286 RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
287 TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
288 RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
290 enum CommandBits {
291 StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
292 StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
295 static u16 media2miictl[16] = {
296 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
297 0, 0, 0, 0, 0, 0, 0, 0 };
299 /* The EPIC100 Rx and Tx buffer descriptors. */
301 struct epic_tx_desc {
302 u32 txstatus;
303 u32 bufaddr;
304 u32 buflength;
305 u32 next;
308 struct epic_rx_desc {
309 u32 rxstatus;
310 u32 bufaddr;
311 u32 buflength;
312 u32 next;
315 enum desc_status_bits {
316 DescOwn=0x8000,
319 #define PRIV_ALIGN 15 /* Required alignment mask */
320 struct epic_private {
321 struct epic_rx_desc *rx_ring;
322 struct epic_tx_desc *tx_ring;
323 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
324 struct sk_buff* tx_skbuff[TX_RING_SIZE];
325 /* The addresses of receive-in-place skbuffs. */
326 struct sk_buff* rx_skbuff[RX_RING_SIZE];
328 dma_addr_t tx_ring_dma;
329 dma_addr_t rx_ring_dma;
331 /* Ring pointers. */
332 spinlock_t lock; /* Group with Tx control cache line. */
333 unsigned int cur_tx, dirty_tx;
335 unsigned int cur_rx, dirty_rx;
336 unsigned int rx_buf_sz; /* Based on MTU+slack. */
338 struct pci_dev *pci_dev; /* PCI bus location. */
339 int chip_id, chip_flags;
341 struct net_device_stats stats;
342 struct timer_list timer; /* Media selection timer. */
343 int tx_threshold;
344 unsigned char mc_filter[8];
345 signed char phys[4]; /* MII device addresses. */
346 u16 advertising; /* NWay media advertisement */
347 int mii_phy_cnt;
348 struct mii_if_info mii;
349 unsigned int tx_full:1; /* The Tx queue is full. */
350 unsigned int default_port:4; /* Last dev->if_port value. */
353 static int epic_open(struct net_device *dev);
354 static int read_eeprom(long ioaddr, int location);
355 static int mdio_read(struct net_device *dev, int phy_id, int location);
356 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
357 static void epic_restart(struct net_device *dev);
358 static void epic_timer(unsigned long data);
359 static void epic_tx_timeout(struct net_device *dev);
360 static void epic_init_ring(struct net_device *dev);
361 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
362 static int epic_rx(struct net_device *dev);
363 static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
364 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
365 static int epic_close(struct net_device *dev);
366 static struct net_device_stats *epic_get_stats(struct net_device *dev);
367 static void set_rx_mode(struct net_device *dev);
/*
 * PCI probe routine: called once per matching 83c170/175 device.
 * Enables the device, maps its I/O region, allocates the net_device and
 * the consistent Tx/Rx descriptor rings, reads the station address from
 * the chip, scans for MII PHYs, and registers the interface.
 * Returns 0 on success or a negative errno.
 */
371 static int __devinit epic_init_one (struct pci_dev *pdev,
372 const struct pci_device_id *ent)
/* card_idx is static so it counts probed cards across invocations;
   it indexes the options[]/full_duplex[] module parameters. */
374 static int card_idx = -1;
375 long ioaddr;
376 int chip_idx = (int) ent->driver_data;
377 int irq;
378 struct net_device *dev;
379 struct epic_private *ep;
380 int i, option = 0, duplex = 0;
381 void *ring_space;
382 dma_addr_t ring_dma;
384 /* when built into the kernel, we only print version if device is found */
385 #ifndef MODULE
386 static int printed_version;
387 if (!printed_version++)
388 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
389 version, version2, version3);
390 #endif
392 card_idx++;
394 i = pci_enable_device(pdev);
395 if (i)
396 return i;
397 irq = pdev->irq;
/* BAR 0 must be large enough for the register window (0x100 bytes). */
399 if (pci_resource_len(pdev, 0) < pci_id_tbl[chip_idx].io_size) {
400 printk (KERN_ERR "card %d: no PCI region space\n", card_idx);
401 return -ENODEV;
404 pci_set_master(pdev);
406 dev = alloc_etherdev(sizeof (*ep));
407 if (!dev) {
408 printk (KERN_ERR "card %d: no memory for eth device\n", card_idx);
409 return -ENOMEM;
411 SET_MODULE_OWNER(dev);
412 SET_NETDEV_DEV(dev, &pdev->dev);
414 if (pci_request_regions(pdev, DRV_NAME))
415 goto err_out_free_netdev;
417 #ifdef USE_IO_OPS
418 ioaddr = pci_resource_start (pdev, 0);
419 #else
420 ioaddr = pci_resource_start (pdev, 1);
421 ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
422 if (!ioaddr) {
423 printk (KERN_ERR DRV_NAME " %d: ioremap failed\n", card_idx);
424 goto err_out_free_res;
426 #endif
428 pci_set_drvdata(pdev, dev);
429 ep = dev->priv;
/* Hook up the generic MII layer callbacks before any mdio access. */
430 ep->mii.dev = dev;
431 ep->mii.mdio_read = mdio_read;
432 ep->mii.mdio_write = mdio_write;
433 ep->mii.phy_id_mask = 0x1f;
434 ep->mii.reg_num_mask = 0x1f;
/* DMA-coherent descriptor rings: Tx first, then Rx. */
436 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
437 if (!ring_space)
438 goto err_out_iounmap;
439 ep->tx_ring = (struct epic_tx_desc *)ring_space;
440 ep->tx_ring_dma = ring_dma;
442 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
443 if (!ring_space)
444 goto err_out_unmap_tx;
445 ep->rx_ring = (struct epic_rx_desc *)ring_space;
446 ep->rx_ring_dma = ring_dma;
/* Media/duplex overrides: dev->mem_start (set by the boot-time ether=
   option) wins over the options[]/full_duplex[] module parameters. */
448 if (dev->mem_start) {
449 option = dev->mem_start;
450 duplex = (dev->mem_start & 16) ? 1 : 0;
451 } else if (card_idx >= 0 && card_idx < MAX_UNITS) {
452 if (options[card_idx] >= 0)
453 option = options[card_idx];
454 if (full_duplex[card_idx] >= 0)
455 duplex = full_duplex[card_idx];
458 dev->base_addr = ioaddr;
459 dev->irq = irq;
461 spin_lock_init (&ep->lock);
463 /* Bring the chip out of low-power mode. */
464 outl(0x4200, ioaddr + GENCTL);
465 /* Magic?! If we don't set this bit the MII interface won't work. */
466 /* This magic is documented in SMSC app note 7.15 */
467 for (i = 16; i > 0; i--)
468 outl(0x0008, ioaddr + TEST1);
470 /* Turn on the MII transceiver. */
471 outl(0x12, ioaddr + MIICfg);
472 if (chip_idx == 1)
473 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
474 outl(0x0200, ioaddr + GENCTL);
476 /* Note: the '175 does not have a serial EEPROM. */
/* NOTE(review): inw() already returns CPU byte order on x86; wrapping it
   in le16_to_cpu() looks like a double swap on big-endian -- TODO confirm. */
477 for (i = 0; i < 3; i++)
478 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));
480 if (debug > 2) {
481 printk(KERN_DEBUG DRV_NAME "(%s): EEPROM contents\n",
482 pdev->slot_name);
483 for (i = 0; i < 64; i++)
484 printk(" %4.4x%s", read_eeprom(ioaddr, i),
485 i % 16 == 15 ? "\n" : "");
488 ep->pci_dev = pdev;
489 ep->chip_id = chip_idx;
490 ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
492 /* Find the connected MII xcvrs.
493 Doing this in open() would allow detecting external xcvrs later, but
494 takes much time and no cards have external MII. */
496 int phy, phy_idx = 0;
497 for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
498 int mii_status = mdio_read(dev, phy, MII_BMSR);
/* A PHY answers with something other than all-ones/all-zeroes. */
499 if (mii_status != 0xffff && mii_status != 0x0000) {
500 ep->phys[phy_idx++] = phy;
501 printk(KERN_INFO DRV_NAME "(%s): MII transceiver #%d control "
502 "%4.4x status %4.4x.\n",
503 pdev->slot_name, phy, mdio_read(dev, phy, 0), mii_status);
506 ep->mii_phy_cnt = phy_idx;
507 if (phy_idx != 0) {
508 phy = ep->phys[0];
509 ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
510 printk(KERN_INFO DRV_NAME "(%s): Autonegotiation advertising %4.4x link "
511 "partner %4.4x.\n",
512 pdev->slot_name, ep->mii.advertising, mdio_read(dev, phy, 5));
513 } else if ( ! (ep->chip_flags & NO_MII)) {
514 printk(KERN_WARNING DRV_NAME "(%s): ***WARNING***: No MII transceiver found!\n",
515 pdev->slot_name);
516 /* Use the known PHY address of the EPII. */
517 ep->phys[0] = 3;
519 ep->mii.phy_id = ep->phys[0];
522 /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
523 if (ep->chip_flags & MII_PWRDWN)
524 outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
525 outl(0x0008, ioaddr + GENCTL);
527 /* The lower four bits are the media type. */
528 if (duplex) {
529 ep->mii.force_media = ep->mii.full_duplex = 1;
530 printk(KERN_INFO DRV_NAME "(%s): Forced full duplex operation requested.\n",
531 pdev->slot_name);
533 dev->if_port = ep->default_port = option;
535 /* The Epic-specific entries in the device structure. */
536 dev->open = &epic_open;
537 dev->hard_start_xmit = &epic_start_xmit;
538 dev->stop = &epic_close;
539 dev->get_stats = &epic_get_stats;
540 dev->set_multicast_list = &set_rx_mode;
541 dev->do_ioctl = &netdev_ioctl;
542 dev->watchdog_timeo = TX_TIMEOUT;
543 dev->tx_timeout = &epic_tx_timeout;
/* NOTE(review): on register_netdev() failure this jumps to
   err_out_unmap_tx, which frees only the Tx ring -- the Rx ring
   allocated above is leaked. A separate err_out_unmap_rx label that
   frees RX_TOTAL_SIZE should run first. */
545 i = register_netdev(dev);
546 if (i)
547 goto err_out_unmap_tx;
549 printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
550 dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
551 for (i = 0; i < 5; i++)
552 printk("%2.2x:", dev->dev_addr[i]);
553 printk("%2.2x.\n", dev->dev_addr[i]);
555 return 0;
/* Error unwinding, in reverse order of acquisition. */
557 err_out_unmap_tx:
558 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
559 err_out_iounmap:
560 #ifndef USE_IO_OPS
561 iounmap(ioaddr);
562 err_out_free_res:
563 #endif
564 pci_release_regions(pdev);
565 err_out_free_netdev:
/* NOTE(review): a net_device from alloc_etherdev() should be released
   with free_netdev(dev), not kfree(dev). */
566 kfree(dev);
567 return -ENODEV;
570 /* Serial EEPROM section. */
572 /* EEPROM_Ctrl bits. */
573 #define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
574 #define EE_CS 0x02 /* EEPROM chip select. */
575 #define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */
576 #define EE_WRITE_0 0x01
577 #define EE_WRITE_1 0x09
578 #define EE_DATA_READ 0x10 /* EEPROM chip data out. */
579 #define EE_ENB (0x0001 | EE_CS)
581 /* Delay between EEPROM clock transitions.
582 This serves to flush the operation to the PCI bus.
585 #define eeprom_delay() inl(ee_addr)
587 /* The EEPROM commands include the always-set leading bit. */
588 #define EE_WRITE_CMD (5 << 6)
589 #define EE_READ64_CMD (6 << 6)
590 #define EE_READ256_CMD (6 << 8)
591 #define EE_ERASE_CMD (7 << 6)
593 static int __devinit read_eeprom(long ioaddr, int location)
595 int i;
596 int retval = 0;
597 long ee_addr = ioaddr + EECTL;
598 int read_cmd = location |
599 (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
601 outl(EE_ENB & ~EE_CS, ee_addr);
602 outl(EE_ENB, ee_addr);
604 /* Shift the read command bits out. */
605 for (i = 12; i >= 0; i--) {
606 short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
607 outl(EE_ENB | dataval, ee_addr);
608 eeprom_delay();
609 outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
610 eeprom_delay();
612 outl(EE_ENB, ee_addr);
614 for (i = 16; i > 0; i--) {
615 outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
616 eeprom_delay();
617 retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
618 outl(EE_ENB, ee_addr);
619 eeprom_delay();
622 /* Terminate the EEPROM access. */
623 outl(EE_ENB & ~EE_CS, ee_addr);
624 return retval;
627 #define MII_READOP 1
628 #define MII_WRITEOP 2
629 static int mdio_read(struct net_device *dev, int phy_id, int location)
631 long ioaddr = dev->base_addr;
632 int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
633 int i;
635 outl(read_cmd, ioaddr + MIICtrl);
636 /* Typical operation takes 25 loops. */
637 for (i = 400; i > 0; i--) {
638 barrier();
639 if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
640 /* Work around read failure bug. */
641 if (phy_id == 1 && location < 6
642 && inw(ioaddr + MIIData) == 0xffff) {
643 outl(read_cmd, ioaddr + MIICtrl);
644 continue;
646 return inw(ioaddr + MIIData);
649 return 0xffff;
652 static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
654 long ioaddr = dev->base_addr;
655 int i;
657 outw(value, ioaddr + MIIData);
658 outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
659 for (i = 10000; i > 0; i--) {
660 barrier();
661 if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
662 break;
664 return;
/*
 * dev->open hook: reset the chip, grab the (shared) IRQ, build the
 * descriptor rings, run the power-up/init register sequence, program
 * the station address and media, then start Rx and enable interrupts.
 * Returns 0 on success or the request_irq() errno.
 * CAUTION: the register write order below is deliberate (see the
 * in-line comments) -- do not reorder.
 */
668 static int epic_open(struct net_device *dev)
670 struct epic_private *ep = dev->priv;
671 long ioaddr = dev->base_addr;
672 int i;
673 int retval;
675 /* Soft reset the chip. */
676 outl(0x4001, ioaddr + GENCTL);
678 if ((retval = request_irq(dev->irq, &epic_interrupt, SA_SHIRQ, dev->name, dev)))
679 return retval;
681 epic_init_ring(dev);
683 outl(0x4000, ioaddr + GENCTL);
684 /* This magic is documented in SMSC app note 7.15 */
685 for (i = 16; i > 0; i--)
686 outl(0x0008, ioaddr + TEST1);
688 /* Pull the chip out of low-power mode, enable interrupts, and set for
689 PCI read multiple. The MIIcfg setting and strange write order are
690 required by the details of which bits are reset and the transceiver
691 wiring on the Ositech CardBus card.
693 #if 0
694 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
695 #endif
696 if (ep->chip_flags & MII_PWRDWN)
697 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
/* GENCTL bit 5 selects descriptor byte order; BE hosts use 0x..32,
   LE hosts 0x..12.  The intermediate inl() flushes PCI posting. */
699 #if defined(__powerpc__) || defined(__sparc__) /* Big endian */
700 outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
701 inl(ioaddr + GENCTL);
702 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
703 #else
704 outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
705 inl(ioaddr + GENCTL);
706 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
707 #endif
709 udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
/* Program the station address into the LAN0 registers.
   NOTE(review): cpu_to_le16() around a value handed to outl() looks
   like a double swap on big-endian hosts -- TODO confirm. */
711 for (i = 0; i < 3; i++)
712 outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
714 ep->tx_threshold = TX_FIFO_THRESH;
715 outl(ep->tx_threshold, ioaddr + TxThresh);
/* Apply any forced media selection; otherwise read the link partner
   and pick duplex (or restart autonegotiation if none completed). */
717 if (media2miictl[dev->if_port & 15]) {
718 if (ep->mii_phy_cnt)
719 mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
720 if (dev->if_port == 1) {
721 if (debug > 1)
722 printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
723 "status %4.4x.\n",
724 dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
726 } else {
727 int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
728 if (mii_lpa != 0xffff) {
729 if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
730 ep->mii.full_duplex = 1;
731 else if (! (mii_lpa & LPA_LPACK))
732 mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
733 if (debug > 1)
734 printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
735 " register read of %4.4x.\n", dev->name,
736 ep->mii.full_duplex ? "full" : "half",
737 ep->phys[0], mii_lpa);
/* Point the chip at the descriptor rings and start reception. */
741 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
742 outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
743 outl(ep->tx_ring_dma, ioaddr + PTxCDAR);
745 /* Start the chip's Rx process. */
746 set_rx_mode(dev);
747 outl(StartRx | RxQueued, ioaddr + COMMAND);
749 netif_start_queue(dev);
751 /* Enable interrupts by setting the interrupt mask. */
752 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
753 | CntFull | TxUnderrun | TxDone | TxEmpty
754 | RxError | RxOverflow | RxFull | RxHeader | RxDone,
755 ioaddr + INTMASK);
757 if (debug > 1)
758 printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
759 "%s-duplex.\n",
760 dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
761 ep->mii.full_duplex ? "full" : "half");
763 /* Set the timer to switch to check for link beat and perhaps switch
764 to an alternate media type. */
765 init_timer(&ep->timer);
766 ep->timer.expires = jiffies + 3*HZ;
767 ep->timer.data = (unsigned long)dev;
768 ep->timer.function = &epic_timer; /* timer handler */
769 add_timer(&ep->timer);
771 return 0;
774 /* Reset the chip to recover from a PCI transaction error.
775 This may occur at interrupt time. */
776 static void epic_pause(struct net_device *dev)
778 long ioaddr = dev->base_addr;
779 struct epic_private *ep = dev->priv;
781 netif_stop_queue (dev);
783 /* Disable interrupts by clearing the interrupt mask. */
784 outl(0x00000000, ioaddr + INTMASK);
785 /* Stop the chip's Tx and Rx DMA processes. */
786 outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);
788 /* Update the error counts. */
789 if (inw(ioaddr + COMMAND) != 0xffff) {
790 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
791 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
792 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
795 /* Remove the packets on the Rx queue. */
796 epic_rx(dev);
/*
 * Re-initialize the chip after a PCI bus error or Tx hang: soft reset,
 * redo the app-note-7.15 TEST1 sequence, reprogram byte order, MII,
 * station address, thresholds, and re-aim the descriptor pointers at
 * the CURRENT ring positions so in-flight work is not lost, then
 * restart Rx and re-enable interrupts.  May run at interrupt time.
 */
799 static void epic_restart(struct net_device *dev)
801 long ioaddr = dev->base_addr;
802 struct epic_private *ep = dev->priv;
803 int i;
805 /* Soft reset the chip. */
806 outl(0x4001, ioaddr + GENCTL);
808 printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
809 dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
810 udelay(1);
812 /* This magic is documented in SMSC app note 7.15 */
813 for (i = 16; i > 0; i--)
814 outl(0x0008, ioaddr + TEST1);
/* Descriptor byte-order select (0x..32 on BE hosts, 0x..12 on LE). */
816 #if defined(__powerpc__) || defined(__sparc__) /* Big endian */
817 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
818 #else
819 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
820 #endif
821 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
822 if (ep->chip_flags & MII_PWRDWN)
823 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
/* Restore the station address (reset cleared it).
   NOTE(review): cpu_to_le16() before outl() -- same double-swap
   suspicion as in epic_open(); TODO confirm on big-endian. */
825 for (i = 0; i < 3; i++)
826 outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
828 ep->tx_threshold = TX_FIFO_THRESH;
829 outl(ep->tx_threshold, ioaddr + TxThresh);
830 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
/* Resume from the current ring positions, not from slot 0. */
831 outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
832 sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
833 outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
834 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);
836 /* Start the chip's Rx process. */
837 set_rx_mode(dev);
838 outl(StartRx | RxQueued, ioaddr + COMMAND);
840 /* Enable interrupts by setting the interrupt mask. */
841 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
842 | CntFull | TxUnderrun | TxDone | TxEmpty
843 | RxError | RxOverflow | RxFull | RxHeader | RxDone,
844 ioaddr + INTMASK);
845 printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
846 " interrupt %4.4x.\n",
847 dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
848 (int)inl(ioaddr + INTSTAT));
849 return;
852 static void check_media(struct net_device *dev)
854 struct epic_private *ep = dev->priv;
855 long ioaddr = dev->base_addr;
856 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
857 int negotiated = mii_lpa & ep->mii.advertising;
858 int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
860 if (ep->mii.force_media)
861 return;
862 if (mii_lpa == 0xffff) /* Bogus read */
863 return;
864 if (ep->mii.full_duplex != duplex) {
865 ep->mii.full_duplex = duplex;
866 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
867 " partner capability of %4.4x.\n", dev->name,
868 ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
869 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
873 static void epic_timer(unsigned long data)
875 struct net_device *dev = (struct net_device *)data;
876 struct epic_private *ep = dev->priv;
877 long ioaddr = dev->base_addr;
878 int next_tick = 5*HZ;
880 if (debug > 3) {
881 printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
882 dev->name, (int)inl(ioaddr + TxSTAT));
883 printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
884 "IntStatus %4.4x RxStatus %4.4x.\n",
885 dev->name, (int)inl(ioaddr + INTMASK),
886 (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
889 check_media(dev);
891 ep->timer.expires = jiffies + next_tick;
892 add_timer(&ep->timer);
895 static void epic_tx_timeout(struct net_device *dev)
897 struct epic_private *ep = dev->priv;
898 long ioaddr = dev->base_addr;
900 if (debug > 0) {
901 printk(KERN_WARNING "%s: Transmit timeout using MII device, "
902 "Tx status %4.4x.\n",
903 dev->name, (int)inw(ioaddr + TxSTAT));
904 if (debug > 1) {
905 printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
906 dev->name, ep->dirty_tx, ep->cur_tx);
909 if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */
910 ep->stats.tx_fifo_errors++;
911 outl(RestartTx, ioaddr + COMMAND);
912 } else {
913 epic_restart(dev);
914 outl(TxQueued, dev->base_addr + COMMAND);
917 dev->trans_start = jiffies;
918 ep->stats.tx_errors++;
919 if (!ep->tx_full)
920 netif_wake_queue(dev);
923 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/*
 * Build the Rx and Tx descriptor rings from scratch: reset all ring
 * indices, chain the descriptors' next pointers into circular lists,
 * and pre-allocate/DMA-map one sk_buff per Rx slot (the DescOwn bit is
 * set last so the chip only sees fully-initialized descriptors).
 * Allocation failure mid-ring is tolerated; dirty_rx records the gap.
 */
924 static void epic_init_ring(struct net_device *dev)
926 struct epic_private *ep = dev->priv;
927 int i;
929 ep->tx_full = 0;
930 ep->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
931 ep->dirty_tx = ep->cur_tx = 0;
932 ep->cur_rx = ep->dirty_rx = 0;
933 ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
935 /* Initialize all Rx descriptors. */
/* NOTE(review): .next is a bus address consumed by the chip but is not
   wrapped in cpu_to_le32() like .buflength/.rxstatus -- looks wrong on
   big-endian hosts; TODO confirm against the descriptor byte-order
   mode selected via GENCTL. */
936 for (i = 0; i < RX_RING_SIZE; i++) {
937 ep->rx_ring[i].rxstatus = 0;
938 ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
939 ep->rx_ring[i].next = ep->rx_ring_dma +
940 (i+1)*sizeof(struct epic_rx_desc);
941 ep->rx_skbuff[i] = 0;
943 /* Mark the last entry as wrapping the ring. */
944 ep->rx_ring[i-1].next = ep->rx_ring_dma;
946 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
947 for (i = 0; i < RX_RING_SIZE; i++) {
948 struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
949 ep->rx_skbuff[i] = skb;
950 if (skb == NULL)
951 break;
952 skb->dev = dev; /* Mark as being used by this device. */
953 skb_reserve(skb, 2); /* 16 byte align the IP header. */
/* bufaddr/DescOwn order matters: hand the descriptor to the chip
   only after the buffer address is in place. */
954 ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
955 skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
956 ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
/* Negative offset (wraps as unsigned) records how many Rx slots
   are still unfilled after an allocation failure. */
958 ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
960 /* The Tx buffer descriptor is filled in as needed, but we
961 do need to clear the ownership bit. */
962 for (i = 0; i < TX_RING_SIZE; i++) {
963 ep->tx_skbuff[i] = 0;
964 ep->tx_ring[i].txstatus = 0x0000;
965 ep->tx_ring[i].next = ep->tx_ring_dma +
966 (i+1)*sizeof(struct epic_tx_desc);
968 ep->tx_ring[i-1].next = ep->tx_ring_dma;
969 return;
/*
 * dev->hard_start_xmit hook: queue one sk_buff on the Tx ring.
 * Short frames are padded to ETH_ZLEN first.  Interrupt mitigation:
 * a Tx-done interrupt is only requested at the queue's half-full mark
 * and when the ring fills.  Returns 0 (the skb is always consumed).
 */
972 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
974 struct epic_private *ep = dev->priv;
975 int entry, free_count;
976 u32 ctrl_word;
977 unsigned long flags;
/* skb_padto() frees the skb and returns NULL on failure, so
   returning 0 here does not leak. */
979 if (skb->len < ETH_ZLEN) {
980 skb = skb_padto(skb, ETH_ZLEN);
981 if (skb == NULL)
982 return 0;
985 /* Caution: the write order is important here, set the field with the
986 "ownership" bit last. */
988 /* Calculate the next Tx descriptor entry. */
989 spin_lock_irqsave(&ep->lock, flags);
990 free_count = ep->cur_tx - ep->dirty_tx;
991 entry = ep->cur_tx % TX_RING_SIZE;
993 ep->tx_skbuff[entry] = skb;
994 ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
995 skb->len, PCI_DMA_TODEVICE);
/* Choose the control word: only request a Tx-done interrupt at the
   half-full watermark and when the ring becomes full. */
996 if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
997 ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
998 } else if (free_count == TX_QUEUE_LEN/2) {
999 ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
1000 } else if (free_count < TX_QUEUE_LEN - 1) {
1001 ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
1002 } else {
1003 /* Leave room for an additional entry. */
1004 ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
1005 ep->tx_full = 1;
1007 ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
/* NOTE(review): the length half of txstatus is NOT byte-swapped while
   the DescOwn half is -- this mix looks broken on big-endian hosts;
   TODO confirm against the GENCTL byte-order mode. */
1008 ep->tx_ring[entry].txstatus =
1009 ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
1010 | cpu_to_le32(DescOwn);
1012 ep->cur_tx++;
1013 if (ep->tx_full)
1014 netif_stop_queue(dev);
1016 spin_unlock_irqrestore(&ep->lock, flags);
1017 /* Trigger an immediate transmit demand. */
1018 outl(TxQueued, dev->base_addr + COMMAND);
1020 dev->trans_start = jiffies;
/* NOTE(review): "%2.2x" prints a u32 ctrl_word -- "%8.8x" would match. */
1021 if (debug > 4)
1022 printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
1023 "flag %2.2x Tx status %8.8x.\n",
1024 dev->name, (int)skb->len, entry, ctrl_word,
1025 (int)inl(dev->base_addr + TxSTAT));
1027 return 0;
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;
	int status, boguscnt = max_interrupt_work;
	unsigned int handled = 0;

	/* Loop while the chip reports pending events, bounded by
	   max_interrupt_work to avoid starving the rest of the system. */
	do {
		status = inl(ioaddr + INTSTAT);
		/* Acknowledge all of the current interrupt sources ASAP. */
		outl(status & 0x00007fff, ioaddr + INTSTAT);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
				   "intstat=%#8.8x.\n",
				   dev->name, status, (int)inl(ioaddr + INTSTAT));

		/* No summary bit: this IRQ was not ours (shared line). */
		if ((status & IntrSummary) == 0)
			break;
		handled = 1;

		if (status & (RxDone | RxStarted | RxEarlyWarn | RxOverflow))
			epic_rx(dev);

		/* Reclaim completed Tx descriptors and free their skbs. */
		if (status & (TxEmpty | TxDone)) {
			unsigned int dirty_tx, cur_tx;

			/* Note: if this lock becomes a problem we can narrow the locked
			   region at the cost of occasionally grabbing the lock more
			   times. */
			spin_lock(&ep->lock);
			cur_tx = ep->cur_tx;
			dirty_tx = ep->dirty_tx;
			for (; cur_tx - dirty_tx > 0; dirty_tx++) {
				struct sk_buff *skb;
				int entry = dirty_tx % TX_RING_SIZE;
				int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);

				if (txstatus & DescOwn)
					break;			/* It still hasn't been Txed */

				/* Bit 0 set means the frame went out OK. */
				if ( ! (txstatus & 0x0001)) {
					/* There was an major error, log it. */
#ifndef final_version
					if (debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
							   dev->name, txstatus);
#endif
					ep->stats.tx_errors++;
					/* Per-cause error counters; bit meanings follow the
					   EPIC Tx status word layout. */
					if (txstatus & 0x1050) ep->stats.tx_aborted_errors++;
					if (txstatus & 0x0008) ep->stats.tx_carrier_errors++;
					if (txstatus & 0x0040) ep->stats.tx_window_errors++;
					if (txstatus & 0x0010) ep->stats.tx_fifo_errors++;
				} else {
					/* Collision count lives in bits 8-11 of the status. */
					ep->stats.collisions += (txstatus >> 8) & 15;
					ep->stats.tx_packets++;
					ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
				}

				/* Free the original skb. */
				skb = ep->tx_skbuff[entry];
				pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
						 skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(skb);
				ep->tx_skbuff[entry] = 0;
			}

#ifndef final_version
			/* Sanity check: the reclaim pointer must never get more than
			   one full ring ahead of the producer. */
			if (cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_WARNING "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
					   dev->name, dirty_tx, cur_tx, ep->tx_full);
				dirty_tx += TX_RING_SIZE;
			}
#endif
			ep->dirty_tx = dirty_tx;
			if (ep->tx_full
				&& cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
				/* The ring is no longer full, allow new TX entries. */
				ep->tx_full = 0;
				spin_unlock(&ep->lock);
				netif_wake_queue(dev);
			} else
				spin_unlock(&ep->lock);
		}

		/* Check uncommon events all at once. */
		if (status & (CntFull | TxUnderrun | RxOverflow | RxFull |
					  PCIBusErr170 | PCIBusErr175)) {
			if (status == 0xffffffff) /* Chip failed or removed (CardBus). */
				break;
			/* Always update the error counts to avoid overhead later. */
			ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
			ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
			ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);

			if (status & TxUnderrun) { /* Tx FIFO underflow. */
				ep->stats.tx_fifo_errors++;
				/* Raise the Tx start threshold by 128 bytes and restart. */
				outl(ep->tx_threshold += 128, ioaddr + TxThresh);
				/* Restart the transmit process. */
				outl(RestartTx, ioaddr + COMMAND);
			}
			if (status & RxOverflow) { /* Missed a Rx frame. */
				ep->stats.rx_errors++;
			}
			/* NOTE(review): this is a 16-bit write while the other COMMAND
			   accesses use outl — looks intentional in the original driver,
			   but worth confirming against the 83c170 datasheet. */
			if (status & (RxOverflow | RxFull))
				outw(RxQueued, ioaddr + COMMAND);
			if (status & PCIBusErr170) {
				printk(KERN_ERR "%s: PCI Bus Error! EPIC status %4.4x.\n",
					   dev->name, status);
				epic_pause(dev);
				epic_restart(dev);
			}
			/* Clear all error sources. */
			outl(status & 0x7f18, ioaddr + INTSTAT);
		}
		if (--boguscnt < 0) {
			printk(KERN_ERR "%s: Too much work at interrupt, "
				   "IntrStatus=0x%8.8x.\n",
				   dev->name, status);
			/* Clear all interrupt sources. */
			outl(0x0001ffff, ioaddr + INTSTAT);
			break;
		}
	} while (1);

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, intr_status=%#4.4x.\n",
			   dev->name, status);

	return IRQ_RETVAL(handled);
}
/* Receive path: walk the Rx ring handing completed frames to the stack,
   then refill any ring slots whose skbs were consumed.  Called from the
   interrupt handler.  Returns the number of descriptors processed. */
static int epic_rx(struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	int entry = ep->cur_rx % RX_RING_SIZE;
	/* At most one pass over the slots currently owned by the driver. */
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
	int work_done = 0;

	if (debug > 4)
		printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
			   ep->rx_ring[entry].rxstatus);

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
		int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);

		if (debug > 4)
			printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status);
		if (--rx_work_limit < 0)
			break;
		/* 0x2006: multi-buffer frame or hardware-detected frame error. */
		if (status & 0x2006) {
			if (debug > 2)
				printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
					   dev->name, status);
			if (status & 0x2000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
					   "multiple buffers, status %4.4x!\n", dev->name, status);
				ep->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				ep->stats.rx_errors++;
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			/* Make the DMA'd data visible to the CPU before touching it. */
			pci_dma_sync_single(ep->pci_dev, ep->rx_ring[entry].bufaddr,
					    ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			if (pkt_len > PKT_BUF_SZ - 4) {
				printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
					   "%d bytes.\n",
					   dev->name, status, pkt_len);
				/* Clamp to a maximal frame so we don't overrun the skb. */
				pkt_len = 1514;
			}
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				/* Small frame: copy into a fresh skb and leave the ring
				   buffer (still mapped) in place for reuse. */
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
#if 1 /* HAS_IP_COPYSUM */
				eth_copy_and_sum(skb, ep->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len), ep->rx_skbuff[entry]->tail,
					   pkt_len);
#endif
			} else {
				/* Large frame (or copy alloc failed): unmap and pass the
				   ring skb itself upstream; the slot is refilled below. */
				pci_unmap_single(ep->pci_dev,
					ep->rx_ring[entry].bufaddr,
					ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
				ep->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			ep->stats.rx_packets++;
			ep->stats.rx_bytes += pkt_len;
		}
		work_done++;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
			if (skb == NULL)
				break;	/* Retry on the next epic_rx() call. */
			skb->dev = dev;			/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
				skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			work_done++;
		}
		/* Hand the descriptor back to the chip last. */
		ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
	}
	return work_done;
}
1257 static int epic_close(struct net_device *dev)
1259 long ioaddr = dev->base_addr;
1260 struct epic_private *ep = dev->priv;
1261 struct sk_buff *skb;
1262 int i;
1264 netif_stop_queue(dev);
1266 if (debug > 1)
1267 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
1268 dev->name, (int)inl(ioaddr + INTSTAT));
1270 del_timer_sync(&ep->timer);
1271 epic_pause(dev);
1272 free_irq(dev->irq, dev);
1274 /* Free all the skbuffs in the Rx queue. */
1275 for (i = 0; i < RX_RING_SIZE; i++) {
1276 skb = ep->rx_skbuff[i];
1277 ep->rx_skbuff[i] = 0;
1278 ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
1279 ep->rx_ring[i].buflength = 0;
1280 if (skb) {
1281 pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
1282 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1283 dev_kfree_skb(skb);
1285 ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
1287 for (i = 0; i < TX_RING_SIZE; i++) {
1288 skb = ep->tx_skbuff[i];
1289 ep->tx_skbuff[i] = 0;
1290 if (!skb)
1291 continue;
1292 pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
1293 skb->len, PCI_DMA_TODEVICE);
1294 dev_kfree_skb(skb);
1297 /* Green! Leave the chip in low-power mode. */
1298 outl(0x0008, ioaddr + GENCTL);
1300 return 0;
1303 static struct net_device_stats *epic_get_stats(struct net_device *dev)
1305 struct epic_private *ep = dev->priv;
1306 long ioaddr = dev->base_addr;
1308 if (netif_running(dev)) {
1309 /* Update the error counts. */
1310 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1311 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1312 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1315 return &ep->stats;
1318 /* Set or clear the multicast filter for this adaptor.
1319 Note that we only use exclusion around actually queueing the
1320 new frame, not around filling ep->setup_frame. This is non-deterministic
1321 when re-entered but still correct. */
1323 static void set_rx_mode(struct net_device *dev)
1325 long ioaddr = dev->base_addr;
1326 struct epic_private *ep = dev->priv;
1327 unsigned char mc_filter[8]; /* Multicast hash filter */
1328 int i;
1330 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1331 outl(0x002C, ioaddr + RxCtrl);
1332 /* Unconditionally log net taps. */
1333 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
1334 memset(mc_filter, 0xff, sizeof(mc_filter));
1335 } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
1336 /* There is apparently a chip bug, so the multicast filter
1337 is never enabled. */
1338 /* Too many to filter perfectly -- accept all multicasts. */
1339 memset(mc_filter, 0xff, sizeof(mc_filter));
1340 outl(0x000C, ioaddr + RxCtrl);
1341 } else if (dev->mc_count == 0) {
1342 outl(0x0004, ioaddr + RxCtrl);
1343 return;
1344 } else { /* Never executed, for now. */
1345 struct dev_mc_list *mclist;
1347 memset(mc_filter, 0, sizeof(mc_filter));
1348 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1349 i++, mclist = mclist->next) {
1350 unsigned int bit_nr =
1351 ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
1352 mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1355 /* ToDo: perhaps we need to stop the Tx and Rx process here? */
1356 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1357 for (i = 0; i < 4; i++)
1358 outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
1359 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1361 return;
1364 static int netdev_ethtool_ioctl (struct net_device *dev, void *useraddr)
1366 struct epic_private *np = dev->priv;
1367 u32 ethcmd;
1369 if (copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
1370 return -EFAULT;
1372 switch (ethcmd) {
1373 case ETHTOOL_GDRVINFO: {
1374 struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
1375 strcpy (info.driver, DRV_NAME);
1376 strcpy (info.version, DRV_VERSION);
1377 strcpy (info.bus_info, np->pci_dev->slot_name);
1378 if (copy_to_user (useraddr, &info, sizeof (info)))
1379 return -EFAULT;
1380 return 0;
1383 /* get settings */
1384 case ETHTOOL_GSET: {
1385 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1386 spin_lock_irq(&np->lock);
1387 mii_ethtool_gset(&np->mii, &ecmd);
1388 spin_unlock_irq(&np->lock);
1389 if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
1390 return -EFAULT;
1391 return 0;
1393 /* set settings */
1394 case ETHTOOL_SSET: {
1395 int r;
1396 struct ethtool_cmd ecmd;
1397 if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
1398 return -EFAULT;
1399 spin_lock_irq(&np->lock);
1400 r = mii_ethtool_sset(&np->mii, &ecmd);
1401 spin_unlock_irq(&np->lock);
1402 return r;
1404 /* restart autonegotiation */
1405 case ETHTOOL_NWAY_RST: {
1406 return mii_nway_restart(&np->mii);
1408 /* get link status */
1409 case ETHTOOL_GLINK: {
1410 struct ethtool_value edata = {ETHTOOL_GLINK};
1411 edata.data = mii_link_ok(&np->mii);
1412 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1413 return -EFAULT;
1414 return 0;
1417 /* get message-level */
1418 case ETHTOOL_GMSGLVL: {
1419 struct ethtool_value edata = {ETHTOOL_GMSGLVL};
1420 edata.data = debug;
1421 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1422 return -EFAULT;
1423 return 0;
1425 /* set message-level */
1426 case ETHTOOL_SMSGLVL: {
1427 struct ethtool_value edata;
1428 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1429 return -EFAULT;
1430 debug = edata.data;
1431 return 0;
1433 default:
1434 break;
1437 return -EOPNOTSUPP;
1440 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1442 struct epic_private *np = dev->priv;
1443 long ioaddr = dev->base_addr;
1444 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
1445 int rc;
1447 /* power-up, if interface is down */
1448 if (! netif_running(dev)) {
1449 outl(0x0200, ioaddr + GENCTL);
1450 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
1453 /* ethtool commands */
1454 if (cmd == SIOCETHTOOL)
1455 rc = netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
1457 /* all other ioctls (the SIOC[GS]MIIxxx ioctls) */
1458 else {
1459 spin_lock_irq(&np->lock);
1460 rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
1461 spin_unlock_irq(&np->lock);
1464 /* power-down, if interface is down */
1465 if (! netif_running(dev)) {
1466 outl(0x0008, ioaddr + GENCTL);
1467 outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
1469 return rc;
1473 static void __devexit epic_remove_one (struct pci_dev *pdev)
1475 struct net_device *dev = pci_get_drvdata(pdev);
1476 struct epic_private *ep = dev->priv;
1478 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1479 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1480 unregister_netdev(dev);
1481 #ifndef USE_IO_OPS
1482 iounmap((void*) dev->base_addr);
1483 #endif
1484 pci_release_regions(pdev);
1485 kfree(dev);
1486 pci_set_drvdata(pdev, NULL);
1487 /* pci_power_off(pdev, -1); */
1491 #ifdef CONFIG_PM
1493 static int epic_suspend (struct pci_dev *pdev, u32 state)
1495 struct net_device *dev = pci_get_drvdata(pdev);
1496 long ioaddr = dev->base_addr;
1498 if (!netif_running(dev))
1499 return 0;
1500 epic_pause(dev);
1501 /* Put the chip into low-power mode. */
1502 outl(0x0008, ioaddr + GENCTL);
1503 /* pci_power_off(pdev, -1); */
1504 return 0;
/* Power-management resume hook: reinitialize the chip if the interface
   was running when we suspended. */
static int epic_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev))
		epic_restart(dev);
	/* pci_power_on(pdev); */
	return 0;
}
1519 #endif /* CONFIG_PM */
/* PCI driver glue: device-ID table, probe/remove, and (when built with
   CONFIG_PM) the suspend/resume hooks defined above. */
static struct pci_driver epic_driver = {
	.name		= DRV_NAME,
	.id_table	= epic_pci_tbl,
	.probe		= epic_init_one,
	.remove		= __devexit_p(epic_remove_one),
#ifdef CONFIG_PM
	.suspend	= epic_suspend,
	.resume		= epic_resume,
#endif /* CONFIG_PM */
};
/* Module entry point: print the version banner (modular builds only)
   and register the PCI driver. */
static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	/* Three KERN_INFO prefixes emit the banner as three log lines. */
	printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
		version, version2, version3);
#endif

	return pci_module_init (&epic_driver);
}
/* Module exit point: unregister the PCI driver, which in turn removes
   every bound device. */
static void __exit epic_cleanup (void)
{
	pci_unregister_driver (&epic_driver);
}
/* Wire the init/exit functions into the module loader. */
module_init(epic_init);
module_exit(epic_cleanup);