/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Roger Luethi <rl@hellgate.ch>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html
	[link no longer provides useful info -jgarzik]

*/
#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.4.0"
#define DRV_RELDATE	"June-27-2006"


/* A few user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;	/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;
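/*
 * For illustration (a sketch mirroring what rhine_set_rx_mode() below
 * actually does): the top six bits of the Ethernet CRC of each multicast
 * address select one of the 64 hash bits, split across two 32-bit
 * filter registers:
 *
 *	bit_nr = ether_crc(ETH_ALEN, addr) >> 26;	// 0..63
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 */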
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#define RX_RING_SIZE	16


/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";

/* This driver was written to use PCI memory space. Some early versions
   of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#else
#endif

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
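/*
 * Illustrative usage (not from the original sources; the module name
 * via-rhine is assumed): the three parameters above are plain integers
 * set at module load time, e.g.
 *
 *	modprobe via-rhine debug=3 rx_copybreak=200 max_interrupt_work=40
 */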
/*
		Theory of Operation

I. Board Compatibility

This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller.
II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.
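As a sketch of the copy path just described (this mirrors what rhine_rx()
below actually does for frames shorter than the copy breakpoint):

	skb = dev_alloc_skb(pkt_len + 2);
	skb_reserve(skb, 2);		// 14 byte header + 2 => IP at 16
	eth_copy_and_sum(skb, rp->rx_skbuff[entry]->data, pkt_len, 0);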
IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->priv->lock spinlock. The other thread is the interrupt handler, which
is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
is not available it stops the transmit queue by calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Tx ring are available the transmit queue is woken up if it was stopped.
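In code terms, a sketch of the handshake (mirrors the checks made in
rhine_start_tx() and rhine_tx() below; both indices only ever grow, so
unsigned wrap-around is benign):

	entries in flight = cur_tx - dirty_tx
	sender:  stop queue when cur_tx == dirty_tx + TX_QUEUE_LEN
	handler: wake queue when cur_tx - dirty_tx < TX_QUEUE_LEN - 4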
IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not reliable information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/
/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
*/
enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
 */
/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
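/*
 * Usage sketch (this is exactly how rhine_chip_reset() below uses it):
 * a dummy read forces the preceding posted write out to the chip before
 * execution continues:
 *
 *	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 *	IOSYNC;
 */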
static struct pci_device_id rhine_pci_tbl[] =
{
	{0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },	/* VT86C100A */
	{0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },	/* VT6102 */
	{0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },	/* 6105{,L,LOM} */
	{0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

#ifdef USE_MMIO
/* Registers we check that mmio and reg are the same. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};
#endif

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
	IntrPCIErr=0x0040,
	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
	IntrRxWakeUp=0x8000,
	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
	IntrTxErrSummary=0x082218,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	s32 rx_status;
	u32 desc_length;	/* Chain flag, Buffer/frame length */
	u32 addr;
	u32 next_desc;
};
struct tx_desc {
	s32 tx_status;
	u32 desc_length;	/* Chain flag, Tx Config, Frame length */
	u32 addr;
	u32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000
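/*
 * Example (mirrors what rhine_start_tx() below does): the frame length
 * is OR-ed into the low bits, so a minimum-size 60 byte frame would be
 * described by
 *
 *	desc_length = cpu_to_le32(TXDESC | 60);	// 0x00e08000 | 0x3c
 */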
enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_private {
	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;
	long pioaddr;
	struct net_device_stats stats;
	spinlock_t lock;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  rhine_open(struct net_device *dev);
static void rhine_tx_timeout(struct net_device *dev);
static int  rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static void rhine_tx(struct net_device *dev);
static void rhine_rx(struct net_device *dev);
static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct ethtool_ops netdev_ethtool_ops;
static int  rhine_close(struct net_device *dev);
static void rhine_shutdown (struct pci_dev *pdev);

#define RHINE_WAIT_FOR(condition) do {					\
	int i=1024;							\
	while (!(condition) && --i)					\
		;							\
	if (debug > 1 && i < 512)					\
		printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n",	\
		       DRV_NAME, 1024-i, __func__, __LINE__);		\
} while(0)
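/*
 * Usage sketch: busy-wait until a chip-managed bit changes, e.g. waiting
 * for a soft reset to finish (as rhine_chip_reset() below does):
 *
 *	RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
 *
 * The macro gives up after 1024 polls and, at debug > 1, logs whenever
 * more than half of that budget was consumed.
 */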
static inline u32 get_intr_status(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}
/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
			       DRV_NAME, reason);
		}
	}
}
static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		printk(KERN_INFO "%s: Reset not complete yet. "
		       "Trying harder.\n", DRV_NAME);

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
	}

	if (debug > 1)
		printk(KERN_INFO "%s: Reset %s.\n", dev->name,
		       (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
		       "failed" : "succeeded");
}
#ifdef USE_MMIO
static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;
	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(pioaddr + ConfigA) | 0x20;
		outb(n, pioaddr + ConfigA);
	} else {
		n = inb(pioaddr + ConfigD) | 0x80;
		outb(n, pioaddr + ConfigD);
	}
}
#endif
/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev, NULL);
	enable_irq(dev->irq);
}
#endif

static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	rhine_reload_eeprom(pioaddr, dev);
}
static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc;
	u8 pci_rev;
	u32 quirks;
	long pioaddr;
	long memaddr;
	void __iomem *ioaddr;
	int io_size, phy_id;
	const char *name;
#ifdef USE_MMIO
	int bar = 1;
#else
	int bar = 0;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);

	io_size = 256;
	phy_id = 0;
	quirks = 0;
	name = "Rhine";
	if (pci_rev < VTunknown0) {
		quirks = rqRhineI;
		io_size = 128;
	}
	else if (pci_rev >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pci_rev < VT6105) {
			name = "Rhine II";
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		}
		else {
			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
			if (pci_rev >= VT6105_B0)
				quirks |= rq6patterns;
			if (pci_rev < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* this should always be supported */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
		       "the card!?\n");
		goto err_out;
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		printk(KERN_ERR "Insufficient PCI resources, aborting\n");
		goto err_out;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		printk(KERN_ERR "alloc_etherdev failed\n");
		goto err_out;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	rp = netdev_priv(dev);
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->pdev = pdev;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_free_netdev;

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
		       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);
		if (a != b) {
			rc = -EIO;
			printk(KERN_ERR "MMIO do not match PIO [%02x] "
			       "(%02x != %02x)\n", reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */

	dev->base_addr = (unsigned long)ioaddr;
	rp->base = ioaddr;

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		rc = -EIO;
		printk(KERN_ERR "Invalid MAC address\n");
		goto err_out_unmap;
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	dev->irq = pdev->irq;

	spin_lock_init(&rp->lock);
	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->open = rhine_open;
	dev->hard_start_xmit = rhine_start_tx;
	dev->stop = rhine_close;
	dev->get_stats = rhine_get_stats;
	dev->set_multicast_list = rhine_set_rx_mode;
	dev->do_ioctl = netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = rhine_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rhine_poll;
#endif
	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_unmap;

	printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
	       dev->name, name,
#ifdef USE_MMIO
	       memaddr
#else
	       (long)ioaddr
#endif
	       );

	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);

	pci_set_drvdata(pdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			printk(KERN_INFO "%s: MII PHY found at address "
			       "%d, status 0x%4.4x advertising %4.4x "
			       "Link %4.4x.\n", dev->name, phy_id,
			       mii_status, rp->mii_if.advertising,
			       mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}
	rp->mii_if.phy_id = phy_id;

	return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}
static int alloc_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void *ring;
	dma_addr_t ring_dma;

	ring = pci_alloc_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    &ring_dma);
	if (!ring) {
		printk(KERN_ERR "Could not allocate DMA memory.\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
						   PKT_BUF_SZ * TX_RING_SIZE,
						   &rp->tx_bufs_dma);
		if (rp->tx_bufs == NULL) {
			pci_free_consistent(rp->pdev,
					    RX_RING_SIZE * sizeof(struct rx_desc) +
					    TX_RING_SIZE * sizeof(struct tx_desc),
					    ring, ring_dma);
			return -ENOMEM;
		}
	}

	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}
static void free_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	pci_free_consistent(rp->pdev,
			    RX_RING_SIZE * sizeof(struct rx_desc) +
			    TX_RING_SIZE * sizeof(struct tx_desc),
			    rp->rx_ring, rp->rx_ring_dma);
	rp->tx_ring = NULL;

	if (rp->tx_bufs)
		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
				    rp->tx_bufs, rp->tx_bufs_dma);

	rp->tx_bufs = NULL;
}
static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_rx = rp->cur_rx = 0;

	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;	/* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);

		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}
static void free_rbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->rx_skbuff[i]) {
			pci_unmap_single(rp->pdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		}
		rp->rx_skbuff[i] = NULL;
	}
}
static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
}
static void free_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->tx_skbuff[i]) {
			if (rp->tx_skbuff_dma[i]) {
				pci_unmap_single(rp->pdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}
static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	mii_check_media(&rp->mii_if, debug, init_media);

	if (rp->mii_if.full_duplex)
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	else
		iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	if (debug > 1)
		printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name,
		       rp->mii_if.force_media, netif_carrier_ok(dev));
}
/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
{
	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(mii->dev))
			netif_carrier_on(mii->dev);
	}
	else	/* Let MII library update carrier status */
		rhine_check_media(mii->dev, 0);
	if (debug > 1)
		printk(KERN_INFO "%s: force_media %d, carrier %d\n",
		       mii->dev->name, mii->force_media,
		       netif_carrier_ok(mii->dev));
}
static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
		  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
		  IntrTxDone | IntrTxError | IntrTxUnderrun |
		  IntrPCIErr | IntrStatsMax | IntrLinkChange,
		  ioaddr + IntrEnable);

	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
		  ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}
/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));

	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}

/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
	iowrite8(0, ioaddr + MIICmd);

	if (quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	/* MII_BMSR */

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}
/* Read and write over the MII Management Data I/O (MDIO) interface. */

static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(ioaddr);
	return result;
}

static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));

	rhine_enable_linkmon(ioaddr);
}
static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name,
			 dev);
	if (rc)
		return rc;

	if (debug > 1)
		printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
		       dev->name, rp->pdev->irq);

	rc = alloc_ring(dev);
	if (rc) {
		free_irq(rp->pdev->irq, dev);
		return rc;
	}
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	rhine_chip_reset(dev);
	init_registers(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
		       "MII status: %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd),
		       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

	return 0;
}
static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
	       "%4.4x, resetting...\n",
	       dev->name, ioread16(ioaddr + IntrStatus),
	       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	/* protect against concurrent rx interrupts */
	disable_irq(rp->pdev->irq);

	spin_lock(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock(&rp->lock);
	enable_irq(rp->pdev->irq);

	dev->trans_start = jiffies;
	rp->stats.tx_errors++;
	netif_wake_queue(dev);
}
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	if (skb_padto(skb, ETH_ZLEN))
		return 0;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb(skb);
			rp->tx_skbuff[entry] = NULL;
			rp->stats.tx_dropped++;
			return 0;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			pci_map_single(rp->pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	/* lock eth irq */
	spin_lock_irq(&rp->lock);
	wmb();
	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
		 ioaddr + ChipCmd1);
	IOSYNC;

	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	spin_unlock_irq(&rp->lock);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
		       dev->name, rp->cur_tx-1, entry);
	}
	return 0;
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;
	int boguscnt = max_interrupt_work;
	int handled = 0;

	while ((intr_status = get_intr_status(dev))) {
		handled = 1;

		/* Acknowledge all of the current interrupt sources ASAP. */
		if (intr_status & IntrTxDescRace)
			iowrite8(0x08, ioaddr + IntrStatus2);
		iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
		IOSYNC;

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
			       dev->name, intr_status);

		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
			rhine_rx(dev);

		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
			if (intr_status & IntrTxErrSummary) {
				/* Avoid scavenging before Tx engine turned off */
				RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
				if (debug > 2 &&
				    ioread8(ioaddr+ChipCmd) & CmdTxOn)
					printk(KERN_WARNING "%s: "
					       "rhine_interrupt() Tx engine "
					       "still on.\n", dev->name);
			}
			rhine_tx(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange |
				   IntrStatsMax | IntrTxError | IntrTxAborted |
				   IntrTxUnderrun | IntrTxDescRace))
			rhine_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=%#8.8x.\n",
			       dev->name, intr_status);
			break;
		}
	}

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
		       dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

	spin_lock(&rp->lock);

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		if (debug > 6)
			printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
			       entry, txstatus);
		if (txstatus & DescOwn)
			break;
		if (txstatus & 0x8000) {
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, "
				       "Tx status %8.8x.\n",
				       dev->name, txstatus);
			rp->stats.tx_errors++;
			if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
			if (txstatus & 0x0200) rp->stats.tx_window_errors++;
			if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
			if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				rp->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (rp->quirks & rqRhineI)
				rp->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				rp->stats.collisions += txstatus & 0x0F;
			if (debug > 6)
				printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
				       (txstatus >> 3) & 0xF,
				       txstatus & 0xF);
			rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
			rp->stats.tx_packets++;
		}
		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			pci_unmap_single(rp->pdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);

	spin_unlock(&rp->lock);
}
/* This routine is logically part of the interrupt handler, but isolated
   for clarity and better register allocation. */
static void rhine_rx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int entry = rp->cur_rx % RX_RING_SIZE;
	int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
		       dev->name, entry,
		       le32_to_cpu(rp->rx_head_desc->rx_status));
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		int data_size = desc_status >> 16;

		if (debug > 4)
			printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
			       desc_status);
		if (--boguscnt < 0)
			break;
		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame spanned multiple buffers, entry "
				       "%#x length %d status %8.8x!\n",
				       dev->name, entry, data_size,
				       desc_status);
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame %p vs %p.\n", dev->name,
				       rp->rx_head_desc, &rp->rx_ring[entry]);
				rp->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				if (debug > 2)
					printk(KERN_DEBUG "rhine_rx() Rx "
					       "error was %8.8x.\n",
					       desc_status);
				rp->stats.rx_errors++;
				if (desc_status & 0x0030) rp->stats.rx_length_errors++;
				if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
				if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					rp->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(rp->pdev,
							    rp->rx_skbuff_dma[entry],
							    rp->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				eth_copy_and_sum(skb,
						 rp->rx_skbuff[entry]->data,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(rp->pdev,
							       rp->rx_skbuff_dma[entry],
							       rp->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx "
					       "descriptor chain.\n",
					       dev->name);
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(rp->pdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			rp->stats.rx_bytes += pkt_len;
			rp->stats.rx_packets++;
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
		if (rp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(rp->rx_buf_sz);
			rp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			skb->dev = dev;	/* Mark as being used by this device. */
			rp->rx_skbuff_dma[entry] =
				pci_map_single(rp->pdev, skb->data,
					       rp->rx_buf_sz,
					       PCI_DMA_FROMDEVICE);
			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
		}
		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}
}
/*
 * Clears the "tally counters" for CRC errors and missed frames(?).
 * It has been reported that some chips need a write of 0 to clear
 * these, for others the counters are set to 1 when written to and
 * instead cleared when read. So we clear them both ways ...
 */
static inline void clear_tally_counters(void __iomem *ioaddr)
{
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}
static void rhine_restart_tx(struct net_device *dev) {
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = get_intr_status(dev);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	}
	else {
		/* This should never happen */
		if (debug > 1)
			printk(KERN_WARNING "%s: rhine_restart_tx() "
			       "Another error occurred %8.8x.\n",
			       dev->name, intr_status);
	}
}
static void rhine_error(struct net_device *dev, int intr_status)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock(&rp->lock);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);
	if (intr_status & IntrStatsMax) {
		rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
		rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
		clear_tally_counters(ioaddr);
	}
	if (intr_status & IntrTxAborted) {
		if (debug > 1)
			printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
			       dev->name, intr_status);
	}
	if (intr_status & IntrTxUnderrun) {
		if (rp->tx_thresh < 0xE0)
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		if (debug > 1)
			printk(KERN_INFO "%s: Transmitter underrun, Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & IntrTxDescRace) {
		if (debug > 2)
			printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
			       dev->name);
	}
	if ((intr_status & IntrTxError) &&
	    (intr_status & (IntrTxAborted |
	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
		if (rp->tx_thresh < 0xE0) {
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		}
		if (debug > 1)
			printk(KERN_INFO "%s: Unspecified error. Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
			   IntrTxError))
		rhine_restart_tx(dev);

	if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
			    IntrTxError | IntrTxAborted | IntrNormalSummary |
			    IntrTxDescRace)) {
		if (debug > 1)
			printk(KERN_ERR "%s: Something Wicked happened! "
			       "%8.8x.\n", dev->name, intr_status);
	}

	spin_unlock(&rp->lock);
}
static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned long flags;

	spin_lock_irqsave(&rp->lock, flags);
	rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
	rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
	clear_tally_counters(ioaddr);
	spin_unlock_irqrestore(&rp->lock, flags);

	return &rp->stats;
}
static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode;		/* Note: 0x02=accept runt, 0x01=accept errs */

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
		       dev->name);
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	} else {
		struct dev_mc_list *mclist;
		int i;
		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	}
	iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
}
static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct rhine_private *rp = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(rp->pdev));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&rp->lock);
	rc = mii_ethtool_gset(&rp->mii_if, cmd);
	spin_unlock_irq(&rp->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&rp->lock);
	rc = mii_ethtool_sset(&rp->mii_if, cmd);
	spin_unlock_irq(&rp->lock);
	rhine_set_carrier(&rp->mii_if);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_nway_restart(&rp->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_link_ok(&rp->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}

static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);
	u32 support = WAKE_PHY | WAKE_MAGIC |
		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */

	if (!(rp->quirks & rqWOL))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);

	return 0;
}

static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
	.get_sg			= ethtool_op_get_sg,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&rp->lock);
	rhine_set_carrier(&rp->mii_if);

	return rc;
}
static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock_irq(&rp->lock);

	netif_stop_queue(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, "
		       "status was %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	spin_unlock_irq(&rp->lock);

	free_irq(rp->pdev->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}
static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static void rhine_shutdown (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + 0xA7);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	/* Hit power state D3 (sleep) */
	iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

	/* TODO: Check use of pci_enable_wake() */
}
#ifdef CONFIG_PM
static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);
	pci_save_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
	rhine_shutdown(pdev);
	spin_unlock_irqrestore(&rp->lock, flags);

	free_irq(dev->irq, dev);
	return 0;
}

static int rhine_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	if (!netif_running(dev))
		return 0;

	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
		printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);

	ret = pci_set_power_state(pdev, PCI_D0);
	if (debug > 1)
		printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
		       dev->name, ret ? "failed" : "succeeded", ret);

	pci_restore_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
	rhine_power_init(dev);
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	init_registers(dev);
	spin_unlock_irqrestore(&rp->lock, flags);

	netif_device_attach(dev);

	return 0;
}
#endif /* CONFIG_PM */
static struct pci_driver rhine_driver = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one,
	.remove		= __devexit_p(rhine_remove_one),
#ifdef CONFIG_PM
	.suspend	= rhine_suspend,
	.resume		= rhine_resume,
#endif /* CONFIG_PM */
	.shutdown	= rhine_shutdown,
};


static int __init rhine_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_module_init(&rhine_driver);
}


static void __exit rhine_cleanup(void)
{
	pci_unregister_driver(&rhine_driver);
}


module_init(rhine_init);
module_exit(rhine_cleanup);