/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Roger Luethi <rl@hellgate.ch>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html

	Linux kernel version history:

	LK1.1.0:
	- Jeff Garzik: softnet 'n stuff

	LK1.1.1:
	- Justin Guyett: softnet and locking fixes
	- Jeff Garzik: use PCI interface

	LK1.1.2:
	- Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions

	LK1.1.3:
	- Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
	  code) update "Theory of Operation" with
	  softnet/locking changes
	- Dave Miller: PCI DMA and endian fixups
	- Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation

	LK1.1.4:
	- Urban Widmark: fix gcc 2.95.2 problem and
	  remove writel's to fixed address 0x7c

	LK1.1.5:
	- Urban Widmark: mdio locking, bounce buffer changes
	  merges from Becker's 1.05 version
	  added netif_running_on/off support

	LK1.1.6:
	- Urban Widmark: merges from Becker's 1.08b version (VT6102 + mdio)
	  set netif_running_on/off on startup, del_timer_sync

	LK1.1.7:
	- Manfred Spraul: added reset into tx_timeout

	LK1.1.9:
	- Urban Widmark: merges from Becker's 1.10 version
	  (media selection + eeprom reload)
	- David Vrabel: merges from D-Link "1.11" version
	  (disable WOL and PME on startup)

	LK1.1.10:
	- Manfred Spraul: use "singlecopy" for unaligned buffers
	  don't allocate bounce buffers for !ReqTxAlign cards

	LK1.1.11:
	- David Woodhouse: Set dev->base_addr before the first time we call
	  wait_for_reset(). It's a lot happier that way.
	  Free np->tx_bufs only if we actually allocated it.

	LK1.1.12:
	- Martin Eriksson: Allow Memory-Mapped IO to be enabled.

	LK1.1.13 (jgarzik):
	- Add ethtool support
	- Replace some MII-related magic numbers with constants

	LK1.1.14 (Ivan G.):
	- fixes comments for Rhine-III
	- removes W_MAX_TIMEOUT (unused)
	- adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card
	  is R-I and has Davicom chip, flag is referenced in kernel driver)
	- sends chip_id as a parameter to wait_for_reset since np is not
	  initialized on first call
	- changes mmio "else if (chip_id==VT6102)" to "else" so it will work
	  for Rhine-III's (documentation says same bit is correct)
	- transmit frame queue message is off by one - fixed
	- adds IntrNormalSummary to "Something Wicked" exclusion list
	  so normal interrupts will not trigger the message (src: Donald Becker)
	(Roger Luethi)
	- show confused chip where to continue after Tx error
	- location of collision counter is chip specific
	- allow selecting backoff algorithm (module parameter)

	LK1.1.15 (jgarzik):
	- Use new MII lib helper generic_mii_ioctl

	LK1.1.16 (Roger Luethi)
	- Etherleak fix
	- Handle Tx buffer underrun
	- Fix bugs in full duplex handling
	- New reset code uses "force reset" cmd on Rhine-II
	- Various clean ups

	LK1.1.17 (Roger Luethi)
	- Fix race in via_rhine_start_tx()
	- On errors, wait for Tx engine to turn off before scavenging
	- Handle Tx descriptor write-back race on Rhine-II
	- Force flushing for PCI posted writes
	- More reset code changes

	LK1.1.18 (Roger Luethi)
	- No filtering multicast in promisc mode (Edward Peng)
	- Fix for Rhine-I Tx timeouts

	LK1.1.19 (Roger Luethi)
	- Increase Tx threshold for unspecified errors

	LK1.2.0-2.6 (Roger Luethi)
	- Massive clean-up
	- Rewrite PHY, media handling (remove options, full_duplex, backoff)
	- Fix Tx engine race for good
	- Craig Brind: Zero padded aligned buffers for short packets.

*/

#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.2.0-2.6"
#define DRV_RELDATE	"June-10-2004"


/* A few user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;	/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#define RX_RING_SIZE	16
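
/* With power-of-two sizes, ring-index arithmetic such as
   cur_tx % TX_RING_SIZE below compiles down to a simple bit mask
   (cur_tx & (TX_RING_SIZE - 1)) rather than a division. */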


/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */
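/* 1536 covers a maximal 1514-byte Ethernet frame plus the 4-byte CRC,
   with some slack left over. */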

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";

/* This driver was written to use PCI memory space. Some early versions
   of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#endif

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");

/*
		Theory of Operation

I. Board Compatibility

This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller; as noted above, it also works with the later Rhine-II and
Rhine-III chips.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->priv->lock spinlock. The other thread is the interrupt handler, which
is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
is not available it stops the transmit queue by calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Rx ring are available the transmit queue is woken up if it was stopped.
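
Concretely, rhine_start_tx() takes rp->lock with interrupts disabled
(spin_lock_irq), while the interrupt-context paths (rhine_tx(), rhine_error()
and the CRC-counter update in rhine_rx()) use the plain spin_lock.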

IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not a reliable source of information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/


/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
*/

enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
};

/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
 */

/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
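
/* Reading back any chip register flushes preceding posted writes out to the
   device; StationAddr is used here simply as a harmless register to read. */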

static struct pci_device_id rhine_pci_tbl[] =
{
	{0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT86C100A */
	{0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6102 */
	{0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* 6105{,L,LOM} */
	{0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);


/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

#ifdef USE_MMIO
/* Registers we check that mmio and reg are the same. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};
#endif

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
	IntrPCIErr=0x0040,
	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
	IntrRxWakeUp=0x8000,
	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
	IntrTxErrSummary=0x082218,
};
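
/* Bits 16 and up mirror IntrStatus2: get_intr_status() shifts that 8-bit
   register left by 16, so its bit 3 (0x08) shows up here as 0x080000. */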

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	s32 rx_status;
	u32 desc_length; /* Chain flag, Buffer/frame length */
	u32 addr;
	u32 next_desc;
};
struct tx_desc {
	s32 tx_status;
	u32 desc_length; /* Chain flag, Tx Config, Frame length */
	u32 addr;
	u32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000
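/* Besides the length in bits 0-10, TXDESC sets the chain-buffer flag
   (bit 15) and three bits of the per-descriptor Tx Config byte (21-23),
   most likely start-of-frame, end-of-frame and interrupt-on-completion;
   rhine_start_tx() ORs the frame length into the low bits. */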

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_private {
	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;
	long pioaddr;
	struct net_device_stats stats;
	spinlock_t lock;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	struct work_struct tx_timeout_task;
	struct work_struct check_media_task;
	void __iomem *base;
};

static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
static void rhine_tx_timeout(struct net_device *dev);
static void rhine_tx_timeout_task(struct net_device *dev);
static void rhine_check_media_task(struct net_device *dev);
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static void rhine_tx(struct net_device *dev);
static void rhine_rx(struct net_device *dev);
static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
static void rhine_shutdown (struct pci_dev *pdev);

#define RHINE_WAIT_FOR(condition) do {					\
	int i=1024;							\
	while (!(condition) && --i)					\
		;							\
	if (debug > 1 && i < 512)					\
		printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n",	\
				DRV_NAME, 1024-i, __func__, __LINE__);	\
} while(0)
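
/* RHINE_WAIT_FOR() busy-waits for at most 1024 polls of the condition; at
   debug > 1 it reports call sites that burned more than half that budget. */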

static inline u32 get_intr_status(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}
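
/* Note that reading IntrStatus acknowledges nothing; callers ack by writing
   the low 16 bits back to IntrStatus and, for the Rhine-II write-back race
   bit, 0x08 to IntrStatus2 (see rhine_interrupt()). */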

/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
			       DRV_NAME, reason);
		}
	}
}

static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		printk(KERN_INFO "%s: Reset not complete yet. "
		       "Trying harder.\n", DRV_NAME);

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
	}

	if (debug > 1)
		printk(KERN_INFO "%s: Reset %s.\n", dev->name,
		       (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
		       "failed" : "succeeded");
}

#ifdef USE_MMIO
static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;
	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(pioaddr + ConfigA) | 0x20;
		outb(n, pioaddr + ConfigA);
	} else {
		n = inb(pioaddr + ConfigD) | 0x80;
		outb(n, pioaddr + ConfigD);
	}
}
#endif

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev, NULL);
	enable_irq(dev->irq);
}
#endif

static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	rhine_reload_eeprom(pioaddr, dev);
}

static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc;
	u8 pci_rev;
	u32 quirks;
	long pioaddr;
	long memaddr;
	void __iomem *ioaddr;
	int io_size, phy_id;
	const char *name;
#ifdef USE_MMIO
	int bar = 1;
#else
	int bar = 0;
#endif

	/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);

	io_size = 256;
	phy_id = 0;
	quirks = 0;
	name = "Rhine";
	if (pci_rev < VTunknown0) {
		quirks = rqRhineI;
		io_size = 128;
	}
	else if (pci_rev >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pci_rev < VT6105) {
			name = "Rhine II";
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		}
		else {
			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
			if (pci_rev >= VT6105_B0)
				quirks |= rq6patterns;
			if (pci_rev < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* this should always be supported */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
		       "the card!?\n");
		goto err_out;
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		printk(KERN_ERR "Insufficient PCI resources, aborting\n");
		goto err_out;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		printk(KERN_ERR "alloc_etherdev failed\n");
		goto err_out;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	rp = netdev_priv(dev);
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->pdev = pdev;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_free_netdev;

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
		       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);
		if (a != b) {
			rc = -EIO;
			printk(KERN_ERR "MMIO do not match PIO [%02x] "
			       "(%02x != %02x)\n", reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */

	dev->base_addr = (unsigned long)ioaddr;
	rp->base = ioaddr;

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		rc = -EIO;
		printk(KERN_ERR "Invalid MAC address\n");
		goto err_out_unmap;
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	dev->irq = pdev->irq;

	spin_lock_init(&rp->lock);
	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->open = rhine_open;
	dev->hard_start_xmit = rhine_start_tx;
	dev->stop = rhine_close;
	dev->get_stats = rhine_get_stats;
	dev->set_multicast_list = rhine_set_rx_mode;
	dev->do_ioctl = netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = rhine_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rhine_poll;
#endif
	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	INIT_WORK(&rp->tx_timeout_task,
		  (void (*)(void *))rhine_tx_timeout_task, dev);

	INIT_WORK(&rp->check_media_task,
		  (void (*)(void *))rhine_check_media_task, dev);

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_unmap;

	printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
	       dev->name, name,
#ifdef USE_MMIO
	       memaddr
#else
	       (long)ioaddr
#endif
	       );

	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);

	pci_set_drvdata(pdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			printk(KERN_INFO "%s: MII PHY found at address "
			       "%d, status 0x%4.4x advertising %4.4x "
			       "Link %4.4x.\n", dev->name, phy_id,
			       mii_status, rp->mii_if.advertising,
			       mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}
	rp->mii_if.phy_id = phy_id;

	return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}

static int alloc_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void *ring;
	dma_addr_t ring_dma;

	ring = pci_alloc_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    &ring_dma);
	if (!ring) {
		printk(KERN_ERR "Could not allocate DMA memory.\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
						   PKT_BUF_SZ * TX_RING_SIZE,
						   &rp->tx_bufs_dma);
		if (rp->tx_bufs == NULL) {
			pci_free_consistent(rp->pdev,
					    RX_RING_SIZE * sizeof(struct rx_desc) +
					    TX_RING_SIZE * sizeof(struct tx_desc),
					    ring, ring_dma);
			return -ENOMEM;
		}
	}

	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}

static void free_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	pci_free_consistent(rp->pdev,
			    RX_RING_SIZE * sizeof(struct rx_desc) +
			    TX_RING_SIZE * sizeof(struct tx_desc),
			    rp->rx_ring, rp->rx_ring_dma);
	rp->tx_ring = NULL;

	if (rp->tx_bufs)
		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
				    rp->tx_bufs, rp->tx_bufs_dma);

	rp->tx_bufs = NULL;
}

static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_rx = rp->cur_rx = 0;

	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;	/* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);

		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
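	/* The statement below wraps intentionally when i < RX_RING_SIZE:
	   it makes cur_rx - dirty_rx equal the number of slots still
	   lacking an skbuff, so rhine_rx()'s refill loop retries the
	   allocation. */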
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}

static void free_rbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (rp->rx_skbuff[i]) {
			pci_unmap_single(rp->pdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		}
		rp->rx_skbuff[i] = NULL;
	}
}

static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
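	/* As on the Rx side, close the ring: the last descriptor points
	   back to the first. */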
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
}

static void free_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (rp->tx_skbuff[i]) {
			if (rp->tx_skbuff_dma[i]) {
				pci_unmap_single(rp->pdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}

static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	mii_check_media(&rp->mii_if, debug, init_media);

	if (rp->mii_if.full_duplex)
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	else
		iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
			 ioaddr + ChipCmd1);
}

static void rhine_check_media_task(struct net_device *dev)
{
	rhine_check_media(dev, 0);
}

static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
		  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
		  IntrTxDone | IntrTxError | IntrTxUnderrun |
		  IntrPCIErr | IntrStatsMax | IntrLinkChange,
		  ioaddr + IntrEnable);

	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
		  ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}

/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));

	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}

/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
	iowrite8(0, ioaddr + MIICmd);

	if (quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR

		/* Do not call from ISR! */
		msleep(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}

/* Read and write over the MII Management Data I/O (MDIO) interface. */

static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);	/* Trigger read */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(ioaddr);
	return result;
}

static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);	/* Trigger write */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));

	rhine_enable_linkmon(ioaddr);
}

static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name,
			 dev);
	if (rc)
		return rc;

	if (debug > 1)
		printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
		       dev->name, rp->pdev->irq);

	rc = alloc_ring(dev);
	if (rc) {
		free_irq(rp->pdev->irq, dev);
		return rc;
	}
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	rhine_chip_reset(dev);
	init_registers(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
		       "MII status: %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd),
		       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

	return 0;
}

static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	/*
	 * Move bulk of work outside of interrupt context
	 */
	schedule_work(&rp->tx_timeout_task);
}

static void rhine_tx_timeout_task(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
	       "%4.4x, resetting...\n",
	       dev->name, ioread16(ioaddr + IntrStatus),
	       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	/* protect against concurrent rx interrupts */
	disable_irq(rp->pdev->irq);

	spin_lock(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock(&rp->lock);
	enable_irq(rp->pdev->irq);

	dev->trans_start = jiffies;
	rp->stats.tx_errors++;
	netif_wake_queue(dev);
}

static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	if (skb->len < ETH_ZLEN) {
		skb = skb_padto(skb, ETH_ZLEN);
		if (skb == NULL)
			return 0;
	}

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb(skb);
			rp->tx_skbuff[entry] = NULL;
			rp->stats.tx_dropped++;
			return 0;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			pci_map_single(rp->pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	/* lock eth irq */
	spin_lock_irq(&rp->lock);
	wmb();
	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
		 ioaddr + ChipCmd1);
	IOSYNC;

	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);
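	/* The queue stays stopped until rhine_tx() has reaped enough
	   descriptors to bring the backlog below TX_QUEUE_LEN - 4. */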
	dev->trans_start = jiffies;

	spin_unlock_irq(&rp->lock);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
		       dev->name, rp->cur_tx-1, entry);
	}
	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;
	int boguscnt = max_interrupt_work;
	int handled = 0;

	while ((intr_status = get_intr_status(dev))) {
		handled = 1;

		/* Acknowledge all of the current interrupt sources ASAP. */
		if (intr_status & IntrTxDescRace)
			iowrite8(0x08, ioaddr + IntrStatus2);
		iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
		IOSYNC;

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
			       dev->name, intr_status);

		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
			rhine_rx(dev);

		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
			if (intr_status & IntrTxErrSummary) {
				/* Avoid scavenging before Tx engine turned off */
				RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
				if (debug > 2 &&
				    ioread8(ioaddr+ChipCmd) & CmdTxOn)
					printk(KERN_WARNING "%s: "
					       "rhine_interrupt() Tx engine "
					       "still on.\n", dev->name);
			}
			rhine_tx(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange |
				   IntrStatsMax | IntrTxError | IntrTxAborted |
				   IntrTxUnderrun | IntrTxDescRace))
			rhine_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=%#8.8x.\n",
			       dev->name, intr_status);
			break;
		}
	}

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
		       dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

	spin_lock(&rp->lock);

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		if (debug > 6)
			printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
			       entry, txstatus);
		if (txstatus & DescOwn)
			break;
		if (txstatus & 0x8000) {
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, "
				       "Tx status %8.8x.\n",
				       dev->name, txstatus);
			rp->stats.tx_errors++;
			if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
			if (txstatus & 0x0200) rp->stats.tx_window_errors++;
			if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
			if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				rp->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (rp->quirks & rqRhineI)
				rp->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				rp->stats.collisions += txstatus & 0x0F;
			if (debug > 6)
				printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
				       (txstatus >> 3) & 0xF,
				       txstatus & 0xF);
			rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
			rp->stats.tx_packets++;
		}
		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			pci_unmap_single(rp->pdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);

	spin_unlock(&rp->lock);
}

/* This routine is logically part of the interrupt handler, but isolated
   for clarity and better register allocation. */
static void rhine_rx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int entry = rp->cur_rx % RX_RING_SIZE;
	int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;
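	/* boguscnt bounds this pass by the number of descriptors that
	   currently hold a buffer: RX_RING_SIZE minus the entries still
	   awaiting refill. */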

	if (debug > 4) {
		printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
		       dev->name, entry,
		       le32_to_cpu(rp->rx_head_desc->rx_status));
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		int data_size = desc_status >> 16;

		if (debug > 4)
			printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
			       desc_status);
		if (--boguscnt < 0)
			break;
		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame spanned multiple buffers, entry "
				       "%#x length %d status %8.8x!\n",
				       dev->name, entry, data_size,
				       desc_status);
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame %p vs %p.\n", dev->name,
				       rp->rx_head_desc, &rp->rx_ring[entry]);
				rp->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				if (debug > 2)
					printk(KERN_DEBUG "rhine_rx() Rx "
					       "error was %8.8x.\n",
					       desc_status);
				rp->stats.rx_errors++;
				if (desc_status & 0x0030) rp->stats.rx_length_errors++;
				if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
				if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					rp->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(rp->pdev,
							    rp->rx_skbuff_dma[entry],
							    rp->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				eth_copy_and_sum(skb,
						 rp->rx_skbuff[entry]->data,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(rp->pdev,
							       rp->rx_skbuff_dma[entry],
							       rp->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx "
					       "descriptor chain.\n",
					       dev->name);
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(rp->pdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			rp->stats.rx_bytes += pkt_len;
			rp->stats.rx_packets++;
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
		if (rp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(rp->rx_buf_sz);
			rp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			skb->dev = dev;	/* Mark as being used by this device. */
			rp->rx_skbuff_dma[entry] =
				pci_map_single(rp->pdev, skb->data,
					       rp->rx_buf_sz,
					       PCI_DMA_FROMDEVICE);
			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
		}
		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}
}

/*
 * Clears the "tally counters" for CRC errors and missed frames(?).
 * It has been reported that some chips need a write of 0 to clear
 * these, for others the counters are set to 1 when written to and
 * instead cleared when read. So we clear them both ways ...
 */
static inline void clear_tally_counters(void __iomem *ioaddr)
{
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}

static void rhine_restart_tx(struct net_device *dev) {
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = get_intr_status(dev);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	}
	else {
		/* This should never happen */
		if (debug > 1)
			printk(KERN_WARNING "%s: rhine_restart_tx() "
			       "Another error occurred %8.8x.\n",
			       dev->name, intr_status);
	}
}

static void rhine_error(struct net_device *dev, int intr_status)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock(&rp->lock);

	if (intr_status & IntrLinkChange)
		schedule_work(&rp->check_media_task);
	if (intr_status & IntrStatsMax) {
		rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
		rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
		clear_tally_counters(ioaddr);
	}
	if (intr_status & IntrTxAborted) {
		if (debug > 1)
			printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
			       dev->name, intr_status);
	}
	if (intr_status & IntrTxUnderrun) {
		if (rp->tx_thresh < 0xE0)
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		if (debug > 1)
			printk(KERN_INFO "%s: Transmitter underrun, Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & IntrTxDescRace) {
		if (debug > 2)
			printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
			       dev->name);
	}
	if ((intr_status & IntrTxError) &&
	    (intr_status & (IntrTxAborted |
	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
		if (rp->tx_thresh < 0xE0) {
			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
		}
		if (debug > 1)
			printk(KERN_INFO "%s: Unspecified error. Tx "
			       "threshold now %2.2x.\n",
			       dev->name, rp->tx_thresh);
	}
	if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
			   IntrTxError))
		rhine_restart_tx(dev);

	if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
			    IntrTxError | IntrTxAborted | IntrNormalSummary |
			    IntrTxDescRace)) {
		if (debug > 1)
			printk(KERN_ERR "%s: Something Wicked happened! "
			       "%8.8x.\n", dev->name, intr_status);
	}

	spin_unlock(&rp->lock);
}

static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned long flags;

	spin_lock_irqsave(&rp->lock, flags);
	rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
	rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
	clear_tally_counters(ioaddr);
	spin_unlock_irqrestore(&rp->lock, flags);

	return &rp->stats;
}

static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode;		/* Note: 0x02=accept runt, 0x01=accept errs */

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
		       dev->name);
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	} else {
		struct dev_mc_list *mclist;
		int i;
		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
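		/* The top 6 bits of each address CRC select one of the 64
		   hash bits, split across the two 32-bit filter registers. */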
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
		rx_mode = 0x0C;
	}
	iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
}

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct rhine_private *rp = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(rp->pdev));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&rp->lock);
	rc = mii_ethtool_gset(&rp->mii_if, cmd);
	spin_unlock_irq(&rp->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&rp->lock);
	rc = mii_ethtool_sset(&rp->mii_if, cmd);
	spin_unlock_irq(&rp->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_nway_restart(&rp->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_link_ok(&rp->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}

static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);
	u32 support = WAKE_PHY | WAKE_MAGIC |
		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */

	if (!(rp->quirks & rqWOL))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);

	return 0;
}

static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
	.get_sg			= ethtool_op_get_sg,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&rp->lock);

	return rc;
}

static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock_irq(&rp->lock);

	netif_stop_queue(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, "
		       "status was %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	spin_unlock_irq(&rp->lock);

	free_irq(rp->pdev->irq, dev);

	flush_scheduled_work();

	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}


static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static void rhine_shutdown (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + 0xA7);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	/* Hit power state D3 (sleep) */
	iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

	/* TODO: Check use of pci_enable_wake() */
}


#ifdef CONFIG_PM
static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);
	pci_save_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
	rhine_shutdown(pdev);
	spin_unlock_irqrestore(&rp->lock, flags);

	free_irq(dev->irq, dev);
	return 0;
}

static int rhine_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	if (!netif_running(dev))
		return 0;

	if (request_irq(dev->irq, rhine_interrupt, SA_SHIRQ, dev->name, dev))
		printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);

	ret = pci_set_power_state(pdev, PCI_D0);
	if (debug > 1)
		printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
		       dev->name, ret ? "failed" : "succeeded", ret);

	pci_restore_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
	rhine_power_init(dev);
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	init_registers(dev);
	spin_unlock_irqrestore(&rp->lock, flags);

	netif_device_attach(dev);

	return 0;
}
#endif /* CONFIG_PM */

static struct pci_driver rhine_driver = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one,
	.remove		= __devexit_p(rhine_remove_one),
#ifdef CONFIG_PM
	.suspend	= rhine_suspend,
	.resume		= rhine_resume,
#endif /* CONFIG_PM */
	.shutdown	= rhine_shutdown,
};


static int __init rhine_init(void)
{
	/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_module_init(&rhine_driver);
}


static void __exit rhine_cleanup(void)
{
	pci_unregister_driver(&rhine_driver);
}


module_init(rhine_init);
module_exit(rhine_cleanup);