- David Miller: sparc and net updates. Fix merge_segments.
[davej-history.git] / drivers / net / via-rhine.c
bloba1d4629af1783523e371c88174e120ce3eee8dfe
1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2 /*
3 Written 1998-2000 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 This driver is designed for the VIA VT86c100A Rhine-II PCI Fast Ethernet
13 controller. It also works with the older 3043 Rhine-I chip.
15 The author may be reached as becker@scyld.com, or C/O
16 Scyld Computing Corporation
17 410 Severn Ave., Suite 210
18 Annapolis MD 21403
21 This driver contains some changes from the original Donald Becker
22 version. He may or may not be interested in bug reports on this
23 code. You can find his versions at:
24 http://www.scyld.com/network/via-rhine.html
27 Linux kernel version history:
29 LK1.1.0:
30 - Jeff Garzik: softnet 'n stuff
32 LK1.1.1:
33 - Justin Guyett: softnet and locking fixes
34 - Jeff Garzik: use PCI interface
36 LK1.1.2:
37 - Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions
39 LK1.1.3:
40 - Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
41 code) update "Theory of Operation" with
42 softnet/locking changes
43 - Dave Miller: PCI DMA and endian fixups
44 - Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation
46 LK1.1.4:
47 - Urban Widmark: fix gcc 2.95.2 problem and
48 remove writel's to fixed address 0x7c
50 LK1.1.5:
51 - Urban Widmark: mdio locking, bounce buffer changes
52 merges from Beckers 1.05 version
53 added netif_running_on/off support
55 LK1.1.6:
56 - Urban Widmark: merges from Beckers 1.08b version (VT6102 + mdio)
57 set netif_running_on/off on startup, del_timer_sync
61 /* A few user-configurable values.
62 These may be modified when a driver module is loaded. */
64 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
65 static int max_interrupt_work = 20;
67 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
68 Setting to > 1518 effectively disables this feature. */
69 static int rx_copybreak = 0;
71 /* Used to pass the media type, etc.
72 Both 'options[]' and 'full_duplex[]' should exist for driver
73 interoperability.
74 The media type is usually passed in 'options[]'.
76 #define MAX_UNITS 8 /* More are supported, limit only on options */
77 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
78 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
80 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
81 The Rhine has a 64 element 8390-like hash table. */
82 static const int multicast_filter_limit = 32;
85 /* Operational parameters that are set at compile time. */
87 /* Keep the ring sizes a power of two for compile efficiency.
88 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
89 Making the Tx ring too large decreases the effectiveness of channel
90 bonding and packet priority.
91 There are no ill effects from too-large receive rings. */
92 #define TX_RING_SIZE 16
93 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
94 #define RX_RING_SIZE 16
97 /* Operational parameters that usually are not changed. */
99 /* Time in jiffies before concluding the transmitter is hung. */
100 #define TX_TIMEOUT (2*HZ)
102 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
105 #if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
106 #warning You must compile this file with the correct options!
107 #warning See the last lines of the source file.
108 #error You must compile this driver with "-O".
109 #endif
111 #include <linux/module.h>
112 #include <linux/kernel.h>
113 #include <linux/string.h>
114 #include <linux/timer.h>
115 #include <linux/errno.h>
116 #include <linux/ioport.h>
117 #include <linux/malloc.h>
118 #include <linux/interrupt.h>
119 #include <linux/pci.h>
120 #include <linux/netdevice.h>
121 #include <linux/etherdevice.h>
122 #include <linux/skbuff.h>
123 #include <linux/init.h>
124 #include <asm/processor.h> /* Processor type for cache alignment. */
125 #include <asm/bitops.h>
126 #include <asm/io.h>
128 /* These identify the driver base version and may not be removed. */
129 static char version1[] __devinitdata =
130 "via-rhine.c:v1.08b-LK1.1.6 8/9/2000 Written by Donald Becker\n";
131 static char version2[] __devinitdata =
132 " http://www.scyld.com/network/via-rhine.html\n";
136 /* This driver was written to use PCI memory space, however most versions
137 of the Rhine only work correctly with I/O space accesses. */
138 #if defined(VIA_USE_MEMORY)
139 #warning Many adapters using the VIA Rhine chip are not configured to work
140 #warning with PCI memory space accesses.
141 #else
142 #define USE_IO
143 #undef readb
144 #undef readw
145 #undef readl
146 #undef writeb
147 #undef writew
148 #undef writel
149 #define readb inb
150 #define readw inw
151 #define readl inl
152 #define writeb outb
153 #define writew outw
154 #define writel outl
155 #endif
157 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
158 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
159 MODULE_PARM(max_interrupt_work, "i");
160 MODULE_PARM(debug, "i");
161 MODULE_PARM(rx_copybreak, "i");
162 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
163 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
167 Theory of Operation
169 I. Board Compatibility
171 This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
172 controller.
174 II. Board-specific settings
176 Boards with this chip are functional only in a bus-master PCI slot.
178 Many operational settings are loaded from the EEPROM to the Config word at
179 offset 0x78. This driver assumes that they are correct.
180 If this driver is compiled to use PCI memory space operations the EEPROM
181 must be configured to enable memory ops.
183 III. Driver operation
185 IIIa. Ring buffers
187 This driver uses two statically allocated fixed-size descriptor lists
188 formed into rings by a branch from the final descriptor to the beginning of
189 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
191 IIIb/c. Transmit/Receive Structure
193 This driver attempts to use a zero-copy receive and transmit scheme.
195 Alas, all data buffers are required to start on a 32 bit boundary, so
196 the driver must often copy transmit packets into bounce buffers.
198 The driver allocates full frame size skbuffs for the Rx ring buffers at
199 open() time and passes the skb->data field to the chip as receive data
200 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
201 a fresh skbuff is allocated and the frame is copied to the new skbuff.
202 When the incoming frame is larger, the skbuff is passed directly up the
203 protocol stack. Buffers consumed this way are replaced by newly allocated
204 skbuffs in the last phase of via_rhine_rx().
206 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
207 using a full-sized skbuff for small frames vs. the copying costs of larger
208 frames. New boards are typically used in generously configured machines
209 and the underfilled buffers have negligible impact compared to the benefit of
210 a single allocation size, so the default value of zero results in never
211 copying packets. When copying is done, the cost is usually mitigated by using
212 a combined copy/checksum routine. Copying also preloads the cache, which is
213 most useful with small frames.
215 Since the VIA chips are only able to transfer data to buffers on 32 bit
216 boundaries, the IP header at offset 14 in an ethernet frame isn't
217 longword aligned for further processing. Copying these unaligned buffers
218 has the beneficial effect of 16-byte aligning the IP header.
220 IIId. Synchronization
222 The driver runs as two independent, single-threaded flows of control. One
223 is the send-packet routine, which enforces single-threaded use by the
224 dev->priv->lock spinlock. The other thread is the interrupt handler, which
225 is single threaded by the hardware and interrupt handling software.
227 The send packet thread has partial control over the Tx ring. It locks the
228 dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
229 is not available it stops the transmit queue by calling netif_stop_queue.
231 The interrupt handler has exclusive control over the Rx ring and records stats
232 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
233 empty by incrementing the dirty_tx mark. If at least half of the entries in
234 the Rx ring are available the transmit queue is woken up if it was stopped.
236 IV. Notes
238 IVb. References
240 Preliminary VT86C100A manual from http://www.via.com.tw/
241 http://www.scyld.com/expert/100mbps.html
242 http://www.scyld.com/expert/NWay.html
244 IVc. Errata
246 The VT86C100A manual is not reliable information.
247 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
248 in significant performance degradation for bounce buffer copies on transmit
249 and unaligned IP headers on receive.
250 The chip does not pad to minimum transmit length.
256 /* This table drives the PCI probe routines. It's mostly boilerplate in all
257 of the drivers, and will likely be provided by some future kernel.
258 Note the matching code -- the first table entry matches all 56** cards but
259 second only the 1234 card.
262 enum pci_flags_bit {
263 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
264 PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
267 enum via_rhine_chips {
268 VT86C100A = 0,
269 VT6102,
270 VT3043,
273 struct via_rhine_chip_info {
274 const char *name;
275 u16 pci_flags;
276 int io_size;
277 int drv_flags;
281 enum chip_capability_flags {
282 CanHaveMII=1, HasESIPhy=2, HasDavicomPhy=4,
283 ReqTxAlign=0x10, HasWOL=0x20, };
285 #if defined(VIA_USE_MEMORY)
286 #define RHINE_IOTYPE (PCI_USES_MEM | PCI_USES_MASTER | PCI_ADDR1)
287 #define RHINEII_IOSIZE 4096
288 #else
289 #define RHINE_IOTYPE (PCI_USES_IO | PCI_USES_MASTER | PCI_ADDR0)
290 #define RHINEII_IOSIZE 256
291 #endif
293 /* directly indexed by enum via_rhine_chips, above */
294 static struct via_rhine_chip_info via_rhine_chip_info[] __devinitdata =
296 { "VIA VT86C100A Rhine", RHINE_IOTYPE, 128,
297 CanHaveMII | ReqTxAlign },
298 { "VIA VT6102 Rhine-II", RHINE_IOTYPE, RHINEII_IOSIZE,
299 CanHaveMII | HasWOL },
300 { "VIA VT3043 Rhine", RHINE_IOTYPE, 128,
301 CanHaveMII | ReqTxAlign }
304 static struct pci_device_id via_rhine_pci_tbl[] __devinitdata =
306 {0x1106, 0x6100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT86C100A},
307 {0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6102},
308 {0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT3043},
309 {0,} /* terminate list */
311 MODULE_DEVICE_TABLE(pci, via_rhine_pci_tbl);
314 /* Offsets to the device registers. */
315 enum register_offsets {
316 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
317 IntrStatus=0x0C, IntrEnable=0x0E,
318 MulticastFilter0=0x10, MulticastFilter1=0x14,
319 RxRingPtr=0x18, TxRingPtr=0x1C,
320 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
321 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72,
322 Config=0x78, ConfigA=0x7A, RxMissed=0x7C, RxCRCErrs=0x7E,
323 StickyHW=0x83, WOLcrClr=0xA4, WOLcgClr=0xA7, PwrcsrClr=0xAC,
326 /* Bits in the interrupt status/mask registers. */
327 enum intr_status_bits {
328 IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
329 IntrTxDone=0x0002, IntrTxAbort=0x0008, IntrTxUnderrun=0x0010,
330 IntrPCIErr=0x0040,
331 IntrStatsMax=0x0080, IntrRxEarly=0x0100, IntrMIIChange=0x0200,
332 IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
333 IntrTxAborted=0x2000, IntrLinkChange=0x4000,
334 IntrRxWakeUp=0x8000,
335 IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
338 /* MII interface, status flags.
339 Not to be confused with the MIIStatus register ... */
340 enum mii_status_bits {
341 MIICap100T4 = 0x8000,
342 MIICap10100HdFd = 0x7800,
343 MIIPreambleSupr = 0x0040,
344 MIIAutoNegCompleted = 0x0020,
345 MIIRemoteFault = 0x0010,
346 MIICapAutoNeg = 0x0008,
347 MIILink = 0x0004,
348 MIIJabber = 0x0002,
349 MIIExtended = 0x0001
352 /* The Rx and Tx buffer descriptors. */
353 struct rx_desc {
354 s32 rx_status;
355 u32 desc_length;
356 u32 addr;
357 u32 next_desc;
359 struct tx_desc {
360 s32 tx_status;
361 u32 desc_length;
362 u32 addr;
363 u32 next_desc;
366 /* Bits in *_desc.status */
367 enum rx_status_bits {
368 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
371 enum desc_status_bits {
372 DescOwn=0x80000000, DescEndPacket=0x4000, DescIntr=0x1000,
375 /* Bits in ChipCmd. */
376 enum chip_cmd_bits {
377 CmdInit=0x0001, CmdStart=0x0002, CmdStop=0x0004, CmdRxOn=0x0008,
378 CmdTxOn=0x0010, CmdTxDemand=0x0020, CmdRxDemand=0x0040,
379 CmdEarlyRx=0x0100, CmdEarlyTx=0x0200, CmdFDuplex=0x0400,
380 CmdNoTxPoll=0x0800, CmdReset=0x8000,
383 struct netdev_private {
384 /* Descriptor rings */
385 struct rx_desc *rx_ring;
386 struct tx_desc *tx_ring;
387 dma_addr_t rx_ring_dma;
388 dma_addr_t tx_ring_dma;
390 /* The addresses of receive-in-place skbuffs. */
391 struct sk_buff *rx_skbuff[RX_RING_SIZE];
392 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
394 /* The saved address of a sent-in-place packet/buffer, for later free(). */
395 struct sk_buff *tx_skbuff[TX_RING_SIZE];
396 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
398 /* Tx bounce buffers */
399 unsigned char *tx_buf[TX_RING_SIZE];
400 unsigned char *tx_bufs;
401 dma_addr_t tx_bufs_dma;
403 struct pci_dev *pdev;
404 struct net_device_stats stats;
405 struct timer_list timer; /* Media monitoring timer. */
406 spinlock_t lock;
408 /* Frequently used values: keep some adjacent for cache effect. */
409 int chip_id, drv_flags;
410 struct rx_desc *rx_head_desc;
411 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
412 unsigned int cur_tx, dirty_tx;
413 unsigned int rx_buf_sz; /* Based on MTU+slack. */
414 u16 chip_cmd; /* Current setting for ChipCmd */
416 /* These values are keep track of the transceiver/media in use. */
417 unsigned int full_duplex:1; /* Full-duplex operation requested. */
418 unsigned int duplex_lock:1;
419 unsigned int default_port:4; /* Last dev->if_port value. */
420 u8 tx_thresh, rx_thresh;
422 /* MII transceiver section. */
423 u16 advertising; /* NWay media advertisement */
424 unsigned char phys[2]; /* MII device addresses. */
425 u16 mii_status; /* last read MII status */
428 static int mdio_read(struct net_device *dev, int phy_id, int location);
429 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
430 static int via_rhine_open(struct net_device *dev);
431 static void via_rhine_check_duplex(struct net_device *dev);
432 static void via_rhine_timer(unsigned long data);
433 static void via_rhine_tx_timeout(struct net_device *dev);
434 static void via_rhine_init_ring(struct net_device *dev);
435 static int via_rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
436 static void via_rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
437 static void via_rhine_tx(struct net_device *dev);
438 static void via_rhine_rx(struct net_device *dev);
439 static void via_rhine_error(struct net_device *dev, int intr_status);
440 static void via_rhine_set_rx_mode(struct net_device *dev);
441 static struct net_device_stats *via_rhine_get_stats(struct net_device *dev);
442 static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
443 static int via_rhine_close(struct net_device *dev);
444 static inline void clear_tally_counters(long ioaddr);
447 static int __devinit via_rhine_init_one (struct pci_dev *pdev,
448 const struct pci_device_id *ent)
450 struct net_device *dev;
451 struct netdev_private *np;
452 int i, option;
453 int chip_id = (int) ent->driver_data;
454 int irq = pdev->irq;
455 static int card_idx = -1;
456 static int did_version = 0;
457 long ioaddr;
458 int io_size;
459 int pci_flags;
460 void *ring;
461 dma_addr_t ring_dma;
463 /* print version once and once only */
464 if (! did_version++) {
465 printk (KERN_INFO "%s", version1);
466 printk (KERN_INFO "%s", version2);
469 card_idx++;
470 option = card_idx < MAX_UNITS ? options[card_idx] : 0;
471 io_size = via_rhine_chip_info[chip_id].io_size;
472 pci_flags = via_rhine_chip_info[chip_id].pci_flags;
474 /* this should always be supported */
475 if (!pci_dma_supported(pdev, 0xffffffff)) {
476 printk(KERN_ERR "32-bit PCI DMA addresses not supported by the card!?\n");
477 goto err_out;
480 /* sanity check */
481 if ((pci_resource_len (pdev, 0) < io_size) ||
482 (pci_resource_len (pdev, 1) < io_size)) {
483 printk (KERN_ERR "Insufficient PCI resources, aborting\n");
484 goto err_out;
487 /* allocate pci dma space for rx and tx descriptor rings */
488 ring = pci_alloc_consistent(pdev,
489 RX_RING_SIZE * sizeof(struct rx_desc) +
490 TX_RING_SIZE * sizeof(struct tx_desc),
491 &ring_dma);
492 if (!ring) {
493 printk(KERN_ERR "Could not allocate DMA memory.\n");
494 goto err_out;
497 ioaddr = pci_resource_start (pdev, pci_flags & PCI_ADDR0 ? 0 : 1);
499 if (pci_enable_device (pdev))
500 goto err_out_free_dma;
502 if (pci_flags & PCI_USES_MASTER)
503 pci_set_master (pdev);
505 dev = init_etherdev(NULL, sizeof(*np));
506 if (dev == NULL) {
507 printk (KERN_ERR "init_ethernet failed for card #%d\n",
508 card_idx);
509 goto err_out_free_dma;
511 SET_MODULE_OWNER(dev);
513 /* request all PIO and MMIO regions just to make sure
514 * noone else attempts to use any portion of our I/O space */
515 if (!request_region (pci_resource_start (pdev, 0),
516 pci_resource_len (pdev, 0), dev->name)) {
517 printk (KERN_ERR "request_region failed for device %s, region 0x%X @ 0x%lX\n",
518 dev->name, io_size,
519 pci_resource_start (pdev, 0));
520 goto err_out_free_netdev;
522 if (!request_mem_region (pci_resource_start (pdev, 1),
523 pci_resource_len (pdev, 1), dev->name)) {
524 printk (KERN_ERR "request_mem_region failed for device %s, region 0x%X @ 0x%lX\n",
525 dev->name, io_size,
526 pci_resource_start (pdev, 1));
527 goto err_out_free_pio;
530 #ifndef USE_IO
531 ioaddr = (long) ioremap (ioaddr, io_size);
532 if (!ioaddr) {
533 printk (KERN_ERR "ioremap failed for device %s, region 0x%X @ 0x%X\n",
534 dev->name, io_size,
535 pci_resource_start (pdev, 1));
536 goto err_out_free_mmio;
538 #endif
540 printk(KERN_INFO "%s: %s at 0x%lx, ",
541 dev->name, via_rhine_chip_info[chip_id].name, ioaddr);
543 /* Ideally we would read the EEPROM but access may be locked. */
544 for (i = 0; i < 6; i++)
545 dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
546 for (i = 0; i < 5; i++)
547 printk("%2.2x:", dev->dev_addr[i]);
548 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
550 /* Reset the chip to erase previous misconfiguration. */
551 writew(CmdReset, ioaddr + ChipCmd);
553 dev->base_addr = ioaddr;
554 dev->irq = irq;
556 np = dev->priv;
557 spin_lock_init (&np->lock);
558 np->chip_id = chip_id;
559 np->drv_flags = via_rhine_chip_info[chip_id].drv_flags;
560 np->pdev = pdev;
561 np->rx_ring = ring;
562 np->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
563 np->rx_ring_dma = ring_dma;
564 np->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
566 if (dev->mem_start)
567 option = dev->mem_start;
569 /* The lower four bits are the media type. */
570 if (option > 0) {
571 if (option & 0x200)
572 np->full_duplex = 1;
573 np->default_port = option & 15;
575 if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
576 np->full_duplex = 1;
578 if (np->full_duplex)
579 np->duplex_lock = 1;
581 /* The chip-specific entries in the device structure. */
582 dev->open = via_rhine_open;
583 dev->hard_start_xmit = via_rhine_start_tx;
584 dev->stop = via_rhine_close;
585 dev->get_stats = via_rhine_get_stats;
586 dev->set_multicast_list = via_rhine_set_rx_mode;
587 dev->do_ioctl = mii_ioctl;
588 dev->tx_timeout = via_rhine_tx_timeout;
589 dev->watchdog_timeo = TX_TIMEOUT;
591 pdev->driver_data = dev;
593 if (np->drv_flags & CanHaveMII) {
594 int phy, phy_idx = 0;
595 np->phys[0] = 1; /* Standard for this chip. */
596 for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
597 int mii_status = mdio_read(dev, phy, 1);
598 if (mii_status != 0xffff && mii_status != 0x0000) {
599 np->phys[phy_idx++] = phy;
600 np->advertising = mdio_read(dev, phy, 4);
601 printk(KERN_INFO "%s: MII PHY found at address %d, status "
602 "0x%4.4x advertising %4.4x Link %4.4x.\n",
603 dev->name, phy, mii_status, np->advertising,
604 mdio_read(dev, phy, 5));
606 /* set IFF_RUNNING */
607 if (mii_status & MIILink)
608 netif_carrier_on(dev);
609 else
610 netif_carrier_off(dev);
615 return 0;
617 #ifndef USE_IO
618 /* note this is ifdef'd because the ioremap is ifdef'd...
619 * so additional exit conditions above this must move
620 * release_mem_region outside of the ifdef */
621 err_out_free_mmio:
622 release_mem_region(pci_resource_start (pdev, 1),
623 pci_resource_len (pdev, 1));
624 #endif
625 err_out_free_pio:
626 release_region(pci_resource_start (pdev, 0),
627 pci_resource_len (pdev, 0));
628 err_out_free_netdev:
629 unregister_netdev (dev);
630 kfree (dev);
631 err_out_free_dma:
632 pci_free_consistent(pdev,
633 RX_RING_SIZE * sizeof(struct rx_desc) +
634 TX_RING_SIZE * sizeof(struct tx_desc),
635 ring, ring_dma);
636 err_out:
637 return -ENODEV;
641 /* Read and write over the MII Management Data I/O (MDIO) interface. */
643 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
645 long ioaddr = dev->base_addr;
646 int boguscnt = 1024;
648 /* Wait for a previous command to complete. */
649 while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
651 writeb(0x00, ioaddr + MIICmd);
652 writeb(phy_id, ioaddr + MIIPhyAddr);
653 writeb(regnum, ioaddr + MIIRegAddr);
654 writeb(0x40, ioaddr + MIICmd); /* Trigger read */
655 boguscnt = 1024;
656 while ((readb(ioaddr + MIICmd) & 0x40) && --boguscnt > 0)
658 return readw(ioaddr + MIIData);
661 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
663 struct netdev_private *np = (struct netdev_private *)dev->priv;
664 long ioaddr = dev->base_addr;
665 int boguscnt = 1024;
667 if (phy_id == np->phys[0]) {
668 switch (regnum) {
669 case 0: /* Is user forcing speed/duplex? */
670 if (value & 0x9000) /* Autonegotiation. */
671 np->duplex_lock = 0;
672 else
673 np->full_duplex = (value & 0x0100) ? 1 : 0;
674 break;
675 case 4:
676 np->advertising = value;
677 break;
681 /* Wait for a previous command to complete. */
682 while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
684 writeb(0x00, ioaddr + MIICmd);
685 writeb(phy_id, ioaddr + MIIPhyAddr);
686 writeb(regnum, ioaddr + MIIRegAddr);
687 writew(value, ioaddr + MIIData);
688 writeb(0x20, ioaddr + MIICmd); /* Trigger write. */
692 static int via_rhine_open(struct net_device *dev)
694 struct netdev_private *np = (struct netdev_private *)dev->priv;
695 long ioaddr = dev->base_addr;
696 int i;
698 /* Reset the chip. */
699 writew(CmdReset, ioaddr + ChipCmd);
701 i = request_irq(dev->irq, &via_rhine_interrupt, SA_SHIRQ, dev->name, dev);
702 if (i)
703 return i;
705 if (debug > 1)
706 printk(KERN_DEBUG "%s: via_rhine_open() irq %d.\n",
707 dev->name, dev->irq);
709 np->tx_bufs = pci_alloc_consistent(np->pdev, PKT_BUF_SZ * TX_RING_SIZE,
710 &np->tx_bufs_dma);
711 if (np->tx_bufs == NULL) {
712 free_irq(dev->irq, dev);
713 return -ENOMEM;
716 via_rhine_init_ring(dev);
718 writel(np->rx_ring_dma, ioaddr + RxRingPtr);
719 writel(np->tx_ring_dma, ioaddr + TxRingPtr);
721 for (i = 0; i < 6; i++)
722 writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
724 /* Initialize other registers. */
725 writew(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
726 /* Configure the FIFO thresholds. */
727 writeb(0x20, ioaddr + TxConfig); /* Initial threshold 32 bytes */
728 np->tx_thresh = 0x20;
729 np->rx_thresh = 0x60; /* Written in via_rhine_set_rx_mode(). */
731 if (dev->if_port == 0)
732 dev->if_port = np->default_port;
734 netif_start_queue(dev);
736 via_rhine_set_rx_mode(dev);
738 /* Enable interrupts by setting the interrupt mask. */
739 writew(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow| IntrRxDropped|
740 IntrTxDone | IntrTxAbort | IntrTxUnderrun |
741 IntrPCIErr | IntrStatsMax | IntrLinkChange | IntrMIIChange,
742 ioaddr + IntrEnable);
744 np->chip_cmd = CmdStart|CmdTxOn|CmdRxOn|CmdNoTxPoll;
745 if (np->duplex_lock)
746 np->chip_cmd |= CmdFDuplex;
747 writew(np->chip_cmd, ioaddr + ChipCmd);
749 via_rhine_check_duplex(dev);
751 /* The LED outputs of various MII xcvrs should be configured. */
752 /* For NS or Mison phys, turn on bit 1 in register 0x17 */
753 /* For ESI phys, turn on bit 7 in register 0x17. */
754 mdio_write(dev, np->phys[0], 0x17, mdio_read(dev, np->phys[0], 0x17) |
755 (np->drv_flags & HasESIPhy) ? 0x0080 : 0x0001);
757 if (debug > 2)
758 printk(KERN_DEBUG "%s: Done via_rhine_open(), status %4.4x "
759 "MII status: %4.4x.\n",
760 dev->name, readw(ioaddr + ChipCmd),
761 mdio_read(dev, np->phys[0], 1));
763 /* Set the timer to check for link beat. */
764 init_timer(&np->timer);
765 np->timer.expires = jiffies + 2;
766 np->timer.data = (unsigned long)dev;
767 np->timer.function = &via_rhine_timer; /* timer handler */
768 add_timer(&np->timer);
770 return 0;
773 static void via_rhine_check_duplex(struct net_device *dev)
775 struct netdev_private *np = (struct netdev_private *)dev->priv;
776 long ioaddr = dev->base_addr;
777 int mii_reg5 = mdio_read(dev, np->phys[0], 5);
778 int negotiated = mii_reg5 & np->advertising;
779 int duplex;
781 if (np->duplex_lock || mii_reg5 == 0xffff)
782 return;
783 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
784 if (np->full_duplex != duplex) {
785 np->full_duplex = duplex;
786 if (debug)
787 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
788 " partner capability of %4.4x.\n", dev->name,
789 duplex ? "full" : "half", np->phys[0], mii_reg5);
790 if (duplex)
791 np->chip_cmd |= CmdFDuplex;
792 else
793 np->chip_cmd &= ~CmdFDuplex;
794 writew(np->chip_cmd, ioaddr + ChipCmd);
799 static void via_rhine_timer(unsigned long data)
801 struct net_device *dev = (struct net_device *)data;
802 struct netdev_private *np = (struct netdev_private *)dev->priv;
803 long ioaddr = dev->base_addr;
804 int next_tick = 10*HZ;
805 int mii_status;
807 if (debug > 3) {
808 printk(KERN_DEBUG "%s: VIA Rhine monitor tick, status %4.4x.\n",
809 dev->name, readw(ioaddr + IntrStatus));
812 spin_lock_irq (&np->lock);
814 via_rhine_check_duplex(dev);
816 /* make IFF_RUNNING follow the MII status bit "Link established" */
817 mii_status = mdio_read(dev, np->phys[0], 1);
818 if ( (mii_status & MIILink) != (np->mii_status & MIILink) ) {
819 if (mii_status & MIILink)
820 netif_carrier_on(dev);
821 else
822 netif_carrier_off(dev);
824 np->mii_status = mii_status;
826 spin_unlock_irq (&np->lock);
828 np->timer.expires = jiffies + next_tick;
829 add_timer(&np->timer);
833 static void via_rhine_tx_timeout (struct net_device *dev)
835 struct netdev_private *np = (struct netdev_private *) dev->priv;
836 long ioaddr = dev->base_addr;
838 /* Lock to protect mdio_read and access to stats. A friendly
839 advice to the implementor of the XXXs in this function is to be
840 sure not to spin too long (whatever that means :) */
841 spin_lock_irq (&np->lock);
843 printk (KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
844 "%4.4x, resetting...\n",
845 dev->name, readw (ioaddr + IntrStatus),
846 mdio_read (dev, np->phys[0], 1));
848 /* XXX Perhaps we should reinitialize the hardware here. */
849 dev->if_port = 0;
851 /* Stop and restart the chip's Tx processes . */
852 /* XXX to do */
854 /* Trigger an immediate transmit demand. */
855 /* XXX to do */
857 dev->trans_start = jiffies;
858 np->stats.tx_errors++;
860 spin_unlock_irq (&np->lock);
864 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
865 static void via_rhine_init_ring(struct net_device *dev)
867 struct netdev_private *np = (struct netdev_private *)dev->priv;
868 int i;
869 dma_addr_t next = np->rx_ring_dma;
871 np->cur_rx = np->cur_tx = 0;
872 np->dirty_rx = np->dirty_tx = 0;
874 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
875 np->rx_head_desc = &np->rx_ring[0];
877 for (i = 0; i < RX_RING_SIZE; i++) {
878 np->rx_ring[i].rx_status = 0;
879 np->rx_ring[i].desc_length = cpu_to_le32(np->rx_buf_sz);
880 next += sizeof(struct rx_desc);
881 np->rx_ring[i].next_desc = cpu_to_le32(next);
882 np->rx_skbuff[i] = 0;
884 /* Mark the last entry as wrapping the ring. */
885 np->rx_ring[i-1].next_desc = cpu_to_le32(np->rx_ring_dma);
887 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
888 for (i = 0; i < RX_RING_SIZE; i++) {
889 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
890 np->rx_skbuff[i] = skb;
891 if (skb == NULL)
892 break;
893 skb->dev = dev; /* Mark as being used by this device. */
895 np->rx_skbuff_dma[i] =
896 pci_map_single(np->pdev, skb->tail, np->rx_buf_sz,
897 PCI_DMA_FROMDEVICE);
899 np->rx_ring[i].addr = cpu_to_le32(np->rx_skbuff_dma[i]);
900 np->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
902 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
904 next = np->tx_ring_dma;
905 for (i = 0; i < TX_RING_SIZE; i++) {
906 np->tx_skbuff[i] = 0;
907 np->tx_ring[i].tx_status = 0;
908 np->tx_ring[i].desc_length = cpu_to_le32(0x00e08000);
909 next += sizeof(struct tx_desc);
910 np->tx_ring[i].next_desc = cpu_to_le32(next);
911 np->tx_buf[i] = &np->tx_bufs[i * PKT_BUF_SZ];
913 np->tx_ring[i-1].next_desc = cpu_to_le32(np->tx_ring_dma);
915 return;
918 static int via_rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
920 struct netdev_private *np = (struct netdev_private *)dev->priv;
921 unsigned entry;
923 /* Caution: the write order is important here, set the field
924 with the "ownership" bits last. */
926 /* lock eth irq */
927 spin_lock_irq (&np->lock);
929 /* Calculate the next Tx descriptor entry. */
930 entry = np->cur_tx % TX_RING_SIZE;
932 np->tx_skbuff[entry] = skb;
934 if ((np->drv_flags & ReqTxAlign) && ((long)skb->data & 3)) {
935 /* Must use alignment buffer. */
936 memcpy(np->tx_buf[entry], skb->data, skb->len);
937 np->tx_skbuff_dma[entry] = 0;
938 np->tx_ring[entry].addr = cpu_to_le32(np->tx_bufs_dma +
939 (np->tx_buf[entry] - np->tx_bufs));
940 } else {
941 np->tx_skbuff_dma[entry] =
942 pci_map_single(np->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
943 np->tx_ring[entry].addr = cpu_to_le32(np->tx_skbuff_dma[entry]);
946 np->tx_ring[entry].desc_length =
947 cpu_to_le32(0x00E08000 | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
948 np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
950 np->cur_tx++;
952 /* Non-x86 Todo: explicitly flush cache lines here. */
954 /* Wake the potentially-idle transmit channel. */
955 writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
957 if (np->cur_tx == np->dirty_tx + TX_QUEUE_LEN)
958 netif_stop_queue(dev);
960 dev->trans_start = jiffies;
962 spin_unlock_irq (&np->lock);
964 if (debug > 4) {
965 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
966 dev->name, np->cur_tx, entry);
968 return 0;
971 /* The interrupt handler does all of the Rx thread work and cleans up
972    after the Tx thread. */
/* Hardware interrupt entry point.  Loops while the chip still reports
   pending events: each pass acknowledges all current sources first
   (so new events re-assert the status bits) and then dispatches to
   the Rx, Tx and error sub-handlers.  The loop is bounded by
   max_interrupt_work so an interrupt storm cannot wedge the CPU. */
973 static void via_rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
975 struct net_device *dev = (struct net_device *)dev_instance;
976 long ioaddr;
977 u32 intr_status;
978 int boguscnt = max_interrupt_work;
980 ioaddr = dev->base_addr;
982 while ((intr_status = readw(ioaddr + IntrStatus))) {
983 /* Acknowledge all of the current interrupt sources ASAP. */
984 writew(intr_status & 0xffff, ioaddr + IntrStatus);
986 if (debug > 4)
987 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
988 dev->name, intr_status);
/* All Rx-related events (completion, error, dropped frame, ring
   empty/no-buffer, wakeup) are handled wholesale by the Rx routine. */
990 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
991 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
992 via_rhine_rx(dev);
994 if (intr_status & (IntrTxDone | IntrTxAbort | IntrTxUnderrun |
995 IntrTxAborted))
996 via_rhine_tx(dev);
998 /* Abnormal error summary/uncommon events handlers. */
999 if (intr_status & (IntrPCIErr | IntrLinkChange | IntrMIIChange |
1000 IntrStatsMax | IntrTxAbort | IntrTxUnderrun))
1001 via_rhine_error(dev, intr_status);
/* Work-limit reached: leave remaining events for the next
   interrupt rather than spinning here forever. */
1003 if (--boguscnt < 0) {
1004 printk(KERN_WARNING "%s: Too much work at interrupt, "
1005 "status=0x%4.4x.\n",
1006 dev->name, intr_status);
1007 break;
1011 if (debug > 3)
1012 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1013 dev->name, readw(ioaddr + IntrStatus));
1016 /* This routine is logically part of the interrupt handler, but isolated
1017    for clarity. */
/* Reclaim transmit descriptors the chip has released: account
   statistics, undo any per-skb DMA mapping, free the skbs, and wake
   the transmit queue once enough ring slots have drained.  Runs in
   interrupt context, hence the plain (non-irq) spin_lock and
   dev_kfree_skb_irq. */
1018 static void via_rhine_tx(struct net_device *dev)
1020 struct netdev_private *np = (struct netdev_private *)dev->priv;
1021 int txstatus = 0, entry = np->dirty_tx % TX_RING_SIZE;
1023 spin_lock (&np->lock);
1025 /* find and cleanup dirty tx descriptors */
1026 while (np->dirty_tx != np->cur_tx) {
1027 txstatus = le32_to_cpu(np->tx_ring[entry].tx_status);
/* DescOwn still set: the chip has not finished this one yet,
   and neither any later entry -- stop scavenging. */
1028 if (txstatus & DescOwn)
1029 break;
1030 if (debug > 6)
1031 printk(KERN_DEBUG " Tx scavenge %d status %8.8x.\n",
1032 entry, txstatus);
/* Bit 15 is the error-summary bit; decode the individual
   failure causes below.  NOTE(review): bit meanings assumed to
   follow the VT86C100A Tx descriptor layout -- confirm against
   the datasheet. */
1033 if (txstatus & 0x8000) {
1034 if (debug > 1)
1035 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1036 dev->name, txstatus);
1037 np->stats.tx_errors++;
1038 if (txstatus & 0x0400) np->stats.tx_carrier_errors++;
1039 if (txstatus & 0x0200) np->stats.tx_window_errors++;
1040 if (txstatus & 0x0100) np->stats.tx_aborted_errors++;
1041 if (txstatus & 0x0080) np->stats.tx_heartbeat_errors++;
1042 if (txstatus & 0x0002) np->stats.tx_fifo_errors++;
1043 /* Transmitter restarted in 'abnormal' handler. */
1044 } else {
1045 np->stats.collisions += (txstatus >> 3) & 15;
1046 np->stats.tx_bytes += np->tx_skbuff[entry]->len;
1047 np->stats.tx_packets++;
1049 /* Free the original skb. */
/* A dma handle of 0 means the frame was copied into the aligned
   bounce buffer at transmit time, so there is no per-skb
   mapping to tear down. */
1050 if (np->tx_skbuff_dma[entry]) {
1051 pci_unmap_single(np->pdev,
1052 np->tx_skbuff_dma[entry],
1053 np->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1055 dev_kfree_skb_irq(np->tx_skbuff[entry]);
1056 np->tx_skbuff[entry] = NULL;
1057 entry = (++np->dirty_tx) % TX_RING_SIZE;
/* Restart the queue once we are comfortably below the
   stop threshold used by the transmit path. */
1059 if ((np->cur_tx - np->dirty_tx) < TX_QUEUE_LEN - 4)
1060 netif_wake_queue (dev);
1062 spin_unlock (&np->lock);
1065 /* This routine is logically part of the interrupt handler, but isolated
1066    for clarity and better register allocation. */
/* Drain all completed frames from the Rx ring.  Frames shorter than
   rx_copybreak are copied into a freshly allocated skb so the large
   ring buffer can stay mapped and be reused; larger frames are
   unmapped and handed up directly, leaving an empty slot.  The ring
   is then refilled and the Rx engine kicked in case it stalled on an
   empty ring.  Called from the interrupt handler. */
1067 static void via_rhine_rx(struct net_device *dev)
1069 struct netdev_private *np = (struct netdev_private *)dev->priv;
1070 int entry = np->cur_rx % RX_RING_SIZE;
/* At most one full ring's worth of work per invocation. */
1071 int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
1073 if (debug > 4) {
1074 printk(KERN_DEBUG " In via_rhine_rx(), entry %d status %8.8x.\n",
1075 entry, le32_to_cpu(np->rx_head_desc->rx_status));
1078 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1079 while ( ! (np->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
1080 struct rx_desc *desc = np->rx_head_desc;
1081 u32 desc_status = le32_to_cpu(desc->rx_status);
/* Upper 16 bits of the status word carry the received byte count. */
1082 int data_size = desc_status >> 16;
1084 if (debug > 4)
1085 printk(KERN_DEBUG " via_rhine_rx() status is %8.8x.\n",
1086 desc_status);
1087 if (--boguscnt < 0)
1088 break;
/* Anything other than a whole, error-free packet is an
   exception: either a frame that spanned multiple buffers
   (oversized) or a receive error to be decoded into stats. */
1089 if ( (desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1090 if ((desc_status & RxWholePkt) != RxWholePkt) {
1091 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
1092 "multiple buffers, entry %#x length %d status %8.8x!\n",
1093 dev->name, entry, data_size, desc_status);
1094 printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
1095 dev->name, np->rx_head_desc, &np->rx_ring[entry]);
1096 np->stats.rx_length_errors++;
1097 } else if (desc_status & RxErr) {
1098 /* There was a error. */
1099 if (debug > 2)
1100 printk(KERN_DEBUG " via_rhine_rx() Rx error was %8.8x.\n",
1101 desc_status);
1102 np->stats.rx_errors++;
1103 if (desc_status & 0x0030) np->stats.rx_length_errors++;
1104 if (desc_status & 0x0048) np->stats.rx_fifo_errors++;
1105 if (desc_status & 0x0004) np->stats.rx_frame_errors++;
1106 if (desc_status & 0x0002) {
1107 /* this can also be updated outside the interrupt handler */
1108 spin_lock (&np->lock);
1109 np->stats.rx_crc_errors++;
1110 spin_unlock (&np->lock);
1113 } else {
1114 struct sk_buff *skb;
1115 /* Length should omit the CRC */
1116 int pkt_len = data_size - 4;
1118 /* Check if the packet is long enough to accept without copying
1119    to a minimally-sized skbuff. */
1120 if (pkt_len < rx_copybreak &&
1121 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1122 skb->dev = dev;
1123 skb_reserve(skb, 2); /* 16 byte align the IP header */
/* Sync the DMA buffer for CPU access before copying out of it. */
1124 pci_dma_sync_single(np->pdev, np->rx_skbuff_dma[entry],
1125 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1127 /* *_IP_COPYSUM isn't defined anywhere and eth_copy_and_sum
1128    is memcpy for all archs so this is kind of pointless right
1129    now ... or? */
1130 #if HAS_IP_COPYSUM /* Call copy + cksum if available. */
1131 eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
1132 skb_put(skb, pkt_len);
1133 #else
1134 memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
1135 pkt_len);
1136 #endif
1137 } else {
/* Pass the ring skb itself upstream; its slot is refilled
   by the loop below. */
1138 skb = np->rx_skbuff[entry];
1139 if (skb == NULL) {
1140 printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
1141 dev->name);
1142 break;
1144 np->rx_skbuff[entry] = NULL;
1145 skb_put(skb, pkt_len);
1146 pci_unmap_single(np->pdev, np->rx_skbuff_dma[entry],
1147 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1149 skb->protocol = eth_type_trans(skb, dev);
1150 netif_rx(skb);
1151 dev->last_rx = jiffies;
/* NOTE(review): skb->len is read after netif_rx() has taken
   ownership of the skb; caching pkt_len before handing the
   buffer up would be safer -- TODO confirm. */
1152 np->stats.rx_bytes += skb->len;
1153 np->stats.rx_packets++;
1155 entry = (++np->cur_rx) % RX_RING_SIZE;
1156 np->rx_head_desc = &np->rx_ring[entry];
1159 /* Refill the Rx ring buffers. */
1160 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1161 struct sk_buff *skb;
1162 entry = np->dirty_rx % RX_RING_SIZE;
1163 if (np->rx_skbuff[entry] == NULL) {
1164 skb = dev_alloc_skb(np->rx_buf_sz);
1165 np->rx_skbuff[entry] = skb;
1166 if (skb == NULL)
1167 break; /* Better luck next round. */
1168 skb->dev = dev; /* Mark as being used by this device. */
1169 np->rx_skbuff_dma[entry] =
1170 pci_map_single(np->pdev, skb->tail, np->rx_buf_sz,
1171 PCI_DMA_FROMDEVICE);
1172 np->rx_ring[entry].addr = cpu_to_le32(np->rx_skbuff_dma[entry]);
/* Hand the descriptor back to the chip last, after the address
   field is in place. */
1174 np->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1177 /* Pre-emptively restart Rx engine. */
1178 writew(CmdRxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
/* Handle the uncommon interrupt sources: MII/link changes, tally
   counter overflow, transmit aborts and FIFO underruns, plus a
   catch-all restart for anything unexpected.  Called from the
   interrupt handler with the device lock taken here. */
1181 static void via_rhine_error(struct net_device *dev, int intr_status)
1183 struct netdev_private *np = (struct netdev_private *)dev->priv;
1184 long ioaddr = dev->base_addr;
1186 spin_lock (&np->lock);
1188 if (intr_status & (IntrMIIChange | IntrLinkChange)) {
1189 if (readb(ioaddr + MIIStatus) & 0x02) {
1190 /* Link failed, restart autonegotiation. */
1191 if (np->drv_flags & HasDavicomPhy)
1192 mdio_write(dev, np->phys[0], 0, 0x3300);
1193 } else
1194 via_rhine_check_duplex(dev);
1195 if (debug)
1196 printk(KERN_ERR "%s: MII status changed: Autonegotiation "
1197 "advertising %4.4x partner %4.4x.\n", dev->name,
1198 mdio_read(dev, np->phys[0], 4),
1199 mdio_read(dev, np->phys[0], 5));
/* Hardware tally counters are about to overflow: fold them into
   the software stats and reset them. */
1201 if (intr_status & IntrStatsMax) {
1202 np->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
1203 np->stats.rx_missed_errors += readw(ioaddr + RxMissed);
1204 clear_tally_counters(ioaddr);
1206 if (intr_status & IntrTxAbort) {
1207 /* Stats counted in Tx-done handler, just restart Tx. */
1208 writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
/* On underrun, raise the Tx FIFO threshold in 0x20 steps
   (capped at 0xE0) so the chip buffers more before transmitting. */
1210 if (intr_status & IntrTxUnderrun) {
1211 if (np->tx_thresh < 0xE0)
1212 writeb(np->tx_thresh += 0x20, ioaddr + TxConfig);
1213 if (debug > 1)
1214 printk(KERN_INFO "%s: Transmitter underrun, increasing Tx "
1215 "threshold setting to %2.2x.\n", dev->name, np->tx_thresh);
1217 if ((intr_status & ~( IntrLinkChange | IntrStatsMax |
1218 IntrTxAbort | IntrTxAborted)) && debug > 1) {
1219 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1220 dev->name, intr_status);
1221 /* Recovery for other fault sources not known. */
1222 writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
1225 spin_unlock (&np->lock);
1228 static struct net_device_stats *via_rhine_get_stats(struct net_device *dev)
1230 struct netdev_private *np = (struct netdev_private *)dev->priv;
1231 long ioaddr = dev->base_addr;
1232 unsigned long flags;
1234 spin_lock_irqsave(&np->lock, flags);
1235 np->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
1236 np->stats.rx_missed_errors += readw(ioaddr + RxMissed);
1237 clear_tally_counters(ioaddr);
1238 spin_unlock_irqrestore(&np->lock, flags);
1240 return &np->stats;
1243 /* Clears the "tally counters" for CRC errors and missed frames(?).
1244    It has been reported that some chips need a write of 0 to clear
1245    these, for others the counters are set to 1 when written to and
1246    instead cleared when read. So we clear them both ways ... */
1247 static inline void clear_tally_counters(const long ioaddr)
/* One 32-bit write of 0 covers both 16-bit counters at once. */
1249 writel(0, ioaddr + RxMissed);
/* Dummy reads: return values intentionally discarded, present only
   to clear the counters on chips with clear-on-read semantics. */
1250 readw(ioaddr + RxCRCErrs);
1251 readw(ioaddr + RxMissed);
1255 /* The big-endian AUTODIN II ethernet CRC calculation.
1256 N.B. Do not use for bulk data, use a table-based routine instead.
1257 This is common code and should be moved to net/core/crc.c */
1258 static unsigned const ethernet_polynomial = 0x04c11db7U;
1259 static inline u32 ether_crc(int length, unsigned char *data)
1261 int crc = -1;
1263 while(--length >= 0) {
1264 unsigned char current_octet = *data++;
1265 int bit;
1266 for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
1267 crc = (crc << 1) ^
1268 ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
1271 return crc;
1274 static void via_rhine_set_rx_mode(struct net_device *dev)
1276 struct netdev_private *np = (struct netdev_private *)dev->priv;
1277 long ioaddr = dev->base_addr;
1278 u32 mc_filter[2]; /* Multicast hash filter */
1279 u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
1281 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1282 /* Unconditionally log net taps. */
1283 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1284 rx_mode = 0x1C;
1285 } else if ((dev->mc_count > multicast_filter_limit)
1286 || (dev->flags & IFF_ALLMULTI)) {
1287 /* Too many to match, or accept all multicasts. */
1288 writel(0xffffffff, ioaddr + MulticastFilter0);
1289 writel(0xffffffff, ioaddr + MulticastFilter1);
1290 rx_mode = 0x0C;
1291 } else {
1292 struct dev_mc_list *mclist;
1293 int i;
1294 memset(mc_filter, 0, sizeof(mc_filter));
1295 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1296 i++, mclist = mclist->next) {
1297 set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26, mc_filter);
1299 writel(mc_filter[0], ioaddr + MulticastFilter0);
1300 writel(mc_filter[1], ioaddr + MulticastFilter1);
1301 rx_mode = 0x0C;
1303 writeb(np->rx_thresh | rx_mode, ioaddr + RxConfig);
/* Private-ioctl MII access (pre-SIOCGMIIPHY ABI): data[] overlays
   ifr_data as an array of u16 words -- data[0] = PHY address,
   data[1] = register number, data[2] = value to write,
   data[3] = value read.  Returns 0, -EPERM, or -EOPNOTSUPP. */
1306 static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1308 struct netdev_private *np = (struct netdev_private *)dev->priv;
1309 u16 *data = (u16 *)&rq->ifr_data;
1310 unsigned long flags;
1311 int retval;
1313 spin_lock_irqsave(&np->lock, flags);
1314 retval = 0;
1316 switch(cmd) {
1317 case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
1318 data[0] = np->phys[0] & 0x1f;
1319 /* Fall Through */
1320 case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
1321 data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
1322 break;
1323 case SIOCDEVPRIVATE+2: /* Write the specified MII register */
/* Writing PHY registers can reconfigure the link -- require
   CAP_NET_ADMIN. */
1324 if (!capable(CAP_NET_ADMIN)) {
1325 retval = -EPERM;
1326 break;
1328 mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
1329 break;
1330 default:
1331 retval = -EOPNOTSUPP;
1334 spin_unlock_irqrestore(&np->lock, flags);
1335 return retval;
/* Shut the interface down: stop the timer and queue, quiesce the chip
   (loopback, interrupts masked, Tx/Rx stopped), release the irq, then
   free every Rx/Tx skb and DMA mapping and the Tx bounce-buffer pool.
   The teardown order matters: the chip and irq are silenced before
   any buffer is freed so no DMA or handler can touch freed memory. */
1338 static int via_rhine_close(struct net_device *dev)
1340 long ioaddr = dev->base_addr;
1341 struct netdev_private *np = (struct netdev_private *)dev->priv;
1342 int i;
1343 unsigned long flags;
1345 del_timer_sync(&np->timer);
1347 spin_lock_irqsave(&np->lock, flags);
1349 netif_stop_queue(dev);
1351 if (debug > 1)
1352 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1353 dev->name, readw(ioaddr + ChipCmd));
1355 /* Switch to loopback mode to avoid hardware races. */
1356 writeb(np->tx_thresh | 0x01, ioaddr + TxConfig);
1358 /* Disable interrupts by clearing the interrupt mask. */
1359 writew(0x0000, ioaddr + IntrEnable);
1361 /* Stop the chip's Tx and Rx processes. */
1362 writew(CmdStop, ioaddr + ChipCmd);
1364 spin_unlock_irqrestore(&np->lock, flags);
1366 /* Make sure there is no irq-handler running on a different CPU. */
1367 synchronize_irq();
1369 free_irq(dev->irq, dev);
1371 /* Free all the skbuffs in the Rx queue. */
1372 for (i = 0; i < RX_RING_SIZE; i++) {
1373 np->rx_ring[i].rx_status = 0;
1374 np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1375 if (np->rx_skbuff[i]) {
1376 pci_unmap_single(np->pdev,
1377 np->rx_skbuff_dma[i],
1378 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1379 dev_kfree_skb(np->rx_skbuff[i]);
1381 np->rx_skbuff[i] = 0;
1384 /* Free all the skbuffs in the Tx queue, and also any bounce buffers. */
1385 for (i = 0; i < TX_RING_SIZE; i++) {
1386 np->tx_ring[i].tx_status = 0;
1387 np->tx_ring[i].desc_length = cpu_to_le32(0x00e08000);
1388 np->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1389 if (np->tx_skbuff[i]) {
/* As in via_rhine_tx: dma handle 0 marks a bounce-buffer
   transmit with no per-skb mapping. */
1390 if (np->tx_skbuff_dma[i]) {
1391 pci_unmap_single(np->pdev,
1392 np->tx_skbuff_dma[i],
1393 np->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
1395 dev_kfree_skb(np->tx_skbuff[i]);
1397 np->tx_skbuff[i] = 0;
1398 np->tx_buf[i] = 0;
/* Release the DMA-coherent bounce-buffer pool allocated at open. */
1400 pci_free_consistent(np->pdev, PKT_BUF_SZ * TX_RING_SIZE,
1401 np->tx_bufs, np->tx_bufs_dma);
1403 return 0;
/* PCI hot-unplug / driver-unload callback: unregister the net device,
   release the I/O and memory regions, unmap the registers and free the
   descriptor rings and the device structure. */
1407 static void __devexit via_rhine_remove_one (struct pci_dev *pdev)
1409 struct net_device *dev = pdev->driver_data;
1410 struct netdev_private *np = (struct netdev_private *)(dev->priv);
1412 unregister_netdev(dev);
/* NOTE(review): both the I/O and memory regions are released
   unconditionally here; presumably probe requested both regardless of
   USE_IO -- verify against via_rhine_init_one. */
1414 release_region(pci_resource_start (pdev, 0),
1415 pci_resource_len (pdev, 0));
1416 release_mem_region(pci_resource_start (pdev, 1),
1417 pci_resource_len (pdev, 1));
1419 #ifndef USE_IO
1420 iounmap((char *)(dev->base_addr));
1421 #endif
/* Both rings live in one coherent allocation anchored at rx_ring. */
1423 pci_free_consistent(pdev,
1424 RX_RING_SIZE * sizeof(struct rx_desc) +
1425 TX_RING_SIZE * sizeof(struct tx_desc),
1426 np->rx_ring, np->rx_ring_dma);
1428 kfree(dev);
/* PCI driver glue: binds via_rhine_init_one/remove_one to the device
   IDs listed in via_rhine_pci_tbl.  (GNU-style labelled initializers,
   as used by 2.4-era kernels.) */
1432 static struct pci_driver via_rhine_driver = {
1433 name: "via-rhine",
1434 id_table: via_rhine_pci_tbl,
1435 probe: via_rhine_init_one,
1436 remove: via_rhine_remove_one,
/* Module entry point: register the PCI driver; pci_module_init()
   returns a negative errno if no device was claimed. */
1440 static int __init via_rhine_init (void)
1442 return pci_module_init (&via_rhine_driver);
/* Module exit point: unbind from all devices and unregister. */
1446 static void __exit via_rhine_cleanup (void)
1448 pci_unregister_driver (&via_rhine_driver);
1452 module_init(via_rhine_init);
1453 module_exit(via_rhine_cleanup);
1457 * Local variables:
1458 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c via-rhine.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
1459 * c-indent-level: 4
1460 * c-basic-offset: 4
1461 * tab-width: 4
1462 * End: