Import 2.4.0-test4
[davej-history.git] / drivers / net / via-rhine.c
blob67db42e974e70b01371306ee96827aa7e650dbbc
1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2 /*
3 Written 1998-2000 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 This driver is designed for the VIA VT86c100A Rhine-II PCI Fast Ethernet
13 controller. It also works with the older 3043 Rhine-I chip.
15 The author may be reached as becker@scyld.com, or C/O
16 Scyld Computing Corporation
17 410 Severn Ave., Suite 210
18 Annapolis MD 21403
21 This driver contains some changes from the original Donald Becker
22 version. He may or may not be interested in bug reports on this
23 code. You can find his versions at:
24 http://www.scyld.com/network/via-rhine.html
27 Linux kernel version history:
29 LK1.1.0:
30 - Jeff Garzik: softnet 'n stuff
32 LK1.1.1:
33 - Justin Guyett: softnet and locking fixes
34 - Jeff Garzik: use PCI interface
36 LK1.1.2:
37 - Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions
39 LK1.1.3:
40 - Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
41 code) update "Theory of Operation" with
42 softnet/locking changes
43 - Dave Miller: PCI DMA and endian fixups
44 - Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation
46 LK1.1.4:
47 - Urban Widmark: fix gcc 2.95.2 problem and
48 remove writel's to fixed address 0x7c
50 LK1.1.5:
51 - Urban Widmark: mdio locking, bounce buffer changes
52 merges from Beckers 1.05 version
53 added netif_running_on/off support
/* A few user-configurable values.
   These may be modified when a driver module is loaded. */

/* Message verbosity: 1 normal messages, 0 quiet .. 7 verbose. */
static int debug = 1;
/* Maximum events (Rx packets, etc.) handled per interrupt invocation. */
static int max_interrupt_work = 20;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak = 0;

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#define RX_RING_SIZE	16

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */
100 #if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
101 #warning You must compile this file with the correct options!
102 #warning See the last lines of the source file.
103 #error You must compile this driver with "-O".
104 #endif
106 #include <linux/module.h>
107 #include <linux/kernel.h>
108 #include <linux/string.h>
109 #include <linux/timer.h>
110 #include <linux/errno.h>
111 #include <linux/ioport.h>
112 #include <linux/malloc.h>
113 #include <linux/interrupt.h>
114 #include <linux/pci.h>
115 #include <linux/netdevice.h>
116 #include <linux/etherdevice.h>
117 #include <linux/skbuff.h>
118 #include <linux/init.h>
119 #include <asm/processor.h> /* Processor type for cache alignment. */
120 #include <asm/bitops.h>
121 #include <asm/io.h>
123 /* These identify the driver base version and may not be removed. */
124 static char version1[] __devinitdata =
125 "via-rhine.c:v1.05-LK1.1.5 5/2/2000 Written by Donald Becker\n";
126 static char version2[] __devinitdata =
127 " http://www.scyld.com/network/via-rhine.html\n";
/* This driver was written to use PCI memory space, however most versions
   of the Rhine only work correctly with I/O space accesses. */
#if defined(VIA_USE_MEMORY)
#warning Many adapters using the VIA Rhine chip are not configured to work
#warning with PCI memory space accesses.
#else
#define USE_IO
/* Redirect the MMIO accessors to port I/O so the rest of the driver can
   use a single set of readX/writeX calls regardless of access method. */
#undef readb
#undef readw
#undef readl
#undef writeb
#undef writew
#undef writel
#define readb inb
#define readw inw
#define readl inl
#define writeb outb
#define writew outw
#define writel outl
#endif
152 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
153 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
154 MODULE_PARM(max_interrupt_work, "i");
155 MODULE_PARM(debug, "i");
156 MODULE_PARM(rx_copybreak, "i");
157 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
158 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
162 Theory of Operation
164 I. Board Compatibility
166 This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
167 controller.
169 II. Board-specific settings
171 Boards with this chip are functional only in a bus-master PCI slot.
173 Many operational settings are loaded from the EEPROM to the Config word at
174 offset 0x78. This driver assumes that they are correct.
175 If this driver is compiled to use PCI memory space operations the EEPROM
176 must be configured to enable memory ops.
178 III. Driver operation
180 IIIa. Ring buffers
182 This driver uses two statically allocated fixed-size descriptor lists
183 formed into rings by a branch from the final descriptor to the beginning of
184 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
186 IIIb/c. Transmit/Receive Structure
188 This driver attempts to use a zero-copy receive and transmit scheme.
190 Alas, all data buffers are required to start on a 32 bit boundary, so
191 the driver must often copy transmit packets into bounce buffers.
193 The driver allocates full frame size skbuffs for the Rx ring buffers at
194 open() time and passes the skb->data field to the chip as receive data
195 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
196 a fresh skbuff is allocated and the frame is copied to the new skbuff.
197 When the incoming frame is larger, the skbuff is passed directly up the
198 protocol stack. Buffers consumed this way are replaced by newly allocated
199 skbuffs in the last phase of via_rhine_rx().
201 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
202 using a full-sized skbuff for small frames vs. the copying costs of larger
203 frames. New boards are typically used in generously configured machines
204 and the underfilled buffers have negligible impact compared to the benefit of
205 a single allocation size, so the default value of zero results in never
206 copying packets. When copying is done, the cost is usually mitigated by using
207 a combined copy/checksum routine. Copying also preloads the cache, which is
208 most useful with small frames.
210 Since the VIA chips are only able to transfer data to buffers on 32 bit
211 boundaries, the IP header at offset 14 in an ethernet frame isn't
212 longword aligned for further processing. Copying these unaligned buffers
213 has the beneficial effect of 16-byte aligning the IP header.
215 IIId. Synchronization
217 The driver runs as two independent, single-threaded flows of control. One
218 is the send-packet routine, which enforces single-threaded use by the
219 dev->priv->lock spinlock. The other thread is the interrupt handler, which
220 is single threaded by the hardware and interrupt handling software.
222 The send packet thread has partial control over the Tx ring. It locks the
223 dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
224 is not available it stops the transmit queue by calling netif_stop_queue.
226 The interrupt handler has exclusive control over the Rx ring and records stats
227 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
228 empty by incrementing the dirty_tx mark. If at least half of the entries in
229 the Rx ring are available the transmit queue is woken up if it was stopped.
231 IV. Notes
233 IVb. References
235 Preliminary VT86C100A manual from http://www.via.com.tw/
236 http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
237 http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
239 IVc. Errata
241 The VT86C100A manual is not reliable information.
242 The chip does not handle unaligned transmit or receive buffers, resulting
243 in significant performance degradation for bounce buffer copies on transmit
244 and unaligned IP headers on receive.
245 The chip does not pad to minimum transmit length.
251 /* This table drives the PCI probe routines. It's mostly boilerplate in all
252 of the drivers, and will likely be provided by some future kernel.
253 Note the matching code -- the first table entry matches all 56** cards but
254 second only the 1234 card.
257 enum pci_flags_bit {
258 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
259 PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
262 enum via_rhine_chips {
263 VT86C100A = 0,
264 VT3043,
267 struct via_rhine_chip_info {
268 const char *name;
269 u16 pci_flags;
270 int io_size;
271 int drv_flags;
275 enum chip_capability_flags {CanHaveMII=1, HasESIPhy=2 };
277 #if defined(VIA_USE_MEMORY)
278 #define RHINE_IOTYPE (PCI_USES_MEM | PCI_USES_MASTER | PCI_ADDR1)
279 #else
280 #define RHINE_IOTYPE (PCI_USES_IO | PCI_USES_MASTER | PCI_ADDR0)
281 #endif
283 /* directly indexed by enum via_rhine_chips, above */
284 static struct via_rhine_chip_info via_rhine_chip_info[] __devinitdata =
286 { "VIA VT86C100A Rhine-II", RHINE_IOTYPE, 128, CanHaveMII },
287 { "VIA VT3043 Rhine", RHINE_IOTYPE, 128, CanHaveMII }
290 static struct pci_device_id via_rhine_pci_tbl[] __devinitdata =
292 {0x1106, 0x6100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT86C100A},
293 {0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT3043},
294 {0,} /* terminate list */
296 MODULE_DEVICE_TABLE(pci, via_rhine_pci_tbl);
/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72,
	Config=0x78, RxMissed=0x7C, RxCRCErrs=0x7E,
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
	IntrTxDone=0x0002, IntrTxAbort=0x0008, IntrTxUnderrun=0x0010,
	IntrPCIErr=0x0040,
	IntrStatsMax=0x0080, IntrRxEarly=0x0100, IntrMIIChange=0x0200,
	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
	IntrRxWakeUp=0x8000,
	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
};

/* MII interface, status flags.
   Not to be confused with the MIIStatus register ... */
enum mii_status_bits {
	MIICap100T4		= 0x8000,
	MIICap10100HdFd		= 0x7800,
	MIIPreambleSupr		= 0x0040,
	MIIAutoNegCompleted	= 0x0020,
	MIIRemoteFault		= 0x0010,
	MIICapAutoNeg		= 0x0008,
	MIILink			= 0x0004,
	MIIJabber		= 0x0002,
	MIIExtended		= 0x0001
};
336 /* The Rx and Tx buffer descriptors. */
337 struct rx_desc {
338 s32 rx_status;
339 u32 desc_length;
340 u32 addr;
341 u32 next_desc;
343 struct tx_desc {
344 s32 tx_status;
345 u32 desc_length;
346 u32 addr;
347 u32 next_desc;
350 /* Bits in *_desc.status */
351 enum rx_status_bits {
352 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
355 enum desc_status_bits {
356 DescOwn=0x80000000, DescEndPacket=0x4000, DescIntr=0x1000,
359 /* Bits in ChipCmd. */
360 enum chip_cmd_bits {
361 CmdInit=0x0001, CmdStart=0x0002, CmdStop=0x0004, CmdRxOn=0x0008,
362 CmdTxOn=0x0010, CmdTxDemand=0x0020, CmdRxDemand=0x0040,
363 CmdEarlyRx=0x0100, CmdEarlyTx=0x0200, CmdFDuplex=0x0400,
364 CmdNoTxPoll=0x0800, CmdReset=0x8000,
367 struct netdev_private {
368 /* Descriptor rings */
369 struct rx_desc *rx_ring;
370 struct tx_desc *tx_ring;
371 dma_addr_t rx_ring_dma;
372 dma_addr_t tx_ring_dma;
374 /* The addresses of receive-in-place skbuffs. */
375 struct sk_buff *rx_skbuff[RX_RING_SIZE];
376 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
378 /* The saved address of a sent-in-place packet/buffer, for later free(). */
379 struct sk_buff *tx_skbuff[TX_RING_SIZE];
380 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
382 /* Tx bounce buffers */
383 unsigned char *tx_buf[TX_RING_SIZE];
384 unsigned char *tx_bufs;
385 dma_addr_t tx_bufs_dma;
387 struct pci_dev *pdev;
388 struct net_device_stats stats;
389 struct timer_list timer; /* Media monitoring timer. */
390 spinlock_t lock;
392 /* Frequently used values: keep some adjacent for cache effect. */
393 int chip_id, drv_flags;
394 struct rx_desc *rx_head_desc;
395 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
396 unsigned int cur_tx, dirty_tx;
397 unsigned int rx_buf_sz; /* Based on MTU+slack. */
398 u16 chip_cmd; /* Current setting for ChipCmd */
400 /* These values are keep track of the transceiver/media in use. */
401 unsigned int full_duplex:1; /* Full-duplex operation requested. */
402 unsigned int duplex_lock:1;
403 unsigned int default_port:4; /* Last dev->if_port value. */
404 u8 tx_thresh, rx_thresh;
406 /* MII transceiver section. */
407 u16 advertising; /* NWay media advertisement */
408 unsigned char phys[2]; /* MII device addresses. */
409 u16 mii_status; /* last read MII status */
412 static int mdio_read(struct net_device *dev, int phy_id, int location);
413 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
414 static int via_rhine_open(struct net_device *dev);
415 static void via_rhine_check_duplex(struct net_device *dev);
416 static void via_rhine_timer(unsigned long data);
417 static void via_rhine_tx_timeout(struct net_device *dev);
418 static void via_rhine_init_ring(struct net_device *dev);
419 static int via_rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
420 static void via_rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
421 static void via_rhine_tx(struct net_device *dev);
422 static void via_rhine_rx(struct net_device *dev);
423 static void via_rhine_error(struct net_device *dev, int intr_status);
424 static void via_rhine_set_rx_mode(struct net_device *dev);
425 static struct net_device_stats *via_rhine_get_stats(struct net_device *dev);
426 static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
427 static int via_rhine_close(struct net_device *dev);
428 static inline void clear_tally_counters(long ioaddr);
431 static int __devinit via_rhine_init_one (struct pci_dev *pdev,
432 const struct pci_device_id *ent)
434 struct net_device *dev;
435 struct netdev_private *np;
436 int i, option;
437 int chip_id = (int) ent->driver_data;
438 int irq = pdev->irq;
439 static int card_idx = -1;
440 static int did_version = 0;
441 long ioaddr;
442 int io_size;
443 int pci_flags;
444 void *ring;
445 dma_addr_t ring_dma;
447 /* print version once and once only */
448 if (! did_version++) {
449 printk (KERN_INFO "%s", version1);
450 printk (KERN_INFO "%s", version2);
453 card_idx++;
454 option = card_idx < MAX_UNITS ? options[card_idx] : 0;
455 io_size = via_rhine_chip_info[chip_id].io_size;
456 pci_flags = via_rhine_chip_info[chip_id].pci_flags;
458 /* this should always be supported */
459 if (!pci_dma_supported(pdev, 0xffffffff)) {
460 printk(KERN_ERR "32-bit PCI DMA addresses not supported by the card!?\n");
461 goto err_out;
464 /* sanity check */
465 if ((pci_resource_len (pdev, 0) < io_size) ||
466 (pci_resource_len (pdev, 1) < io_size)) {
467 printk (KERN_ERR "Insufficient PCI resources, aborting\n");
468 goto err_out;
471 /* allocate pci dma space for rx and tx descriptor rings */
472 ring = pci_alloc_consistent(pdev,
473 RX_RING_SIZE * sizeof(struct rx_desc) +
474 TX_RING_SIZE * sizeof(struct tx_desc),
475 &ring_dma);
476 if (!ring) {
477 printk(KERN_ERR "Could not allocate DMA memory.\n");
478 goto err_out;
481 ioaddr = pci_resource_start (pdev, pci_flags & PCI_ADDR0 ? 0 : 1);
483 if (pci_enable_device (pdev))
484 goto err_out_free_dma;
486 if (pci_flags & PCI_USES_MASTER)
487 pci_set_master (pdev);
489 dev = init_etherdev(NULL, sizeof(*np));
490 if (dev == NULL) {
491 printk (KERN_ERR "init_ethernet failed for card #%d\n",
492 card_idx);
493 goto err_out_free_dma;
496 /* request all PIO and MMIO regions just to make sure
497 * noone else attempts to use any portion of our I/O space */
498 if (!request_region (pci_resource_start (pdev, 0),
499 pci_resource_len (pdev, 0), dev->name)) {
500 printk (KERN_ERR "request_region failed for device %s, region 0x%X @ 0x%lX\n",
501 dev->name, io_size,
502 pci_resource_start (pdev, 0));
503 goto err_out_free_netdev;
505 if (!request_mem_region (pci_resource_start (pdev, 1),
506 pci_resource_len (pdev, 1), dev->name)) {
507 printk (KERN_ERR "request_mem_region failed for device %s, region 0x%X @ 0x%lX\n",
508 dev->name, io_size,
509 pci_resource_start (pdev, 1));
510 goto err_out_free_pio;
513 #ifndef USE_IO
514 ioaddr = (long) ioremap (ioaddr, io_size);
515 if (!ioaddr) {
516 printk (KERN_ERR "ioremap failed for device %s, region 0x%X @ 0x%X\n",
517 dev->name, io_size,
518 pci_resource_start (pdev, 1));
519 goto err_out_free_mmio;
521 #endif
523 printk(KERN_INFO "%s: %s at 0x%lx, ",
524 dev->name, via_rhine_chip_info[chip_id].name, ioaddr);
526 /* Ideally we would read the EEPROM but access may be locked. */
527 for (i = 0; i < 6; i++)
528 dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
529 for (i = 0; i < 5; i++)
530 printk("%2.2x:", dev->dev_addr[i]);
531 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
533 /* Reset the chip to erase previous misconfiguration. */
534 writew(CmdReset, ioaddr + ChipCmd);
536 dev->base_addr = ioaddr;
537 dev->irq = irq;
539 np = dev->priv;
540 spin_lock_init (&np->lock);
541 np->chip_id = chip_id;
542 np->drv_flags = via_rhine_chip_info[chip_id].drv_flags;
543 np->pdev = pdev;
544 np->rx_ring = ring;
545 np->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
546 np->rx_ring_dma = ring_dma;
547 np->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
549 if (dev->mem_start)
550 option = dev->mem_start;
552 /* The lower four bits are the media type. */
553 if (option > 0) {
554 if (option & 0x200)
555 np->full_duplex = 1;
556 np->default_port = option & 15;
558 if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
559 np->full_duplex = 1;
561 if (np->full_duplex)
562 np->duplex_lock = 1;
564 /* The chip-specific entries in the device structure. */
565 dev->open = via_rhine_open;
566 dev->hard_start_xmit = via_rhine_start_tx;
567 dev->stop = via_rhine_close;
568 dev->get_stats = via_rhine_get_stats;
569 dev->set_multicast_list = via_rhine_set_rx_mode;
570 dev->do_ioctl = mii_ioctl;
571 dev->tx_timeout = via_rhine_tx_timeout;
572 dev->watchdog_timeo = TX_TIMEOUT;
574 pdev->driver_data = dev;
576 if (np->drv_flags & CanHaveMII) {
577 int phy, phy_idx = 0;
578 np->phys[0] = 1; /* Standard for this chip. */
579 for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
580 int mii_status = mdio_read(dev, phy, 1);
581 if (mii_status != 0xffff && mii_status != 0x0000) {
582 np->phys[phy_idx++] = phy;
583 np->advertising = mdio_read(dev, phy, 4);
584 printk(KERN_INFO "%s: MII PHY found at address %d, status "
585 "0x%4.4x advertising %4.4x Link %4.4x.\n",
586 dev->name, phy, mii_status, np->advertising,
587 mdio_read(dev, phy, 5));
592 return 0;
594 #ifndef USE_IO
595 /* note this is ifdef'd because the ioremap is ifdef'd...
596 * so additional exit conditions above this must move
597 * release_mem_region outside of the ifdef */
598 err_out_free_mmio:
599 release_mem_region(pci_resource_start (pdev, 1),
600 pci_resource_len (pdev, 1));
601 #endif
602 err_out_free_pio:
603 release_region(pci_resource_start (pdev, 0),
604 pci_resource_len (pdev, 0));
605 err_out_free_netdev:
606 unregister_netdev (dev);
607 kfree (dev);
608 err_out_free_dma:
609 pci_free_consistent(pdev,
610 RX_RING_SIZE * sizeof(struct rx_desc) +
611 TX_RING_SIZE * sizeof(struct tx_desc),
612 ring, ring_dma);
613 err_out:
614 return -ENODEV;
618 /* Read and write over the MII Management Data I/O (MDIO) interface. */
620 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
622 long ioaddr = dev->base_addr;
623 int boguscnt = 1024;
625 /* Wait for a previous command to complete. */
626 while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
628 writeb(0x00, ioaddr + MIICmd);
629 writeb(phy_id, ioaddr + MIIPhyAddr);
630 writeb(regnum, ioaddr + MIIRegAddr);
631 writeb(0x40, ioaddr + MIICmd); /* Trigger read */
632 boguscnt = 1024;
633 while ((readb(ioaddr + MIICmd) & 0x40) && --boguscnt > 0)
635 return readw(ioaddr + MIIData);
638 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
640 struct netdev_private *np = (struct netdev_private *)dev->priv;
641 long ioaddr = dev->base_addr;
642 int boguscnt = 1024;
644 if (phy_id == np->phys[0] && regnum == 4)
645 np->advertising = value;
646 /* Wait for a previous command to complete. */
647 while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
649 writeb(0x00, ioaddr + MIICmd);
650 writeb(phy_id, ioaddr + MIIPhyAddr);
651 writeb(regnum, ioaddr + MIIRegAddr);
652 writew(value, ioaddr + MIIData);
653 writeb(0x20, ioaddr + MIICmd); /* Trigger write. */
657 static int via_rhine_open(struct net_device *dev)
659 struct netdev_private *np = (struct netdev_private *)dev->priv;
660 long ioaddr = dev->base_addr;
661 int i;
663 MOD_INC_USE_COUNT;
665 /* Reset the chip. */
666 writew(CmdReset, ioaddr + ChipCmd);
668 if (request_irq(dev->irq, &via_rhine_interrupt, SA_SHIRQ, dev->name, dev)) {
669 MOD_DEC_USE_COUNT;
670 return -EBUSY;
673 if (debug > 1)
674 printk(KERN_DEBUG "%s: via_rhine_open() irq %d.\n",
675 dev->name, dev->irq);
677 np->tx_bufs = pci_alloc_consistent(np->pdev, PKT_BUF_SZ * TX_RING_SIZE,
678 &np->tx_bufs_dma);
679 if (np->tx_bufs == NULL) {
680 free_irq(dev->irq, dev);
681 MOD_DEC_USE_COUNT;
682 return -ENOMEM;
685 via_rhine_init_ring(dev);
687 writel(np->rx_ring_dma, ioaddr + RxRingPtr);
688 writel(np->tx_ring_dma, ioaddr + TxRingPtr);
690 for (i = 0; i < 6; i++)
691 writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
693 /* Initialize other registers. */
694 writew(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
695 /* Configure the FIFO thresholds. */
696 writeb(0x20, ioaddr + TxConfig); /* Initial threshold 32 bytes */
697 np->tx_thresh = 0x20;
698 np->rx_thresh = 0x60; /* Written in via_rhine_set_rx_mode(). */
700 if (dev->if_port == 0)
701 dev->if_port = np->default_port;
703 netif_start_queue(dev);
705 via_rhine_set_rx_mode(dev);
707 /* Enable interrupts by setting the interrupt mask. */
708 writew(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow| IntrRxDropped|
709 IntrTxDone | IntrTxAbort | IntrTxUnderrun |
710 IntrPCIErr | IntrStatsMax | IntrLinkChange | IntrMIIChange,
711 ioaddr + IntrEnable);
713 np->chip_cmd = CmdStart|CmdTxOn|CmdRxOn|CmdNoTxPoll;
714 if (np->duplex_lock)
715 np->chip_cmd |= CmdFDuplex;
716 writew(np->chip_cmd, ioaddr + ChipCmd);
718 via_rhine_check_duplex(dev);
720 /* The LED outputs of various MII xcvrs should be configured. */
721 /* For NS or Mison phys, turn on bit 1 in register 0x17 */
722 /* For ESI phys, turn on bit 7 in register 0x17. */
723 mdio_write(dev, np->phys[0], 0x17, mdio_read(dev, np->phys[0], 0x17) |
724 (np->drv_flags & HasESIPhy) ? 0x0080 : 0x0001);
726 if (debug > 2)
727 printk(KERN_DEBUG "%s: Done via_rhine_open(), status %4.4x "
728 "MII status: %4.4x.\n",
729 dev->name, readw(ioaddr + ChipCmd),
730 mdio_read(dev, np->phys[0], 1));
732 /* Set the timer to check for link beat. */
733 init_timer(&np->timer);
734 np->timer.expires = jiffies + 2;
735 np->timer.data = (unsigned long)dev;
736 np->timer.function = &via_rhine_timer; /* timer handler */
737 add_timer(&np->timer);
739 return 0;
742 static void via_rhine_check_duplex(struct net_device *dev)
744 struct netdev_private *np = (struct netdev_private *)dev->priv;
745 long ioaddr = dev->base_addr;
746 int mii_reg5 = mdio_read(dev, np->phys[0], 5);
747 int negotiated = mii_reg5 & np->advertising;
748 int duplex;
750 if (np->duplex_lock || mii_reg5 == 0xffff)
751 return;
752 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
753 if (np->full_duplex != duplex) {
754 np->full_duplex = duplex;
755 if (debug)
756 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
757 " partner capability of %4.4x.\n", dev->name,
758 duplex ? "full" : "half", np->phys[0], mii_reg5);
759 if (duplex)
760 np->chip_cmd |= CmdFDuplex;
761 else
762 np->chip_cmd &= ~CmdFDuplex;
763 writew(np->chip_cmd, ioaddr + ChipCmd);
768 static void via_rhine_timer(unsigned long data)
770 struct net_device *dev = (struct net_device *)data;
771 struct netdev_private *np = (struct netdev_private *)dev->priv;
772 long ioaddr = dev->base_addr;
773 int next_tick = 10*HZ;
774 int mii_status;
776 if (debug > 3) {
777 printk(KERN_DEBUG "%s: VIA Rhine monitor tick, status %4.4x.\n",
778 dev->name, readw(ioaddr + IntrStatus));
781 spin_lock_irq (&np->lock);
783 via_rhine_check_duplex(dev);
785 /* make IFF_RUNNING follow the MII status bit "Link established" */
786 mii_status = mdio_read(dev, np->phys[0], 1);
787 if ( (mii_status & MIILink) != (np->mii_status & MIILink) ) {
788 if (mii_status & MIILink)
789 netif_carrier_on(dev);
790 else
791 netif_carrier_off(dev);
793 np->mii_status = mii_status;
795 spin_unlock_irq (&np->lock);
797 np->timer.expires = jiffies + next_tick;
798 add_timer(&np->timer);
802 static void via_rhine_tx_timeout (struct net_device *dev)
804 struct netdev_private *np = (struct netdev_private *) dev->priv;
805 long ioaddr = dev->base_addr;
807 /* Lock to protect mdio_read and access to stats. A friendly
808 advice to the implementor of the XXXs in this function is to be
809 sure not to spin too long (whatever that means :) */
810 spin_lock_irq (&np->lock);
812 printk (KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
813 "%4.4x, resetting...\n",
814 dev->name, readw (ioaddr + IntrStatus),
815 mdio_read (dev, np->phys[0], 1));
817 /* XXX Perhaps we should reinitialize the hardware here. */
818 dev->if_port = 0;
820 /* Stop and restart the chip's Tx processes . */
821 /* XXX to do */
823 /* Trigger an immediate transmit demand. */
824 /* XXX to do */
826 dev->trans_start = jiffies;
827 np->stats.tx_errors++;
829 spin_unlock_irq (&np->lock);
833 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
834 static void via_rhine_init_ring(struct net_device *dev)
836 struct netdev_private *np = (struct netdev_private *)dev->priv;
837 int i;
838 dma_addr_t next = np->rx_ring_dma;
840 np->cur_rx = np->cur_tx = 0;
841 np->dirty_rx = np->dirty_tx = 0;
843 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
844 np->rx_head_desc = &np->rx_ring[0];
846 for (i = 0; i < RX_RING_SIZE; i++) {
847 np->rx_ring[i].rx_status = 0;
848 np->rx_ring[i].desc_length = cpu_to_le32(np->rx_buf_sz);
849 next += sizeof(struct rx_desc);
850 np->rx_ring[i].next_desc = cpu_to_le32(next);
851 np->rx_skbuff[i] = 0;
853 /* Mark the last entry as wrapping the ring. */
854 np->rx_ring[i-1].next_desc = cpu_to_le32(np->rx_ring_dma);
856 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
857 for (i = 0; i < RX_RING_SIZE; i++) {
858 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
859 np->rx_skbuff[i] = skb;
860 if (skb == NULL)
861 break;
862 skb->dev = dev; /* Mark as being used by this device. */
864 np->rx_skbuff_dma[i] =
865 pci_map_single(np->pdev, skb->tail, np->rx_buf_sz,
866 PCI_DMA_FROMDEVICE);
868 np->rx_ring[i].addr = cpu_to_le32(np->rx_skbuff_dma[i]);
869 np->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
871 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
873 next = np->tx_ring_dma;
874 for (i = 0; i < TX_RING_SIZE; i++) {
875 np->tx_skbuff[i] = 0;
876 np->tx_ring[i].tx_status = 0;
877 np->tx_ring[i].desc_length = cpu_to_le32(0x00e08000);
878 next += sizeof(struct tx_desc);
879 np->tx_ring[i].next_desc = cpu_to_le32(next);
880 np->tx_buf[i] = &np->tx_bufs[i * PKT_BUF_SZ];
882 np->tx_ring[i-1].next_desc = cpu_to_le32(np->tx_ring_dma);
884 return;
887 static int via_rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
889 struct netdev_private *np = (struct netdev_private *)dev->priv;
890 unsigned entry;
892 /* Caution: the write order is important here, set the field
893 with the "ownership" bits last. */
895 /* lock eth irq */
896 spin_lock_irq (&np->lock);
898 /* Calculate the next Tx descriptor entry. */
899 entry = np->cur_tx % TX_RING_SIZE;
901 np->tx_skbuff[entry] = skb;
903 if ((long)skb->data & 3) { /* Must use alignment buffer. */
904 memcpy(np->tx_buf[entry], skb->data, skb->len);
905 np->tx_skbuff_dma[entry] = 0;
906 np->tx_ring[entry].addr = cpu_to_le32(np->tx_bufs_dma +
907 (np->tx_buf[entry] - np->tx_bufs));
908 } else {
909 np->tx_skbuff_dma[entry] =
910 pci_map_single(np->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
911 np->tx_ring[entry].addr = cpu_to_le32(np->tx_skbuff_dma[entry]);
914 np->tx_ring[entry].desc_length =
915 cpu_to_le32(0x00E08000 | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
916 np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
918 np->cur_tx++;
920 /* Non-x86 Todo: explicitly flush cache lines here. */
922 /* Wake the potentially-idle transmit channel. */
923 writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
925 if (np->cur_tx == np->dirty_tx + TX_QUEUE_LEN)
926 netif_stop_queue(dev);
928 dev->trans_start = jiffies;
930 spin_unlock_irq (&np->lock);
932 if (debug > 4) {
933 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
934 dev->name, np->cur_tx, entry);
936 return 0;
939 /* The interrupt handler does all of the Rx thread work and cleans up
940 after the Tx thread. */
941 static void via_rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
943 struct net_device *dev = (struct net_device *)dev_instance;
944 long ioaddr;
945 u32 intr_status;
946 int boguscnt = max_interrupt_work;
948 ioaddr = dev->base_addr;
950 while ((intr_status = readw(ioaddr + IntrStatus))) {
951 /* Acknowledge all of the current interrupt sources ASAP. */
952 writew(intr_status & 0xffff, ioaddr + IntrStatus);
954 if (debug > 4)
955 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
956 dev->name, intr_status);
958 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
959 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
960 via_rhine_rx(dev);
962 if (intr_status & (IntrTxDone | IntrTxAbort | IntrTxUnderrun |
963 IntrTxAborted))
964 via_rhine_tx(dev);
966 /* Abnormal error summary/uncommon events handlers. */
967 if (intr_status & (IntrPCIErr | IntrLinkChange | IntrMIIChange |
968 IntrStatsMax | IntrTxAbort | IntrTxUnderrun))
969 via_rhine_error(dev, intr_status);
971 if (--boguscnt < 0) {
972 printk(KERN_WARNING "%s: Too much work at interrupt, "
973 "status=0x%4.4x.\n",
974 dev->name, intr_status);
975 break;
979 if (debug > 3)
980 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
981 dev->name, readw(ioaddr + IntrStatus));
984 /* This routine is logically part of the interrupt handler, but isolated
985 for clarity. */
986 static void via_rhine_tx(struct net_device *dev)
988 struct netdev_private *np = (struct netdev_private *)dev->priv;
989 int txstatus = 0, entry = np->dirty_tx % TX_RING_SIZE;
991 spin_lock (&np->lock);
993 /* find and cleanup dirty tx descriptors */
994 while (np->dirty_tx != np->cur_tx) {
995 txstatus = le32_to_cpu(np->tx_ring[entry].tx_status);
996 if (txstatus & DescOwn)
997 break;
998 if (debug > 6)
999 printk(KERN_DEBUG " Tx scavenge %d status %8.8x.\n",
1000 entry, txstatus);
1001 if (txstatus & 0x8000) {
1002 if (debug > 1)
1003 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1004 dev->name, txstatus);
1005 np->stats.tx_errors++;
1006 if (txstatus & 0x0400) np->stats.tx_carrier_errors++;
1007 if (txstatus & 0x0200) np->stats.tx_window_errors++;
1008 if (txstatus & 0x0100) np->stats.tx_aborted_errors++;
1009 if (txstatus & 0x0080) np->stats.tx_heartbeat_errors++;
1010 if (txstatus & 0x0002) np->stats.tx_fifo_errors++;
1011 /* Transmitter restarted in 'abnormal' handler. */
1012 } else {
1013 np->stats.collisions += (txstatus >> 3) & 15;
1014 np->stats.tx_bytes += np->tx_skbuff[entry]->len;
1015 np->stats.tx_packets++;
1017 /* Free the original skb. */
1018 if (np->tx_skbuff_dma[entry]) {
1019 pci_unmap_single(np->pdev,
1020 np->tx_skbuff_dma[entry],
1021 np->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1023 dev_kfree_skb_irq(np->tx_skbuff[entry]);
1024 np->tx_skbuff[entry] = NULL;
1025 entry = (++np->dirty_tx) % TX_RING_SIZE;
1027 if ((np->cur_tx - np->dirty_tx) < TX_QUEUE_LEN - 4)
1028 netif_wake_queue (dev);
1030 spin_unlock (&np->lock);
1033 /* This routine is logically part of the interrupt handler, but isolated
1034 for clarity and better register allocation. */
1035 static void via_rhine_rx(struct net_device *dev)
1037 struct netdev_private *np = (struct netdev_private *)dev->priv;
1038 int entry = np->cur_rx % RX_RING_SIZE;
1039 int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
1041 if (debug > 4) {
1042 printk(KERN_DEBUG " In via_rhine_rx(), entry %d status %8.8x.\n",
1043 entry, le32_to_cpu(np->rx_head_desc->rx_status));
1046 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1047 while ( ! (np->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
1048 struct rx_desc *desc = np->rx_head_desc;
1049 u32 desc_status = le32_to_cpu(desc->rx_status);
1050 int data_size = desc_status >> 16;
1052 if (debug > 4)
1053 printk(KERN_DEBUG " via_rhine_rx() status is %8.8x.\n",
1054 desc_status);
1055 if (--boguscnt < 0)
1056 break;
1057 if ( (desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1058 if ((desc_status & RxWholePkt) != RxWholePkt) {
1059 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
1060 "multiple buffers, entry %#x length %d status %8.8x!\n",
1061 dev->name, entry, data_size, desc_status);
1062 printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
1063 dev->name, np->rx_head_desc, &np->rx_ring[entry]);
1064 np->stats.rx_length_errors++;
1065 } else if (desc_status & RxErr) {
1066 /* There was a error. */
1067 if (debug > 2)
1068 printk(KERN_DEBUG " via_rhine_rx() Rx error was %8.8x.\n",
1069 desc_status);
1070 np->stats.rx_errors++;
1071 if (desc_status & 0x0030) np->stats.rx_length_errors++;
1072 if (desc_status & 0x0048) np->stats.rx_fifo_errors++;
1073 if (desc_status & 0x0004) np->stats.rx_frame_errors++;
1074 if (desc_status & 0x0002) {
1075 /* this can also be updated outside the interrupt handler */
1076 spin_lock (&np->lock);
1077 np->stats.rx_crc_errors++;
1078 spin_unlock (&np->lock);
1081 } else {
1082 struct sk_buff *skb;
1083 /* Length should omit the CRC */
1084 int pkt_len = data_size - 4;
1086 /* Check if the packet is long enough to accept without copying
1087 to a minimally-sized skbuff. */
1088 if (pkt_len < rx_copybreak &&
1089 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1090 skb->dev = dev;
1091 skb_reserve(skb, 2); /* 16 byte align the IP header */
1092 pci_dma_sync_single(np->pdev, np->rx_skbuff_dma[entry],
1093 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1095 /* *_IP_COPYSUM isn't defined anywhere and eth_copy_and_sum
1096 is memcpy for all archs so this is kind of pointless right
1097 now ... or? */
1098 #if HAS_IP_COPYSUM /* Call copy + cksum if available. */
1099 eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
1100 skb_put(skb, pkt_len);
1101 #else
1102 memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
1103 pkt_len);
1104 #endif
1105 } else {
1106 skb = np->rx_skbuff[entry];
1107 if (skb == NULL) {
1108 printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
1109 dev->name);
1110 break;
1112 np->rx_skbuff[entry] = NULL;
1113 skb_put(skb, pkt_len);
1114 pci_unmap_single(np->pdev, np->rx_skbuff_dma[entry],
1115 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1117 skb->protocol = eth_type_trans(skb, dev);
1118 netif_rx(skb);
1119 dev->last_rx = jiffies;
1120 np->stats.rx_bytes += skb->len;
1121 np->stats.rx_packets++;
1123 entry = (++np->cur_rx) % RX_RING_SIZE;
1124 np->rx_head_desc = &np->rx_ring[entry];
1127 /* Refill the Rx ring buffers. */
1128 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1129 struct sk_buff *skb;
1130 entry = np->dirty_rx % RX_RING_SIZE;
1131 if (np->rx_skbuff[entry] == NULL) {
1132 skb = dev_alloc_skb(np->rx_buf_sz);
1133 np->rx_skbuff[entry] = skb;
1134 if (skb == NULL)
1135 break; /* Better luck next round. */
1136 skb->dev = dev; /* Mark as being used by this device. */
1137 np->rx_skbuff_dma[entry] =
1138 pci_map_single(np->pdev, skb->tail, np->rx_buf_sz,
1139 PCI_DMA_FROMDEVICE);
1140 np->rx_ring[entry].addr = cpu_to_le32(np->rx_skbuff_dma[entry]);
1142 np->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1145 /* Pre-emptively restart Rx engine. */
1146 writew(CmdRxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
1149 static void via_rhine_error(struct net_device *dev, int intr_status)
1151 struct netdev_private *np = (struct netdev_private *)dev->priv;
1152 long ioaddr = dev->base_addr;
1154 spin_lock (&np->lock);
1156 if (intr_status & (IntrMIIChange | IntrLinkChange)) {
1157 if (readb(ioaddr + MIIStatus) & 0x02)
1158 /* Link failed, restart autonegotiation. */
1159 mdio_write(dev, np->phys[0], 0, 0x3300);
1160 else
1161 via_rhine_check_duplex(dev);
1162 if (debug)
1163 printk(KERN_ERR "%s: MII status changed: Autonegotiation "
1164 "advertising %4.4x partner %4.4x.\n", dev->name,
1165 mdio_read(dev, np->phys[0], 4),
1166 mdio_read(dev, np->phys[0], 5));
1168 if (intr_status & IntrStatsMax) {
1169 np->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
1170 np->stats.rx_missed_errors += readw(ioaddr + RxMissed);
1171 clear_tally_counters(ioaddr);
1173 if (intr_status & IntrTxAbort) {
1174 /* Stats counted in Tx-done handler, just restart Tx. */
1175 writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
1177 if (intr_status & IntrTxUnderrun) {
1178 if (np->tx_thresh < 0xE0)
1179 writeb(np->tx_thresh += 0x20, ioaddr + TxConfig);
1180 if (debug > 1)
1181 printk(KERN_INFO "%s: Transmitter underrun, increasing Tx "
1182 "threshold setting to %2.2x.\n", dev->name, np->tx_thresh);
1184 if ((intr_status & ~( IntrLinkChange | IntrStatsMax |
1185 IntrTxAbort | IntrTxAborted)) && debug > 1) {
1186 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1187 dev->name, intr_status);
1188 /* Recovery for other fault sources not known. */
1189 writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
1192 spin_unlock (&np->lock);
1195 static struct net_device_stats *via_rhine_get_stats(struct net_device *dev)
1197 struct netdev_private *np = (struct netdev_private *)dev->priv;
1198 long ioaddr = dev->base_addr;
1199 unsigned long flags;
1201 spin_lock_irqsave(&np->lock, flags);
1202 np->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
1203 np->stats.rx_missed_errors += readw(ioaddr + RxMissed);
1204 clear_tally_counters(ioaddr);
1205 spin_unlock_irqrestore(&np->lock, flags);
1207 return &np->stats;
1210 /* Clears the "tally counters" for CRC errors and missed frames(?).
1211 It has been reported that some chips need a write of 0 to clear
1212 these, for others the counters are set to 1 when written to and
1213 instead cleared when read. So we clear them both ways ... */
1214 static inline void clear_tally_counters(const long ioaddr)
1216 writel(0, ioaddr + RxMissed);
1217 readw(ioaddr + RxCRCErrs);
1218 readw(ioaddr + RxMissed);
1222 /* The big-endian AUTODIN II ethernet CRC calculation.
1223 N.B. Do not use for bulk data, use a table-based routine instead.
1224 This is common code and should be moved to net/core/crc.c */
1225 static unsigned const ethernet_polynomial = 0x04c11db7U;
1226 static inline u32 ether_crc(int length, unsigned char *data)
1228 int crc = -1;
1230 while(--length >= 0) {
1231 unsigned char current_octet = *data++;
1232 int bit;
1233 for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
1234 crc = (crc << 1) ^
1235 ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
1238 return crc;
1241 static void via_rhine_set_rx_mode(struct net_device *dev)
1243 struct netdev_private *np = (struct netdev_private *)dev->priv;
1244 long ioaddr = dev->base_addr;
1245 u32 mc_filter[2]; /* Multicast hash filter */
1246 u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
1248 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1249 /* Unconditionally log net taps. */
1250 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1251 rx_mode = 0x1C;
1252 } else if ((dev->mc_count > multicast_filter_limit)
1253 || (dev->flags & IFF_ALLMULTI)) {
1254 /* Too many to match, or accept all multicasts. */
1255 writel(0xffffffff, ioaddr + MulticastFilter0);
1256 writel(0xffffffff, ioaddr + MulticastFilter1);
1257 rx_mode = 0x0C;
1258 } else {
1259 struct dev_mc_list *mclist;
1260 int i;
1261 memset(mc_filter, 0, sizeof(mc_filter));
1262 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1263 i++, mclist = mclist->next) {
1264 set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26, mc_filter);
1266 writel(mc_filter[0], ioaddr + MulticastFilter0);
1267 writel(mc_filter[1], ioaddr + MulticastFilter1);
1268 rx_mode = 0x0C;
1270 writeb(np->rx_thresh | rx_mode, ioaddr + RxConfig);
1273 static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1275 struct netdev_private *np = (struct netdev_private *)dev->priv;
1276 u16 *data = (u16 *)&rq->ifr_data;
1277 unsigned long flags;
1278 int retval;
1280 spin_lock_irqsave(&np->lock, flags);
1281 retval = 0;
1283 switch(cmd) {
1284 case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
1285 data[0] = np->phys[0] & 0x1f;
1286 /* Fall Through */
1287 case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
1288 data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
1289 break;
1290 case SIOCDEVPRIVATE+2: /* Write the specified MII register */
1291 if (!capable(CAP_NET_ADMIN)) {
1292 retval = -EPERM;
1293 break;
1295 mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
1296 break;
1297 default:
1298 retval = -EOPNOTSUPP;
1301 spin_unlock_irqrestore(&np->lock, flags);
1302 return retval;
1305 static int via_rhine_close(struct net_device *dev)
1307 long ioaddr = dev->base_addr;
1308 struct netdev_private *np = (struct netdev_private *)dev->priv;
1309 int i;
1310 unsigned long flags;
1312 spin_lock_irqsave(&np->lock, flags);
1314 netif_stop_queue(dev);
1316 if (debug > 1)
1317 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1318 dev->name, readw(ioaddr + ChipCmd));
1320 /* Disable interrupts by clearing the interrupt mask. */
1321 writew(0x0000, ioaddr + IntrEnable);
1323 /* Stop the chip's Tx and Rx processes. */
1324 writew(CmdStop, ioaddr + ChipCmd);
1326 del_timer(&np->timer);
1328 spin_unlock_irqrestore(&np->lock, flags);
1330 /* Make sure there is no irq-handler running on a different CPU. */
1331 synchronize_irq();
1333 free_irq(dev->irq, dev);
1335 /* Free all the skbuffs in the Rx queue. */
1336 for (i = 0; i < RX_RING_SIZE; i++) {
1337 np->rx_ring[i].rx_status = 0;
1338 np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1339 if (np->rx_skbuff[i]) {
1340 pci_unmap_single(np->pdev,
1341 np->rx_skbuff_dma[i],
1342 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1343 dev_kfree_skb(np->rx_skbuff[i]);
1345 np->rx_skbuff[i] = 0;
1348 /* Free all the skbuffs in the Tx queue, and also any bounce buffers. */
1349 for (i = 0; i < TX_RING_SIZE; i++) {
1350 np->tx_ring[i].tx_status = 0;
1351 np->tx_ring[i].desc_length = cpu_to_le32(0x00e08000);
1352 np->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1353 if (np->tx_skbuff[i]) {
1354 if (np->tx_skbuff_dma[i]) {
1355 pci_unmap_single(np->pdev,
1356 np->tx_skbuff_dma[i],
1357 np->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
1359 dev_kfree_skb(np->tx_skbuff[i]);
1361 np->tx_skbuff[i] = 0;
1362 np->tx_buf[i] = 0;
1364 pci_free_consistent(np->pdev, PKT_BUF_SZ * TX_RING_SIZE,
1365 np->tx_bufs, np->tx_bufs_dma);
1367 MOD_DEC_USE_COUNT;
1369 return 0;
1373 static void __devexit via_rhine_remove_one (struct pci_dev *pdev)
1375 struct net_device *dev = pdev->driver_data;
1376 struct netdev_private *np = (struct netdev_private *)(dev->priv);
1378 unregister_netdev(dev);
1380 release_region(pci_resource_start (pdev, 0),
1381 pci_resource_len (pdev, 0));
1382 release_mem_region(pci_resource_start (pdev, 1),
1383 pci_resource_len (pdev, 1));
1385 #ifndef USE_IO
1386 iounmap((char *)(dev->base_addr));
1387 #endif
1389 pci_free_consistent(pdev,
1390 RX_RING_SIZE * sizeof(struct rx_desc) +
1391 TX_RING_SIZE * sizeof(struct tx_desc),
1392 np->rx_ring, np->rx_ring_dma);
1394 kfree(dev);
1398 static struct pci_driver via_rhine_driver = {
1399 name: "via-rhine",
1400 id_table: via_rhine_pci_tbl,
1401 probe: via_rhine_init_one,
1402 remove: via_rhine_remove_one,
1406 static int __init via_rhine_init (void)
1408 return pci_module_init (&via_rhine_driver);
1412 static void __exit via_rhine_cleanup (void)
1414 pci_unregister_driver (&via_rhine_driver);
1418 module_init(via_rhine_init);
1419 module_exit(via_rhine_cleanup);
1423 * Local variables:
1424 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c via-rhine.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
1425 * c-indent-level: 4
1426 * c-basic-offset: 4
1427 * tab-width: 4
1428 * End: