/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
    Written 1998-1999 by Donald Becker.

    This software may be used and distributed according to the terms
    of the GNU Public License (GPL), incorporated herein by reference.
    Drivers derived from this code also fall under the GPL and must retain
    this authorship and copyright notice.

    This driver is designed for the VIA VT86c100A Rhine-II PCI Fast Ethernet
    controller.  It also works with the older 3043 Rhine-I chip.

    The author may be reached as becker@cesdis.edu, or
    Donald Becker
    312 Severn Ave. #W302
    Annapolis MD 21403

    Support and updates available at
    http://cesdis.gsfc.nasa.gov/linux/drivers/via-rhine.html
*/

static const char *versionA =
"via-rhine.c:v1.01 2/27/99  Written by Donald Becker\n";
static const char *versionB =
"  http://cesdis.gsfc.nasa.gov/linux/drivers/via-rhine.html\n";
/* A few user-configurable values.  These may be modified when a driver
   module is loaded. */

static int debug = 1;           /* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
static int min_pci_latency = 64;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak = 0;
/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8             /* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
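/* For example, assuming the module is built as via-rhine.o, a load line such as
     insmod via-rhine.o debug=3 rx_copybreak=200 options=0x200,0x200
   would raise the message level, copy received frames shorter than 200 bytes
   into fresh skbuffs, and request full duplex (bit 0x200) on the first two
   cards.  Illustrative values only; see via_probe1() for how options[] is
   decoded. */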
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;
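/* set_rx_mode() below picks a filter bit with ether_crc(ETH_ALEN, addr) >> 26,
   i.e. the top six bits of the CRC index one of the 64 hash-table positions. */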
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE    8
#define RX_RING_SIZE    16
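/* With the sizes above, index arithmetic such as
     entry = np->cur_tx % TX_RING_SIZE;
   in start_tx() compiles down to a mask (cur_tx & 7) rather than a division. */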
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ      1536    /* Size of each temporary Rx buffer. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <asm/processor.h>      /* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/io.h>
/* This driver was written to use PCI memory space, however some x86
   motherboards only configure I/O space accesses correctly. */
#if defined(__i386__) && !defined(VIA_USE_MEMORY)
#define VIA_USE_IO
#endif
#ifdef VIA_USE_IO
#undef readb
#undef readw
#undef readl
#undef writeb
#undef writew
#undef writel
#define readb inb
#define readw inw
#define readl inl
#define writeb outb
#define writew outw
#define writel outl
#endif
/* Kernel compatibility defines, some common to David Hinds' PCMCIA package.
   This is only in the support-all-kernels source code. */

#define RUN_AT(x) (jiffies + (x))

#if (LINUX_VERSION_CODE >= 0x20100)
char kernel_version[] = UTS_RELEASE;
#else
#ifndef __alpha__
#define ioremap vremap
#define iounmap vfree
#endif
#endif
#if defined(MODULE) && LINUX_VERSION_CODE > 0x20115
MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(min_pci_latency, "i");
MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
#endif
#if LINUX_VERSION_CODE < 0x20123
#define test_and_set_bit(val, addr) set_bit(val, addr)
#endif
#if LINUX_VERSION_CODE <= 0x20139
#define net_device_stats enet_statistics
#else
#define NETSTATS_VER2
#endif
#if LINUX_VERSION_CODE < 0x20155 || defined(CARDBUS)
/* Grrrr, the PCI code changed, but did not consider CardBus... */
#include <linux/bios32.h>
#define PCI_SUPPORT_VER1
#else
#define PCI_SUPPORT_VER2
#endif
#if LINUX_VERSION_CODE < 0x20159
#define dev_free_skb(skb) dev_kfree_skb(skb, FREE_WRITE)
#else
#define dev_free_skb(skb) dev_kfree_skb(skb)
#endif
/*
                Theory of Operation

I. Board Compatibility

This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
controller.
II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78.  This driver assumes that they are correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.
III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of netdev_rx().
The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.
Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing.  Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.
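(The copy path of netdev_rx() below does this with skb_reserve(skb, 2) before
copying, which places the 14-byte Ethernet header at offset 2 and thus the IP
header on a 16-byte boundary.)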
IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished, otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.
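In code form, this protocol is the test_and_set_bit(0, &dev->tbusy) guard at
the top of start_tx() and the tx_full/tbusy handoff in the Tx-reap loop of
intr_handler() below.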
IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html

IVc. Errata

The VT86C100A manual is not a reliable source of information.
The chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.
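(start_tx() below works around the missing padding by never posting a Tx
descriptor length shorter than ETH_ZLEN.)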
*/



/* This table drives the PCI probe routines.  It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card. */
enum pci_flags_bit {
    PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
    PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
};
struct pci_id_info {
    const char *name;
    u16 vendor_id, device_id, device_id_mask, flags;
    int io_size;
    struct device *(*probe1)(int pci_bus, int pci_devfn, struct device *dev,
                             long ioaddr, int irq, int chip_idx, int fnd_cnt);
};

static struct device *via_probe1(int pci_bus, int pci_devfn,
                                 struct device *dev, long ioaddr, int irq,
                                 int chp_idx, int fnd_cnt);
static struct pci_id_info pci_tbl[] = {
    { "VIA VT86C100A Rhine-II", 0x1106, 0x6100, 0xffff,
      PCI_USES_IO|PCI_USES_MEM|PCI_USES_MASTER, 128, via_probe1},
    { "VIA VT3043 Rhine", 0x1106, 0x3043, 0xffff,
      PCI_USES_IO|PCI_USES_MEM|PCI_USES_MASTER, 128, via_probe1},
    {0,},                       /* 0 terminated list. */
};
/* A chip capabilities table, matching the entries in pci_tbl[] above. */
enum chip_capability_flags {CanHaveMII=1, };
struct chip_info {
    int io_size;
    int flags;
} static cap_tbl[] = {
    {128, CanHaveMII, },
    {128, CanHaveMII, },
};
/* Offsets to the device registers. */
enum register_offsets {
    StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
    IntrStatus=0x0C, IntrEnable=0x0E,
    MulticastFilter0=0x10, MulticastFilter1=0x14,
    RxRingPtr=0x18, TxRingPtr=0x1C,
    MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIConfig=0x6E,
    MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72,
    Config=0x78, RxMissed=0x7C, RxCRCErrs=0x7E,
};
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
    IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
    IntrTxDone=0x0002, IntrTxAbort=0x0008, IntrTxUnderrun=0x0010,
    IntrPCIErr=0x0040,
    IntrStatsMax=0x0080, IntrRxEarly=0x0100, IntrMIIChange=0x0200,
    IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
    IntrTxAborted=0x2000, IntrLinkChange=0x4000,
    IntrRxWakeUp=0x8000,
    IntrNormalSummary=0x0003, IntrAbnormalSummary=0x8260,
};
/* The Rx and Tx buffer descriptors. */
struct rx_desc {
    u16 rx_status;
    u16 rx_length;
    u32 desc_length;
    u32 addr;
    u32 next_desc;
};
struct tx_desc {
    u16 tx_status;
    u16 tx_own;
    u32 desc_length;
    u32 addr;
    u32 next_desc;
};
/* Bits in *_desc.status */
enum rx_status_bits {
    RxDescOwn=0x80000000, RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F};
enum desc_status_bits {
    DescOwn=0x8000, DescEndPacket=0x4000, DescIntr=0x1000,
};
/* Bits in ChipCmd. */
enum chip_cmd_bits {
    CmdInit=0x0001, CmdStart=0x0002, CmdStop=0x0004, CmdRxOn=0x0008,
    CmdTxOn=0x0010, CmdTxDemand=0x0020, CmdRxDemand=0x0040,
    CmdEarlyRx=0x0100, CmdEarlyTx=0x0200, CmdFDuplex=0x0400,
    CmdNoTxPoll=0x0800, CmdReset=0x8000,
};
struct netdev_private {
    /* Descriptor rings first for alignment. */
    struct rx_desc rx_ring[RX_RING_SIZE];
    struct tx_desc tx_ring[TX_RING_SIZE];
    /* The addresses of receive-in-place skbuffs. */
    struct sk_buff* rx_skbuff[RX_RING_SIZE];
    /* The saved address of a sent-in-place packet/buffer, for later free(). */
    struct sk_buff* tx_skbuff[TX_RING_SIZE];
    unsigned char *tx_buf[TX_RING_SIZE];    /* Tx bounce buffers */
    unsigned char *tx_bufs;                 /* Tx bounce buffer region. */
    struct device *next_module;             /* Link for devices of this type. */
    struct net_device_stats stats;
    struct timer_list timer;                /* Media monitoring timer. */
    unsigned char pci_bus, pci_devfn;
    /* Frequently used values: keep some adjacent for cache effect. */
    int chip_id;
    long in_interrupt;                      /* Word-long for SMP locks. */
    struct rx_desc *rx_head_desc;
    unsigned int cur_rx, dirty_rx;          /* Producer/consumer ring indices */
    unsigned int cur_tx, dirty_tx;
    unsigned int rx_buf_sz;                 /* Based on MTU+slack. */
    u16 chip_cmd;                           /* Current setting for ChipCmd */
    unsigned int tx_full:1;                 /* The Tx queue is full. */
    /* These values keep track of the transceiver/media in use. */
    unsigned int full_duplex:1;             /* Full-duplex operation requested. */
    unsigned int duplex_lock:1;
    unsigned int medialock:1;               /* Do not sense media. */
    unsigned int default_port:4;            /* Last dev->if_port value. */
    u8 tx_thresh, rx_thresh;
    /* MII transceiver section. */
    int mii_cnt;                            /* Number of MII transceivers found. */
    u16 advertising;                        /* NWay media advertisement */
    unsigned char phys[2];                  /* MII device addresses. */
};
static int mdio_read(struct device *dev, int phy_id, int location);
static void mdio_write(struct device *dev, int phy_id, int location, int value);
static int netdev_open(struct device *dev);
static void check_duplex(struct device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct device *dev);
static void init_ring(struct device *dev);
static int start_tx(struct sk_buff *skb, struct device *dev);
static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static int netdev_rx(struct device *dev);
static void netdev_error(struct device *dev, int intr_status);
static void set_rx_mode(struct device *dev);
static struct net_device_stats *get_stats(struct device *dev);
static int mii_ioctl(struct device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct device *dev);
/* A list of our installed devices, for removing the driver module. */
static struct device *root_net_dev = NULL;

/* Ideally we would detect all network cards in slot order.  That would
   be best done with a central PCI probe dispatch, which wouldn't work
   well when dynamically adding drivers.  So instead we detect just the
   cards we know about in slot order. */
static int pci_etherdev_probe(struct device *dev, struct pci_id_info pci_tbl[])
{
    int cards_found = 0;
    int pci_index = 0;
    unsigned char pci_bus, pci_device_fn;

    if ( ! pcibios_present())
        return -ENODEV;

    for (; pci_index < 0xff; pci_index++) {
        u16 vendor, device, pci_command, new_command;
        int chip_idx, irq;
        long pciaddr;
        long ioaddr;

        if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8, pci_index,
                                &pci_bus, &pci_device_fn)
            != PCIBIOS_SUCCESSFUL)
            break;
        pcibios_read_config_word(pci_bus, pci_device_fn,
                                 PCI_VENDOR_ID, &vendor);
        pcibios_read_config_word(pci_bus, pci_device_fn,
                                 PCI_DEVICE_ID, &device);

        for (chip_idx = 0; pci_tbl[chip_idx].vendor_id; chip_idx++)
            if (vendor == pci_tbl[chip_idx].vendor_id
                && (device & pci_tbl[chip_idx].device_id_mask) ==
                pci_tbl[chip_idx].device_id)
                break;
        if (pci_tbl[chip_idx].vendor_id == 0)       /* Compiled out! */
            continue;

        {
#if defined(PCI_SUPPORT_VER2)
            struct pci_dev *pdev = pci_find_slot(pci_bus, pci_device_fn);
#ifdef VIA_USE_IO
            pciaddr = pdev->resource[0].start;
#else
            pciaddr = pdev->resource[1].start;
#endif
            irq = pdev->irq;
#else
            u32 pci_memaddr;
            u8 pci_irq_line;
            pcibios_read_config_byte(pci_bus, pci_device_fn,
                                     PCI_INTERRUPT_LINE, &pci_irq_line);
#ifdef VIA_USE_IO
            pcibios_read_config_dword(pci_bus, pci_device_fn,
                                      PCI_BASE_ADDRESS_0, &pci_memaddr);
            pciaddr = pci_memaddr;
#else
            pcibios_read_config_dword(pci_bus, pci_device_fn,
                                      PCI_BASE_ADDRESS_1, &pci_memaddr);
            pciaddr = pci_memaddr;
#endif
            irq = pci_irq_line;
#endif
        }

        if (debug > 2)
            printk(KERN_INFO "Found %s at PCI address %#lx, IRQ %d.\n",
                   pci_tbl[chip_idx].name, pciaddr, irq);

        if (pci_tbl[chip_idx].flags & PCI_USES_IO) {
            ioaddr = pciaddr & ~3;
            if (check_region(ioaddr, pci_tbl[chip_idx].io_size))
                continue;
        } else if ((ioaddr = (long)ioremap(pciaddr & ~0xf,
                                           pci_tbl[chip_idx].io_size)) == 0) {
            printk(KERN_INFO "Failed to map PCI address %#lx.\n",
                   pciaddr);
            continue;
        }

        pcibios_read_config_word(pci_bus, pci_device_fn,
                                 PCI_COMMAND, &pci_command);
        new_command = pci_command | (pci_tbl[chip_idx].flags & 7);
        if (pci_command != new_command) {
            printk(KERN_INFO "  The PCI BIOS has not enabled the"
                   " device at %d/%d!  Updating PCI command %4.4x->%4.4x.\n",
                   pci_bus, pci_device_fn, pci_command, new_command);
            pcibios_write_config_word(pci_bus, pci_device_fn,
                                      PCI_COMMAND, new_command);
        }

        dev = pci_tbl[chip_idx].probe1(pci_bus, pci_device_fn, dev, ioaddr,
                                       irq, chip_idx, cards_found);

        if (dev && (pci_tbl[chip_idx].flags & PCI_COMMAND_MASTER)) {
            u8 pci_latency;
            pcibios_read_config_byte(pci_bus, pci_device_fn,
                                     PCI_LATENCY_TIMER, &pci_latency);
            if (pci_latency < min_pci_latency) {
                printk(KERN_INFO "  PCI latency timer (CFLT) is "
                       "unreasonably low at %d.  Setting to %d clocks.\n",
                       pci_latency, min_pci_latency);
                pcibios_write_config_byte(pci_bus, pci_device_fn,
                                          PCI_LATENCY_TIMER, min_pci_latency);
            }
        }
        dev = 0;
        cards_found++;
    }

    return cards_found ? 0 : -ENODEV;
}
#ifndef MODULE
int via_rhine_probe(struct device *dev)
{
    printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
    return pci_etherdev_probe(dev, pci_tbl);
}
#endif
static struct device *via_probe1(int pci_bus, int pci_devfn,
                                 struct device *dev, long ioaddr, int irq,
                                 int chip_id, int card_idx)
{
    struct netdev_private *np;
    int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;

    dev = init_etherdev(dev, 0);

    printk(KERN_INFO "%s: %s at 0x%lx, ",
           dev->name, pci_tbl[chip_id].name, ioaddr);

    /* Ideally we would read the EEPROM but access may be locked. */
    for (i = 0; i < 6; i++)
        dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
    for (i = 0; i < 5; i++)
        printk("%2.2x:", dev->dev_addr[i]);
    printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

#ifdef VIA_USE_IO
    request_region(ioaddr, pci_tbl[chip_id].io_size, dev->name);
#endif

    /* Reset the chip to erase previous misconfiguration. */
    writew(CmdReset, ioaddr + ChipCmd);

    dev->base_addr = ioaddr;
    dev->irq = irq;

    /* Make certain the descriptor lists are cache-aligned.  Allocate the
       extra 31 bytes of slack that the round-up below may consume. */
    np = (void *)(((long)kmalloc(sizeof(*np) + 31, GFP_KERNEL) + 31) & ~31);
    memset(np, 0, sizeof(*np));
    dev->priv = np;

    np->next_module = root_net_dev;
    root_net_dev = dev;

    np->pci_bus = pci_bus;
    np->pci_devfn = pci_devfn;
    np->chip_id = chip_id;

    if (dev->mem_start)
        option = dev->mem_start;

    /* The lower four bits are the media type. */
    if (option > 0) {
        if (option & 0x200)
            np->full_duplex = 1;
        np->default_port = option & 15;
        if (np->default_port)
            np->medialock = 1;
    }
    if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
        np->full_duplex = 1;

    if (np->full_duplex)
        np->duplex_lock = 1;

    /* The chip-specific entries in the device structure. */
    dev->open = &netdev_open;
    dev->hard_start_xmit = &start_tx;
    dev->stop = &netdev_close;
    dev->get_stats = &get_stats;
    dev->set_multicast_list = &set_rx_mode;
    dev->do_ioctl = &mii_ioctl;

    if (cap_tbl[np->chip_id].flags & CanHaveMII) {
        int phy, phy_idx = 0;
        np->phys[0] = 1;        /* Standard for this chip. */
        for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
            int mii_status = mdio_read(dev, phy, 1);
            if (mii_status != 0xffff && mii_status != 0x0000) {
                np->phys[phy_idx++] = phy;
                np->advertising = mdio_read(dev, phy, 4);
                printk(KERN_INFO "%s: MII PHY found at address %d, status "
                       "0x%4.4x advertising %4.4x Link %4.4x.\n",
                       dev->name, phy, mii_status, np->advertising,
                       mdio_read(dev, phy, 5));
            }
        }
        np->mii_cnt = phy_idx;
    }

    return dev;
}
/* Read and write over the MII Management Data I/O (MDIO) interface. */

static int mdio_read(struct device *dev, int phy_id, int regnum)
{
    long ioaddr = dev->base_addr;
    int boguscnt = 1024;

    /* Wait for a previous command to complete. */
    while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
        ;
    writeb(0x00, ioaddr + MIICmd);
    writeb(phy_id, ioaddr + MIIPhyAddr);
    writeb(regnum, ioaddr + MIIRegAddr);
    writeb(0x40, ioaddr + MIICmd);          /* Trigger read */
    boguscnt = 1024;
    while ((readb(ioaddr + MIICmd) & 0x40) && --boguscnt > 0)
        ;
    return readw(ioaddr + MIIData);
}
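/* Example: MII register 1 is the standard status word, so a call such as
   mdio_read(dev, np->phys[0], 1), as used in netdev_open() and tx_timeout(),
   returns the PHY's link and autonegotiation status bits. */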
static void mdio_write(struct device *dev, int phy_id, int regnum, int value)
{
    long ioaddr = dev->base_addr;
    int boguscnt = 1024;

    /* Wait for a previous command to complete. */
    while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
        ;
    writeb(0x00, ioaddr + MIICmd);
    writeb(phy_id, ioaddr + MIIPhyAddr);
    writeb(regnum, ioaddr + MIIRegAddr);
    writew(value, ioaddr + MIIData);
    writeb(0x20, ioaddr + MIICmd);          /* Trigger write. */
    return;
}
static int netdev_open(struct device *dev)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    long ioaddr = dev->base_addr;
    int i;

    /* Reset the chip. */
    writew(CmdReset, ioaddr + ChipCmd);

    if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev))
        return -EAGAIN;

    if (debug > 1)
        printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
               dev->name, dev->irq);

    MOD_INC_USE_COUNT;

    init_ring(dev);

    writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
    writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);

    for (i = 0; i < 6; i++)
        writeb(dev->dev_addr[i], ioaddr + StationAddr + i);

    /* Initialize other registers. */
    writew(0x0006, ioaddr + PCIConfig);     /* Tune configuration??? */
    /* Configure the FIFO thresholds. */
    writeb(0x20, ioaddr + TxConfig);        /* Initial threshold 32 bytes */
    np->tx_thresh = 0x20;
    np->rx_thresh = 0x60;                   /* Written in set_rx_mode(). */

    if (dev->if_port == 0)
        dev->if_port = np->default_port;

    dev->tbusy = 0;
    dev->interrupt = 0;
    np->in_interrupt = 0;

    set_rx_mode(dev);

    dev->start = 1;

    /* Enable interrupts by setting the interrupt mask. */
    writew(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow| IntrRxDropped|
           IntrTxDone | IntrTxAbort | IntrTxUnderrun |
           IntrPCIErr | IntrStatsMax | IntrLinkChange | IntrMIIChange,
           ioaddr + IntrEnable);

    np->chip_cmd = CmdStart|CmdTxOn|CmdRxOn|CmdNoTxPoll;
    if (np->duplex_lock)
        np->chip_cmd |= CmdFDuplex;
    writew(np->chip_cmd, ioaddr + ChipCmd);

    check_duplex(dev);

    if (debug > 2)
        printk(KERN_DEBUG "%s: Done netdev_open(), status %4.4x "
               "MII status: %4.4x.\n",
               dev->name, readw(ioaddr + ChipCmd),
               mdio_read(dev, np->phys[0], 1));

    /* Set the timer to check for link beat. */
    init_timer(&np->timer);
    np->timer.expires = RUN_AT(1);
    np->timer.data = (unsigned long)dev;
    np->timer.function = &netdev_timer;     /* timer handler */
    add_timer(&np->timer);

    return 0;
}
static void check_duplex(struct device *dev)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    long ioaddr = dev->base_addr;
    int mii_reg5 = mdio_read(dev, np->phys[0], 5);
    int duplex;

    if (np->duplex_lock || mii_reg5 == 0xffff)
        return;
    duplex = (mii_reg5 & 0x0100) || (mii_reg5 & 0x01C0) == 0x0040;
    if (np->full_duplex != duplex) {
        np->full_duplex = duplex;
        if (debug)
            printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
                   " partner capability of %4.4x.\n", dev->name,
                   duplex ? "full" : "half", np->phys[0], mii_reg5);
        if (duplex)
            np->chip_cmd |= CmdFDuplex;
        else
            np->chip_cmd &= ~CmdFDuplex;
        writew(np->chip_cmd, ioaddr + ChipCmd);
    }
}
static void netdev_timer(unsigned long data)
{
    struct device *dev = (struct device *)data;
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    long ioaddr = dev->base_addr;
    int next_tick = 10*HZ;

    if (debug > 3) {
        printk(KERN_DEBUG "%s: VIA Rhine monitor tick, status %4.4x.\n",
               dev->name, readw(ioaddr + IntrStatus));
    }
    check_duplex(dev);

    np->timer.expires = RUN_AT(next_tick);
    add_timer(&np->timer);
}
static void tx_timeout(struct device *dev)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    long ioaddr = dev->base_addr;

    printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
           "%4.4x, resetting...\n",
           dev->name, readw(ioaddr + IntrStatus),
           mdio_read(dev, np->phys[0], 1));

    /* Perhaps we should reinitialize the hardware here. */
    dev->if_port = 0;
    /* Stop and restart the chip's Tx processes. */

    /* Trigger an immediate transmit demand. */

    dev->trans_start = jiffies;
    np->stats.tx_errors++;
    return;
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct device *dev)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    int i;

    np->tx_full = 0;
    np->cur_rx = np->cur_tx = 0;
    np->dirty_rx = np->dirty_tx = 0;

    np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
    np->rx_head_desc = &np->rx_ring[0];

    for (i = 0; i < RX_RING_SIZE; i++) {
        np->rx_ring[i].rx_status = 0;
        np->rx_ring[i].rx_length = 0;
        np->rx_ring[i].desc_length = np->rx_buf_sz;
        np->rx_ring[i].next_desc = virt_to_bus(&np->rx_ring[i+1]);
        np->rx_skbuff[i] = 0;
    }
    /* Mark the last entry as wrapping the ring. */
    np->rx_ring[i-1].next_desc = virt_to_bus(&np->rx_ring[0]);

    /* Fill in the Rx buffers. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
        np->rx_skbuff[i] = skb;
        if (skb == NULL)
            break;
        skb->dev = dev;         /* Mark as being used by this device. */
        np->rx_ring[i].addr = virt_to_bus(skb->tail);
        np->rx_ring[i].rx_status = 0;
        np->rx_ring[i].rx_length = DescOwn;
    }
    np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

    for (i = 0; i < TX_RING_SIZE; i++) {
        np->tx_skbuff[i] = 0;
        np->tx_ring[i].tx_own = 0;
        np->tx_ring[i].desc_length = 0x00e08000;
        np->tx_ring[i].next_desc = virt_to_bus(&np->tx_ring[i+1]);
        np->tx_buf[i] = kmalloc(PKT_BUF_SZ, GFP_KERNEL);
    }
    np->tx_ring[i-1].next_desc = virt_to_bus(&np->tx_ring[0]);

    return;
}
static int start_tx(struct sk_buff *skb, struct device *dev)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    unsigned entry;

    /* Block a timer-based transmit from overlapping.  This could better be
       done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
    if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
        if (jiffies - dev->trans_start < TX_TIMEOUT)
            return 1;
        tx_timeout(dev);
        return 1;
    }

    /* Caution: the write order is important here, set the field
       with the "ownership" bits last. */

    /* Calculate the next Tx descriptor entry. */
    entry = np->cur_tx % TX_RING_SIZE;

    np->tx_skbuff[entry] = skb;

    if ((long)skb->data & 3) {              /* Must use alignment buffer. */
        /* GFP_ATOMIC: we may be called from the net bottom half, so the
           allocation must not sleep. */
        if (np->tx_buf[entry] == NULL &&
            (np->tx_buf[entry] = kmalloc(PKT_BUF_SZ, GFP_ATOMIC)) == NULL)
            return 1;
        memcpy(np->tx_buf[entry], skb->data, skb->len);
        np->tx_ring[entry].addr = virt_to_bus(np->tx_buf[entry]);
    } else
        np->tx_ring[entry].addr = virt_to_bus(skb->data);

    np->tx_ring[entry].desc_length = 0x00E08000 |
        (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN);
    np->tx_ring[entry].tx_own = DescOwn;

    np->cur_tx++;

    /* Non-x86 Todo: explicitly flush cache lines here. */

    /* Wake the potentially-idle transmit channel. */
    writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);

    if (np->cur_tx - np->dirty_tx < TX_RING_SIZE - 1)
        clear_bit(0, (void*)&dev->tbusy);   /* Typical path */
    else
        np->tx_full = 1;
    dev->trans_start = jiffies;

    if (debug > 4) {
        printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
               dev->name, np->cur_tx, entry);
    }
    return 0;
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs)
{
    struct device *dev = (struct device *)dev_instance;
    struct netdev_private *np;
    long ioaddr, boguscnt = max_interrupt_work;

    ioaddr = dev->base_addr;
    np = (struct netdev_private *)dev->priv;
#if defined(__i386__)
    /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
    if (test_and_set_bit(0, (void*)&dev->interrupt)) {
        printk(KERN_ERR "%s: SMP simultaneous entry of an interrupt handler.\n",
               dev->name);
        dev->interrupt = 0;     /* Avoid halting machine. */
        return;
    }
#else
    if (dev->interrupt) {
        printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
        return;
    }
    dev->interrupt = 1;
#endif

    do {
        u32 intr_status = readw(ioaddr + IntrStatus);

        /* Acknowledge all of the current interrupt sources ASAP. */
        writew(intr_status & 0xffff, ioaddr + IntrStatus);

        if (debug > 4)
            printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
                   dev->name, intr_status);

        if (intr_status == 0)
            break;

        if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
                           IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
            netdev_rx(dev);

        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
            int entry = np->dirty_tx % TX_RING_SIZE;
            int txstatus;
            if (np->tx_ring[entry].tx_own)
                break;
            txstatus = np->tx_ring[entry].tx_status;
            if (debug > 6)
                printk(KERN_DEBUG " Tx scavenge %d status %4.4x.\n",
                       entry, txstatus);
            if (txstatus & 0x8000) {
                if (debug > 1)
                    printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
                           dev->name, txstatus);
                np->stats.tx_errors++;
                if (txstatus & 0x0400) np->stats.tx_carrier_errors++;
                if (txstatus & 0x0200) np->stats.tx_window_errors++;
                if (txstatus & 0x0100) np->stats.tx_aborted_errors++;
                if (txstatus & 0x0080) np->stats.tx_heartbeat_errors++;
                if (txstatus & 0x0002) np->stats.tx_fifo_errors++;
#ifdef ETHER_STATS
                if (txstatus & 0x0100) np->stats.collisions16++;
#endif
                /* Transmitter restarted in 'abnormal' handler. */
            } else {
#ifdef ETHER_STATS
                if (txstatus & 0x0001) np->stats.tx_deferred++;
#endif
                np->stats.collisions += (txstatus >> 3) & 15;
#if defined(NETSTATS_VER2)
                np->stats.tx_bytes += np->tx_ring[entry].desc_length & 0x7ff;
#endif
                np->stats.tx_packets++;
            }
            /* Free the original skb. */
            dev_free_skb(np->tx_skbuff[entry]);
            np->tx_skbuff[entry] = 0;
        }
        if (np->tx_full && dev->tbusy
            && np->cur_tx - np->dirty_tx < TX_RING_SIZE - 4) {
            /* The ring is no longer full, clear tbusy. */
            np->tx_full = 0;
            clear_bit(0, (void*)&dev->tbusy);
            mark_bh(NET_BH);
        }

        /* Abnormal error summary/uncommon events handlers. */
        if (intr_status & (IntrPCIErr | IntrLinkChange | IntrMIIChange |
                           IntrStatsMax | IntrTxAbort | IntrTxUnderrun))
            netdev_error(dev, intr_status);

        if (--boguscnt < 0) {
            printk(KERN_WARNING "%s: Too much work at interrupt, "
                   "status=0x%4.4x.\n",
                   dev->name, intr_status);
            break;
        }
    } while (1);

    if (debug > 3)
        printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
               dev->name, readw(ioaddr + IntrStatus));

#if defined(__i386__)
    clear_bit(0, (void*)&dev->interrupt);
#else
    dev->interrupt = 0;
#endif
    return;
}
/* This routine is logically part of the interrupt handler, but isolated
   for clarity and better register allocation. */
static int netdev_rx(struct device *dev)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    int entry = np->cur_rx % RX_RING_SIZE;
    int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

    if (debug > 4) {
        printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
               entry, np->rx_head_desc->rx_length);
    }

    /* If EOP is set on the next entry, it's a new packet. Send it up. */
    while ( ! (np->rx_head_desc->rx_length & DescOwn)) {
        struct rx_desc *desc = np->rx_head_desc;
        int data_size = desc->rx_length;
        u16 desc_status = desc->rx_status;

        if (debug > 4)
            printk(KERN_DEBUG " netdev_rx() status is %4.4x.\n",
                   desc_status);
        if (--boguscnt < 0)
            break;
        if ( (desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
            if ((desc_status & RxWholePkt) != RxWholePkt) {
                printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
                       "multiple buffers, entry %#x length %d status %4.4x!\n",
                       dev->name, np->cur_rx, data_size, desc_status);
                printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
                       dev->name, np->rx_head_desc,
                       &np->rx_ring[np->cur_rx % RX_RING_SIZE]);
                np->stats.rx_length_errors++;
            } else if (desc_status & RxErr) {
                /* There was an error. */
                if (debug > 2)
                    printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
                           desc_status);
                np->stats.rx_errors++;
                if (desc_status & 0x0030) np->stats.rx_length_errors++;
                if (desc_status & 0x0048) np->stats.rx_fifo_errors++;
                if (desc_status & 0x0004) np->stats.rx_frame_errors++;
                if (desc_status & 0x0002) np->stats.rx_crc_errors++;
            }
        } else {
            struct sk_buff *skb;
            /* Length should omit the CRC */
            u16 pkt_len = data_size - 4;

            /* Check if the packet is long enough to accept without copying
               to a minimally-sized skbuff. */
            if (pkt_len < rx_copybreak
                && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                skb->dev = dev;
                skb_reserve(skb, 2);    /* 16 byte align the IP header */
#if ! defined(__alpha__) || USE_IP_COPYSUM      /* Avoid misaligned on Alpha */
                eth_copy_and_sum(skb, bus_to_virt(desc->addr),
                                 pkt_len, 0);
                skb_put(skb, pkt_len);
#else
                memcpy(skb_put(skb, pkt_len), bus_to_virt(desc->addr),
                       pkt_len);
#endif
            } else {
                skb_put(skb = np->rx_skbuff[entry], pkt_len);
                np->rx_skbuff[entry] = NULL;
            }
            skb->protocol = eth_type_trans(skb, dev);
            netif_rx(skb);
            dev->last_rx = jiffies;
            np->stats.rx_packets++;
        }
        entry = (++np->cur_rx) % RX_RING_SIZE;
        np->rx_head_desc = &np->rx_ring[entry];
    }

    /* Refill the Rx ring buffers. */
    for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
        struct sk_buff *skb;
        entry = np->dirty_rx % RX_RING_SIZE;
        if (np->rx_skbuff[entry] == NULL) {
            skb = dev_alloc_skb(np->rx_buf_sz);
            np->rx_skbuff[entry] = skb;
            if (skb == NULL)
                break;          /* Better luck next round. */
            skb->dev = dev;     /* Mark as being used by this device. */
            np->rx_ring[entry].addr = virt_to_bus(skb->tail);
        }
        np->rx_ring[entry].rx_status = 0;
        np->rx_ring[entry].rx_length = DescOwn;
    }

    /* Pre-emptively restart Rx engine. */
    writew(CmdRxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
    return 0;
}
static void netdev_error(struct device *dev, int intr_status)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    long ioaddr = dev->base_addr;

    if (intr_status & (IntrMIIChange | IntrLinkChange)) {
        if (readb(ioaddr + MIIStatus) & 0x02)
            /* Link failed, restart autonegotiation. */
            mdio_write(dev, np->phys[0], 0, 0x3300);
        else
            check_duplex(dev);
        if (debug)
            printk(KERN_ERR "%s: MII status changed: Autonegotiation "
                   "advertising %4.4x partner %4.4x.\n", dev->name,
                   mdio_read(dev, np->phys[0], 4),
                   mdio_read(dev, np->phys[0], 5));
    }
    if (intr_status & IntrStatsMax) {
        np->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
        np->stats.rx_missed_errors += readw(ioaddr + RxMissed);
        writel(0, ioaddr + RxMissed);
    }
    if (intr_status & IntrTxAbort) {
        /* Stats counted in Tx-done handler, just restart Tx. */
        writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
    }
    if (intr_status & IntrTxUnderrun) {
        if (np->tx_thresh < 0xE0)
            writeb(np->tx_thresh += 0x20, ioaddr + TxConfig);
        if (debug > 1)
            printk(KERN_INFO "%s: Transmitter underrun, increasing Tx "
                   "threshold setting to %2.2x.\n", dev->name, np->tx_thresh);
    }
    if ((intr_status & ~(IntrLinkChange|IntrStatsMax|IntrTxAbort)) && debug) {
        printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
               dev->name, intr_status);
        /* Recovery for other fault sources not known. */
        writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
    }
}
static struct net_device_stats *get_stats(struct device *dev)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    long ioaddr = dev->base_addr;

    /* Nominally we should lock this segment of code for SMP, although
       the vulnerability window is very small and statistics are
       non-critical. */
    np->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
    np->stats.rx_missed_errors += readw(ioaddr + RxMissed);
    writel(0, ioaddr + RxMissed);

    return &np->stats;
}
/* The big-endian AUTODIN II ethernet CRC calculation.
   N.B. Do not use for bulk data, use a table-based routine instead.
   This is common code and should be moved to net/core/crc.c */
static unsigned const ethernet_polynomial = 0x04c11db7U;
static inline u32 ether_crc(int length, unsigned char *data)
{
    int crc = -1;

    while (--length >= 0) {
        unsigned char current_octet = *data++;
        int bit;
        for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
            crc = (crc << 1) ^
                ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
        }
    }
    return crc;
}
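/* A table-based equivalent for bulk data might look like the sketch below
   (an untested illustration, not part of the driver).  The byte-wise bit
   reversal preserves the LSB-first bit order of the loop above, and the
   combination step works because a CRC table is GF(2)-linear in its index:

    static u32 crc32_tab[256];
    static u8 bitrev_tab[256];

    static void ether_crc_init_tables(void)
    {
        unsigned i, bit;
        for (i = 0; i < 256; i++) {
            u32 c = i << 24;
            u8 r = 0;
            for (bit = 0; bit < 8; bit++) {
                c = (c & 0x80000000) ? (c << 1) ^ ethernet_polynomial : (c << 1);
                r = (r << 1) | ((i >> bit) & 1);
            }
            crc32_tab[i] = c;
            bitrev_tab[i] = r;
        }
    }

    static u32 ether_crc_by_table(int length, unsigned char *data)
    {
        u32 crc = ~0;
        while (--length >= 0)
            crc = (crc << 8) ^ crc32_tab[((crc >> 24) & 0xff) ^ bitrev_tab[*data++]];
        return crc;
    }
*/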
static void set_rx_mode(struct device *dev)
{
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    long ioaddr = dev->base_addr;
    u32 mc_filter[2];           /* Multicast hash filter */
    u8 rx_mode;                 /* Note: 0x02=accept runt, 0x01=accept errs */

    if (dev->flags & IFF_PROMISC) {         /* Set promiscuous. */
        /* Unconditionally log net taps. */
        printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
        rx_mode = 0x1C;
    } else if ((dev->mc_count > multicast_filter_limit)
               || (dev->flags & IFF_ALLMULTI)) {
        /* Too many to match, or accept all multicasts. */
        rx_mode = 0x0C;
    } else {
        struct dev_mc_list *mclist;
        int i;
        memset(mc_filter, 0, sizeof(mc_filter));
        for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
             i++, mclist = mclist->next) {
            set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26,
                    mc_filter);
        }
        writel(mc_filter[0], ioaddr + MulticastFilter0);
        writel(mc_filter[1], ioaddr + MulticastFilter1);
        rx_mode = 0x08;
    }
    writeb(np->rx_thresh | rx_mode, ioaddr + RxConfig);
}
static int mii_ioctl(struct device *dev, struct ifreq *rq, int cmd)
{
    u16 *data = (u16 *)&rq->ifr_data;

    switch (cmd) {
    case SIOCDEVPRIVATE:        /* Get the address of the PHY in use. */
        data[0] = ((struct netdev_private *)dev->priv)->phys[0] & 0x1f;
        /* Fall Through */
    case SIOCDEVPRIVATE+1:      /* Read the specified MII register. */
        data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
        return 0;
    case SIOCDEVPRIVATE+2:      /* Write the specified MII register */
        if (!suser())
            return -EPERM;
        mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
        return 0;
    default:
        return -EOPNOTSUPP;
    }
}
static int netdev_close(struct device *dev)
{
    long ioaddr = dev->base_addr;
    struct netdev_private *np = (struct netdev_private *)dev->priv;
    int i;

    dev->start = 0;
    dev->tbusy = 1;

    if (debug > 1)
        printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
               dev->name, readw(ioaddr + ChipCmd));

    /* Disable interrupts by clearing the interrupt mask. */
    writew(0x0000, ioaddr + IntrEnable);

    /* Stop the chip's Tx and Rx processes. */
    writew(CmdStop, ioaddr + ChipCmd);

    del_timer(&np->timer);

    free_irq(dev->irq, dev);

    /* Free all the skbuffs in the Rx queue. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        np->rx_ring[i].rx_length = 0;
        np->rx_ring[i].addr = 0xBADF00D0;   /* An invalid address. */
        if (np->rx_skbuff[i]) {
#if LINUX_VERSION_CODE < 0x20100
            np->rx_skbuff[i]->free = 1;
#endif
            dev_free_skb(np->rx_skbuff[i]);
        }
        np->rx_skbuff[i] = 0;
    }
    for (i = 0; i < TX_RING_SIZE; i++) {
        if (np->tx_skbuff[i])
            dev_free_skb(np->tx_skbuff[i]);
        np->tx_skbuff[i] = 0;
    }

    MOD_DEC_USE_COUNT;

    return 0;
}
#ifdef MODULE
int init_module(void)
{
    if (debug)                  /* Emit version even if no cards detected. */
        printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
#ifdef CARDBUS
    register_driver(&etherdev_ops);
    return 0;
#else
    return pci_etherdev_probe(NULL, pci_tbl);
#endif
}

void cleanup_module(void)
{
#ifdef CARDBUS
    unregister_driver(&etherdev_ops);
#endif

    /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
    while (root_net_dev) {
        struct netdev_private *np =
            (struct netdev_private *)(root_net_dev->priv);
        unregister_netdev(root_net_dev);
#ifdef VIA_USE_IO
        release_region(root_net_dev->base_addr, pci_tbl[np->chip_id].io_size);
#else
        iounmap((char *)(root_net_dev->base_addr));
#endif
        kfree(root_net_dev);
        root_net_dev = np->next_module;
#if 0
        kfree(np);              /* Assumption: no struct realignment. */
#endif
    }
}
#endif  /* MODULE */
/*
 * Local variables:
 *  compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c via-rhine.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
 *  SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c via-rhine.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */