MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / drivers / net / tulip / winbond-840.c
blobd3efcadca48c5edd3f111a8959b42182ee5b5ae3
1 /* winbond-840.c: A Linux PCI network adapter device driver. */
2 /*
3 Written 1998-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
15 Annapolis MD 21403
17 Support and updates available at
18 http://www.scyld.com/network/drivers.html
20 Do not remove the copyright information.
21 Do not change the version information unless an improvement has been made.
22 Merely removing my name, as Compex has done in the past, does not count
23 as an improvement.
25 Changelog:
26 * ported to 2.4
27 ???
28 * spin lock update, memory barriers, new style dma mappings
29 limit each tx buffer to < 1024 bytes
30 remove DescIntr from Rx descriptors (that's a Tx flag)
31 remove next pointer from Tx descriptors
32 synchronize tx_q_bytes
33 software reset in tx_timeout
34 Copyright (C) 2000 Manfred Spraul
35 * further cleanups
36 power management.
37 support for big endian descriptors
38 Copyright (C) 2001 Manfred Spraul
39 * ethtool support (jgarzik)
40 * Replace some MII-related magic numbers with constants (jgarzik)
42 TODO:
43 * enable pci_power_off
44 * Wake-On-LAN
47 #define DRV_NAME "winbond-840"
48 #define DRV_VERSION "1.01-d"
49 #define DRV_RELDATE "Nov-17-2001"
52 /* Automatically extracted configuration info:
53 probe-func: winbond840_probe
54 config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840
56 c-help-name: Winbond W89c840 PCI Ethernet support
57 c-help-symbol: CONFIG_WINBOND_840
58 c-help: This driver is for the Winbond W89c840 chip. It also works with
59 c-help: the TX9882 chip on the Compex RL100-ATX board.
60 c-help: More specific information and updates are available from
61 c-help: http://www.scyld.com/network/drivers.html
64 /* The user-configurable values.
65 These may be modified when a driver module is loaded.*/
67 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
68 static int max_interrupt_work = 20;
69 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
70 The '840 uses a 64 element hash table based on the Ethernet CRC. */
71 static int multicast_filter_limit = 32;
73 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
74 Setting to > 1518 effectively disables this feature. */
75 static int rx_copybreak;
77 /* Used to pass the media type, etc.
78 Both 'options[]' and 'full_duplex[]' should exist for driver
79 interoperability.
80 The media type is usually passed in 'options[]'.
82 #define MAX_UNITS 8 /* More are supported, limit only on options */
83 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
84 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
86 /* Operational parameters that are set at compile time. */
88 /* Keep the ring sizes a power of two for compile efficiency.
89 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
90 Making the Tx ring too large decreases the effectiveness of channel
91 bonding and packet priority.
92 There are no ill effects from too-large receive rings. */
93 #define TX_RING_SIZE 16
94 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
95 #define TX_QUEUE_LEN_RESTART 5
96 #define RX_RING_SIZE 32
98 #define TX_BUFLIMIT (1024-128)
100 /* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
101 To avoid overflowing we don't queue again until we have room for a
102 full-size packet.
104 #define TX_FIFO_SIZE (2048)
105 #define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
108 /* Operational parameters that usually are not changed. */
109 /* Time in jiffies before concluding the transmitter is hung. */
110 #define TX_TIMEOUT (2*HZ)
112 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
114 /* Include files, designed to support most kernel versions 2.0.0 and later. */
115 #include <linux/module.h>
116 #include <linux/kernel.h>
117 #include <linux/string.h>
118 #include <linux/timer.h>
119 #include <linux/errno.h>
120 #include <linux/ioport.h>
121 #include <linux/slab.h>
122 #include <linux/interrupt.h>
123 #include <linux/pci.h>
124 #include <linux/netdevice.h>
125 #include <linux/etherdevice.h>
126 #include <linux/skbuff.h>
127 #include <linux/init.h>
128 #include <linux/delay.h>
129 #include <linux/ethtool.h>
130 #include <linux/mii.h>
131 #include <linux/rtnetlink.h>
132 #include <linux/crc32.h>
133 #include <asm/uaccess.h>
134 #include <asm/processor.h> /* Processor type for cache alignment. */
135 #include <asm/bitops.h>
136 #include <asm/io.h>
137 #include <asm/irq.h>
139 /* These identify the driver base version and may not be removed. */
140 static char version[] __devinitdata =
141 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
142 KERN_INFO " http://www.scyld.com/network/drivers.html\n";
144 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
145 MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
146 MODULE_LICENSE("GPL");
148 MODULE_PARM(max_interrupt_work, "i");
149 MODULE_PARM(debug, "i");
150 MODULE_PARM(rx_copybreak, "i");
151 MODULE_PARM(multicast_filter_limit, "i");
152 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
153 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
154 MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
155 MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
156 MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
157 MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
158 MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
159 MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");
162 Theory of Operation
164 I. Board Compatibility
166 This driver is for the Winbond w89c840 chip.
168 II. Board-specific settings
170 None.
172 III. Driver operation
174 This chip is very similar to the Digital 21*4* "Tulip" family. The first
175 twelve registers and the descriptor format are nearly identical. Read a
176 Tulip manual for operational details.
178 A significant difference is that the multicast filter and station address are
179 stored in registers rather than loaded through a pseudo-transmit packet.
181 Unlike the Tulip, transmit buffers are limited to 1KB. To transmit a
182 full-sized packet we must use both data buffers in a descriptor. Thus the
183 driver uses ring mode where descriptors are implicitly sequential in memory,
184 rather than using the second descriptor address as a chain pointer to
185 subsequent descriptors.
187 IV. Notes
189 If you are going to almost clone a Tulip, why not go all the way and avoid
190 the need for a new driver?
192 IVb. References
194 http://www.scyld.com/expert/100mbps.html
195 http://www.scyld.com/expert/NWay.html
196 http://www.winbond.com.tw/
198 IVc. Errata
200 A horrible bug exists in the transmit FIFO. Apparently the chip doesn't
201 correctly detect a full FIFO, and queuing more than 2048 bytes may result in
202 silent data corruption.
204 Test with 'ping -s 10000' on a fast computer.
211 PCI probe table.
213 enum pci_id_flags_bits {
214 /* Set PCI command register bits before calling probe1(). */
215 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
216 /* Read and map the single following PCI BAR. */
217 PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
218 PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
220 enum chip_capability_flags {
221 CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,};
222 #ifdef USE_IO_OPS
223 #define W840_FLAGS (PCI_USES_IO | PCI_ADDR0 | PCI_USES_MASTER)
224 #else
225 #define W840_FLAGS (PCI_USES_MEM | PCI_ADDR1 | PCI_USES_MASTER)
226 #endif
228 static struct pci_device_id w840_pci_tbl[] = {
229 { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
230 { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
231 { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
232 { 0, }
234 MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
236 struct pci_id_info {
237 const char *name;
238 struct match_info {
239 int pci, pci_mask, subsystem, subsystem_mask;
240 int revision, revision_mask; /* Only 8 bits. */
241 } id;
242 enum pci_id_flags_bits pci_flags;
243 int io_size; /* Needed for I/O region check or ioremap(). */
244 int drv_flags; /* Driver use, intended as capability flags. */
246 static struct pci_id_info pci_id_tbl[] = {
247 {"Winbond W89c840", /* Sometime a Level-One switch card. */
248 { 0x08401050, 0xffffffff, 0x81530000, 0xffff0000 },
249 W840_FLAGS, 128, CanHaveMII | HasBrokenTx | FDXOnNoMII},
250 {"Winbond W89c840", { 0x08401050, 0xffffffff, },
251 W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
252 {"Compex RL100-ATX", { 0x201111F6, 0xffffffff,},
253 W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
254 {NULL,}, /* 0 terminated list. */
257 /* This driver was written to use PCI memory space, however some x86 systems
258 work only with I/O space accesses. Pass -DUSE_IO_OPS to use PCI I/O space
259 accesses instead of memory space. */
261 #ifdef USE_IO_OPS
262 #undef readb
263 #undef readw
264 #undef readl
265 #undef writeb
266 #undef writew
267 #undef writel
268 #define readb inb
269 #define readw inw
270 #define readl inl
271 #define writeb outb
272 #define writew outw
273 #define writel outl
274 #endif
276 /* Offsets to the Command and Status Registers, "CSRs".
277 While similar to the Tulip, these registers are longword aligned.
278 Note: It's not useful to define symbolic names for every register bit in
279 the device. The name can only partially document the semantics and make
280 the driver longer and more difficult to read.
282 enum w840_offsets {
283 PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
284 RxRingPtr=0x0C, TxRingPtr=0x10,
285 IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
286 RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
287 CurRxDescAddr=0x30, CurRxBufAddr=0x34, /* Debug use */
288 MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
289 CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
292 /* Bits in the interrupt status/enable registers. */
293 /* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
294 enum intr_status_bits {
295 NormalIntr=0x10000, AbnormalIntr=0x8000,
296 IntrPCIErr=0x2000, TimerInt=0x800,
297 IntrRxDied=0x100, RxNoBuf=0x80, IntrRxDone=0x40,
298 TxFIFOUnderflow=0x20, RxErrIntr=0x10,
299 TxIdle=0x04, IntrTxStopped=0x02, IntrTxDone=0x01,
302 /* Bits in the NetworkConfig register. */
303 enum rx_mode_bits {
304 AcceptErr=0x80, AcceptRunt=0x40,
305 AcceptBroadcast=0x20, AcceptMulticast=0x10,
306 AcceptAllPhys=0x08, AcceptMyPhys=0x02,
309 enum mii_reg_bits {
310 MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
311 MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
314 /* The Tulip Rx and Tx buffer descriptors. */
315 struct w840_rx_desc {
316 s32 status;
317 s32 length;
318 u32 buffer1;
319 u32 buffer2;
322 struct w840_tx_desc {
323 s32 status;
324 s32 length;
325 u32 buffer1, buffer2;
328 /* Bits in network_desc.status */
329 enum desc_status_bits {
330 DescOwn=0x80000000, DescEndRing=0x02000000, DescUseLink=0x01000000,
331 DescWholePkt=0x60000000, DescStartPkt=0x20000000, DescEndPkt=0x40000000,
332 DescIntr=0x80000000,
335 #define MII_CNT 1 /* winbond only supports one MII */
336 struct netdev_private {
337 struct w840_rx_desc *rx_ring;
338 dma_addr_t rx_addr[RX_RING_SIZE];
339 struct w840_tx_desc *tx_ring;
340 dma_addr_t tx_addr[TX_RING_SIZE];
341 dma_addr_t ring_dma_addr;
342 /* The addresses of receive-in-place skbuffs. */
343 struct sk_buff* rx_skbuff[RX_RING_SIZE];
344 /* The saved address of a sent-in-place packet/buffer, for later free(). */
345 struct sk_buff* tx_skbuff[TX_RING_SIZE];
346 struct net_device_stats stats;
347 struct timer_list timer; /* Media monitoring timer. */
348 /* Frequently used values: keep some adjacent for cache effect. */
349 spinlock_t lock;
350 int chip_id, drv_flags;
351 struct pci_dev *pci_dev;
352 int csr6;
353 struct w840_rx_desc *rx_head_desc;
354 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
355 unsigned int rx_buf_sz; /* Based on MTU+slack. */
356 unsigned int cur_tx, dirty_tx;
357 unsigned int tx_q_bytes;
358 unsigned int tx_full; /* The Tx queue is full. */
359 /* MII transceiver section. */
360 int mii_cnt; /* MII device addresses. */
361 unsigned char phys[MII_CNT]; /* MII device addresses, but only the first is used */
362 u32 mii;
363 struct mii_if_info mii_if;
366 static int eeprom_read(long ioaddr, int location);
367 static int mdio_read(struct net_device *dev, int phy_id, int location);
368 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
369 static int netdev_open(struct net_device *dev);
370 static int update_link(struct net_device *dev);
371 static void netdev_timer(unsigned long data);
372 static void init_rxtx_rings(struct net_device *dev);
373 static void free_rxtx_rings(struct netdev_private *np);
374 static void init_registers(struct net_device *dev);
375 static void tx_timeout(struct net_device *dev);
376 static int alloc_ringdesc(struct net_device *dev);
377 static void free_ringdesc(struct netdev_private *np);
378 static int start_tx(struct sk_buff *skb, struct net_device *dev);
379 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
380 static void netdev_error(struct net_device *dev, int intr_status);
381 static int netdev_rx(struct net_device *dev);
382 static u32 __set_rx_mode(struct net_device *dev);
383 static void set_rx_mode(struct net_device *dev);
384 static struct net_device_stats *get_stats(struct net_device *dev);
385 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
386 static struct ethtool_ops netdev_ethtool_ops;
387 static int netdev_close(struct net_device *dev);
391 static int __devinit w840_probe1 (struct pci_dev *pdev,
392 const struct pci_device_id *ent)
394 struct net_device *dev;
395 struct netdev_private *np;
396 static int find_cnt;
397 int chip_idx = ent->driver_data;
398 int irq;
399 int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
400 long ioaddr;
402 i = pci_enable_device(pdev);
403 if (i) return i;
405 pci_set_master(pdev);
407 irq = pdev->irq;
409 if (pci_set_dma_mask(pdev,0xFFFFffff)) {
410 printk(KERN_WARNING "Winbond-840: Device %s disabled due to DMA limitations.\n",
411 pci_name(pdev));
412 return -EIO;
414 dev = alloc_etherdev(sizeof(*np));
415 if (!dev)
416 return -ENOMEM;
417 SET_MODULE_OWNER(dev);
418 SET_NETDEV_DEV(dev, &pdev->dev);
420 if (pci_request_regions(pdev, DRV_NAME))
421 goto err_out_netdev;
423 #ifdef USE_IO_OPS
424 ioaddr = pci_resource_start(pdev, 0);
425 #else
426 ioaddr = pci_resource_start(pdev, 1);
427 ioaddr = (long) ioremap (ioaddr, pci_id_tbl[chip_idx].io_size);
428 if (!ioaddr)
429 goto err_out_free_res;
430 #endif
432 for (i = 0; i < 3; i++)
433 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));
435 /* Reset the chip to erase previous misconfiguration.
436 No hold time required! */
437 writel(0x00000001, ioaddr + PCIBusCfg);
439 dev->base_addr = ioaddr;
440 dev->irq = irq;
442 np = dev->priv;
443 np->pci_dev = pdev;
444 np->chip_id = chip_idx;
445 np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
446 spin_lock_init(&np->lock);
447 np->mii_if.dev = dev;
448 np->mii_if.mdio_read = mdio_read;
449 np->mii_if.mdio_write = mdio_write;
451 pci_set_drvdata(pdev, dev);
453 if (dev->mem_start)
454 option = dev->mem_start;
456 /* The lower four bits are the media type. */
457 if (option > 0) {
458 if (option & 0x200)
459 np->mii_if.full_duplex = 1;
460 if (option & 15)
461 printk(KERN_INFO "%s: ignoring user supplied media type %d",
462 dev->name, option & 15);
464 if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
465 np->mii_if.full_duplex = 1;
467 if (np->mii_if.full_duplex)
468 np->mii_if.force_media = 1;
470 /* The chip-specific entries in the device structure. */
471 dev->open = &netdev_open;
472 dev->hard_start_xmit = &start_tx;
473 dev->stop = &netdev_close;
474 dev->get_stats = &get_stats;
475 dev->set_multicast_list = &set_rx_mode;
476 dev->do_ioctl = &netdev_ioctl;
477 dev->ethtool_ops = &netdev_ethtool_ops;
478 dev->tx_timeout = &tx_timeout;
479 dev->watchdog_timeo = TX_TIMEOUT;
481 i = register_netdev(dev);
482 if (i)
483 goto err_out_cleardev;
485 printk(KERN_INFO "%s: %s at 0x%lx, ",
486 dev->name, pci_id_tbl[chip_idx].name, ioaddr);
487 for (i = 0; i < 5; i++)
488 printk("%2.2x:", dev->dev_addr[i]);
489 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
491 if (np->drv_flags & CanHaveMII) {
492 int phy, phy_idx = 0;
493 for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
494 int mii_status = mdio_read(dev, phy, MII_BMSR);
495 if (mii_status != 0xffff && mii_status != 0x0000) {
496 np->phys[phy_idx++] = phy;
497 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
498 np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
499 mdio_read(dev, phy, MII_PHYSID2);
500 printk(KERN_INFO "%s: MII PHY %8.8xh found at address %d, status "
501 "0x%4.4x advertising %4.4x.\n",
502 dev->name, np->mii, phy, mii_status, np->mii_if.advertising);
505 np->mii_cnt = phy_idx;
506 np->mii_if.phy_id = np->phys[0];
507 if (phy_idx == 0) {
508 printk(KERN_WARNING "%s: MII PHY not found -- this device may "
509 "not operate correctly.\n", dev->name);
513 find_cnt++;
514 return 0;
516 err_out_cleardev:
517 pci_set_drvdata(pdev, NULL);
518 #ifndef USE_IO_OPS
519 iounmap((void *)ioaddr);
520 err_out_free_res:
521 #endif
522 pci_release_regions(pdev);
523 err_out_netdev:
524 free_netdev (dev);
525 return -ENODEV;
529 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
530 often serial bit streams generated by the host processor.
531 The example below is for the common 93c46 EEPROM, 64 16 bit words. */
533 /* Delay between EEPROM clock transitions.
534 No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
535 a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
536 made udelay() unreliable.
537 The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
538 deprecated.
540 #define eeprom_delay(ee_addr) readl(ee_addr)
542 enum EEPROM_Ctrl_Bits {
543 EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
544 EE_ChipSelect=0x801, EE_DataIn=0x08,
547 /* The EEPROM commands include the alway-set leading bit. */
548 enum EEPROM_Cmds {
549 EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
552 static int eeprom_read(long addr, int location)
554 int i;
555 int retval = 0;
556 long ee_addr = addr + EECtrl;
557 int read_cmd = location | EE_ReadCmd;
558 writel(EE_ChipSelect, ee_addr);
560 /* Shift the read command bits out. */
561 for (i = 10; i >= 0; i--) {
562 short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
563 writel(dataval, ee_addr);
564 eeprom_delay(ee_addr);
565 writel(dataval | EE_ShiftClk, ee_addr);
566 eeprom_delay(ee_addr);
568 writel(EE_ChipSelect, ee_addr);
569 eeprom_delay(ee_addr);
571 for (i = 16; i > 0; i--) {
572 writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
573 eeprom_delay(ee_addr);
574 retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0);
575 writel(EE_ChipSelect, ee_addr);
576 eeprom_delay(ee_addr);
579 /* Terminate the EEPROM access. */
580 writel(0, ee_addr);
581 return retval;
584 /* MII transceiver control section.
585 Read and write the MII registers using software-generated serial
586 MDIO protocol. See the MII specifications or DP83840A data sheet
587 for details.
589 The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
590 met by back-to-back 33Mhz PCI cycles. */
591 #define mdio_delay(mdio_addr) readl(mdio_addr)
593 /* Set iff a MII transceiver on any interface requires mdio preamble.
594 This only set with older transceivers, so the extra
595 code size of a per-interface flag is not worthwhile. */
596 static char mii_preamble_required = 1;
598 #define MDIO_WRITE0 (MDIO_EnbOutput)
599 #define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
601 /* Generate the preamble required for initial synchronization and
602 a few older transceivers. */
603 static void mdio_sync(long mdio_addr)
605 int bits = 32;
607 /* Establish sync by sending at least 32 logic ones. */
608 while (--bits >= 0) {
609 writel(MDIO_WRITE1, mdio_addr);
610 mdio_delay(mdio_addr);
611 writel(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
612 mdio_delay(mdio_addr);
616 static int mdio_read(struct net_device *dev, int phy_id, int location)
618 long mdio_addr = dev->base_addr + MIICtrl;
619 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
620 int i, retval = 0;
622 if (mii_preamble_required)
623 mdio_sync(mdio_addr);
625 /* Shift the read command bits out. */
626 for (i = 15; i >= 0; i--) {
627 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
629 writel(dataval, mdio_addr);
630 mdio_delay(mdio_addr);
631 writel(dataval | MDIO_ShiftClk, mdio_addr);
632 mdio_delay(mdio_addr);
634 /* Read the two transition, 16 data, and wire-idle bits. */
635 for (i = 20; i > 0; i--) {
636 writel(MDIO_EnbIn, mdio_addr);
637 mdio_delay(mdio_addr);
638 retval = (retval << 1) | ((readl(mdio_addr) & MDIO_DataIn) ? 1 : 0);
639 writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
640 mdio_delay(mdio_addr);
642 return (retval>>1) & 0xffff;
645 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
647 struct netdev_private *np = dev->priv;
648 long mdio_addr = dev->base_addr + MIICtrl;
649 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
650 int i;
652 if (location == 4 && phy_id == np->phys[0])
653 np->mii_if.advertising = value;
655 if (mii_preamble_required)
656 mdio_sync(mdio_addr);
658 /* Shift the command bits out. */
659 for (i = 31; i >= 0; i--) {
660 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
662 writel(dataval, mdio_addr);
663 mdio_delay(mdio_addr);
664 writel(dataval | MDIO_ShiftClk, mdio_addr);
665 mdio_delay(mdio_addr);
667 /* Clear out extra bits. */
668 for (i = 2; i > 0; i--) {
669 writel(MDIO_EnbIn, mdio_addr);
670 mdio_delay(mdio_addr);
671 writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
672 mdio_delay(mdio_addr);
674 return;
678 static int netdev_open(struct net_device *dev)
680 struct netdev_private *np = dev->priv;
681 long ioaddr = dev->base_addr;
682 int i;
684 writel(0x00000001, ioaddr + PCIBusCfg); /* Reset */
686 netif_device_detach(dev);
687 i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
688 if (i)
689 goto out_err;
691 if (debug > 1)
692 printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n",
693 dev->name, dev->irq);
695 if((i=alloc_ringdesc(dev)))
696 goto out_err;
698 spin_lock_irq(&np->lock);
699 netif_device_attach(dev);
700 init_registers(dev);
701 spin_unlock_irq(&np->lock);
703 netif_start_queue(dev);
704 if (debug > 2)
705 printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
707 /* Set the timer to check for link beat. */
708 init_timer(&np->timer);
709 np->timer.expires = jiffies + 1*HZ;
710 np->timer.data = (unsigned long)dev;
711 np->timer.function = &netdev_timer; /* timer handler */
712 add_timer(&np->timer);
713 return 0;
714 out_err:
715 netif_device_attach(dev);
716 return i;
719 #define MII_DAVICOM_DM9101 0x0181b800
721 static int update_link(struct net_device *dev)
723 struct netdev_private *np = dev->priv;
724 int duplex, fasteth, result, mii_reg;
726 /* BSMR */
727 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
729 if (mii_reg == 0xffff)
730 return np->csr6;
731 /* reread: the link status bit is sticky */
732 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
733 if (!(mii_reg & 0x4)) {
734 if (netif_carrier_ok(dev)) {
735 if (debug)
736 printk(KERN_INFO "%s: MII #%d reports no link. Disabling watchdog.\n",
737 dev->name, np->phys[0]);
738 netif_carrier_off(dev);
740 return np->csr6;
742 if (!netif_carrier_ok(dev)) {
743 if (debug)
744 printk(KERN_INFO "%s: MII #%d link is back. Enabling watchdog.\n",
745 dev->name, np->phys[0]);
746 netif_carrier_on(dev);
749 if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
750 /* If the link partner doesn't support autonegotiation
751 * the MII detects it's abilities with the "parallel detection".
752 * Some MIIs update the LPA register to the result of the parallel
753 * detection, some don't.
754 * The Davicom PHY [at least 0181b800] doesn't.
755 * Instead bit 9 and 13 of the BMCR are updated to the result
756 * of the negotiation..
758 mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
759 duplex = mii_reg & BMCR_FULLDPLX;
760 fasteth = mii_reg & BMCR_SPEED100;
761 } else {
762 int negotiated;
763 mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
764 negotiated = mii_reg & np->mii_if.advertising;
766 duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
767 fasteth = negotiated & 0x380;
769 duplex |= np->mii_if.force_media;
770 /* remove fastether and fullduplex */
771 result = np->csr6 & ~0x20000200;
772 if (duplex)
773 result |= 0x200;
774 if (fasteth)
775 result |= 0x20000000;
776 if (result != np->csr6 && debug)
777 printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
778 dev->name, fasteth ? 100 : 10,
779 duplex ? "full" : "half", np->phys[0]);
780 return result;
783 #define RXTX_TIMEOUT 2000
784 static inline void update_csr6(struct net_device *dev, int new)
786 struct netdev_private *np = dev->priv;
787 long ioaddr = dev->base_addr;
788 int limit = RXTX_TIMEOUT;
790 if (!netif_device_present(dev))
791 new = 0;
792 if (new==np->csr6)
793 return;
794 /* stop both Tx and Rx processes */
795 writel(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
796 /* wait until they have really stopped */
797 for (;;) {
798 int csr5 = readl(ioaddr + IntrStatus);
799 int t;
801 t = (csr5 >> 17) & 0x07;
802 if (t==0||t==1) {
803 /* rx stopped */
804 t = (csr5 >> 20) & 0x07;
805 if (t==0||t==1)
806 break;
809 limit--;
810 if(!limit) {
811 printk(KERN_INFO "%s: couldn't stop rxtx, IntrStatus %xh.\n",
812 dev->name, csr5);
813 break;
815 udelay(1);
817 np->csr6 = new;
818 /* and restart them with the new configuration */
819 writel(np->csr6, ioaddr + NetworkConfig);
820 if (new & 0x200)
821 np->mii_if.full_duplex = 1;
824 static void netdev_timer(unsigned long data)
826 struct net_device *dev = (struct net_device *)data;
827 struct netdev_private *np = dev->priv;
828 long ioaddr = dev->base_addr;
830 if (debug > 2)
831 printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
832 "config %8.8x.\n",
833 dev->name, (int)readl(ioaddr + IntrStatus),
834 (int)readl(ioaddr + NetworkConfig));
835 spin_lock_irq(&np->lock);
836 update_csr6(dev, update_link(dev));
837 spin_unlock_irq(&np->lock);
838 np->timer.expires = jiffies + 10*HZ;
839 add_timer(&np->timer);
842 static void init_rxtx_rings(struct net_device *dev)
844 struct netdev_private *np = dev->priv;
845 int i;
847 np->rx_head_desc = &np->rx_ring[0];
848 np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];
850 /* Initial all Rx descriptors. */
851 for (i = 0; i < RX_RING_SIZE; i++) {
852 np->rx_ring[i].length = np->rx_buf_sz;
853 np->rx_ring[i].status = 0;
854 np->rx_skbuff[i] = NULL;
856 /* Mark the last entry as wrapping the ring. */
857 np->rx_ring[i-1].length |= DescEndRing;
859 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
860 for (i = 0; i < RX_RING_SIZE; i++) {
861 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
862 np->rx_skbuff[i] = skb;
863 if (skb == NULL)
864 break;
865 skb->dev = dev; /* Mark as being used by this device. */
866 np->rx_addr[i] = pci_map_single(np->pci_dev,skb->tail,
867 skb->len,PCI_DMA_FROMDEVICE);
869 np->rx_ring[i].buffer1 = np->rx_addr[i];
870 np->rx_ring[i].status = DescOwn;
873 np->cur_rx = 0;
874 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
876 /* Initialize the Tx descriptors */
877 for (i = 0; i < TX_RING_SIZE; i++) {
878 np->tx_skbuff[i] = NULL;
879 np->tx_ring[i].status = 0;
881 np->tx_full = 0;
882 np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;
884 writel(np->ring_dma_addr, dev->base_addr + RxRingPtr);
885 writel(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
886 dev->base_addr + TxRingPtr);
890 static void free_rxtx_rings(struct netdev_private* np)
892 int i;
893 /* Free all the skbuffs in the Rx queue. */
894 for (i = 0; i < RX_RING_SIZE; i++) {
895 np->rx_ring[i].status = 0;
896 if (np->rx_skbuff[i]) {
897 pci_unmap_single(np->pci_dev,
898 np->rx_addr[i],
899 np->rx_skbuff[i]->len,
900 PCI_DMA_FROMDEVICE);
901 dev_kfree_skb(np->rx_skbuff[i]);
903 np->rx_skbuff[i] = NULL;
905 for (i = 0; i < TX_RING_SIZE; i++) {
906 if (np->tx_skbuff[i]) {
907 pci_unmap_single(np->pci_dev,
908 np->tx_addr[i],
909 np->tx_skbuff[i]->len,
910 PCI_DMA_TODEVICE);
911 dev_kfree_skb(np->tx_skbuff[i]);
913 np->tx_skbuff[i] = NULL;
917 static void init_registers(struct net_device *dev)
919 struct netdev_private *np = dev->priv;
920 long ioaddr = dev->base_addr;
921 int i;
923 for (i = 0; i < 6; i++)
924 writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
926 /* Initialize other registers. */
927 #ifdef __BIG_ENDIAN
928 i = (1<<20); /* Big-endian descriptors */
929 #else
930 i = 0;
931 #endif
932 i |= (0x04<<2); /* skip length 4 u32 */
933 i |= 0x02; /* give Rx priority */
935 /* Configure the PCI bus bursts and FIFO thresholds.
936 486: Set 8 longword cache alignment, 8 longword burst.
937 586: Set 16 longword cache alignment, no burst limit.
938 Cache alignment bits 15:14 Burst length 13:8
939 0000 <not allowed> 0000 align to cache 0800 8 longwords
940 4000 8 longwords 0100 1 longword 1000 16 longwords
941 8000 16 longwords 0200 2 longwords 2000 32 longwords
942 C000 32 longwords 0400 4 longwords */
944 #if defined (__i386__) && !defined(MODULE)
945 /* When not a module we can work around broken '486 PCI boards. */
946 if (boot_cpu_data.x86 <= 4) {
947 i |= 0x4800;
948 printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache "
949 "alignment to 8 longwords.\n", dev->name);
950 } else {
951 i |= 0xE000;
953 #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
954 i |= 0xE000;
955 #elif defined(__sparc__)
956 i |= 0x4800;
957 #else
958 #warning Processor architecture undefined
959 i |= 0x4800;
960 #endif
961 writel(i, ioaddr + PCIBusCfg);
963 np->csr6 = 0;
964 /* 128 byte Tx threshold;
965 Transmit on; Receive on; */
966 update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
968 /* Clear and Enable interrupts by setting the interrupt mask. */
969 writel(0x1A0F5, ioaddr + IntrStatus);
970 writel(0x1A0F5, ioaddr + IntrEnable);
972 writel(0, ioaddr + RxStartDemand);
975 static void tx_timeout(struct net_device *dev)
977 struct netdev_private *np = dev->priv;
978 long ioaddr = dev->base_addr;
980 printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
981 " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
984 int i;
985 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
986 for (i = 0; i < RX_RING_SIZE; i++)
987 printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
988 printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
989 for (i = 0; i < TX_RING_SIZE; i++)
990 printk(" %8.8x", np->tx_ring[i].status);
991 printk("\n");
993 printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n",
994 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
995 printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",readl(ioaddr+0x4C));
997 disable_irq(dev->irq);
998 spin_lock_irq(&np->lock);
1000 * Under high load dirty_tx and the internal tx descriptor pointer
1001 * come out of sync, thus perform a software reset and reinitialize
1002 * everything.
1005 writel(1, dev->base_addr+PCIBusCfg);
1006 udelay(1);
1008 free_rxtx_rings(np);
1009 init_rxtx_rings(dev);
1010 init_registers(dev);
1011 spin_unlock_irq(&np->lock);
1012 enable_irq(dev->irq);
1014 netif_wake_queue(dev);
1015 dev->trans_start = jiffies;
1016 np->stats.tx_errors++;
1017 return;
1020 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1021 static int alloc_ringdesc(struct net_device *dev)
1023 struct netdev_private *np = dev->priv;
1025 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1027 np->rx_ring = pci_alloc_consistent(np->pci_dev,
1028 sizeof(struct w840_rx_desc)*RX_RING_SIZE +
1029 sizeof(struct w840_tx_desc)*TX_RING_SIZE,
1030 &np->ring_dma_addr);
1031 if(!np->rx_ring)
1032 return -ENOMEM;
1033 init_rxtx_rings(dev);
1034 return 0;
1037 static void free_ringdesc(struct netdev_private *np)
1039 pci_free_consistent(np->pci_dev,
1040 sizeof(struct w840_rx_desc)*RX_RING_SIZE +
1041 sizeof(struct w840_tx_desc)*TX_RING_SIZE,
1042 np->rx_ring, np->ring_dma_addr);
1046 static int start_tx(struct sk_buff *skb, struct net_device *dev)
1048 struct netdev_private *np = dev->priv;
1049 unsigned entry;
1051 /* Caution: the write order is important here, set the field
1052 with the "ownership" bits last. */
1054 /* Calculate the next Tx descriptor entry. */
1055 entry = np->cur_tx % TX_RING_SIZE;
1057 np->tx_addr[entry] = pci_map_single(np->pci_dev,
1058 skb->data,skb->len, PCI_DMA_TODEVICE);
1059 np->tx_skbuff[entry] = skb;
1061 np->tx_ring[entry].buffer1 = np->tx_addr[entry];
1062 if (skb->len < TX_BUFLIMIT) {
1063 np->tx_ring[entry].length = DescWholePkt | skb->len;
1064 } else {
1065 int len = skb->len - TX_BUFLIMIT;
1067 np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
1068 np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
1070 if(entry == TX_RING_SIZE-1)
1071 np->tx_ring[entry].length |= DescEndRing;
1073 /* Now acquire the irq spinlock.
1074 * The difficult race is the the ordering between
1075 * increasing np->cur_tx and setting DescOwn:
1076 * - if np->cur_tx is increased first the interrupt
1077 * handler could consider the packet as transmitted
1078 * since DescOwn is cleared.
1079 * - If DescOwn is set first the NIC could report the
1080 * packet as sent, but the interrupt handler would ignore it
1081 * since the np->cur_tx was not yet increased.
1083 spin_lock_irq(&np->lock);
1084 np->cur_tx++;
1086 wmb(); /* flush length, buffer1, buffer2 */
1087 np->tx_ring[entry].status = DescOwn;
1088 wmb(); /* flush status and kick the hardware */
1089 writel(0, dev->base_addr + TxStartDemand);
1090 np->tx_q_bytes += skb->len;
1091 /* Work around horrible bug in the chip by marking the queue as full
1092 when we do not have FIFO room for a maximum sized packet. */
1093 if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
1094 ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
1095 netif_stop_queue(dev);
1096 wmb();
1097 np->tx_full = 1;
1099 spin_unlock_irq(&np->lock);
1101 dev->trans_start = jiffies;
1103 if (debug > 4) {
1104 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
1105 dev->name, np->cur_tx, entry);
1107 return 0;
1110 static void netdev_tx_done(struct net_device *dev)
1112 struct netdev_private *np = dev->priv;
1113 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1114 int entry = np->dirty_tx % TX_RING_SIZE;
1115 int tx_status = np->tx_ring[entry].status;
1117 if (tx_status < 0)
1118 break;
1119 if (tx_status & 0x8000) { /* There was an error, log it. */
1120 #ifndef final_version
1121 if (debug > 1)
1122 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1123 dev->name, tx_status);
1124 #endif
1125 np->stats.tx_errors++;
1126 if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
1127 if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
1128 if (tx_status & 0x0200) np->stats.tx_window_errors++;
1129 if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
1130 if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
1131 np->stats.tx_heartbeat_errors++;
1132 } else {
1133 #ifndef final_version
1134 if (debug > 3)
1135 printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %8.8x.\n",
1136 dev->name, entry, tx_status);
1137 #endif
1138 np->stats.tx_bytes += np->tx_skbuff[entry]->len;
1139 np->stats.collisions += (tx_status >> 3) & 15;
1140 np->stats.tx_packets++;
1142 /* Free the original skb. */
1143 pci_unmap_single(np->pci_dev,np->tx_addr[entry],
1144 np->tx_skbuff[entry]->len,
1145 PCI_DMA_TODEVICE);
1146 np->tx_q_bytes -= np->tx_skbuff[entry]->len;
1147 dev_kfree_skb_irq(np->tx_skbuff[entry]);
1148 np->tx_skbuff[entry] = NULL;
1150 if (np->tx_full &&
1151 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
1152 np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
1153 /* The ring is no longer full, clear tbusy. */
1154 np->tx_full = 0;
1155 wmb();
1156 netif_wake_queue(dev);
1160 /* The interrupt handler does all of the Rx thread work and cleans up
1161 after the Tx thread. */
1162 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
1164 struct net_device *dev = (struct net_device *)dev_instance;
1165 struct netdev_private *np = dev->priv;
1166 long ioaddr = dev->base_addr;
1167 int work_limit = max_interrupt_work;
1168 int handled = 0;
1170 if (!netif_device_present(dev))
1171 return IRQ_NONE;
1172 do {
1173 u32 intr_status = readl(ioaddr + IntrStatus);
1175 /* Acknowledge all of the current interrupt sources ASAP. */
1176 writel(intr_status & 0x001ffff, ioaddr + IntrStatus);
1178 if (debug > 4)
1179 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1180 dev->name, intr_status);
1182 if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
1183 break;
1185 handled = 1;
1187 if (intr_status & (IntrRxDone | RxNoBuf))
1188 netdev_rx(dev);
1189 if (intr_status & RxNoBuf)
1190 writel(0, ioaddr + RxStartDemand);
1192 if (intr_status & (TxIdle | IntrTxDone) &&
1193 np->cur_tx != np->dirty_tx) {
1194 spin_lock(&np->lock);
1195 netdev_tx_done(dev);
1196 spin_unlock(&np->lock);
1199 /* Abnormal error summary/uncommon events handlers. */
1200 if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr |
1201 TimerInt | IntrTxStopped))
1202 netdev_error(dev, intr_status);
1204 if (--work_limit < 0) {
1205 printk(KERN_WARNING "%s: Too much work at interrupt, "
1206 "status=0x%4.4x.\n", dev->name, intr_status);
1207 /* Set the timer to re-enable the other interrupts after
1208 10*82usec ticks. */
1209 spin_lock(&np->lock);
1210 if (netif_device_present(dev)) {
1211 writel(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
1212 writel(10, ioaddr + GPTimer);
1214 spin_unlock(&np->lock);
1215 break;
1217 } while (1);
1219 if (debug > 3)
1220 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1221 dev->name, (int)readl(ioaddr + IntrStatus));
1222 return IRQ_RETVAL(handled);
1225 /* This routine is logically part of the interrupt handler, but separated
1226 for clarity and better register allocation. */
1227 static int netdev_rx(struct net_device *dev)
1229 struct netdev_private *np = dev->priv;
1230 int entry = np->cur_rx % RX_RING_SIZE;
1231 int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
1233 if (debug > 4) {
1234 printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
1235 entry, np->rx_ring[entry].status);
1238 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1239 while (--work_limit >= 0) {
1240 struct w840_rx_desc *desc = np->rx_head_desc;
1241 s32 status = desc->status;
1243 if (debug > 4)
1244 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1245 status);
1246 if (status < 0)
1247 break;
1248 if ((status & 0x38008300) != 0x0300) {
1249 if ((status & 0x38000300) != 0x0300) {
1250 /* Ingore earlier buffers. */
1251 if ((status & 0xffff) != 0x7fff) {
1252 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
1253 "multiple buffers, entry %#x status %4.4x!\n",
1254 dev->name, np->cur_rx, status);
1255 np->stats.rx_length_errors++;
1257 } else if (status & 0x8000) {
1258 /* There was a fatal error. */
1259 if (debug > 2)
1260 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
1261 dev->name, status);
1262 np->stats.rx_errors++; /* end of a packet.*/
1263 if (status & 0x0890) np->stats.rx_length_errors++;
1264 if (status & 0x004C) np->stats.rx_frame_errors++;
1265 if (status & 0x0002) np->stats.rx_crc_errors++;
1267 } else {
1268 struct sk_buff *skb;
1269 /* Omit the four octet CRC from the length. */
1270 int pkt_len = ((status >> 16) & 0x7ff) - 4;
1272 #ifndef final_version
1273 if (debug > 4)
1274 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1275 " status %x.\n", pkt_len, status);
1276 #endif
1277 /* Check if the packet is long enough to accept without copying
1278 to a minimally-sized skbuff. */
1279 if (pkt_len < rx_copybreak
1280 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1281 skb->dev = dev;
1282 skb_reserve(skb, 2); /* 16 byte align the IP header */
1283 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
1284 np->rx_skbuff[entry]->len,
1285 PCI_DMA_FROMDEVICE);
1286 eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
1287 skb_put(skb, pkt_len);
1288 pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
1289 np->rx_skbuff[entry]->len,
1290 PCI_DMA_FROMDEVICE);
1291 } else {
1292 pci_unmap_single(np->pci_dev,np->rx_addr[entry],
1293 np->rx_skbuff[entry]->len,
1294 PCI_DMA_FROMDEVICE);
1295 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1296 np->rx_skbuff[entry] = NULL;
1298 #ifndef final_version /* Remove after testing. */
1299 /* You will want this info for the initial debug. */
1300 if (debug > 5)
1301 printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
1302 "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
1303 "%d.%d.%d.%d.\n",
1304 skb->data[0], skb->data[1], skb->data[2], skb->data[3],
1305 skb->data[4], skb->data[5], skb->data[6], skb->data[7],
1306 skb->data[8], skb->data[9], skb->data[10],
1307 skb->data[11], skb->data[12], skb->data[13],
1308 skb->data[14], skb->data[15], skb->data[16],
1309 skb->data[17]);
1310 #endif
1311 skb->protocol = eth_type_trans(skb, dev);
1312 netif_rx(skb);
1313 dev->last_rx = jiffies;
1314 np->stats.rx_packets++;
1315 np->stats.rx_bytes += pkt_len;
1317 entry = (++np->cur_rx) % RX_RING_SIZE;
1318 np->rx_head_desc = &np->rx_ring[entry];
1321 /* Refill the Rx ring buffers. */
1322 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1323 struct sk_buff *skb;
1324 entry = np->dirty_rx % RX_RING_SIZE;
1325 if (np->rx_skbuff[entry] == NULL) {
1326 skb = dev_alloc_skb(np->rx_buf_sz);
1327 np->rx_skbuff[entry] = skb;
1328 if (skb == NULL)
1329 break; /* Better luck next round. */
1330 skb->dev = dev; /* Mark as being used by this device. */
1331 np->rx_addr[entry] = pci_map_single(np->pci_dev,
1332 skb->tail,
1333 skb->len, PCI_DMA_FROMDEVICE);
1334 np->rx_ring[entry].buffer1 = np->rx_addr[entry];
1336 wmb();
1337 np->rx_ring[entry].status = DescOwn;
1340 return 0;
1343 static void netdev_error(struct net_device *dev, int intr_status)
1345 long ioaddr = dev->base_addr;
1346 struct netdev_private *np = dev->priv;
1348 if (debug > 2)
1349 printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
1350 dev->name, intr_status);
1351 if (intr_status == 0xffffffff)
1352 return;
1353 spin_lock(&np->lock);
1354 if (intr_status & TxFIFOUnderflow) {
1355 int new;
1356 /* Bump up the Tx threshold */
1357 #if 0
1358 /* This causes lots of dropped packets,
1359 * and under high load even tx_timeouts
1361 new = np->csr6 + 0x4000;
1362 #else
1363 new = (np->csr6 >> 14)&0x7f;
1364 if (new < 64)
1365 new *= 2;
1366 else
1367 new = 127; /* load full packet before starting */
1368 new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
1369 #endif
1370 printk(KERN_DEBUG "%s: Tx underflow, new csr6 %8.8x.\n",
1371 dev->name, new);
1372 update_csr6(dev, new);
1374 if (intr_status & IntrRxDied) { /* Missed a Rx frame. */
1375 np->stats.rx_errors++;
1377 if (intr_status & TimerInt) {
1378 /* Re-enable other interrupts. */
1379 if (netif_device_present(dev))
1380 writel(0x1A0F5, ioaddr + IntrEnable);
1382 np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
1383 writel(0, ioaddr + RxStartDemand);
1384 spin_unlock(&np->lock);
1387 static struct net_device_stats *get_stats(struct net_device *dev)
1389 long ioaddr = dev->base_addr;
1390 struct netdev_private *np = dev->priv;
1392 /* The chip only need report frame silently dropped. */
1393 spin_lock_irq(&np->lock);
1394 if (netif_running(dev) && netif_device_present(dev))
1395 np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
1396 spin_unlock_irq(&np->lock);
1398 return &np->stats;
1402 static u32 __set_rx_mode(struct net_device *dev)
1404 long ioaddr = dev->base_addr;
1405 u32 mc_filter[2]; /* Multicast hash filter */
1406 u32 rx_mode;
1408 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1409 /* Unconditionally log net taps. */
1410 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1411 memset(mc_filter, 0xff, sizeof(mc_filter));
1412 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys
1413 | AcceptMyPhys;
1414 } else if ((dev->mc_count > multicast_filter_limit)
1415 || (dev->flags & IFF_ALLMULTI)) {
1416 /* Too many to match, or accept all multicasts. */
1417 memset(mc_filter, 0xff, sizeof(mc_filter));
1418 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1419 } else {
1420 struct dev_mc_list *mclist;
1421 int i;
1422 memset(mc_filter, 0, sizeof(mc_filter));
1423 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1424 i++, mclist = mclist->next) {
1425 int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
1426 filterbit &= 0x3f;
1427 mc_filter[filterbit >> 5] |= cpu_to_le32(1 << (filterbit & 31));
1429 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1431 writel(mc_filter[0], ioaddr + MulticastFilter0);
1432 writel(mc_filter[1], ioaddr + MulticastFilter1);
1433 return rx_mode;
1436 static void set_rx_mode(struct net_device *dev)
1438 struct netdev_private *np = dev->priv;
1439 u32 rx_mode = __set_rx_mode(dev);
1440 spin_lock_irq(&np->lock);
1441 update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
1442 spin_unlock_irq(&np->lock);
1445 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1447 struct netdev_private *np = dev->priv;
1449 strcpy (info->driver, DRV_NAME);
1450 strcpy (info->version, DRV_VERSION);
1451 strcpy (info->bus_info, pci_name(np->pci_dev));
1454 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1456 struct netdev_private *np = dev->priv;
1457 int rc;
1459 spin_lock_irq(&np->lock);
1460 rc = mii_ethtool_gset(&np->mii_if, cmd);
1461 spin_unlock_irq(&np->lock);
1463 return rc;
1466 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1468 struct netdev_private *np = dev->priv;
1469 int rc;
1471 spin_lock_irq(&np->lock);
1472 rc = mii_ethtool_sset(&np->mii_if, cmd);
1473 spin_unlock_irq(&np->lock);
1475 return rc;
1478 static int netdev_nway_reset(struct net_device *dev)
1480 struct netdev_private *np = dev->priv;
1481 return mii_nway_restart(&np->mii_if);
1484 static u32 netdev_get_link(struct net_device *dev)
1486 struct netdev_private *np = dev->priv;
1487 return mii_link_ok(&np->mii_if);
1490 static u32 netdev_get_msglevel(struct net_device *dev)
1492 return debug;
1495 static void netdev_set_msglevel(struct net_device *dev, u32 value)
1497 debug = value;
1500 static struct ethtool_ops netdev_ethtool_ops = {
1501 .get_drvinfo = netdev_get_drvinfo,
1502 .get_settings = netdev_get_settings,
1503 .set_settings = netdev_set_settings,
1504 .nway_reset = netdev_nway_reset,
1505 .get_link = netdev_get_link,
1506 .get_msglevel = netdev_get_msglevel,
1507 .set_msglevel = netdev_set_msglevel,
1508 .get_sg = ethtool_op_get_sg,
1509 .get_tx_csum = ethtool_op_get_tx_csum,
1512 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1514 struct mii_ioctl_data *data = if_mii(rq);
1515 struct netdev_private *np = netdev_priv(dev);
1517 switch(cmd) {
1518 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
1519 data->phy_id = ((struct netdev_private *)dev->priv)->phys[0] & 0x1f;
1520 /* Fall Through */
1522 case SIOCGMIIREG: /* Read MII PHY register. */
1523 spin_lock_irq(&np->lock);
1524 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
1525 spin_unlock_irq(&np->lock);
1526 return 0;
1528 case SIOCSMIIREG: /* Write MII PHY register. */
1529 if (!capable(CAP_NET_ADMIN))
1530 return -EPERM;
1531 spin_lock_irq(&np->lock);
1532 mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1533 spin_unlock_irq(&np->lock);
1534 return 0;
1535 default:
1536 return -EOPNOTSUPP;
1540 static int netdev_close(struct net_device *dev)
1542 long ioaddr = dev->base_addr;
1543 struct netdev_private *np = dev->priv;
1545 netif_stop_queue(dev);
1547 if (debug > 1) {
1548 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
1549 "Config %8.8x.\n", dev->name, (int)readl(ioaddr + IntrStatus),
1550 (int)readl(ioaddr + NetworkConfig));
1551 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1552 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1555 /* Stop the chip's Tx and Rx processes. */
1556 spin_lock_irq(&np->lock);
1557 netif_device_detach(dev);
1558 update_csr6(dev, 0);
1559 writel(0x0000, ioaddr + IntrEnable);
1560 spin_unlock_irq(&np->lock);
1562 free_irq(dev->irq, dev);
1563 wmb();
1564 netif_device_attach(dev);
1566 if (readl(ioaddr + NetworkConfig) != 0xffffffff)
1567 np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
1569 #ifdef __i386__
1570 if (debug > 2) {
1571 int i;
1573 printk(KERN_DEBUG" Tx ring at %8.8x:\n",
1574 (int)np->tx_ring);
1575 for (i = 0; i < TX_RING_SIZE; i++)
1576 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x.\n",
1577 i, np->tx_ring[i].length,
1578 np->tx_ring[i].status, np->tx_ring[i].buffer1);
1579 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
1580 (int)np->rx_ring);
1581 for (i = 0; i < RX_RING_SIZE; i++) {
1582 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1583 i, np->rx_ring[i].length,
1584 np->rx_ring[i].status, np->rx_ring[i].buffer1);
1587 #endif /* __i386__ debugging only */
1589 del_timer_sync(&np->timer);
1591 free_rxtx_rings(np);
1592 free_ringdesc(np);
1594 return 0;
1597 static void __devexit w840_remove1 (struct pci_dev *pdev)
1599 struct net_device *dev = pci_get_drvdata(pdev);
1601 if (dev) {
1602 unregister_netdev(dev);
1603 pci_release_regions(pdev);
1604 #ifndef USE_IO_OPS
1605 iounmap((char *)(dev->base_addr));
1606 #endif
1607 free_netdev(dev);
1610 pci_set_drvdata(pdev, NULL);
1613 #ifdef CONFIG_PM
1616 * suspend/resume synchronization:
1617 * - open, close, do_ioctl:
1618 * rtnl_lock, & netif_device_detach after the rtnl_unlock.
1619 * - get_stats:
1620 * spin_lock_irq(np->lock), doesn't touch hw if not present
1621 * - hard_start_xmit:
1622 * netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
1623 * - tx_timeout:
1624 * netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
1625 * - set_multicast_list
1626 * netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
1627 * - interrupt handler
1628 * doesn't touch hw if not present, synchronize_irq waits for
1629 * running instances of the interrupt handler.
1631 * Disabling hw requires clearing csr6 & IntrEnable.
 * update_csr6 & all functions that write IntrEnable check netif_device_present
 * before setting any bits.
1635 * Detach must occur under spin_unlock_irq(), interrupts from a detached
1636 * device would cause an irq storm.
1638 static int w840_suspend (struct pci_dev *pdev, u32 state)
1640 struct net_device *dev = pci_get_drvdata (pdev);
1641 struct netdev_private *np = dev->priv;
1642 long ioaddr = dev->base_addr;
1644 rtnl_lock();
1645 if (netif_running (dev)) {
1646 del_timer_sync(&np->timer);
1648 spin_lock_irq(&np->lock);
1649 netif_device_detach(dev);
1650 update_csr6(dev, 0);
1651 writel(0, ioaddr + IntrEnable);
1652 netif_stop_queue(dev);
1653 spin_unlock_irq(&np->lock);
1655 spin_unlock_wait(&dev->xmit_lock);
1656 synchronize_irq(dev->irq);
1658 np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
1660 /* no more hardware accesses behind this line. */
1662 if (np->csr6) BUG();
1663 if (readl(ioaddr + IntrEnable)) BUG();
1665 /* pci_power_off(pdev, -1); */
1667 free_rxtx_rings(np);
1668 } else {
1669 netif_device_detach(dev);
1671 rtnl_unlock();
1672 return 0;
1675 static int w840_resume (struct pci_dev *pdev)
1677 struct net_device *dev = pci_get_drvdata (pdev);
1678 struct netdev_private *np = dev->priv;
1680 rtnl_lock();
1681 if (netif_device_present(dev))
1682 goto out; /* device not suspended */
1683 if (netif_running(dev)) {
1684 pci_enable_device(pdev);
1685 /* pci_power_on(pdev); */
1687 spin_lock_irq(&np->lock);
1688 writel(1, dev->base_addr+PCIBusCfg);
1689 readl(dev->base_addr+PCIBusCfg);
1690 udelay(1);
1691 netif_device_attach(dev);
1692 init_rxtx_rings(dev);
1693 init_registers(dev);
1694 spin_unlock_irq(&np->lock);
1696 netif_wake_queue(dev);
1698 mod_timer(&np->timer, jiffies + 1*HZ);
1699 } else {
1700 netif_device_attach(dev);
1702 out:
1703 rtnl_unlock();
1704 return 0;
1706 #endif
1708 static struct pci_driver w840_driver = {
1709 .name = DRV_NAME,
1710 .id_table = w840_pci_tbl,
1711 .probe = w840_probe1,
1712 .remove = __devexit_p(w840_remove1),
1713 #ifdef CONFIG_PM
1714 .suspend = w840_suspend,
1715 .resume = w840_resume,
1716 #endif
1719 static int __init w840_init(void)
1721 printk(version);
1722 return pci_module_init(&w840_driver);
1725 static void __exit w840_exit(void)
1727 pci_unregister_driver(&w840_driver);
1730 module_init(w840_init);
1731 module_exit(w840_exit);