/* winbond-840.c: A Linux PCI network adapter device driver. */
/*
	Written 1998-2001 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/drivers.html

	Do not remove the copyright information.
	Do not change the version information unless an improvement has been made.
	Merely removing my name, as Compex has done in the past, does not count
	as an improvement.

	Changelog:
	* ported to 2.4
		???
	* spin lock update, memory barriers, new style dma mappings
		limit each tx buffer to < 1024 bytes
		remove DescIntr from Rx descriptors (that's a Tx flag)
		remove next pointer from Tx descriptors
		synchronize tx_q_bytes
		software reset in tx_timeout
			Copyright (C) 2000 Manfred Spraul
	* further cleanups
		power management.
		support for big endian descriptors
			Copyright (C) 2001 Manfred Spraul
	* ethtool support (jgarzik)
	* Replace some MII-related magic numbers with constants (jgarzik)

	TODO:
	* enable pci_power_off
	* Wake-On-LAN
*/

#define DRV_NAME	"winbond-840"
#define DRV_VERSION	"1.01-e"
#define DRV_RELDATE	"Sep-11-2006"
/* Automatically extracted configuration info:
probe-func: winbond840_probe
config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840

c-help-name: Winbond W89c840 PCI Ethernet support
c-help-symbol: CONFIG_WINBOND_840
c-help: This driver is for the Winbond W89c840 chip.  It also works with
c-help: the TX9882 chip on the Compex RL100-ATX board.
c-help: More specific information and updates are available from
c-help: http://www.scyld.com/network/drivers.html
*/
/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The '840 uses a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;
/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'.
*/
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_QUEUE_LEN	10		/* Limit ring entries actually used. */
#define TX_QUEUE_LEN_RESTART	5

#define TX_BUFLIMIT	(1024-128)

/* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
   To avoid overflowing we don't queue again until we have room for a
   full-size packet.
 */
#define TX_FIFO_SIZE (2048)
#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>

#include "tulip.h"

#undef PKT_BUF_SZ			/* tulip.h also defines this */
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
/* These identify the driver base version and may not be removed. */
static char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE "  Donald Becker <becker@scyld.com>\n"
KERN_INFO "  http://www.scyld.com/network/drivers.html\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");
/*
				Theory of Operation

I. Board Compatibility

This driver is for the Winbond w89c840 chip.

II. Board-specific settings

None.

III. Driver operation

This chip is very similar to the Digital 21*4* "Tulip" family.  The first
twelve registers and the descriptor format are nearly identical.  Read a
Tulip manual for operational details.

A significant difference is that the multicast filter and station address are
stored in registers rather than loaded through a pseudo-transmit packet.

Unlike the Tulip, transmit buffers are limited to 1KB.  To transmit a
full-sized packet we must use both data buffers in a descriptor.  Thus the
driver uses ring mode where descriptors are implicitly sequential in memory,
rather than using the second descriptor address as a chain pointer to
subsequent descriptors.

IV. Notes

If you are going to almost clone a Tulip, why not go all the way and avoid
the need for a new driver?

IVb. References

http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
http://www.winbond.com.tw/

IVc. Errata

A horrible bug exists in the transmit FIFO.  Apparently the chip doesn't
correctly detect a full FIFO, and queuing more than 2048 bytes may result in
silent data corruption.

Test with 'ping -s 10000' on a fast computer.

*/

/*
  PCI probe table.
*/
enum chip_capability_flags {
	CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};

static const struct pci_device_id w840_pci_tbl[] = {
	{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153,     0, 0, 0 },
	{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ }
};
MODULE_DEVICE_TABLE(pci, w840_pci_tbl);

enum {
	netdev_res_size	= 128,		/* size of PCI BAR resource */
};

struct pci_id_info {
	const char *name;
	int drv_flags;			/* Driver use, intended as capability flags. */
};

static const struct pci_id_info pci_id_tbl[] __devinitdata = {
	{				/* Sometime a Level-One switch card. */
	  "Winbond W89c840",	CanHaveMII | HasBrokenTx | FDXOnNoMII},
	{ "Winbond W89c840",	CanHaveMII | HasBrokenTx},
	{ "Compex RL100-ATX",	CanHaveMII | HasBrokenTx},
	{ }	/* terminate list. */
};
/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses.  See CONFIG_TULIP_MMIO in .config.
*/

/* Offsets to the Command and Status Registers, "CSRs".
   While similar to the Tulip, these registers are longword aligned.
   Note: It's not useful to define symbolic names for every register bit in
   the device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
*/
enum w840_offsets {
	PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
	RxRingPtr=0x0C, TxRingPtr=0x10,
	IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
	RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
	CurRxDescAddr=0x30, CurRxBufAddr=0x34,			/* Debug use */
	MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
	CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
};

/* Bits in the NetworkConfig register. */
enum rx_mode_bits {
	AcceptErr=0x80,
	RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
	RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
};

enum mii_reg_bits {
	MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
	MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
};

/* The Tulip Rx and Tx buffer descriptors. */
struct w840_rx_desc {
	s32 status;
	s32 length;
	u32 buffer1;
	u32 buffer2;
};

struct w840_tx_desc {
	s32 status;
	s32 length;
	u32 buffer1, buffer2;
};

#define MII_CNT		1 /* winbond only supports one MII */
struct netdev_private {
	struct w840_rx_desc *rx_ring;
	dma_addr_t	rx_addr[RX_RING_SIZE];
	struct w840_tx_desc *tx_ring;
	dma_addr_t	tx_addr[TX_RING_SIZE];
	dma_addr_t ring_dma_addr;
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	int chip_id, drv_flags;
	struct pci_dev *pci_dev;
	int csr6;
	struct w840_rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	unsigned int cur_tx, dirty_tx;
	unsigned int tx_q_bytes;
	unsigned int tx_full;		/* The Tx queue is full. */
	/* MII transceiver section. */
	int mii_cnt;			/* MII device addresses. */
	unsigned char phys[MII_CNT];	/* MII device addresses, but only the first is used */
	u32 mii;
	struct mii_if_info mii_if;
	void __iomem *base_addr;
};
static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static int  update_link(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void init_rxtx_rings(struct net_device *dev);
static void free_rxtx_rings(struct netdev_private *np);
static void init_registers(struct net_device *dev);
static void tx_timeout(struct net_device *dev);
static int  alloc_ringdesc(struct net_device *dev);
static void free_ringdesc(struct netdev_private *np);
static int  start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int  netdev_rx(struct net_device *dev);
static u32 __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  netdev_close(struct net_device *dev);
static int __devinit w840_probe1 (struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int find_cnt;
	int chip_idx = ent->driver_data;
	int irq;
	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	void __iomem *ioaddr;

	i = pci_enable_device(pdev);
	if (i) return i;

	pci_set_master(pdev);

	irq = pdev->irq;

	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_WARNING "Winbond-840: Device %s disabled due to DMA limitations.\n",
		       pci_name(pdev));
		return -EIO;
	}
	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
	if (!ioaddr)
		goto err_out_free_res;

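	/* The station address lives in the first three 16-bit EEPROM words,
	   read below via eeprom_read() and stored little-endian. */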
	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));

	/* Reset the chip to erase previous misconfiguration.
	   No hold time required! */
	iowrite32(0x00000001, ioaddr + PCIBusCfg);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
	spin_lock_init(&np->lock);
	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->base_addr = ioaddr;

	pci_set_drvdata(pdev, dev);

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii_if.full_duplex = 1;
		if (option & 15)
			printk(KERN_INFO "%s: ignoring user supplied media type %d",
				dev->name, option & 15);
	}
	if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
		np->mii_if.full_duplex = 1;

	if (np->mii_if.full_duplex)
		np->mii_if.force_media = 1;

	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = &tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;

	i = register_netdev(dev);
	if (i)
		goto err_out_cleardev;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
	       dev->dev_addr, irq);

	if (np->drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
						mdio_read(dev, phy, MII_PHYSID2);
				printk(KERN_INFO "%s: MII PHY %8.8xh found at address %d, status "
					   "0x%4.4x advertising %4.4x.\n",
					   dev->name, np->mii, phy, mii_status, np->mii_if.advertising);
			}
		}
		np->mii_cnt = phy_idx;
		np->mii_if.phy_id = np->phys[0];
		if (phy_idx == 0) {
			printk(KERN_WARNING "%s: MII PHY not found -- this device may "
				   "not operate correctly.\n", dev->name);
		}
	}

	find_cnt++;
	return 0;

err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.  These are
   often serial bit streams generated by the host processor.
   The example below is for the common 93c46 EEPROM, 64 16 bit words. */

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33MHz PCI, but future 66MHz access may need
   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
   made udelay() unreliable.
   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
   deprecated.
*/
#define eeprom_delay(ee_addr)	ioread32(ee_addr)

enum EEPROM_Ctrl_Bits {
	EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
	EE_ChipSelect=0x801, EE_DataIn=0x08,
};

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};
static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;
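	/* The 11 bits clocked out below are, MSB first: two leading zeros
	   (which the 93c46 should ignore while waiting for the start bit),
	   the always-set start bit plus the 2-bit read opcode (EE_ReadCmd is
	   110b in bits 8:6), and the 6-bit word address. */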
	iowrite32(EE_ChipSelect, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		iowrite32(dataval, ee_addr);
		eeprom_delay(ee_addr);
		iowrite32(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	iowrite32(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	for (i = 16; i > 0; i--) {
		iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
		iowrite32(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	iowrite32(0, ee_addr);
	return retval;
}
/*  MII transceiver control section.
	Read and write the MII registers using software-generated serial
	MDIO protocol.  See the MII specifications or DP83840A data sheet
	for details.

	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33MHz PCI cycles. */
#define mdio_delay(mdio_addr) ioread32(mdio_addr)

/* Set iff a MII transceiver on any interface requires mdio preamble.
   This is only set with older transceivers, so the extra
   code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required = 1;

#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite32(MDIO_WRITE1, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
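	/* The low 16 bits of mii_cmd form the MDIO read frame, shifted out
	   MSB first below: two idle ones, the 01 start pattern, the 10 read
	   opcode, the 5-bit PHY address, and the 5-bit register address. */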
	int i, retval = 0;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Read the two transition bits, 16 data bits, and the wire-idle bits. */
	for (i = 20; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	return (retval>>1) & 0xffff;
}
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
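	/* mii_cmd is the full 32-bit MDIO write frame, shifted out MSB first
	   below: 01 start, 01 write opcode (both from the 0x5002 constant),
	   5-bit PHY address, 5-bit register address, the 10 turnaround
	   pattern, and 16 data bits. */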
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (location == 4 && phy_id == np->phys[0])
		np->mii_if.advertising = value;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	return;
}
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	iowrite32(0x00000001, ioaddr + PCIBusCfg);	/* Reset */

	netif_device_detach(dev);
	i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		goto out_err;

	if (debug > 1)
		printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n",
			   dev->name, dev->irq);

	if((i=alloc_ringdesc(dev)))
		goto out_err;

	spin_lock_irq(&np->lock);
	netif_device_attach(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 1*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;	/* timer handler */
	add_timer(&np->timer);
	return 0;
out_err:
	netif_device_attach(dev);
	return i;
}
#define MII_DAVICOM_DM9101	0x0181b800

static int update_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int duplex, fasteth, result, mii_reg;

	/* BMSR */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);

	if (mii_reg == 0xffff)
		return np->csr6;
	/* reread: the link status bit is sticky */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
	if (!(mii_reg & 0x4)) {
		if (netif_carrier_ok(dev)) {
			if (debug)
				printk(KERN_INFO "%s: MII #%d reports no link. Disabling watchdog.\n",
					dev->name, np->phys[0]);
			netif_carrier_off(dev);
		}
		return np->csr6;
	}
	if (!netif_carrier_ok(dev)) {
		if (debug)
			printk(KERN_INFO "%s: MII #%d link is back. Enabling watchdog.\n",
				dev->name, np->phys[0]);
		netif_carrier_on(dev);
	}

	if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
		/* If the link partner doesn't support autonegotiation
		 * the MII detects its abilities with the "parallel detection".
		 * Some MIIs update the LPA register to the result of the parallel
		 * detection, some don't.
		 * The Davicom PHY [at least 0181b800] doesn't.
		 * Instead, bits 8 (duplex) and 13 (speed) of the BMCR are updated
		 * to the result of the negotiation.
		 */
		mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
		duplex = mii_reg & BMCR_FULLDPLX;
		fasteth = mii_reg & BMCR_SPEED100;
	} else {
		int negotiated;
		mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
		negotiated = mii_reg & np->mii_if.advertising;

		duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
		fasteth = negotiated & 0x380;
	}
	duplex |= np->mii_if.force_media;
	/* remove fast ethernet and full duplex bits */
	result = np->csr6 & ~0x20000200;
	if (duplex)
		result |= 0x200;
	if (fasteth)
		result |= 0x20000000;
	if (result != np->csr6 && debug)
		printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
			   dev->name, fasteth ? 100 : 10,
			   duplex ? "full" : "half", np->phys[0]);
	return result;
}
#define RXTX_TIMEOUT	2000
static inline void update_csr6(struct net_device *dev, int new)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int limit = RXTX_TIMEOUT;

	if (!netif_device_present(dev))
		new = 0;
	if (new==np->csr6)
		return;
	/* stop both Tx and Rx processes */
	iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
	/* wait until they have really stopped */
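	/* IntrStatus bits 19:17 report the Rx process state and bits 22:20
	   the Tx process state; the loop below polls until both reach an
	   idle value (0 or 1) before writing the new csr6. */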
	for (;;) {
		int csr5 = ioread32(ioaddr + IntrStatus);
		int t;

		t = (csr5 >> 17) & 0x07;
		if (t==0||t==1) {
			/* rx stopped */
			t = (csr5 >> 20) & 0x07;
			if (t==0||t==1)
				break;
		}

		limit--;
		if(!limit) {
			printk(KERN_INFO "%s: couldn't stop rxtx, IntrStatus %xh.\n",
				dev->name, csr5);
			break;
		}
		udelay(1);
	}
	np->csr6 = new;
	/* and restart them with the new configuration */
	iowrite32(np->csr6, ioaddr + NetworkConfig);
	if (new & 0x200)
		np->mii_if.full_duplex = 1;
}
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	if (debug > 2)
		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
			   "config %8.8x.\n",
			   dev->name, ioread32(ioaddr + IntrStatus),
			   ioread32(ioaddr + NetworkConfig));
	spin_lock_irq(&np->lock);
	update_csr6(dev, update_link(dev));
	spin_unlock_irq(&np->lock);
	np->timer.expires = jiffies + 10*HZ;
	add_timer(&np->timer);
}
static void init_rxtx_rings(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->rx_head_desc = &np->rx_ring[0];
	np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];
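	/* Both rings live in the single coherent block allocated by
	   alloc_ringdesc(): RX_RING_SIZE Rx descriptors followed immediately
	   by the Tx descriptors, so only one DMA address is tracked. */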
	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].length = np->rx_buf_sz;
		np->rx_ring[i].status = 0;
		np->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].length |= DescEndRing;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
					np->rx_buf_sz,PCI_DMA_FROMDEVICE);

		np->rx_ring[i].buffer1 = np->rx_addr[i];
		np->rx_ring[i].status = DescOwned;
	}

	np->cur_rx = 0;
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Initialize the Tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	np->tx_full = 0;
	np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

	iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
	iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
		np->base_addr + TxRingPtr);
}
static void free_rxtx_rings(struct netdev_private* np)
{
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
						np->rx_addr[i],
						np->rx_skbuff[i]->len,
						PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
						np->tx_addr[i],
						np->tx_skbuff[i]->len,
						PCI_DMA_TODEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
		}
		np->tx_skbuff[i] = NULL;
	}
}
static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
#ifdef __BIG_ENDIAN
	i = (1<<20);	/* Big-endian descriptors */
#else
	i = 0;
#endif
	i |= (0x04<<2);		/* skip length 4 u32 */
	i |= 0x02;		/* give Rx priority */

	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword cache alignment, 8 longword burst.
	   586: Set 16 longword cache alignment, no burst limit.
	   Cache alignment bits 15:14	     Burst length 13:8
		0000	<not allowed>		0000 align to cache	0800 8 longwords
		4000	8  longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32 longwords		0400 4 longwords */

#if defined (__i386__) && !defined(MODULE)
	/* When not a module we can work around broken '486 PCI boards. */
	if (boot_cpu_data.x86 <= 4) {
		i |= 0x4800;
		printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache "
			   "alignment to 8 longwords.\n", dev->name);
	} else {
		i |= 0xE000;
	}
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
	i |= 0xE000;
#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC)
	i |= 0x4800;
#else
#warning Processor architecture undefined
	i |= 0x4800;
#endif
	iowrite32(i, ioaddr + PCIBusCfg);

	np->csr6 = 0;
	/* 128 byte Tx threshold;
		Transmit on; Receive on; */
	update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(0x1A0F5, ioaddr + IntrStatus);
	iowrite32(0x1A0F5, ioaddr + IntrEnable);

	iowrite32(0, ioaddr + RxStartDemand);
}
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
		   " resetting...\n", dev->name, ioread32(ioaddr + IntrStatus));

	{
		int i;
		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
		printk("\n"KERN_DEBUG"  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %8.8x", np->tx_ring[i].status);
		printk("\n");
	}
	printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n",
		np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
	printk(KERN_DEBUG "Tx Descriptor addr %xh.\n", ioread32(ioaddr+0x4C));

	disable_irq(dev->irq);
	spin_lock_irq(&np->lock);
	/*
	 * Under high load dirty_tx and the internal tx descriptor pointer
	 * come out of sync, thus perform a software reset and reinitialize
	 * everything.
	 */

	iowrite32(1, np->base_addr+PCIBusCfg);
	udelay(1);

	free_rxtx_rings(np);
	init_rxtx_rings(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);
	enable_irq(dev->irq);

	netif_wake_queue(dev);
	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	return;
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int alloc_ringdesc(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	np->rx_ring = pci_alloc_consistent(np->pci_dev,
			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
			&np->ring_dma_addr);
	if(!np->rx_ring)
		return -ENOMEM;
	init_rxtx_rings(dev);
	return 0;
}
static void free_ringdesc(struct netdev_private *np)
{
	pci_free_consistent(np->pci_dev,
			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
			np->rx_ring, np->ring_dma_addr);
}
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_addr[entry] = pci_map_single(np->pci_dev,
				skb->data,skb->len, PCI_DMA_TODEVICE);
	np->tx_skbuff[entry] = skb;

	np->tx_ring[entry].buffer1 = np->tx_addr[entry];
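	/* The chip's Tx buffers are limited to TX_BUFLIMIT bytes, so a larger
	   packet is split across the descriptor's two buffers: buffer2 points
	   TX_BUFLIMIT bytes into the same mapping, and its size is packed
	   into bits 21:11 of the length field (hence the << 11 below). */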
	if (skb->len < TX_BUFLIMIT) {
		np->tx_ring[entry].length = DescWholePkt | skb->len;
	} else {
		int len = skb->len - TX_BUFLIMIT;

		np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
		np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
	}
	if(entry == TX_RING_SIZE-1)
		np->tx_ring[entry].length |= DescEndRing;

	/* Now acquire the irq spinlock.
	 * The difficult race is the ordering between
	 * increasing np->cur_tx and setting DescOwned:
	 * - if np->cur_tx is increased first the interrupt
	 *   handler could consider the packet as transmitted
	 *   since DescOwned is cleared.
	 * - If DescOwned is set first the NIC could report the
	 *   packet as sent, but the interrupt handler would ignore it
	 *   since the np->cur_tx was not yet increased.
	 */
	spin_lock_irq(&np->lock);
	np->cur_tx++;

	wmb(); /* flush length, buffer1, buffer2 */
	np->tx_ring[entry].status = DescOwned;
	wmb(); /* flush status and kick the hardware */
	iowrite32(0, np->base_addr + TxStartDemand);
	np->tx_q_bytes += skb->len;

	/* Work around horrible bug in the chip by marking the queue as full
	   when we do not have FIFO room for a maximum sized packet. */
	if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
		((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
		netif_stop_queue(dev);
		wmb();
		np->tx_full = 1;
	}
	spin_unlock_irq(&np->lock);

	dev->trans_start = jiffies;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			   dev->name, np->cur_tx, entry);
	}
	return 0;
}
static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		int tx_status = np->tx_ring[entry].status;

		if (tx_status < 0)
			break;
		if (tx_status & 0x8000) {	/* There was an error, log it. */
#ifndef final_version
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
					   dev->name, tx_status);
#endif
			np->stats.tx_errors++;
			if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
			if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
			if (tx_status & 0x0200) np->stats.tx_window_errors++;
			if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
			if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
				np->stats.tx_heartbeat_errors++;
		} else {
#ifndef final_version
			if (debug > 3)
				printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %8.8x.\n",
					   dev->name, entry, tx_status);
#endif
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
			np->stats.collisions += (tx_status >> 3) & 15;
			np->stats.tx_packets++;
		}
		/* Free the original skb. */
		pci_unmap_single(np->pci_dev,np->tx_addr[entry],
					np->tx_skbuff[entry]->len,
					PCI_DMA_TODEVICE);
		np->tx_q_bytes -= np->tx_skbuff[entry]->len;
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (np->tx_full &&
		np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
		np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
		/* The ring is no longer full, clear tbusy. */
		np->tx_full = 0;
		wmb();
		netif_wake_queue(dev);
	}
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int work_limit = max_interrupt_work;
	int handled = 0;

	if (!netif_device_present(dev))
		return IRQ_NONE;
	do {
		u32 intr_status = ioread32(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
			break;

		handled = 1;

		if (intr_status & (RxIntr | RxNoBuf))
			netdev_rx(dev);
		if (intr_status & RxNoBuf)
			iowrite32(0, ioaddr + RxStartDemand);

		if (intr_status & (TxNoBuf | TxIntr) &&
			np->cur_tx != np->dirty_tx) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
						   TimerInt | TxDied))
			netdev_error(dev, intr_status);

		if (--work_limit < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
				   "status=0x%4.4x.\n", dev->name, intr_status);
			/* Set the timer to re-enable the other interrupts after
			   10*82usec ticks. */
			spin_lock(&np->lock);
			if (netif_device_present(dev)) {
				iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
				iowrite32(10, ioaddr + GPTimer);
			}
			spin_unlock(&np->lock);
			break;
		}
	} while (1);

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, ioread32(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
			   entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = desc->status;

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   status);
		if (status < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
						   "multiple buffers, entry %#x status %4.4x!\n",
						   dev->name, np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				if (debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   dev->name, status);
				np->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) np->stats.rx_length_errors++;
				if (status & 0x004C) np->stats.rx_frame_errors++;
				if (status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
			if (debug > 4)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   " status %x.\n", pkt_len, status);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
							    np->rx_skbuff[entry]->len,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
							       np->rx_skbuff[entry]->len,
							       PCI_DMA_FROMDEVICE);
			} else {
				pci_unmap_single(np->pci_dev,np->rx_addr[entry],
							np->rx_skbuff[entry]->len,
							PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version			/* Remove after testing. */
			/* You will want this info for the initial debug. */
			if (debug > 5)
				printk(KERN_DEBUG "  Rx data %pM %pM"
					   " %2.2x%2.2x %d.%d.%d.%d.\n",
					   &skb->data[0], &skb->data[6],
					   skb->data[12], skb->data[13],
					   skb->data[14], skb->data[15], skb->data[16], skb->data[17]);
#endif
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			np->rx_addr[entry] = pci_map_single(np->pci_dev,
							skb->data,
							np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
		}
		wmb();
		np->rx_ring[entry].status = DescOwned;
	}

	return 0;
}
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	if (debug > 2)
		printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
			   dev->name, intr_status);
	if (intr_status == 0xffffffff)
		return;
	spin_lock(&np->lock);
	if (intr_status & TxFIFOUnderflow) {
		int new;
		/* Bump up the Tx threshold */
#if 0
		/* This causes lots of dropped packets,
		 * and under high load even tx_timeouts
		 */
		new = np->csr6 + 0x4000;
#else
		new = (np->csr6 >> 14)&0x7f;
		if (new < 64)
			new *= 2;
		else
			new = 127; /* load full packet before starting */
		new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
#endif
		printk(KERN_DEBUG "%s: Tx underflow, new csr6 %8.8x.\n",
			   dev->name, new);
		update_csr6(dev, new);
	}
	if (intr_status & RxDied) {	/* Missed a Rx frame. */
		np->stats.rx_errors++;
	}
	if (intr_status & TimerInt) {
		/* Re-enable other interrupts. */
		if (netif_device_present(dev))
			iowrite32(0x1A0F5, ioaddr + IntrEnable);
	}
	np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	iowrite32(0, ioaddr + RxStartDemand);
	spin_unlock(&np->lock);
}
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	/* The chip only needs to report frames it silently dropped. */
	spin_lock_irq(&np->lock);
	if (netif_running(dev) && netif_device_present(dev))
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	spin_unlock_irq(&np->lock);

	return &np->stats;
}
static u32 __set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	u32 mc_filter[2];	/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
			| AcceptMyPhys;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else {
		struct dev_mc_list *mclist;
		int i;

		memset(mc_filter, 0, sizeof(mc_filter));
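		/* Like the Tulip, the '840 hashes each multicast address into
		   a 64-entry table using the top six bits of the Ethernet
		   CRC; the index is inverted (^ 0x3F), presumably to match
		   the chip's bit ordering, and the two 32-bit halves go into
		   the MulticastFilter0/1 registers. */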
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
			int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
			filterbit &= 0x3f;
			mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
		}
		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	}
	iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
	iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	return rx_mode;
}
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 rx_mode = __set_rx_mode(dev);
	spin_lock_irq(&np->lock);
	update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
	spin_unlock_irq(&np->lock);
}
static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strcpy (info->driver, DRV_NAME);
	strcpy (info->version, DRV_VERSION);
	strcpy (info->bus_info, pci_name(np->pci_dev));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_gset(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_sset(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	switch(cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f;
		/* Fall Through */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		spin_lock_irq(&np->lock);
		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
		spin_unlock_irq(&np->lock);
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		spin_lock_irq(&np->lock);
		mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
		spin_unlock_irq(&np->lock);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	netif_stop_queue(dev);

	if (debug > 1) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
			   "Config %8.8x.\n", dev->name, ioread32(ioaddr + IntrStatus),
			   ioread32(ioaddr + NetworkConfig));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Stop the chip's Tx and Rx processes. */
	spin_lock_irq(&np->lock);
	netif_device_detach(dev);
	update_csr6(dev, 0);
	iowrite32(0x0000, ioaddr + IntrEnable);
	spin_unlock_irq(&np->lock);

	free_irq(dev->irq, dev);
	wmb();
	netif_device_attach(dev);

	if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

#ifdef __i386__
	if (debug > 2) {
		int i;

		printk(KERN_DEBUG"  Tx ring at %8.8x:\n",
			   (int)np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x.\n",
				   i, np->tx_ring[i].length,
				   np->tx_ring[i].status, np->tx_ring[i].buffer1);
		printk("\n"KERN_DEBUG "  Rx ring %8.8x:\n",
			   (int)np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].length,
				   np->rx_ring[i].status, np->rx_ring[i].buffer1);
		}
	}
#endif /* __i386__ debugging only */

	del_timer_sync(&np->timer);

	free_rxtx_rings(np);
	free_ringdesc(np);

	return 0;
}
static void __devexit w840_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);
		unregister_netdev(dev);
		pci_release_regions(pdev);
		pci_iounmap(pdev, np->base_addr);
		free_netdev(dev);
	}

	pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM

/*
 * suspend/resume synchronization:
 * - open, close, do_ioctl:
 * 	rtnl_lock, & netif_device_detach after the rtnl_unlock.
 * - get_stats:
 * 	spin_lock_irq(np->lock), doesn't touch hw if not present
 * - hard_start_xmit:
 * 	synchronize_irq + netif_tx_disable;
 * - tx_timeout:
 * 	netif_device_detach + netif_tx_disable;
 * - set_multicast_list
 * 	netif_device_detach + netif_tx_disable;
 * - interrupt handler
 * 	doesn't touch hw if not present, synchronize_irq waits for
 * 	running instances of the interrupt handler.
 *
 * Disabling hw requires clearing csr6 & IntrEnable.
 * update_csr6 & all functions that write IntrEnable check netif_device_present
 * before setting any bits.
 *
 * Detach must occur inside spin_lock_irq(); otherwise interrupts from a
 * half-detached device would cause an irq storm.
 */
static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	rtnl_lock();
	if (netif_running (dev)) {
		del_timer_sync(&np->timer);

		spin_lock_irq(&np->lock);
		netif_device_detach(dev);
		update_csr6(dev, 0);
		iowrite32(0, ioaddr + IntrEnable);
		spin_unlock_irq(&np->lock);

		synchronize_irq(dev->irq);
		netif_tx_disable(dev);

		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

		/* no more hardware accesses behind this line. */

		BUG_ON(np->csr6);
		if (ioread32(ioaddr + IntrEnable)) BUG();

		/* pci_power_off(pdev, -1); */

		free_rxtx_rings(np);
	} else {
		netif_device_detach(dev);
	}
	rtnl_unlock();
	return 0;
}
static int w840_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	int retval = 0;

	rtnl_lock();
	if (netif_device_present(dev))
		goto out; /* device not suspended */
	if (netif_running(dev)) {
		if ((retval = pci_enable_device(pdev))) {
			printk (KERN_ERR
				"%s: pci_enable_device failed in resume\n",
				dev->name);
			goto out;
		}
		spin_lock_irq(&np->lock);
		iowrite32(1, np->base_addr+PCIBusCfg);
		ioread32(np->base_addr+PCIBusCfg);
		udelay(1);
		netif_device_attach(dev);
		init_rxtx_rings(dev);
		init_registers(dev);
		spin_unlock_irq(&np->lock);

		netif_wake_queue(dev);

		mod_timer(&np->timer, jiffies + 1*HZ);
	} else {
		netif_device_attach(dev);
	}
out:
	rtnl_unlock();
	return retval;
}
#endif
static struct pci_driver w840_driver = {
	.name		= DRV_NAME,
	.id_table	= w840_pci_tbl,
	.probe		= w840_probe1,
	.remove		= __devexit_p(w840_remove1),
#ifdef CONFIG_PM
	.suspend	= w840_suspend,
	.resume		= w840_resume,
#endif
};
static int __init w840_init(void)
{
	printk(version);
	return pci_register_driver(&w840_driver);
}

static void __exit w840_exit(void)
{
	pci_unregister_driver(&w840_driver);
}

module_init(w840_init);
module_exit(w840_exit);