Source provenance: drivers/net/sundance.c from the linux-2.6 tree
(gitweb export, blob c399b1955c1eec016a037e67059f57d2f3f8eebc; the
containing commit's subject was "rocket: fix test_bit parameters").
1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2 /*
3 Written 1999-2000 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
15 Annapolis MD 21403
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
19 [link no longer provides useful info -jgarzik]
20 Archives of the mailing list are still available at
21 http://www.beowulf.org/pipermail/netdrivers/
25 #define DRV_NAME "sundance"
26 #define DRV_VERSION "1.2"
27 #define DRV_RELDATE "11-Sep-2006"
30 /* The user-configurable values.
31    These may be modified when a driver module is loaded.*/
32 static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
33 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34    Typical is a 64 element hash table based on the Ethernet CRC.  */
35 static const int multicast_filter_limit = 32;
37 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38    Setting to > 1518 effectively disables this feature.
39    This chip can receive into offset buffers, so the Alpha does not
40    need a copy-align. */
41 static int rx_copybreak;
/* Flow control on by default; cleared via the "flowctrl" module parameter. */
42 static int flowctrl=1;
44 /* media[] specifies the media type the NIC operates at.
45 		 autosense	Autosensing active media.
46 		 10mbps_hd	10Mbps half duplex.
47 		 10mbps_fd	10Mbps full duplex.
48 		 100mbps_hd	100Mbps half duplex.
49 		 100mbps_fd	100Mbps full duplex.
50 		 0		Autosensing active media.
51 		 1		10Mbps half duplex.
52 		 2		10Mbps full duplex.
53 		 3		100Mbps half duplex.
54 		 4		100Mbps full duplex.
/* One media override string per card, parsed in sundance_probe1(). */
56 #define MAX_UNITS 8
57 static char *media[MAX_UNITS];
60 /* Operational parameters that are set at compile time. */
62 /* Keep the ring sizes a power of two for compile efficiency.
63    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64    Making the Tx ring too large decreases the effectiveness of channel
65    bonding and packet priority, and more than 128 requires modifying the
66    Tx error recovery.
67    Large receive rings merely waste memory. */
68 #define TX_RING_SIZE	32
69 #define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
70 #define RX_RING_SIZE	64
/* Max Rx descriptors processed per rx_poll() pass before rescheduling. */
71 #define RX_BUDGET	32
72 #define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
73 #define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
75 /* Operational parameters that usually are not changed. */
76 /* Time in jiffies before concluding the transmitter is hung. */
77 #define TX_TIMEOUT  (4*HZ)
78 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
80 /* Include files, designed to support most kernel versions 2.0.0 and later. */
81 #include <linux/module.h>
82 #include <linux/kernel.h>
83 #include <linux/string.h>
84 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/ioport.h>
87 #include <linux/slab.h>
88 #include <linux/interrupt.h>
89 #include <linux/pci.h>
90 #include <linux/netdevice.h>
91 #include <linux/etherdevice.h>
92 #include <linux/skbuff.h>
93 #include <linux/init.h>
94 #include <linux/bitops.h>
95 #include <asm/uaccess.h>
96 #include <asm/processor.h> /* Processor type for cache alignment. */
97 #include <asm/io.h>
98 #include <linux/delay.h>
99 #include <linux/spinlock.h>
100 #ifndef _COMPAT_WITH_OLD_KERNEL
101 #include <linux/crc32.h>
102 #include <linux/ethtool.h>
103 #include <linux/mii.h>
104 #else
105 #include "crc32.h"
106 #include "ethtool.h"
107 #include "mii.h"
108 #include "compat.h"
109 #endif
111 /* These identify the driver base version and may not be removed. */
112 static const char version[] __devinitconst =
113 	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
114 	"  Written by Donald Becker\n";
116 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
117 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
118 MODULE_LICENSE("GPL");
/* Module parameters; all are read-only after load (perm 0). */
120 module_param(debug, int, 0);
121 module_param(rx_copybreak, int, 0);
122 module_param_array(media, charp, NULL, 0);
123 module_param(flowctrl, int, 0);
/* NOTE(review): debug is documented here as 0-5 but described as 0..7
   at its definition above — confirm the intended range. */
124 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
125 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
126 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
129 Theory of Operation
131 I. Board Compatibility
133 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
135 II. Board-specific settings
137 III. Driver operation
139 IIIa. Ring buffers
141 This driver uses two statically allocated fixed-size descriptor lists
142 formed into rings by a branch from the final descriptor to the beginning of
143 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
144 Some chips explicitly use only 2^N sized rings, while others use a
145 'next descriptor' pointer that the driver forms into rings.
147 IIIb/c. Transmit/Receive Structure
149 This driver uses a zero-copy receive and transmit scheme.
150 The driver allocates full frame size skbuffs for the Rx ring buffers at
151 open() time and passes the skb->data field to the chip as receive data
152 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
153 a fresh skbuff is allocated and the frame is copied to the new skbuff.
154 When the incoming frame is larger, the skbuff is passed directly up the
155 protocol stack. Buffers consumed this way are replaced by newly allocated
156 skbuffs in a later phase of receives.
158 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
159 using a full-sized skbuff for small frames vs. the copying costs of larger
160 frames. New boards are typically used in generously configured machines
161 and the underfilled buffers have negligible impact compared to the benefit of
162 a single allocation size, so the default value of zero results in never
163 copying packets. When copying is done, the cost is usually mitigated by using
164 a combined copy/checksum routine. Copying also preloads the cache, which is
165 most useful with small frames.
167 A subtle aspect of the operation is that the IP header at offset 14 in an
168 ethernet frame isn't longword aligned for further processing.
169 Unaligned buffers are permitted by the Sundance hardware, so
170 frames are received into the skbuff at an offset of "+2", 16-byte aligning
171 the IP header.
173 IIId. Synchronization
175 The driver runs as two independent, single-threaded flows of control. One
176 is the send-packet routine, which enforces single-threaded use by the
177 dev->tbusy flag. The other thread is the interrupt handler, which is single
178 threaded by the hardware and interrupt handling software.
180 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
181 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
182 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
183 the 'lp->tx_full' flag.
185 The interrupt handler has exclusive control over the Rx ring and records stats
186 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
187 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
188 clears both the tx_full and tbusy flags.
190 IV. Notes
192 IVb. References
194 The Sundance ST201 datasheet, preliminary version.
195 The Kendin KS8723 datasheet, preliminary version.
196 The ICplus IP100 datasheet, preliminary version.
197 http://www.scyld.com/expert/100mbps.html
198 http://www.scyld.com/expert/NWay.html
200 IVc. Errata
204 /* Work-around for Kendin chip bugs. */
/* NOTE(review): the comment above looks misplaced — the #ifdef below only
   selects I/O-port accessors over MMIO (BAR 0 vs BAR 1 in the probe). */
205 #ifndef CONFIG_SUNDANCE_MMIO
206 #define USE_IO_OPS 1
207 #endif
/* PCI match table: { vendor, device, subvendor, subdevice, class,
   class_mask, driver_data }.  driver_data is an index into pci_id_tbl[]
   below.  The D-Link boards (0x1186) are distinguished by subsystem ID;
   the 0x13F0 entries are generic Sundance ST201 / IC Plus parts. */
209 static const struct pci_device_id sundance_pci_tbl[] = {
210 	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
211 	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
212 	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
213 	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
214 	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
215 	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
216 	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
219 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
/* Size of the register window mapped by pci_iomap() in the probe. */
221 enum {
222 	netdev_io_size = 128
/* Human-readable board names, indexed by pci_device_id.driver_data. */
225 struct pci_id_info {
226         const char *name;
228 static const struct pci_id_info pci_id_tbl[] __devinitdata = {
229 	{"D-Link DFE-550TX FAST Ethernet Adapter"},
230 	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
231 	{"D-Link DFE-580TX 4 port Server Adapter"},
232 	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
233 	{"D-Link DL10050-based FAST Ethernet Adapter"},
234 	{"Sundance Technology Alta"},
235 	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
236 	{ }	/* terminate list. */
239 /* This driver was written to use PCI memory space, however x86-oriented
240 hardware often uses I/O space accesses. */
242 /* Offsets to the device registers.
243 Unlike software-only systems, device drivers interact with complex hardware.
244 It's not useful to define symbolic names for every register bit in the
245 device. The name can only partially document the semantics and make
246 the driver longer and more difficult to read.
247 In general, only the important configuration values or bits changed
248 multiple times should be defined symbolically.
/* Register offsets within the 128-byte window; 8/16/32-bit accesses
   are mixed per the datasheet register widths. */
250 enum alta_offsets {
251 	DMACtrl = 0x00,
252 	TxListPtr = 0x04,
253 	TxDMABurstThresh = 0x08,
254 	TxDMAUrgentThresh = 0x09,
255 	TxDMAPollPeriod = 0x0a,
256 	RxDMAStatus = 0x0c,
257 	RxListPtr = 0x10,
/* NOTE(review): DebugCtrl0 (0x1a) shares its offset with LEDCtrl below,
   and DownCounter (0x18) is out of ascending order — both match the
   hardware but are easy to misread. */
258 	DebugCtrl0 = 0x1a,
259 	DebugCtrl1 = 0x1c,
260 	RxDMABurstThresh = 0x14,
261 	RxDMAUrgentThresh = 0x15,
262 	RxDMAPollPeriod = 0x16,
263 	LEDCtrl = 0x1a,
264 	ASICCtrl = 0x30,
265 	EEData = 0x34,
266 	EECtrl = 0x36,
267 	FlashAddr = 0x40,
268 	FlashData = 0x44,
269 	TxStatus = 0x46,
270 	TxFrameId = 0x47,
271 	DownCounter = 0x18,
272 	IntrClear = 0x4a,
273 	IntrEnable = 0x4c,
274 	IntrStatus = 0x4e,
275 	MACCtrl0 = 0x50,
276 	MACCtrl1 = 0x52,
277 	StationAddr = 0x54,
278 	MaxFrameSize = 0x5A,
279 	RxMode = 0x5c,
280 	MIICtrl = 0x5e,
281 	MulticastFilter0 = 0x60,
282 	MulticastFilter1 = 0x64,
/* Hardware statistics counters, 0x68-0x7f. */
283 	RxOctetsLow = 0x68,
284 	RxOctetsHigh = 0x6a,
285 	TxOctetsLow = 0x6c,
286 	TxOctetsHigh = 0x6e,
287 	TxFramesOK = 0x70,
288 	RxFramesOK = 0x72,
289 	StatsCarrierError = 0x74,
290 	StatsLateColl = 0x75,
291 	StatsMultiColl = 0x76,
292 	StatsOneColl = 0x77,
293 	StatsTxDefer = 0x78,
294 	RxMissed = 0x79,
295 	StatsTxXSDefer = 0x7a,
296 	StatsTxAbort = 0x7b,
297 	StatsBcastTx = 0x7c,
298 	StatsBcastRx = 0x7d,
299 	StatsMcastTx = 0x7e,
300 	StatsMcastRx = 0x7f,
301 	/* Aliased and bogus values! */
302 	RxStatus = 0x0c,
/* Bits in the high 16 bits of ASICCtrl; sundance_reset() writes these
   shifted left by 16 and polls ResetBusy. */
304 enum ASICCtrl_HiWord_bit {
305 	GlobalReset = 0x0001,
306 	RxReset = 0x0002,
307 	TxReset = 0x0004,
308 	DMAReset = 0x0008,
309 	FIFOReset = 0x0010,
310 	NetworkReset = 0x0020,
311 	HostReset = 0x0040,
312 	ResetBusy = 0x0400,
315 /* Bits in the interrupt status/mask registers. */
316 enum intr_status_bits {
317 	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
318 	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
319 	IntrDrvRqst=0x0040,
320 	StatsMax=0x0080, LinkChange=0x0100,
321 	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
324 /* Bits in the RxMode register. */
325 enum rx_mode_bits {
326 	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
327 	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
329 /* Bits in MACCtrl. */
330 enum mac_ctrl0_bits {
331 	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
332 	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
/* MACCtrl1: write the *Enable/*Disable bits to change state; the
   *Enabled bits read back the current state. */
334 enum mac_ctrl1_bits {
335 	StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
336 	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
337 	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
340 /* The Rx and Tx buffer descriptors. */
341 /* Note that using only 32 bit fields simplifies conversion to big-endian
342    architectures. */
/* Hardware DMA descriptor; this driver uses a single fragment per
   descriptor (frag[1]). */
343 struct netdev_desc {
344 	__le32 next_desc;
345 	__le32 status;
346 	struct desc_frag { __le32 addr, length; } frag[1];
349 /* Bits in netdev_desc.status */
/* NOTE(review): DescOwn and DescIntrOnTx share 0x8000, as do LastFrag
   and DescIntrOnDMADone (0x80000000) — the same bit means different
   things on Rx vs Tx descriptors. */
350 enum desc_status_bits {
351 	DescOwn=0x8000,
352 	DescEndPacket=0x4000,
353 	DescEndRing=0x2000,
354 	LastFrag=0x80000000,
355 	DescIntrOnTx=0x8000,
356 	DescIntrOnDMADone=0x80000000,
357 	DisableAlign = 0x00000001,
360 #define PRIV_ALIGN	15 	/* Required alignment mask */
361 /* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
362    within the structure. */
363 #define MII_CNT		4
/* Per-device driver state, stored via netdev_priv(dev). */
364 struct netdev_private {
365 	/* Descriptor rings first for alignment. */
366 	struct netdev_desc *rx_ring;
367 	struct netdev_desc *tx_ring;
368 	struct sk_buff* rx_skbuff[RX_RING_SIZE];
369 	struct sk_buff* tx_skbuff[TX_RING_SIZE];
370 	dma_addr_t tx_ring_dma;
371 	dma_addr_t rx_ring_dma;
372 	struct net_device_stats stats;
373 	struct timer_list timer;		/* Media monitoring timer. */
374 	/* Frequently used values: keep some adjacent for cache effect. */
375 	spinlock_t lock;
376 	spinlock_t rx_lock;			/* Group with Tx control cache line. */
377 	int msg_enable;
378 	int chip_id;
379 	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
380 	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
381 	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
382 	unsigned int cur_tx, dirty_tx;
383 	/* These values are keep track of the transceiver/media in use. */
384 	unsigned int flowctrl:1;
385 	unsigned int default_port:4;		/* Last dev->if_port value. */
386 	unsigned int an_enable:1;
387 	unsigned int speed;
/* Deferred Rx/Tx processing; scheduled from the interrupt handler. */
388 	struct tasklet_struct rx_tasklet;
389 	struct tasklet_struct tx_tasklet;
390 	int budget;
391 	int cur_task;
392 	/* Multicast and receive mode. */
393 	spinlock_t mcastlock;			/* SMP lock multicast updates. */
394 	u16 mcast_filter[4];
395 	/* MII transceiver section. */
396 	struct mii_if_info mii_if;
397 	int mii_preamble_required;
398 	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
399 	struct pci_dev *pci_dev;
400 	void __iomem *base;
403 /* The station address location in the EEPROM. */
404 #define EEPROM_SA_OFFSET	0x10
/* Interrupt sources enabled during normal operation. */
405 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
406 			IntrDrvRqst | IntrTxDone | StatsMax | \
407 			LinkChange)
/* Forward declarations for the driver entry points below. */
409 static int  change_mtu(struct net_device *dev, int new_mtu);
410 static int  eeprom_read(void __iomem *ioaddr, int location);
411 static int  mdio_read(struct net_device *dev, int phy_id, int location);
412 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
413 static int  mdio_wait_link(struct net_device *dev, int wait);
414 static int  netdev_open(struct net_device *dev);
415 static void check_duplex(struct net_device *dev);
416 static void netdev_timer(unsigned long data);
417 static void tx_timeout(struct net_device *dev);
418 static void init_ring(struct net_device *dev);
419 static int  start_tx(struct sk_buff *skb, struct net_device *dev);
420 static int reset_tx (struct net_device *dev);
421 static irqreturn_t intr_handler(int irq, void *dev_instance);
422 static void rx_poll(unsigned long data);
423 static void tx_poll(unsigned long data);
424 static void refill_rx (struct net_device *dev);
/* NOTE(review): netdev_error is declared twice (original lines 425-426);
   harmless, but one declaration can be dropped. */
425 static void netdev_error(struct net_device *dev, int intr_status);
426 static void netdev_error(struct net_device *dev, int intr_status);
427 static void set_rx_mode(struct net_device *dev);
428 static int __set_mac_addr(struct net_device *dev);
429 static struct net_device_stats *get_stats(struct net_device *dev);
430 static int  netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
431 static int  netdev_close(struct net_device *dev);
432 static const struct ethtool_ops ethtool_ops;
/* Issue a reset via the high word of ASICCtrl and poll until the
   hardware clears ResetBusy.  reset_cmd is a mask of the
   ASICCtrl_HiWord_bit values already shifted into the high 16 bits. */
434 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
436 	struct netdev_private *np = netdev_priv(dev);
437 	void __iomem *ioaddr = np->base + ASICCtrl;
438 	int countdown;
440 	/* ST201 documentation states ASICCtrl is a 32bit register */
441 	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
442 	/* ST201 documentation states reset can take up to 1 ms */
/* 11 polls x 100us below gives a little over the documented 1 ms. */
443 	countdown = 10 + 1;
444 	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
445 		if (--countdown == 0) {
446 			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
447 			break;
449 		udelay(100);
/* net_device callbacks wired up in sundance_probe1(). */
453 static const struct net_device_ops netdev_ops = {
454 	.ndo_open		= netdev_open,
455 	.ndo_stop		= netdev_close,
456 	.ndo_start_xmit		= start_tx,
457 	.ndo_get_stats 		= get_stats,
458 	.ndo_set_multicast_list = set_rx_mode,
459 	.ndo_do_ioctl 		= netdev_ioctl,
460 	.ndo_tx_timeout		= tx_timeout,
461 	.ndo_change_mtu		= change_mtu,
462 	.ndo_set_mac_address 	= eth_mac_addr,
463 	.ndo_validate_addr	= eth_validate_addr,
/* PCI probe: enable the device, map registers, read the MAC address from
   EEPROM, allocate DMA rings, scan the MII bus, apply any "media" module
   parameter override, and register the net_device.  Returns 0 on success
   or a negative errno; all resources are unwound on the error paths. */
466 static int __devinit sundance_probe1 (struct pci_dev *pdev,
467 				      const struct pci_device_id *ent)
469 	struct net_device *dev;
470 	struct netdev_private *np;
471 	static int card_idx;
472 	int chip_idx = ent->driver_data;
473 	int irq;
474 	int i;
475 	void __iomem *ioaddr;
476 	u16 mii_ctl;
477 	void *ring_space;
478 	dma_addr_t ring_dma;
479 #ifdef USE_IO_OPS
480 	int bar = 0;
481 #else
482 	int bar = 1;
483 #endif
484 	int phy, phy_end, phy_idx = 0;
486 /* when built into the kernel, we only print version if device is found */
487 #ifndef MODULE
488 	static int printed_version;
489 	if (!printed_version++)
490 		printk(version);
491 #endif
493 	if (pci_enable_device(pdev))
494 		return -EIO;
495 	pci_set_master(pdev);
497 	irq = pdev->irq;
499 	dev = alloc_etherdev(sizeof(*np));
500 	if (!dev)
501 		return -ENOMEM;
502 	SET_NETDEV_DEV(dev, &pdev->dev);
504 	if (pci_request_regions(pdev, DRV_NAME))
505 		goto err_out_netdev;
507 	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
508 	if (!ioaddr)
509 		goto err_out_res;
/* Station address: three 16-bit EEPROM words starting at EEPROM_SA_OFFSET. */
511 	for (i = 0; i < 3; i++)
512 		((__le16 *)dev->dev_addr)[i] =
513 			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
514 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
516 	dev->base_addr = (unsigned long)ioaddr;
517 	dev->irq = irq;
519 	np = netdev_priv(dev);
520 	np->base = ioaddr;
521 	np->pci_dev = pdev;
522 	np->chip_id = chip_idx;
523 	np->msg_enable = (1 << debug) - 1;
524 	spin_lock_init(&np->lock);
525 	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
526 	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
/* Coherent DMA memory for the Tx and Rx descriptor rings. */
528 	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
529 	if (!ring_space)
530 		goto err_out_cleardev;
531 	np->tx_ring = (struct netdev_desc *)ring_space;
532 	np->tx_ring_dma = ring_dma;
534 	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
535 	if (!ring_space)
536 		goto err_out_unmap_tx;
537 	np->rx_ring = (struct netdev_desc *)ring_space;
538 	np->rx_ring_dma = ring_dma;
540 	np->mii_if.dev = dev;
541 	np->mii_if.mdio_read = mdio_read;
542 	np->mii_if.mdio_write = mdio_write;
543 	np->mii_if.phy_id_mask = 0x1f;
544 	np->mii_if.reg_num_mask = 0x1f;
546 	/* The chip-specific entries in the device structure. */
547 	dev->netdev_ops = &netdev_ops;
548 	SET_ETHTOOL_OPS(dev, &ethtool_ops);
549 	dev->watchdog_timeo = TX_TIMEOUT;
551 	pci_set_drvdata(pdev, dev);
553 	i = register_netdev(dev);
554 	if (i)
555 		goto err_out_unmap_rx;
557 	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
558 	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
559 	       dev->dev_addr, irq);
561 	np->phys[0] = 1;		/* Default setting */
562 	np->mii_preamble_required++;
565 	 * It seems some phys doesn't deal well with address 0 being accessed
566 	 * first
/* Scan the MII bus; for device 0x0200 start at address 0, otherwise start
   at 1 and wrap around to 0 via the 'phy & 0x1f' mask below. */
568 	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
569 		phy = 0;
570 		phy_end = 31;
571 	} else {
572 		phy = 1;
573 		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
575 	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
576 		int phyx = phy & 0x1f;
577 		int mii_status = mdio_read(dev, phyx, MII_BMSR);
578 		if (mii_status != 0xffff && mii_status != 0x0000) {
579 			np->phys[phy_idx++] = phyx;
580 			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
/* BMSR bit 6 clear => PHY requires the MDIO preamble. */
581 			if ((mii_status & 0x0040) == 0)
582 				np->mii_preamble_required++;
583 			printk(KERN_INFO "%s: MII PHY found at address %d, status "
584 			       "0x%4.4x advertising %4.4x.\n",
585 			       dev->name, phyx, mii_status, np->mii_if.advertising);
588 	np->mii_preamble_required--;
590 	if (phy_idx == 0) {
591 		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
592 		       dev->name, ioread32(ioaddr + ASICCtrl));
593 		goto err_out_unregister;
596 	np->mii_if.phy_id = np->phys[0];
598 	/* Parse override configuration */
599 	np->an_enable = 1;
600 	if (card_idx < MAX_UNITS) {
601 		if (media[card_idx] != NULL) {
602 			np->an_enable = 0;
603 			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
604 			    strcmp (media[card_idx], "4") == 0) {
605 				np->speed = 100;
606 				np->mii_if.full_duplex = 1;
607 			} else if (strcmp (media[card_idx], "100mbps_hd") == 0
608 				   || strcmp (media[card_idx], "3") == 0) {
609 				np->speed = 100;
610 				np->mii_if.full_duplex = 0;
611 			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
612 				   strcmp (media[card_idx], "2") == 0) {
613 				np->speed = 10;
614 				np->mii_if.full_duplex = 1;
615 			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
616 				   strcmp (media[card_idx], "1") == 0) {
617 				np->speed = 10;
618 				np->mii_if.full_duplex = 0;
619 			} else {
/* Unrecognized media string: fall back to autonegotiation. */
620 				np->an_enable = 1;
623 		if (flowctrl == 1)
624 			np->flowctrl = 1;
627 	/* Fibre PHY? */
628 	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
629 		/* Default 100Mbps Full */
630 		if (np->an_enable) {
631 			np->speed = 100;
632 			np->mii_if.full_duplex = 1;
633 			np->an_enable = 0;
636 	/* Reset PHY */
637 	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
638 	mdelay (300);
639 	/* If flow control enabled, we need to advertise it.*/
/* 0x0400 is the pause-capability bit in the MII advertisement register. */
640 	if (np->flowctrl)
641 		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
642 	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
643 	/* Force media type */
644 	if (!np->an_enable) {
645 		mii_ctl = 0;
646 		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
647 		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
648 		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
649 		printk (KERN_INFO "Override speed=%d, %s duplex\n",
650 			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
654 	/* Perhaps move the reset here? */
655 	/* Reset the chip to erase previous misconfiguration. */
656 	if (netif_msg_hw(np))
657 		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
658 	sundance_reset(dev, 0x00ff << 16);
659 	if (netif_msg_hw(np))
660 		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
662 	card_idx++;
663 	return 0;
/* Error unwinding, in reverse order of acquisition. */
665 err_out_unregister:
666 	unregister_netdev(dev);
667 err_out_unmap_rx:
668         pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
669 err_out_unmap_tx:
670         pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
671 err_out_cleardev:
672 	pci_set_drvdata(pdev, NULL);
673 	pci_iounmap(pdev, ioaddr);
674 err_out_res:
675 	pci_release_regions(pdev);
676 err_out_netdev:
677 	free_netdev (dev);
678 	return -ENODEV;
/* ndo_change_mtu: accept 68..8191 (hardware RxDMAFrameLen limit), and
   only while the interface is down.  Returns 0 or -EINVAL/-EBUSY. */
681 static int change_mtu(struct net_device *dev, int new_mtu)
683 	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
684 		return -EINVAL;
685 	if (netif_running(dev))
686 		return -EBUSY;
687 	dev->mtu = new_mtu;
688 	return 0;
/* A dummy register read serves as the EEPROM timing delay. */
691 #define eeprom_delay(ee_addr)	ioread32(ee_addr)
692 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
/* Read one 16-bit word from the serial EEPROM; returns 0 if the busy
   bit (0x8000 in EECtrl) never clears. */
693 static int __devinit eeprom_read(void __iomem *ioaddr, int location)
695 	int boguscnt = 10000;		/* Typical 1900 ticks. */
696 	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
697 	do {
698 		eeprom_delay(ioaddr + EECtrl);
699 		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
700 			return ioread16(ioaddr + EEData);
702 	} while (--boguscnt > 0);
703 	return 0;
706 /* MII transceiver control section.
707 Read and write the MII registers using software-generated serial
708 MDIO protocol. See the MII specifications or DP83840A data sheet
709 for details.
711 The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
712 met by back-to-back 33Mhz PCI cycles. */
/* A dummy register read serves as the MDIO bit-bang timing delay. */
713 #define mdio_delay() ioread8(mdio_addr)
715 enum mii_reg_bits {
716 	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
718 #define MDIO_EnbIn  (0)
719 #define MDIO_WRITE0 (MDIO_EnbOutput)
720 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
722 /* Generate the preamble required for initial synchronization and
723    a few older transceivers. */
/* Clock out 32 one-bits on the MDIO line. */
724 static void mdio_sync(void __iomem *mdio_addr)
726 	int bits = 32;
728 	/* Establish sync by sending at least 32 logic ones. */
729 	while (--bits >= 0) {
730 		iowrite8(MDIO_WRITE1, mdio_addr);
731 		mdio_delay();
732 		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
733 		mdio_delay();
/* Bit-bang a read of one MII register; returns the 16-bit value.
   mii_cmd encodes start/opcode (0xf6 pattern), PHY address and register. */
737 static int mdio_read(struct net_device *dev, int phy_id, int location)
739 	struct netdev_private *np = netdev_priv(dev);
740 	void __iomem *mdio_addr = np->base + MIICtrl;
741 	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
742 	int i, retval = 0;
744 	if (np->mii_preamble_required)
745 		mdio_sync(mdio_addr);
747 	/* Shift the read command bits out. */
748 	for (i = 15; i >= 0; i--) {
749 		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
751 		iowrite8(dataval, mdio_addr);
752 		mdio_delay();
753 		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
754 		mdio_delay();
756 	/* Read the two transition, 16 data, and wire-idle bits. */
757 	for (i = 19; i > 0; i--) {
758 		iowrite8(MDIO_EnbIn, mdio_addr);
759 		mdio_delay();
760 		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
761 		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
762 		mdio_delay();
/* 19 bits were clocked in; the >>1 discards the final wire-idle bit. */
764 	return (retval>>1) & 0xffff;
/* Bit-bang a write of one MII register; the full 32-bit frame
   (start/opcode 0x5002 pattern, PHY address, register, value) is shifted
   out MSB first. */
767 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
769 	struct netdev_private *np = netdev_priv(dev);
770 	void __iomem *mdio_addr = np->base + MIICtrl;
771 	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
772 	int i;
774 	if (np->mii_preamble_required)
775 		mdio_sync(mdio_addr);
777 	/* Shift the command bits out. */
778 	for (i = 31; i >= 0; i--) {
779 		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
781 		iowrite8(dataval, mdio_addr);
782 		mdio_delay();
783 		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
784 		mdio_delay();
786 	/* Clear out extra bits. */
787 	for (i = 2; i > 0; i--) {
788 		iowrite8(MDIO_EnbIn, mdio_addr);
789 		mdio_delay();
790 		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
791 		mdio_delay();
793 	return;
/* Poll BMSR of the first PHY for link-up (bit 0x0004), sleeping 1 ms per
   try, for at most 'wait' tries.  Returns 0 on link, -1 on timeout. */
796 static int mdio_wait_link(struct net_device *dev, int wait)
798 	int bmsr;
799 	int phy_id;
800 	struct netdev_private *np;
802 	np = netdev_priv(dev);
803 	phy_id = np->phys[0];
805 	do {
806 		bmsr = mdio_read(dev, phy_id, MII_BMSR);
807 		if (bmsr & 0x0004)
808 			return 0;
809 		mdelay(1);
810 	} while (--wait > 0);
811 	return -1;
/* ndo_open: request the IRQ, initialize the descriptor rings, program
   the MAC address/MTU/DMA poll periods, start the Tx engine and the
   media-check timer, then enable interrupts.  Returns 0 or the
   request_irq() error. */
814 static int netdev_open(struct net_device *dev)
816 	struct netdev_private *np = netdev_priv(dev);
817 	void __iomem *ioaddr = np->base;
818 	unsigned long flags;
819 	int i;
821 	/* Do we need to reset the chip??? */
823 	i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
824 	if (i)
825 		return i;
827 	if (netif_msg_ifup(np))
828 		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
829 			   dev->name, dev->irq);
830 	init_ring(dev);
832 	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
833 	/* The Tx list pointer is written as packets are queued. */
835 	/* Initialize other registers. */
836 	__set_mac_addr(dev);
/* +18 leaves room for the VLAN tag (MTU + 14-byte header + 4-byte tag). */
837 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
838 	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
839 #else
840 	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
841 #endif
842 	if (dev->mtu > 2047)
843 		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
845 	/* Configure the PCI bus bursts and FIFO thresholds. */
847 	if (dev->if_port == 0)
848 		dev->if_port = np->default_port;
850 	spin_lock_init(&np->mcastlock);
852 	set_rx_mode(dev);
853 	iowrite16(0, ioaddr + IntrEnable);
854 	iowrite16(0, ioaddr + DownCounter);
855 	/* Set the chip to poll every N*320nsec. */
856 	iowrite8(100, ioaddr + RxDMAPollPeriod);
857 	iowrite8(127, ioaddr + TxDMAPollPeriod);
858 	/* Fix DFE-580TX packet drop issue */
859 	if (np->pci_dev->revision >= 0x14)
860 		iowrite8(0x01, ioaddr + DebugCtrl1);
861 	netif_start_queue(dev);
863 	spin_lock_irqsave(&np->lock, flags);
864 	reset_tx(dev);
865 	spin_unlock_irqrestore(&np->lock, flags);
867 	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
869 	if (netif_msg_ifup(np))
870 		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
871 			   "MAC Control %x, %4.4x %4.4x.\n",
872 			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
873 			   ioread32(ioaddr + MACCtrl0),
874 			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
876 	/* Set the timer to check for link beat. */
877 	init_timer(&np->timer);
878 	np->timer.expires = jiffies + 3*HZ;
879 	np->timer.data = (unsigned long)dev;
880 	np->timer.function = &netdev_timer;				/* timer handler */
881 	add_timer(&np->timer);
883 	/* Enable interrupts by setting the interrupt mask. */
884 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
886 	return 0;
/* Read the link partner's abilities and update the MAC's full-duplex
   bit if the negotiated duplex changed; with forced media (or no link
   partner, LPA == 0xffff) just apply the configured duplex. */
889 static void check_duplex(struct net_device *dev)
891 	struct netdev_private *np = netdev_priv(dev);
892 	void __iomem *ioaddr = np->base;
893 	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
894 	int negotiated = mii_lpa & np->mii_if.advertising;
895 	int duplex;
897 	/* Force media */
898 	if (!np->an_enable || mii_lpa == 0xffff) {
899 		if (np->mii_if.full_duplex)
900 			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
901 				ioaddr + MACCtrl0);
902 		return;
905 	/* Autonegotiation */
/* Full duplex if 100baseTx-FD (0x0100) or 10baseT-FD only (0x0040). */
906 	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
907 	if (np->mii_if.full_duplex != duplex) {
908 		np->mii_if.full_duplex = duplex;
909 		if (netif_msg_link(np))
910 			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
911 				   "negotiated capability %4.4x.\n", dev->name,
912 				   duplex ? "full" : "half", np->phys[0], negotiated);
913 		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
/* Periodic (10s) media monitor: log status if enabled, re-check duplex,
   and re-arm itself. */
917 static void netdev_timer(unsigned long data)
919 	struct net_device *dev = (struct net_device *)data;
920 	struct netdev_private *np = netdev_priv(dev);
921 	void __iomem *ioaddr = np->base;
922 	int next_tick = 10*HZ;
924 	if (netif_msg_timer(np)) {
925 		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
926 			   "Tx %x Rx %x.\n",
927 			   dev->name, ioread16(ioaddr + IntrEnable),
928 			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
930 	check_duplex(dev);
931 	np->timer.expires = jiffies + next_tick;
932 	add_timer(&np->timer);
/* ndo_tx_timeout: dump the Tx ring state, reset the Tx engine under
   np->lock with the tx tasklet disabled, then re-enable interrupts and
   wake the queue if there is room. */
935 static void tx_timeout(struct net_device *dev)
937 	struct netdev_private *np = netdev_priv(dev);
938 	void __iomem *ioaddr = np->base;
939 	unsigned long flag;
941 	netif_stop_queue(dev);
/* Quiesce deferred Tx processing and mask IRQs before touching the ring. */
942 	tasklet_disable(&np->tx_tasklet);
943 	iowrite16(0, ioaddr + IntrEnable);
944 	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
945 		   "TxFrameId %2.2x,"
946 		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
947 		   ioread8(ioaddr + TxFrameId));
950 		int i;
951 		for (i=0; i<TX_RING_SIZE; i++) {
952 			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
953 				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
954 				le32_to_cpu(np->tx_ring[i].next_desc),
955 				le32_to_cpu(np->tx_ring[i].status),
956 				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
957 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
958 				le32_to_cpu(np->tx_ring[i].frag[0].length));
960 		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
961 			ioread32(np->base + TxListPtr),
962 			netif_queue_stopped(dev));
963 		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
964 			np->cur_tx, np->cur_tx % TX_RING_SIZE,
965 			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
966 		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
967 		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
969 	spin_lock_irqsave(&np->lock, flag);
971 	/* Stop and restart the chip's Tx processes . */
972 	reset_tx(dev);
973 	spin_unlock_irqrestore(&np->lock, flag);
975 	dev->if_port = 0;
977 	dev->trans_start = jiffies;
978 	np->stats.tx_errors++;
979 	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
980 		netif_wake_queue(dev);
982 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
983 	tasklet_enable(&np->tx_tasklet);
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	np->cur_task = 0;

	/* Default buffer for standard MTU; oversized MTUs get mtu + slack. */
	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

	/* Initialize all Rx descriptors: chain next_desc pointers into a
	   circular list and mark every slot empty. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].length = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Fill in the Rx buffers.  Handle allocation failure gracefully:
	   stop at the first failed skb and let refill_rx() catch up later. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		np->rx_ring[i].frag[0].addr = cpu_to_le32(
			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE));
		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	}
	/* dirty_rx goes negative (as unsigned) if some buffers are missing. */
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	return;
}
/*
 * Tx tasklet: link all descriptors queued since the last run
 * (cur_task..cur_tx) into the hardware's descriptor chain, request a
 * Tx-done interrupt on the newest one, and kick the chip if its list
 * pointer is idle.
 */
static void tx_poll (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned head = np->cur_task % TX_RING_SIZE;
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;
		txdesc = &np->tx_ring[entry];
		if (np->last_tx) {
			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
				entry*sizeof(struct netdev_desc));
		}
		np->last_tx = txdesc;
	}
	/* Indicate the latest descriptor of tx ring */
	txdesc->status |= cpu_to_le32(DescIntrOnTx);

	/* A zero TxListPtr means the DMA engine is idle — restart it at
	   the first descriptor we just chained. */
	if (ioread32 (np->base + TxListPtr) == 0)
		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
			np->base + TxListPtr);
	return;
}
/*
 * hard_start_xmit: queue one skb into the Tx ring.
 * The descriptor is filled here, but the hardware hand-off (descriptor
 * chaining and the list-pointer kick) is deferred to the tx_poll tasklet.
 * Always returns 0 (packet accepted).
 */
static int
start_tx (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	txdesc->next_desc = 0;
	/* Frame id (entry << 2) lets the interrupt handler match hardware
	   TxFrameId against ring slots. */
	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);

	/* Increment cur_tx before tasklet_schedule() */
	np->cur_tx++;
	mb();	/* descriptor must be visible before the tasklet runs */
	/* Schedule a tx_poll() task */
	tasklet_schedule(&np->tx_tasklet);

	/* On some architectures: explicitly flush cache lines here. */
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
			&& !netif_queue_stopped(dev)) {
		/* do nothing */
	} else {
		/* Ring is (nearly) full — stop the stack until Tx-done. */
		netif_stop_queue (dev);
	}
	dev->trans_start = jiffies;
	if (netif_msg_tx_queued(np)) {
		printk (KERN_DEBUG
			"%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return 0;
}
/* Reset hardware tx and free all of tx buffers */
static int
reset_tx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;
	int irq = in_interrupt();	/* pick the right kfree_skb variant below */

	/* Reset tx logic, TxListPtr will be cleaned */
	iowrite16 (TxDisable, ioaddr + MACCtrl1);
	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);

	/* free all tx skbuff */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;

		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, PCI_DMA_TODEVICE);
			if (irq)
				dev_kfree_skb_irq (skb);
			else
				dev_kfree_skb (skb);
			np->tx_skbuff[i] = NULL;
			np->stats.tx_dropped++;
		}
	}
	np->cur_tx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->last_tx = NULL;
	iowrite8(127, ioaddr + TxDMAPollPeriod);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
	return 0;
}
/* The interrupt handler cleans up after the Tx thread,
   and schedule a Rx thread work */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int hw_frame_id;
	int tx_cnt;
	int tx_status;
	int handled = 0;
	int i;

	do {
		/* Read and ack all pending interrupt causes in one shot. */
		int intr_status = ioread16(ioaddr + IntrStatus);
		iowrite16(intr_status, ioaddr + IntrStatus);

		if (netif_msg_intr(np))
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (!(intr_status & DEFAULT_INTR))
			break;	/* not ours */

		handled = 1;

		/* Rx work is deferred to the rx_poll tasklet; mask Rx
		   interrupts until the tasklet re-enables them. */
		if (intr_status & (IntrRxDMADone)) {
			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
					ioaddr + IntrEnable);
			if (np->budget < 0)
				np->budget = RX_BUDGET;
			tasklet_schedule(&np->rx_tasklet);
		}
		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
			/* Drain TxStatus; bit 0x80 means a report is valid.
			   tx_cnt bounds the loop to 32 reports per IRQ. */
			tx_status = ioread16 (ioaddr + TxStatus);
			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
				if (netif_msg_tx_done(np))
					printk
						("%s: Transmit status is %2.2x.\n",
					dev->name, tx_status);
				if (tx_status & 0x1e) {
					if (netif_msg_tx_err(np))
						printk("%s: Transmit error status %4.4x.\n",
							   dev->name, tx_status);
					np->stats.tx_errors++;
					if (tx_status & 0x10)
						np->stats.tx_fifo_errors++;
					if (tx_status & 0x08)
						np->stats.collisions++;
					if (tx_status & 0x04)
						np->stats.tx_fifo_errors++;
					if (tx_status & 0x02)
						np->stats.tx_window_errors++;

					/*
					** This reset has been verified on
					** DFE-580TX boards ! phdm@macqel.be.
					*/
					if (tx_status & 0x10) {	/* TxUnderrun */
						/* Restart Tx FIFO and transmitter */
						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
						/* No need to reset the Tx pointer here */
					}
					/* Restart the Tx. Need to make sure tx enabled */
					i = 10;
					do {
						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
							break;
						mdelay(1);
					} while (--i);
				}
				/* Yup, this is a documentation bug. It cost me *hours*. */
				iowrite16 (0, ioaddr + TxStatus);
				if (tx_cnt < 0) {
					iowrite32(5000, ioaddr + DownCounter);
					break;
				}
				tx_status = ioread16 (ioaddr + TxStatus);
			}
			/* High byte of the last status is the frame id of the
			   most recently completed transmit. */
			hw_frame_id = (tx_status >> 8) & 0xff;
		} else {
			hw_frame_id = ioread8(ioaddr + TxFrameId);
		}

		if (np->pci_dev->revision >= 0x14) {
			/* Newer revisions: reclaim by matching the software
			   frame id stored in each descriptor against the
			   hardware's last-completed frame id. */
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				int sw_frame_id;
				sw_frame_id = (le32_to_cpu(
					np->tx_ring[entry].status) >> 2) & 0xff;
				if (sw_frame_id == hw_frame_id &&
					!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				if (sw_frame_id == (hw_frame_id + 1) %
					TX_RING_SIZE)
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		} else {
			/* Older revisions: reclaim by the DescTxDMADone bit
			   (0x00010000) in the descriptor status. */
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				if (!(le32_to_cpu(np->tx_ring[entry].status)
							& 0x00010000))
					break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		}

		if (netif_queue_stopped(dev) &&
			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear busy flag. */
			netif_wake_queue (dev);
		}
		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
			netdev_error(dev, intr_status);
	} while (0);
	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
/*
 * Rx tasklet: process up to np->budget completed Rx descriptors,
 * pass the packets up the stack, refill the ring, and re-enable Rx
 * interrupts.  If the budget runs out, reschedule ourselves instead.
 */
static void rx_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->budget;
	void __iomem *ioaddr = np->base;
	int received = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status = le32_to_cpu(desc->status);
		int pkt_len;

		if (--boguscnt < 0) {
			goto not_done;	/* budget exhausted; resume later */
		}
		if (!(frame_status & DescOwn))
			break;	/* descriptor still owned by the chip */
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   frame_status);
		if (frame_status & 0x001f4000) {
			/* There was a error. */
			if (netif_msg_rx_err(np))
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
					   frame_status);
			np->stats.rx_errors++;
			if (frame_status & 0x00100000) np->stats.rx_length_errors++;
			if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
			if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
					   " status %8.8x.\n",
					   dev->name, frame_status);
			}
		} else {
			struct sk_buff *skb;
#ifndef final_version
			if (netif_msg_rx_status(np))
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   ", bogus_cnt %d.\n",
					   pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,
							    le32_to_cpu(desc->frag[0].addr),
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				pci_dma_sync_single_for_device(np->pci_dev,
							       le32_to_cpu(desc->frag[0].addr),
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
				skb_put(skb, pkt_len);
			} else {
				/* Large frame: hand the mapped buffer itself up
				   and let refill_rx() replace it. */
				pci_unmap_single(np->pci_dev,
					le32_to_cpu(desc->frag[0].addr),
					np->rx_buf_sz,
					PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
		}
		entry = (entry + 1) % RX_RING_SIZE;
		received++;
	}
	np->cur_rx = entry;
	refill_rx (dev);
	np->budget -= received;
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	return;

not_done:
	np->cur_rx = entry;
	refill_rx (dev);
	if (!received)
		received = 1;
	np->budget -= received;
	if (np->budget <= 0)
		np->budget = RX_BUDGET;
	/* More work pending: run again without re-enabling Rx interrupts. */
	tasklet_schedule(&np->rx_tasklet);
	return;
}
/*
 * Replenish the Rx ring: allocate a fresh skb for every consumed slot
 * between dirty_rx and cur_rx and hand the descriptors back to the chip.
 * Allocation failure simply leaves the slot empty for the next round.
 */
static void refill_rx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry;
	int cnt = 0;

	/* Refill the Rx ring buffers. */
	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb->dev = dev;		/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				pci_map_single(np->pci_dev, skb->data,
					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
		cnt++;
	}
	return;
}
/*
 * Handle the uncommon interrupt causes: link changes (re-read the PHY,
 * update speed/duplex and flow control, toggle the carrier), statistics
 * counters nearing overflow, and PCI bus errors.
 */
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mii_ctl, mii_advertise, mii_lpa;
	int speed;

	if (intr_status & LinkChange) {
		if (mdio_wait_link(dev, 10) == 0) {
			printk(KERN_INFO "%s: Link up\n", dev->name);
			if (np->an_enable) {
				/* Autonegotiation: the common subset of what
				   we advertise and the partner accepts wins. */
				mii_advertise = mdio_read(dev, np->phys[0],
							   MII_ADVERTISE);
				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
				mii_advertise &= mii_lpa;
				printk(KERN_INFO "%s: Link changed: ",
					dev->name);
				if (mii_advertise & ADVERTISE_100FULL) {
					np->speed = 100;
					printk("100Mbps, full duplex\n");
				} else if (mii_advertise & ADVERTISE_100HALF) {
					np->speed = 100;
					printk("100Mbps, half duplex\n");
				} else if (mii_advertise & ADVERTISE_10FULL) {
					np->speed = 10;
					printk("10Mbps, full duplex\n");
				} else if (mii_advertise & ADVERTISE_10HALF) {
					np->speed = 10;
					printk("10Mbps, half duplex\n");
				} else
					printk("\n");

			} else {
				/* Forced mode: read speed/duplex from BMCR. */
				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
				np->speed = speed;
				printk(KERN_INFO "%s: Link changed: %dMbps ,",
					dev->name, speed);
				printk("%s duplex.\n",
					(mii_ctl & BMCR_FULLDPLX) ?
						"full" : "half");
			}
			check_duplex(dev);
			if (np->flowctrl && np->mii_if.full_duplex) {
				/* Accept PAUSE frames and enable MAC-level
				   flow control for full-duplex links. */
				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
					ioaddr + MulticastFilter1+2);
				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
					ioaddr + MACCtrl0);
			}
			netif_carrier_on(dev);
		} else {
			printk(KERN_INFO "%s: Link down\n", dev->name);
			netif_carrier_off(dev);
		}
	}
	if (intr_status & StatsMax) {
		get_stats(dev);
	}
	if (intr_status & IntrPCIErr) {
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			   dev->name, intr_status);
		/* We must do a global reset of DMA to continue. */
	}
}
/*
 * Fold the chip's hardware statistics counters into np->stats and
 * return it.  The counters are clear-on-read, so the unused ones are
 * still read (and discarded) to reset them.
 */
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	/* We should lock this segment of code for SMP eventually, although
	   the vulnerability window is very small and statistics are
	   non-critical. */
	/* The chip only need report frame silently dropped. */
	np->stats.rx_missed_errors	+= ioread8(ioaddr + RxMissed);
	np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
	np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
	np->stats.collisions += ioread8(ioaddr + StatsLateColl);
	np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
	np->stats.collisions += ioread8(ioaddr + StatsOneColl);
	np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
	ioread8(ioaddr + StatsTxDefer);
	/* Drain (and thereby clear) the remaining counter registers. */
	for (i = StatsTxDefer; i <= StatsMcastRx; i++)
		ioread8(ioaddr + i);
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;

	return &np->stats;
}
/*
 * Program the chip's receive filter from dev->flags and the multicast
 * list: promiscuous, all-multicast, a 64-bit CRC hash of the multicast
 * addresses, or unicast+broadcast only.
 */
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mc_filter[4];			/* Multicast hash filter */
	u32 rx_mode;
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((dev->mc_count > multicast_filter_limit)
			   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (dev->mc_count) {
		struct dev_mc_list *mclist;
		int bit;
		int index;
		int crc;
		memset (mc_filter, 0, sizeof (mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
			/* Hash on the top 6 bits of the little-endian CRC. */
			crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000) index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		}
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
	} else {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		return;
	}
	/* Keep the PAUSE-frame filter bit when flow control is active. */
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;

	for (i = 0; i < 4; i++)
		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	iowrite8(rx_mode, ioaddr + RxMode);
}
1548 static int __set_mac_addr(struct net_device *dev)
1550 struct netdev_private *np = netdev_priv(dev);
1551 u16 addr16;
1553 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1554 iowrite16(addr16, np->base + StationAddr);
1555 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1556 iowrite16(addr16, np->base + StationAddr+2);
1557 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1558 iowrite16(addr16, np->base + StationAddr+4);
1559 return 0;
1562 static int check_if_running(struct net_device *dev)
1564 if (!netif_running(dev))
1565 return -EINVAL;
1566 return 0;
1569 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1571 struct netdev_private *np = netdev_priv(dev);
1572 strcpy(info->driver, DRV_NAME);
1573 strcpy(info->version, DRV_VERSION);
1574 strcpy(info->bus_info, pci_name(np->pci_dev));
/* ethtool .get_settings hook: read link settings from the MII layer
   under the device lock. */
static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_gset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}
/* ethtool .set_settings hook: apply link settings through the MII
   layer under the device lock; returns its status code. */
static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_sset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return res;
}
/* ethtool .nway_reset hook: restart PHY autonegotiation. */
static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}
/* ethtool .get_link hook: report link-up state from the MII layer. */
static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}
/* ethtool .get_msglevel hook: return the driver's debug message mask. */
static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}
/* ethtool .set_msglevel hook: set the driver's debug message mask. */
static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}
/* ethtool operations supported by this driver. */
static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,	/* gate all ops on netif_running() */
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
};
/*
 * SIOCxMIIxxx ioctl handler: forward MII register access requests to
 * the generic MII layer under the device lock.  Rejected with -EINVAL
 * when the interface is down.
 */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);

	return rc;
}
/*
 * Bring the interface down: kill the tasklets, quiesce the hardware
 * (interrupts, DMA, MAC), reset the chip, release the IRQ and timer,
 * and free every skb still held by the Rx and Tx rings.
 */
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	/* Wait and kill tasklet */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);
	np->cur_tx = 0;
	np->dirty_tx = 0;
	np->cur_task = 0;
	np->last_tx = NULL;

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Disable Rx and Tx DMA for safely release resource */
	iowrite32(0x500, ioaddr + DMACtrl);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

	/* Wait (bounded) for both DMA engines to report idle. */
	for (i = 2000; i > 0; i--) {
		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
			break;
		mdelay(1);
	}

	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr +ASICCtrl + 2);

	/* Wait (bounded) for the reset to complete. */
	for (i = 2000; i > 0; i--) {
		if ((ioread16(ioaddr + ASICCtrl +2) & ResetBusy) == 0)
			break;
		mdelay(1);
	}

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n",
			   (int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
				   np->tx_ring[i].frag[0].length);
		printk("\n"KERN_DEBUG "  Rx ring %8.8x:\n",
			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
				   np->rx_ring[i].frag[0].length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	del_timer_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				le32_to_cpu(np->rx_ring[i].frag[0].addr),
				np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}
/*
 * PCI .remove hook: unregister the netdev, free the DMA-coherent
 * descriptor rings, unmap the register window and release the PCI
 * regions.  The ordering mirrors probe in reverse.
 */
static void __devexit sundance_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev(dev);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
			np->rx_ring_dma);
		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
			np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
		pci_set_drvdata(pdev, NULL);
	}
}
/* PCI driver glue: device table plus probe/remove entry points. */
static struct pci_driver sundance_driver = {
	.name		= DRV_NAME,
	.id_table	= sundance_pci_tbl,
	.probe		= sundance_probe1,
	.remove		= __devexit_p(sundance_remove1),
};
/* Module entry point: register the PCI driver. */
static int __init sundance_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_register_driver(&sundance_driver);
}
/* Module exit point: unregister the PCI driver. */
static void __exit sundance_exit(void)
{
	pci_unregister_driver(&sundance_driver);
}

module_init(sundance_init);
module_exit(sundance_exit);