1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2 /*
3 Written 1999-2000 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
15 Annapolis MD 21403
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
19 [link no longer provides useful info -jgarzik]
20 Archives of the mailing list are still available at
	http://www.beowulf.org/pipermail/netdrivers/
*/

#define DRV_NAME	"sundance"
26 #define DRV_VERSION "1.2"
27 #define DRV_RELDATE "11-Sep-2006"
30 /* The user-configurable values.
31 These may be modified when a driver module is loaded.*/
32 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
33 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34 Typical is a 64 element hash table based on the Ethernet CRC. */
35 static const int multicast_filter_limit = 32;
37 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38 Setting to > 1518 effectively disables this feature.
39 This chip can receive into offset buffers, so the Alpha does not
40 need a copy-align. */
41 static int rx_copybreak;
42 static int flowctrl=1;
44 /* media[] specifies the media type the NIC operates at.
45 autosense Autosensing active media.
46 10mbps_hd 10Mbps half duplex.
47 10mbps_fd 10Mbps full duplex.
48 100mbps_hd 100Mbps half duplex.
49 100mbps_fd 100Mbps full duplex.
50 0 Autosensing active media.
51 1 10Mbps half duplex.
52 2 10Mbps full duplex.
53 3 100Mbps half duplex.
	 4		100Mbps full duplex.
*/
#define MAX_UNITS 8
57 static char *media[MAX_UNITS];
60 /* Operational parameters that are set at compile time. */
62 /* Keep the ring sizes a power of two for compile efficiency.
63 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64 Making the Tx ring too large decreases the effectiveness of channel
65 bonding and packet priority, and more than 128 requires modifying the
66 Tx error recovery.
67 Large receive rings merely waste memory. */
68 #define TX_RING_SIZE 32
69 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
70 #define RX_RING_SIZE 64
71 #define RX_BUDGET 32
72 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
73 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
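/* Illustrative note (added, not part of the original driver): because the
   ring sizes above are powers of two and the ring indices (cur_tx, dirty_tx,
   cur_rx, dirty_rx) are unsigned, an expression such as

	entry = np->cur_tx % TX_RING_SIZE;

   compiles down to a simple bit mask, the equivalent of

	entry = np->cur_tx & (TX_RING_SIZE - 1);

   which is why the comment above insists on 2^N ring sizes. */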
75 /* Operational parameters that usually are not changed. */
76 /* Time in jiffies before concluding the transmitter is hung. */
77 #define TX_TIMEOUT (4*HZ)
78 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
80 /* Include files, designed to support most kernel versions 2.0.0 and later. */
81 #include <linux/module.h>
82 #include <linux/kernel.h>
83 #include <linux/string.h>
84 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/ioport.h>
87 #include <linux/interrupt.h>
88 #include <linux/pci.h>
89 #include <linux/netdevice.h>
90 #include <linux/etherdevice.h>
91 #include <linux/skbuff.h>
92 #include <linux/init.h>
93 #include <linux/bitops.h>
94 #include <asm/uaccess.h>
95 #include <asm/processor.h> /* Processor type for cache alignment. */
96 #include <asm/io.h>
97 #include <linux/delay.h>
98 #include <linux/spinlock.h>
99 #ifndef _COMPAT_WITH_OLD_KERNEL
100 #include <linux/crc32.h>
101 #include <linux/ethtool.h>
102 #include <linux/mii.h>
103 #else
104 #include "crc32.h"
105 #include "ethtool.h"
106 #include "mii.h"
107 #include "compat.h"
108 #endif
110 /* These identify the driver base version and may not be removed. */
111 static const char version[] __devinitconst =
112 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
113 " Written by Donald Becker\n";
115 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
116 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
117 MODULE_LICENSE("GPL");
119 module_param(debug, int, 0);
120 module_param(rx_copybreak, int, 0);
121 module_param_array(media, charp, NULL, 0);
122 module_param(flowctrl, int, 0);
123 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
124 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
125 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
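/* Hypothetical example (not from the original source): the parameters above
   could be supplied at load time roughly like this, with media[] taking one
   comma-separated entry per card, up to MAX_UNITS entries:

	modprobe sundance debug=2 rx_copybreak=100 flowctrl=1 \
		media=100mbps_fd,autosense

   The accepted media strings are listed in the media[] comment near the top
   of this file. */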
/*
				Theory of Operation
130 I. Board Compatibility
132 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
134 II. Board-specific settings
136 III. Driver operation
138 IIIa. Ring buffers
140 This driver uses two statically allocated fixed-size descriptor lists
141 formed into rings by a branch from the final descriptor to the beginning of
142 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
143 Some chips explicitly use only 2^N sized rings, while others use a
144 'next descriptor' pointer that the driver forms into rings.
146 IIIb/c. Transmit/Receive Structure
148 This driver uses a zero-copy receive and transmit scheme.
149 The driver allocates full frame size skbuffs for the Rx ring buffers at
150 open() time and passes the skb->data field to the chip as receive data
151 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
152 a fresh skbuff is allocated and the frame is copied to the new skbuff.
153 When the incoming frame is larger, the skbuff is passed directly up the
154 protocol stack. Buffers consumed this way are replaced by newly allocated
155 skbuffs in a later phase of receives.
157 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
158 using a full-sized skbuff for small frames vs. the copying costs of larger
159 frames. New boards are typically used in generously configured machines
160 and the underfilled buffers have negligible impact compared to the benefit of
161 a single allocation size, so the default value of zero results in never
162 copying packets. When copying is done, the cost is usually mitigated by using
163 a combined copy/checksum routine. Copying also preloads the cache, which is
164 most useful with small frames.
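As a rough sketch (not verbatim from the receive path below), the
copy-vs-pass-up decision in rx_poll() looks like:

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);		// align the IP header
		// copy the small frame into the fresh skb and keep the
		// original full-sized ring buffer mapped for reuse
	} else {
		// hand the full-sized ring skb to the stack; the ring slot
		// is refilled later in refill_rx()
	}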
166 A subtle aspect of the operation is that the IP header at offset 14 in an
167 ethernet frame isn't longword aligned for further processing.
168 Unaligned buffers are permitted by the Sundance hardware, so
169 frames are received into the skbuff at an offset of "+2", 16-byte aligning
170 the IP header.
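Concretely (a sketch of what init_ring() and refill_rx() do below), each
receive buffer is prepared with

	skb_reserve(skb, 2);	// 14-byte Ethernet header + 2 => the IP
				// header lands on a 16-byte boundary

before its DMA address is written into the descriptor, so no realignment
copy is needed on this hardware.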
172 IIId. Synchronization
174 The driver runs as two independent, single-threaded flows of control. One
175 is the send-packet routine, which enforces single-threaded use by the
176 dev->tbusy flag. The other thread is the interrupt handler, which is single
177 threaded by the hardware and interrupt handling software.
179 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
180 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.
184 The interrupt handler has exclusive control over the Rx ring and records stats
185 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
186 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
187 clears both the tx_full and tbusy flags.
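(Note, added for this copy: the tbusy/tx_full description above is the
historical model.  In this version of the driver the same protocol is
expressed with netif_stop_queue()/netif_wake_queue() and the cur_tx/dirty_tx
indices -- see start_tx() and intr_handler() below.)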
189 IV. Notes
191 IVb. References
193 The Sundance ST201 datasheet, preliminary version.
194 The Kendin KS8723 datasheet, preliminary version.
195 The ICplus IP100 datasheet, preliminary version.
196 http://www.scyld.com/expert/100mbps.html
197 http://www.scyld.com/expert/NWay.html
IVc. Errata

*/
203 #ifndef CONFIG_SUNDANCE_MMIO
204 #define USE_IO_OPS 1
205 #endif
207 static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
208 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
209 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
210 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
211 { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
212 { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
213 { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ }
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
219 enum {
	netdev_io_size = 128
};
223 struct pci_id_info {
	const char *name;
};
226 static const struct pci_id_info pci_id_tbl[] __devinitdata = {
227 {"D-Link DFE-550TX FAST Ethernet Adapter"},
228 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
229 {"D-Link DFE-580TX 4 port Server Adapter"},
230 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
231 {"D-Link DL10050-based FAST Ethernet Adapter"},
232 {"Sundance Technology Alta"},
233 {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
	{ }	/* terminate list. */
};
/* This driver was written to use PCI memory space; however, x86-oriented
   hardware often uses I/O space accesses. */
240 /* Offsets to the device registers.
241 Unlike software-only systems, device drivers interact with complex hardware.
242 It's not useful to define symbolic names for every register bit in the
243 device. The name can only partially document the semantics and make
244 the driver longer and more difficult to read.
245 In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
248 enum alta_offsets {
249 DMACtrl = 0x00,
250 TxListPtr = 0x04,
251 TxDMABurstThresh = 0x08,
252 TxDMAUrgentThresh = 0x09,
253 TxDMAPollPeriod = 0x0a,
254 RxDMAStatus = 0x0c,
255 RxListPtr = 0x10,
256 DebugCtrl0 = 0x1a,
257 DebugCtrl1 = 0x1c,
258 RxDMABurstThresh = 0x14,
259 RxDMAUrgentThresh = 0x15,
260 RxDMAPollPeriod = 0x16,
261 LEDCtrl = 0x1a,
262 ASICCtrl = 0x30,
263 EEData = 0x34,
264 EECtrl = 0x36,
265 FlashAddr = 0x40,
266 FlashData = 0x44,
267 TxStatus = 0x46,
268 TxFrameId = 0x47,
269 DownCounter = 0x18,
270 IntrClear = 0x4a,
271 IntrEnable = 0x4c,
272 IntrStatus = 0x4e,
273 MACCtrl0 = 0x50,
274 MACCtrl1 = 0x52,
275 StationAddr = 0x54,
276 MaxFrameSize = 0x5A,
277 RxMode = 0x5c,
278 MIICtrl = 0x5e,
279 MulticastFilter0 = 0x60,
280 MulticastFilter1 = 0x64,
281 RxOctetsLow = 0x68,
282 RxOctetsHigh = 0x6a,
283 TxOctetsLow = 0x6c,
284 TxOctetsHigh = 0x6e,
285 TxFramesOK = 0x70,
286 RxFramesOK = 0x72,
287 StatsCarrierError = 0x74,
288 StatsLateColl = 0x75,
289 StatsMultiColl = 0x76,
290 StatsOneColl = 0x77,
291 StatsTxDefer = 0x78,
292 RxMissed = 0x79,
293 StatsTxXSDefer = 0x7a,
294 StatsTxAbort = 0x7b,
295 StatsBcastTx = 0x7c,
296 StatsBcastRx = 0x7d,
297 StatsMcastTx = 0x7e,
298 StatsMcastRx = 0x7f,
299 /* Aliased and bogus values! */
	RxStatus = 0x0c,
};
302 enum ASICCtrl_HiWord_bit {
303 GlobalReset = 0x0001,
304 RxReset = 0x0002,
305 TxReset = 0x0004,
306 DMAReset = 0x0008,
307 FIFOReset = 0x0010,
308 NetworkReset = 0x0020,
309 HostReset = 0x0040,
	ResetBusy = 0x0400,
};
313 /* Bits in the interrupt status/mask registers. */
314 enum intr_status_bits {
315 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
316 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
317 IntrDrvRqst=0x0040,
318 StatsMax=0x0080, LinkChange=0x0100,
	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};
322 /* Bits in the RxMode register. */
323 enum rx_mode_bits {
324 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
327 /* Bits in MACCtrl. */
328 enum mac_ctrl0_bits {
329 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
332 enum mac_ctrl1_bits {
333 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
334 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};
338 /* The Rx and Tx buffer descriptors. */
339 /* Note that using only 32 bit fields simplifies conversion to big-endian
340 architectures. */
341 struct netdev_desc {
342 __le32 next_desc;
343 __le32 status;
	struct desc_frag { __le32 addr, length; } frag[1];
};
347 /* Bits in netdev_desc.status */
348 enum desc_status_bits {
349 DescOwn=0x8000,
350 DescEndPacket=0x4000,
351 DescEndRing=0x2000,
352 LastFrag=0x80000000,
353 DescIntrOnTx=0x8000,
354 DescIntrOnDMADone=0x80000000,
	DisableAlign = 0x00000001,
};
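/* Descriptive note (added, not in the original): a receive descriptor is
   handed to the chip with its status cleared and a single fragment, e.g. in
   init_ring()/refill_rx():

	desc->frag[0].addr   = cpu_to_le32(dma_handle);
	desc->frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	desc->status         = 0;

   When the chip completes a frame it writes the status word back; rx_poll()
   treats a status with DescOwn set as a finished frame and reads the packet
   length from the low 13 bits of the status. */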
358 #define PRIV_ALIGN 15 /* Required alignment mask */
359 /* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
360 within the structure. */
361 #define MII_CNT 4
362 struct netdev_private {
363 /* Descriptor rings first for alignment. */
364 struct netdev_desc *rx_ring;
365 struct netdev_desc *tx_ring;
366 struct sk_buff* rx_skbuff[RX_RING_SIZE];
367 struct sk_buff* tx_skbuff[TX_RING_SIZE];
368 dma_addr_t tx_ring_dma;
369 dma_addr_t rx_ring_dma;
370 struct timer_list timer; /* Media monitoring timer. */
371 /* Frequently used values: keep some adjacent for cache effect. */
372 spinlock_t lock;
373 spinlock_t rx_lock; /* Group with Tx control cache line. */
374 int msg_enable;
375 int chip_id;
376 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
377 unsigned int rx_buf_sz; /* Based on MTU+slack. */
378 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
379 unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
381 unsigned int flowctrl:1;
382 unsigned int default_port:4; /* Last dev->if_port value. */
383 unsigned int an_enable:1;
384 unsigned int speed;
385 struct tasklet_struct rx_tasklet;
386 struct tasklet_struct tx_tasklet;
387 int budget;
388 int cur_task;
389 /* Multicast and receive mode. */
390 spinlock_t mcastlock; /* SMP lock multicast updates. */
391 u16 mcast_filter[4];
392 /* MII transceiver section. */
393 struct mii_if_info mii_if;
394 int mii_preamble_required;
395 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
396 struct pci_dev *pci_dev;
	void __iomem *base;
};
400 /* The station address location in the EEPROM. */
401 #define EEPROM_SA_OFFSET 0x10
402 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
403 IntrDrvRqst | IntrTxDone | StatsMax | \
404 LinkChange)
406 static int change_mtu(struct net_device *dev, int new_mtu);
407 static int eeprom_read(void __iomem *ioaddr, int location);
408 static int mdio_read(struct net_device *dev, int phy_id, int location);
409 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
410 static int mdio_wait_link(struct net_device *dev, int wait);
411 static int netdev_open(struct net_device *dev);
412 static void check_duplex(struct net_device *dev);
413 static void netdev_timer(unsigned long data);
414 static void tx_timeout(struct net_device *dev);
415 static void init_ring(struct net_device *dev);
416 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
417 static int reset_tx (struct net_device *dev);
418 static irqreturn_t intr_handler(int irq, void *dev_instance);
419 static void rx_poll(unsigned long data);
420 static void tx_poll(unsigned long data);
421 static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
424 static void set_rx_mode(struct net_device *dev);
425 static int __set_mac_addr(struct net_device *dev);
426 static struct net_device_stats *get_stats(struct net_device *dev);
427 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
428 static int netdev_close(struct net_device *dev);
429 static const struct ethtool_ops ethtool_ops;
431 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
433 struct netdev_private *np = netdev_priv(dev);
434 void __iomem *ioaddr = np->base + ASICCtrl;
435 int countdown;
437 /* ST201 documentation states ASICCtrl is a 32bit register */
438 iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
439 /* ST201 documentation states reset can take up to 1 ms */
440 countdown = 10 + 1;
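	/* Added note: 11 polls x udelay(100) covers the documented 1 ms,
	   plus a little margin. */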
441 while (ioread32 (ioaddr) & (ResetBusy << 16)) {
442 if (--countdown == 0) {
443 printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
444 break;
446 udelay(100);
450 static const struct net_device_ops netdev_ops = {
451 .ndo_open = netdev_open,
452 .ndo_stop = netdev_close,
453 .ndo_start_xmit = start_tx,
454 .ndo_get_stats = get_stats,
455 .ndo_set_multicast_list = set_rx_mode,
456 .ndo_do_ioctl = netdev_ioctl,
457 .ndo_tx_timeout = tx_timeout,
458 .ndo_change_mtu = change_mtu,
459 .ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
463 static int __devinit sundance_probe1 (struct pci_dev *pdev,
464 const struct pci_device_id *ent)
466 struct net_device *dev;
467 struct netdev_private *np;
468 static int card_idx;
469 int chip_idx = ent->driver_data;
470 int irq;
471 int i;
472 void __iomem *ioaddr;
473 u16 mii_ctl;
474 void *ring_space;
475 dma_addr_t ring_dma;
476 #ifdef USE_IO_OPS
477 int bar = 0;
478 #else
479 int bar = 1;
480 #endif
481 int phy, phy_end, phy_idx = 0;
483 /* when built into the kernel, we only print version if device is found */
484 #ifndef MODULE
485 static int printed_version;
486 if (!printed_version++)
487 printk(version);
488 #endif
490 if (pci_enable_device(pdev))
491 return -EIO;
492 pci_set_master(pdev);
494 irq = pdev->irq;
496 dev = alloc_etherdev(sizeof(*np));
497 if (!dev)
498 return -ENOMEM;
499 SET_NETDEV_DEV(dev, &pdev->dev);
501 if (pci_request_regions(pdev, DRV_NAME))
502 goto err_out_netdev;
504 ioaddr = pci_iomap(pdev, bar, netdev_io_size);
505 if (!ioaddr)
506 goto err_out_res;
508 for (i = 0; i < 3; i++)
509 ((__le16 *)dev->dev_addr)[i] =
510 cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
511 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
513 dev->base_addr = (unsigned long)ioaddr;
514 dev->irq = irq;
516 np = netdev_priv(dev);
517 np->base = ioaddr;
518 np->pci_dev = pdev;
519 np->chip_id = chip_idx;
520 np->msg_enable = (1 << debug) - 1;
521 spin_lock_init(&np->lock);
522 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
523 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
525 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
526 if (!ring_space)
527 goto err_out_cleardev;
528 np->tx_ring = (struct netdev_desc *)ring_space;
529 np->tx_ring_dma = ring_dma;
531 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
532 if (!ring_space)
533 goto err_out_unmap_tx;
534 np->rx_ring = (struct netdev_desc *)ring_space;
535 np->rx_ring_dma = ring_dma;
537 np->mii_if.dev = dev;
538 np->mii_if.mdio_read = mdio_read;
539 np->mii_if.mdio_write = mdio_write;
540 np->mii_if.phy_id_mask = 0x1f;
541 np->mii_if.reg_num_mask = 0x1f;
543 /* The chip-specific entries in the device structure. */
544 dev->netdev_ops = &netdev_ops;
545 SET_ETHTOOL_OPS(dev, &ethtool_ops);
546 dev->watchdog_timeo = TX_TIMEOUT;
548 pci_set_drvdata(pdev, dev);
550 i = register_netdev(dev);
551 if (i)
552 goto err_out_unmap_rx;
554 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
555 dev->name, pci_id_tbl[chip_idx].name, ioaddr,
556 dev->dev_addr, irq);
558 np->phys[0] = 1; /* Default setting */
559 np->mii_preamble_required++;
	/*
	 * It seems some PHYs don't deal well with address 0 being accessed
	 * first.
	 */
565 if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
566 phy = 0;
567 phy_end = 31;
568 } else {
569 phy = 1;
570 phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */
572 for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
573 int phyx = phy & 0x1f;
574 int mii_status = mdio_read(dev, phyx, MII_BMSR);
575 if (mii_status != 0xffff && mii_status != 0x0000) {
576 np->phys[phy_idx++] = phyx;
577 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
578 if ((mii_status & 0x0040) == 0)
579 np->mii_preamble_required++;
580 printk(KERN_INFO "%s: MII PHY found at address %d, status "
581 "0x%4.4x advertising %4.4x.\n",
582 dev->name, phyx, mii_status, np->mii_if.advertising);
585 np->mii_preamble_required--;
587 if (phy_idx == 0) {
588 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
589 dev->name, ioread32(ioaddr + ASICCtrl));
590 goto err_out_unregister;
593 np->mii_if.phy_id = np->phys[0];
595 /* Parse override configuration */
596 np->an_enable = 1;
597 if (card_idx < MAX_UNITS) {
598 if (media[card_idx] != NULL) {
599 np->an_enable = 0;
600 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
601 strcmp (media[card_idx], "4") == 0) {
602 np->speed = 100;
603 np->mii_if.full_duplex = 1;
604 } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
605 strcmp (media[card_idx], "3") == 0) {
606 np->speed = 100;
607 np->mii_if.full_duplex = 0;
608 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
609 strcmp (media[card_idx], "2") == 0) {
610 np->speed = 10;
611 np->mii_if.full_duplex = 1;
612 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
613 strcmp (media[card_idx], "1") == 0) {
614 np->speed = 10;
615 np->mii_if.full_duplex = 0;
616 } else {
617 np->an_enable = 1;
620 if (flowctrl == 1)
621 np->flowctrl = 1;
624 /* Fibre PHY? */
625 if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
626 /* Default 100Mbps Full */
627 if (np->an_enable) {
628 np->speed = 100;
629 np->mii_if.full_duplex = 1;
630 np->an_enable = 0;
633 /* Reset PHY */
634 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
635 mdelay (300);
636 /* If flow control enabled, we need to advertise it.*/
637 if (np->flowctrl)
638 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
639 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
640 /* Force media type */
641 if (!np->an_enable) {
642 mii_ctl = 0;
643 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
644 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
645 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
646 printk (KERN_INFO "Override speed=%d, %s duplex\n",
647 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
651 /* Perhaps move the reset here? */
652 /* Reset the chip to erase previous misconfiguration. */
653 if (netif_msg_hw(np))
654 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
655 sundance_reset(dev, 0x00ff << 16);
656 if (netif_msg_hw(np))
657 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
659 card_idx++;
660 return 0;
662 err_out_unregister:
663 unregister_netdev(dev);
664 err_out_unmap_rx:
665 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
666 err_out_unmap_tx:
667 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
668 err_out_cleardev:
669 pci_set_drvdata(pdev, NULL);
670 pci_iounmap(pdev, ioaddr);
671 err_out_res:
672 pci_release_regions(pdev);
673 err_out_netdev:
674 free_netdev (dev);
675 return -ENODEV;
678 static int change_mtu(struct net_device *dev, int new_mtu)
680 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
681 return -EINVAL;
682 if (netif_running(dev))
683 return -EBUSY;
684 dev->mtu = new_mtu;
685 return 0;
688 #define eeprom_delay(ee_addr) ioread32(ee_addr)
689 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
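/* Added note: eeprom_read() below writes (0x0200 | address) to EECtrl, with
   0x0200 presumably being the EEPROM read opcode, then polls the busy bit
   (0x8000) until the chip latches the requested word into EEData. */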
690 static int __devinit eeprom_read(void __iomem *ioaddr, int location)
692 int boguscnt = 10000; /* Typical 1900 ticks. */
693 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
694 do {
695 eeprom_delay(ioaddr + EECtrl);
696 if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
697 return ioread16(ioaddr + EEData);
699 } while (--boguscnt > 0);
700 return 0;
703 /* MII transceiver control section.
704 Read and write the MII registers using software-generated serial
705 MDIO protocol. See the MII specifications or DP83840A data sheet
706 for details.
   The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
   met by back-to-back 33 MHz PCI cycles. */
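/* Added note on the command words used below: mdio_read() shifts out a
   16-bit word built as (0xf6 << 10) | (phy_id << 5) | location.  Read MSB
   first, that is two extra preamble '1' bits, the clause-22 start bits (01),
   the read opcode (10), the 5-bit PHY address and the 5-bit register
   address; the turnaround and the 16 data bits are then clocked in
   separately.  mdio_write() builds a full 32-bit frame the same way:
   start (01), write opcode (01), PHY address, register address,
   turnaround (10) and the 16-bit value. */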
710 #define mdio_delay() ioread8(mdio_addr)
712 enum mii_reg_bits {
713 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
715 #define MDIO_EnbIn (0)
716 #define MDIO_WRITE0 (MDIO_EnbOutput)
717 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
719 /* Generate the preamble required for initial synchronization and
720 a few older transceivers. */
721 static void mdio_sync(void __iomem *mdio_addr)
723 int bits = 32;
725 /* Establish sync by sending at least 32 logic ones. */
726 while (--bits >= 0) {
727 iowrite8(MDIO_WRITE1, mdio_addr);
728 mdio_delay();
729 iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
730 mdio_delay();
734 static int mdio_read(struct net_device *dev, int phy_id, int location)
736 struct netdev_private *np = netdev_priv(dev);
737 void __iomem *mdio_addr = np->base + MIICtrl;
738 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
739 int i, retval = 0;
741 if (np->mii_preamble_required)
742 mdio_sync(mdio_addr);
744 /* Shift the read command bits out. */
745 for (i = 15; i >= 0; i--) {
746 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
748 iowrite8(dataval, mdio_addr);
749 mdio_delay();
750 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
751 mdio_delay();
753 /* Read the two transition, 16 data, and wire-idle bits. */
754 for (i = 19; i > 0; i--) {
755 iowrite8(MDIO_EnbIn, mdio_addr);
756 mdio_delay();
757 retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
758 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
759 mdio_delay();
761 return (retval>>1) & 0xffff;
764 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
766 struct netdev_private *np = netdev_priv(dev);
767 void __iomem *mdio_addr = np->base + MIICtrl;
768 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
769 int i;
771 if (np->mii_preamble_required)
772 mdio_sync(mdio_addr);
774 /* Shift the command bits out. */
775 for (i = 31; i >= 0; i--) {
776 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
778 iowrite8(dataval, mdio_addr);
779 mdio_delay();
780 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
781 mdio_delay();
783 /* Clear out extra bits. */
784 for (i = 2; i > 0; i--) {
785 iowrite8(MDIO_EnbIn, mdio_addr);
786 mdio_delay();
787 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
788 mdio_delay();
792 static int mdio_wait_link(struct net_device *dev, int wait)
794 int bmsr;
795 int phy_id;
796 struct netdev_private *np;
798 np = netdev_priv(dev);
799 phy_id = np->phys[0];
801 do {
802 bmsr = mdio_read(dev, phy_id, MII_BMSR);
803 if (bmsr & 0x0004)
804 return 0;
805 mdelay(1);
806 } while (--wait > 0);
807 return -1;
810 static int netdev_open(struct net_device *dev)
812 struct netdev_private *np = netdev_priv(dev);
813 void __iomem *ioaddr = np->base;
814 unsigned long flags;
815 int i;
817 /* Do we need to reset the chip??? */
819 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
820 if (i)
821 return i;
823 if (netif_msg_ifup(np))
824 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
825 dev->name, dev->irq);
826 init_ring(dev);
828 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
829 /* The Tx list pointer is written as packets are queued. */
831 /* Initialize other registers. */
832 __set_mac_addr(dev);
833 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
834 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
835 #else
836 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
837 #endif
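	/* Added note: MaxFrameSize is the MTU plus the 14-byte Ethernet
	   header, plus 4 more when 802.1Q VLAN support is compiled in so
	   that tagged frames are not truncated. */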
838 if (dev->mtu > 2047)
839 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
841 /* Configure the PCI bus bursts and FIFO thresholds. */
843 if (dev->if_port == 0)
844 dev->if_port = np->default_port;
846 spin_lock_init(&np->mcastlock);
848 set_rx_mode(dev);
849 iowrite16(0, ioaddr + IntrEnable);
850 iowrite16(0, ioaddr + DownCounter);
851 /* Set the chip to poll every N*320nsec. */
852 iowrite8(100, ioaddr + RxDMAPollPeriod);
853 iowrite8(127, ioaddr + TxDMAPollPeriod);
854 /* Fix DFE-580TX packet drop issue */
855 if (np->pci_dev->revision >= 0x14)
856 iowrite8(0x01, ioaddr + DebugCtrl1);
857 netif_start_queue(dev);
859 spin_lock_irqsave(&np->lock, flags);
860 reset_tx(dev);
861 spin_unlock_irqrestore(&np->lock, flags);
863 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
865 if (netif_msg_ifup(np))
866 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
867 "MAC Control %x, %4.4x %4.4x.\n",
868 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
869 ioread32(ioaddr + MACCtrl0),
870 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
872 /* Set the timer to check for link beat. */
873 init_timer(&np->timer);
874 np->timer.expires = jiffies + 3*HZ;
875 np->timer.data = (unsigned long)dev;
876 np->timer.function = &netdev_timer; /* timer handler */
877 add_timer(&np->timer);
879 /* Enable interrupts by setting the interrupt mask. */
880 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
882 return 0;
885 static void check_duplex(struct net_device *dev)
887 struct netdev_private *np = netdev_priv(dev);
888 void __iomem *ioaddr = np->base;
889 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
890 int negotiated = mii_lpa & np->mii_if.advertising;
891 int duplex;
893 /* Force media */
894 if (!np->an_enable || mii_lpa == 0xffff) {
895 if (np->mii_if.full_duplex)
896 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
897 ioaddr + MACCtrl0);
898 return;
901 /* Autonegotiation */
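	/* Added note: 0x0100 is LPA_100FULL and 0x01C0 covers
	   100FULL|100HALF|10FULL, so the result below is full duplex when
	   both ends advertise 100FULL, or when 10FULL is the only one of
	   those three abilities that was negotiated. */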
902 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
903 if (np->mii_if.full_duplex != duplex) {
904 np->mii_if.full_duplex = duplex;
905 if (netif_msg_link(np))
906 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
907 "negotiated capability %4.4x.\n", dev->name,
908 duplex ? "full" : "half", np->phys[0], negotiated);
909 iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
913 static void netdev_timer(unsigned long data)
915 struct net_device *dev = (struct net_device *)data;
916 struct netdev_private *np = netdev_priv(dev);
917 void __iomem *ioaddr = np->base;
918 int next_tick = 10*HZ;
920 if (netif_msg_timer(np)) {
921 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
922 "Tx %x Rx %x.\n",
923 dev->name, ioread16(ioaddr + IntrEnable),
924 ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
926 check_duplex(dev);
927 np->timer.expires = jiffies + next_tick;
928 add_timer(&np->timer);
931 static void tx_timeout(struct net_device *dev)
933 struct netdev_private *np = netdev_priv(dev);
934 void __iomem *ioaddr = np->base;
935 unsigned long flag;
937 netif_stop_queue(dev);
938 tasklet_disable(&np->tx_tasklet);
939 iowrite16(0, ioaddr + IntrEnable);
940 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
941 "TxFrameId %2.2x,"
942 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
943 ioread8(ioaddr + TxFrameId));
946 int i;
947 for (i=0; i<TX_RING_SIZE; i++) {
948 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
949 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
950 le32_to_cpu(np->tx_ring[i].next_desc),
951 le32_to_cpu(np->tx_ring[i].status),
952 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
953 le32_to_cpu(np->tx_ring[i].frag[0].addr),
954 le32_to_cpu(np->tx_ring[i].frag[0].length));
956 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
957 ioread32(np->base + TxListPtr),
958 netif_queue_stopped(dev));
959 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
960 np->cur_tx, np->cur_tx % TX_RING_SIZE,
961 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
962 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
963 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
965 spin_lock_irqsave(&np->lock, flag);
967 /* Stop and restart the chip's Tx processes . */
968 reset_tx(dev);
969 spin_unlock_irqrestore(&np->lock, flag);
971 dev->if_port = 0;
973 dev->trans_start = jiffies; /* prevent tx timeout */
974 dev->stats.tx_errors++;
975 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
976 netif_wake_queue(dev);
978 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
979 tasklet_enable(&np->tx_tasklet);
983 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
984 static void init_ring(struct net_device *dev)
986 struct netdev_private *np = netdev_priv(dev);
987 int i;
989 np->cur_rx = np->cur_tx = 0;
990 np->dirty_rx = np->dirty_tx = 0;
991 np->cur_task = 0;
993 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
995 /* Initialize all Rx descriptors. */
996 for (i = 0; i < RX_RING_SIZE; i++) {
997 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
998 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
999 np->rx_ring[i].status = 0;
1000 np->rx_ring[i].frag[0].length = 0;
1001 np->rx_skbuff[i] = NULL;
1004 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1005 for (i = 0; i < RX_RING_SIZE; i++) {
1006 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1007 np->rx_skbuff[i] = skb;
1008 if (skb == NULL)
1009 break;
1010 skb->dev = dev; /* Mark as being used by this device. */
1011 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1012 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1013 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
1014 PCI_DMA_FROMDEVICE));
1015 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1017 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1019 for (i = 0; i < TX_RING_SIZE; i++) {
1020 np->tx_skbuff[i] = NULL;
1021 np->tx_ring[i].status = 0;
1025 static void tx_poll (unsigned long data)
1027 struct net_device *dev = (struct net_device *)data;
1028 struct netdev_private *np = netdev_priv(dev);
1029 unsigned head = np->cur_task % TX_RING_SIZE;
1030 struct netdev_desc *txdesc =
1031 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1033 /* Chain the next pointer */
1034 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1035 int entry = np->cur_task % TX_RING_SIZE;
1036 txdesc = &np->tx_ring[entry];
1037 if (np->last_tx) {
1038 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1039 entry*sizeof(struct netdev_desc));
1041 np->last_tx = txdesc;
1043 /* Indicate the latest descriptor of tx ring */
1044 txdesc->status |= cpu_to_le32(DescIntrOnTx);
1046 if (ioread32 (np->base + TxListPtr) == 0)
1047 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1048 np->base + TxListPtr);
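/* Added summary of the transmit path as implemented above and below:
   start_tx() fills a descriptor, records the skb and bumps cur_tx, then
   schedules tx_poll() as a tasklet.  tx_poll() links any newly filled
   descriptors onto the chain via next_desc, flags the last one with
   DescIntrOnTx, and writes TxListPtr only when the Tx engine is idle
   (TxListPtr reads back as 0).  Completed entries are reclaimed from
   dirty_tx in intr_handler(). */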
1051 static netdev_tx_t
1052 start_tx (struct sk_buff *skb, struct net_device *dev)
1054 struct netdev_private *np = netdev_priv(dev);
1055 struct netdev_desc *txdesc;
1056 unsigned entry;
1058 /* Calculate the next Tx descriptor entry. */
1059 entry = np->cur_tx % TX_RING_SIZE;
1060 np->tx_skbuff[entry] = skb;
1061 txdesc = &np->tx_ring[entry];
1063 txdesc->next_desc = 0;
1064 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1065 txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
1066 skb->len,
1067 PCI_DMA_TODEVICE));
1068 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1070 /* Increment cur_tx before tasklet_schedule() */
1071 np->cur_tx++;
1072 mb();
1073 /* Schedule a tx_poll() task */
1074 tasklet_schedule(&np->tx_tasklet);
1076 /* On some architectures: explicitly flush cache lines here. */
1077 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1078 !netif_queue_stopped(dev)) {
1079 /* do nothing */
1080 } else {
1081 netif_stop_queue (dev);
1083 if (netif_msg_tx_queued(np)) {
1084 printk (KERN_DEBUG
1085 "%s: Transmit frame #%d queued in slot %d.\n",
1086 dev->name, np->cur_tx, entry);
1088 return NETDEV_TX_OK;
/* Reset the hardware Tx path and free all Tx buffers */
1092 static int
1093 reset_tx (struct net_device *dev)
1095 struct netdev_private *np = netdev_priv(dev);
1096 void __iomem *ioaddr = np->base;
1097 struct sk_buff *skb;
1098 int i;
1099 int irq = in_interrupt();
1101 /* Reset tx logic, TxListPtr will be cleaned */
1102 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1103 sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1105 /* free all tx skbuff */
1106 for (i = 0; i < TX_RING_SIZE; i++) {
1107 np->tx_ring[i].next_desc = 0;
1109 skb = np->tx_skbuff[i];
1110 if (skb) {
1111 pci_unmap_single(np->pci_dev,
1112 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1113 skb->len, PCI_DMA_TODEVICE);
1114 if (irq)
1115 dev_kfree_skb_irq (skb);
1116 else
1117 dev_kfree_skb (skb);
1118 np->tx_skbuff[i] = NULL;
1119 dev->stats.tx_dropped++;
1122 np->cur_tx = np->dirty_tx = 0;
1123 np->cur_task = 0;
1125 np->last_tx = NULL;
1126 iowrite8(127, ioaddr + TxDMAPollPeriod);
1128 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1129 return 0;
/* The interrupt handler cleans up after the Tx thread,
   and schedules the Rx work as a tasklet. */
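/* Added note: the handler writes the status value back to IntrStatus
   (presumably acknowledging the interrupts), temporarily masks the Rx
   interrupts while the rx_tasklet runs (rx_poll() re-enables DEFAULT_INTR
   when it is done), and reaps finished Tx descriptors under np->lock. */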
1134 static irqreturn_t intr_handler(int irq, void *dev_instance)
1136 struct net_device *dev = (struct net_device *)dev_instance;
1137 struct netdev_private *np = netdev_priv(dev);
1138 void __iomem *ioaddr = np->base;
1139 int hw_frame_id;
1140 int tx_cnt;
1141 int tx_status;
1142 int handled = 0;
1143 int i;
1146 do {
1147 int intr_status = ioread16(ioaddr + IntrStatus);
1148 iowrite16(intr_status, ioaddr + IntrStatus);
1150 if (netif_msg_intr(np))
1151 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1152 dev->name, intr_status);
1154 if (!(intr_status & DEFAULT_INTR))
1155 break;
1157 handled = 1;
1159 if (intr_status & (IntrRxDMADone)) {
1160 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1161 ioaddr + IntrEnable);
1162 if (np->budget < 0)
1163 np->budget = RX_BUDGET;
1164 tasklet_schedule(&np->rx_tasklet);
1166 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1167 tx_status = ioread16 (ioaddr + TxStatus);
1168 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1169 if (netif_msg_tx_done(np))
1170 printk
1171 ("%s: Transmit status is %2.2x.\n",
1172 dev->name, tx_status);
1173 if (tx_status & 0x1e) {
1174 if (netif_msg_tx_err(np))
1175 printk("%s: Transmit error status %4.4x.\n",
1176 dev->name, tx_status);
1177 dev->stats.tx_errors++;
1178 if (tx_status & 0x10)
1179 dev->stats.tx_fifo_errors++;
1180 if (tx_status & 0x08)
1181 dev->stats.collisions++;
1182 if (tx_status & 0x04)
1183 dev->stats.tx_fifo_errors++;
1184 if (tx_status & 0x02)
1185 dev->stats.tx_window_errors++;
				/*
				** This reset has been verified on
				** DFE-580TX boards! phdm@macqel.be.
				*/
1191 if (tx_status & 0x10) { /* TxUnderrun */
1192 /* Restart Tx FIFO and transmitter */
1193 sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1194 /* No need to reset the Tx pointer here */
1196 /* Restart the Tx. Need to make sure tx enabled */
1197 i = 10;
1198 do {
1199 iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1200 if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1201 break;
1202 mdelay(1);
1203 } while (--i);
1205 /* Yup, this is a documentation bug. It cost me *hours*. */
1206 iowrite16 (0, ioaddr + TxStatus);
1207 if (tx_cnt < 0) {
1208 iowrite32(5000, ioaddr + DownCounter);
1209 break;
1211 tx_status = ioread16 (ioaddr + TxStatus);
1213 hw_frame_id = (tx_status >> 8) & 0xff;
1214 } else {
1215 hw_frame_id = ioread8(ioaddr + TxFrameId);
1218 if (np->pci_dev->revision >= 0x14) {
1219 spin_lock(&np->lock);
1220 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1221 int entry = np->dirty_tx % TX_RING_SIZE;
1222 struct sk_buff *skb;
1223 int sw_frame_id;
1224 sw_frame_id = (le32_to_cpu(
1225 np->tx_ring[entry].status) >> 2) & 0xff;
1226 if (sw_frame_id == hw_frame_id &&
1227 !(le32_to_cpu(np->tx_ring[entry].status)
1228 & 0x00010000))
1229 break;
1230 if (sw_frame_id == (hw_frame_id + 1) %
1231 TX_RING_SIZE)
1232 break;
1233 skb = np->tx_skbuff[entry];
1234 /* Free the original skb. */
1235 pci_unmap_single(np->pci_dev,
1236 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1237 skb->len, PCI_DMA_TODEVICE);
1238 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1239 np->tx_skbuff[entry] = NULL;
1240 np->tx_ring[entry].frag[0].addr = 0;
1241 np->tx_ring[entry].frag[0].length = 0;
1243 spin_unlock(&np->lock);
1244 } else {
1245 spin_lock(&np->lock);
1246 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1247 int entry = np->dirty_tx % TX_RING_SIZE;
1248 struct sk_buff *skb;
1249 if (!(le32_to_cpu(np->tx_ring[entry].status)
1250 & 0x00010000))
1251 break;
1252 skb = np->tx_skbuff[entry];
1253 /* Free the original skb. */
1254 pci_unmap_single(np->pci_dev,
1255 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1256 skb->len, PCI_DMA_TODEVICE);
1257 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1258 np->tx_skbuff[entry] = NULL;
1259 np->tx_ring[entry].frag[0].addr = 0;
1260 np->tx_ring[entry].frag[0].length = 0;
1262 spin_unlock(&np->lock);
1265 if (netif_queue_stopped(dev) &&
1266 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1267 /* The ring is no longer full, clear busy flag. */
1268 netif_wake_queue (dev);
1270 /* Abnormal error summary/uncommon events handlers. */
1271 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1272 netdev_error(dev, intr_status);
1273 } while (0);
1274 if (netif_msg_intr(np))
1275 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1276 dev->name, ioread16(ioaddr + IntrStatus));
1277 return IRQ_RETVAL(handled);
1280 static void rx_poll(unsigned long data)
1282 struct net_device *dev = (struct net_device *)data;
1283 struct netdev_private *np = netdev_priv(dev);
1284 int entry = np->cur_rx % RX_RING_SIZE;
1285 int boguscnt = np->budget;
1286 void __iomem *ioaddr = np->base;
1287 int received = 0;
1289 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1290 while (1) {
1291 struct netdev_desc *desc = &(np->rx_ring[entry]);
1292 u32 frame_status = le32_to_cpu(desc->status);
1293 int pkt_len;
1295 if (--boguscnt < 0) {
1296 goto not_done;
1298 if (!(frame_status & DescOwn))
1299 break;
1300 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1301 if (netif_msg_rx_status(np))
1302 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1303 frame_status);
1304 if (frame_status & 0x001f4000) {
			/* There was an error. */
1306 if (netif_msg_rx_err(np))
1307 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1308 frame_status);
1309 dev->stats.rx_errors++;
1310 if (frame_status & 0x00100000)
1311 dev->stats.rx_length_errors++;
1312 if (frame_status & 0x00010000)
1313 dev->stats.rx_fifo_errors++;
1314 if (frame_status & 0x00060000)
1315 dev->stats.rx_frame_errors++;
1316 if (frame_status & 0x00080000)
1317 dev->stats.rx_crc_errors++;
1318 if (frame_status & 0x00100000) {
1319 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1320 " status %8.8x.\n",
1321 dev->name, frame_status);
1323 } else {
1324 struct sk_buff *skb;
1325 #ifndef final_version
1326 if (netif_msg_rx_status(np))
1327 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1328 ", bogus_cnt %d.\n",
1329 pkt_len, boguscnt);
1330 #endif
1331 /* Check if the packet is long enough to accept without copying
1332 to a minimally-sized skbuff. */
1333 if (pkt_len < rx_copybreak &&
1334 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1335 skb_reserve(skb, 2); /* 16 byte align the IP header */
1336 pci_dma_sync_single_for_cpu(np->pci_dev,
1337 le32_to_cpu(desc->frag[0].addr),
1338 np->rx_buf_sz,
1339 PCI_DMA_FROMDEVICE);
1341 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1342 pci_dma_sync_single_for_device(np->pci_dev,
1343 le32_to_cpu(desc->frag[0].addr),
1344 np->rx_buf_sz,
1345 PCI_DMA_FROMDEVICE);
1346 skb_put(skb, pkt_len);
1347 } else {
1348 pci_unmap_single(np->pci_dev,
1349 le32_to_cpu(desc->frag[0].addr),
1350 np->rx_buf_sz,
1351 PCI_DMA_FROMDEVICE);
1352 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1353 np->rx_skbuff[entry] = NULL;
1355 skb->protocol = eth_type_trans(skb, dev);
1356 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1357 netif_rx(skb);
1359 entry = (entry + 1) % RX_RING_SIZE;
1360 received++;
1362 np->cur_rx = entry;
1363 refill_rx (dev);
1364 np->budget -= received;
1365 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1366 return;
1368 not_done:
1369 np->cur_rx = entry;
1370 refill_rx (dev);
1371 if (!received)
1372 received = 1;
1373 np->budget -= received;
1374 if (np->budget <= 0)
1375 np->budget = RX_BUDGET;
1376 tasklet_schedule(&np->rx_tasklet);
1379 static void refill_rx (struct net_device *dev)
1381 struct netdev_private *np = netdev_priv(dev);
1382 int entry;
1383 int cnt = 0;
1385 /* Refill the Rx ring buffers. */
1386 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1387 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1388 struct sk_buff *skb;
1389 entry = np->dirty_rx % RX_RING_SIZE;
1390 if (np->rx_skbuff[entry] == NULL) {
1391 skb = dev_alloc_skb(np->rx_buf_sz);
1392 np->rx_skbuff[entry] = skb;
1393 if (skb == NULL)
1394 break; /* Better luck next round. */
1395 skb->dev = dev; /* Mark as being used by this device. */
1396 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1397 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1398 pci_map_single(np->pci_dev, skb->data,
1399 np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1401 /* Perhaps we need not reset this field. */
1402 np->rx_ring[entry].frag[0].length =
1403 cpu_to_le32(np->rx_buf_sz | LastFrag);
1404 np->rx_ring[entry].status = 0;
1405 cnt++;
1408 static void netdev_error(struct net_device *dev, int intr_status)
1410 struct netdev_private *np = netdev_priv(dev);
1411 void __iomem *ioaddr = np->base;
1412 u16 mii_ctl, mii_advertise, mii_lpa;
1413 int speed;
1415 if (intr_status & LinkChange) {
1416 if (mdio_wait_link(dev, 10) == 0) {
1417 printk(KERN_INFO "%s: Link up\n", dev->name);
1418 if (np->an_enable) {
1419 mii_advertise = mdio_read(dev, np->phys[0],
1420 MII_ADVERTISE);
1421 mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1422 mii_advertise &= mii_lpa;
1423 printk(KERN_INFO "%s: Link changed: ",
1424 dev->name);
1425 if (mii_advertise & ADVERTISE_100FULL) {
1426 np->speed = 100;
1427 printk("100Mbps, full duplex\n");
1428 } else if (mii_advertise & ADVERTISE_100HALF) {
1429 np->speed = 100;
1430 printk("100Mbps, half duplex\n");
1431 } else if (mii_advertise & ADVERTISE_10FULL) {
1432 np->speed = 10;
1433 printk("10Mbps, full duplex\n");
1434 } else if (mii_advertise & ADVERTISE_10HALF) {
1435 np->speed = 10;
1436 printk("10Mbps, half duplex\n");
1437 } else
1438 printk("\n");
1440 } else {
1441 mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1442 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1443 np->speed = speed;
1444 printk(KERN_INFO "%s: Link changed: %dMbps ,",
1445 dev->name, speed);
1446 printk("%s duplex.\n",
1447 (mii_ctl & BMCR_FULLDPLX) ?
1448 "full" : "half");
1450 check_duplex(dev);
1451 if (np->flowctrl && np->mii_if.full_duplex) {
1452 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1453 ioaddr + MulticastFilter1+2);
1454 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1455 ioaddr + MACCtrl0);
1457 netif_carrier_on(dev);
1458 } else {
1459 printk(KERN_INFO "%s: Link down\n", dev->name);
1460 netif_carrier_off(dev);
1463 if (intr_status & StatsMax) {
1464 get_stats(dev);
1466 if (intr_status & IntrPCIErr) {
1467 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1468 dev->name, intr_status);
1469 /* We must do a global reset of DMA to continue. */
1473 static struct net_device_stats *get_stats(struct net_device *dev)
1475 struct netdev_private *np = netdev_priv(dev);
1476 void __iomem *ioaddr = np->base;
1477 int i;
1479 /* We should lock this segment of code for SMP eventually, although
1480 the vulnerability window is very small and statistics are
1481 non-critical. */
	/* The chip only needs to report frames that were silently dropped. */
1483 dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1484 dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1485 dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1486 dev->stats.collisions += ioread8(ioaddr + StatsLateColl);
1487 dev->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1488 dev->stats.collisions += ioread8(ioaddr + StatsOneColl);
1489 dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1490 ioread8(ioaddr + StatsTxDefer);
1491 for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1492 ioread8(ioaddr + i);
1493 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1494 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1495 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1496 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1498 return &dev->stats;
1501 static void set_rx_mode(struct net_device *dev)
1503 struct netdev_private *np = netdev_priv(dev);
1504 void __iomem *ioaddr = np->base;
1505 u16 mc_filter[4]; /* Multicast hash filter */
1506 u32 rx_mode;
1507 int i;
1509 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1510 memset(mc_filter, 0xff, sizeof(mc_filter));
1511 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1512 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1513 (dev->flags & IFF_ALLMULTI)) {
1514 /* Too many to match, or accept all multicasts. */
1515 memset(mc_filter, 0xff, sizeof(mc_filter));
1516 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1517 } else if (!netdev_mc_empty(dev)) {
1518 struct netdev_hw_addr *ha;
1519 int bit;
1520 int index;
1521 int crc;
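		/* Added note: the loop below takes the top 6 bits of the
		   little-endian CRC of each address (bit-reversed into
		   'index') and sets one of 64 hash-filter bits, which are
		   spread across the four 16-bit MulticastFilter registers. */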
1522 memset (mc_filter, 0, sizeof (mc_filter));
1523 netdev_for_each_mc_addr(ha, dev) {
1524 crc = ether_crc_le(ETH_ALEN, ha->addr);
1525 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1526 if (crc & 0x80000000) index |= 1 << bit;
1527 mc_filter[index/16] |= (1 << (index % 16));
1529 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1530 } else {
1531 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1532 return;
1534 if (np->mii_if.full_duplex && np->flowctrl)
1535 mc_filter[3] |= 0x0200;
1537 for (i = 0; i < 4; i++)
1538 iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1539 iowrite8(rx_mode, ioaddr + RxMode);
1542 static int __set_mac_addr(struct net_device *dev)
1544 struct netdev_private *np = netdev_priv(dev);
1545 u16 addr16;
1547 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1548 iowrite16(addr16, np->base + StationAddr);
1549 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1550 iowrite16(addr16, np->base + StationAddr+2);
1551 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1552 iowrite16(addr16, np->base + StationAddr+4);
1553 return 0;
1556 static int check_if_running(struct net_device *dev)
1558 if (!netif_running(dev))
1559 return -EINVAL;
1560 return 0;
1563 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1565 struct netdev_private *np = netdev_priv(dev);
1566 strcpy(info->driver, DRV_NAME);
1567 strcpy(info->version, DRV_VERSION);
1568 strcpy(info->bus_info, pci_name(np->pci_dev));
1571 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1573 struct netdev_private *np = netdev_priv(dev);
1574 spin_lock_irq(&np->lock);
1575 mii_ethtool_gset(&np->mii_if, ecmd);
1576 spin_unlock_irq(&np->lock);
1577 return 0;
1580 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1582 struct netdev_private *np = netdev_priv(dev);
1583 int res;
1584 spin_lock_irq(&np->lock);
1585 res = mii_ethtool_sset(&np->mii_if, ecmd);
1586 spin_unlock_irq(&np->lock);
1587 return res;
1590 static int nway_reset(struct net_device *dev)
1592 struct netdev_private *np = netdev_priv(dev);
1593 return mii_nway_restart(&np->mii_if);
1596 static u32 get_link(struct net_device *dev)
1598 struct netdev_private *np = netdev_priv(dev);
1599 return mii_link_ok(&np->mii_if);
1602 static u32 get_msglevel(struct net_device *dev)
1604 struct netdev_private *np = netdev_priv(dev);
1605 return np->msg_enable;
1608 static void set_msglevel(struct net_device *dev, u32 val)
1610 struct netdev_private *np = netdev_priv(dev);
1611 np->msg_enable = val;
1614 static const struct ethtool_ops ethtool_ops = {
1615 .begin = check_if_running,
1616 .get_drvinfo = get_drvinfo,
1617 .get_settings = get_settings,
1618 .set_settings = set_settings,
1619 .nway_reset = nway_reset,
1620 .get_link = get_link,
1621 .get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
};
1625 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1627 struct netdev_private *np = netdev_priv(dev);
1628 int rc;
1630 if (!netif_running(dev))
1631 return -EINVAL;
1633 spin_lock_irq(&np->lock);
1634 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1635 spin_unlock_irq(&np->lock);
1637 return rc;
1640 static int netdev_close(struct net_device *dev)
1642 struct netdev_private *np = netdev_priv(dev);
1643 void __iomem *ioaddr = np->base;
1644 struct sk_buff *skb;
1645 int i;
1647 /* Wait and kill tasklet */
1648 tasklet_kill(&np->rx_tasklet);
1649 tasklet_kill(&np->tx_tasklet);
1650 np->cur_tx = 0;
1651 np->dirty_tx = 0;
1652 np->cur_task = 0;
1653 np->last_tx = NULL;
1655 netif_stop_queue(dev);
1657 if (netif_msg_ifdown(np)) {
1658 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1659 "Rx %4.4x Int %2.2x.\n",
1660 dev->name, ioread8(ioaddr + TxStatus),
1661 ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1662 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1663 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1666 /* Disable interrupts by clearing the interrupt mask. */
1667 iowrite16(0x0000, ioaddr + IntrEnable);
	/* Disable Rx and Tx DMA so resources can be released safely. */
1670 iowrite32(0x500, ioaddr + DMACtrl);
1672 /* Stop the chip's Tx and Rx processes. */
1673 iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1675 for (i = 2000; i > 0; i--) {
1676 if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1677 break;
1678 mdelay(1);
1681 iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1682 ioaddr +ASICCtrl + 2);
1684 for (i = 2000; i > 0; i--) {
1685 if ((ioread16(ioaddr + ASICCtrl +2) & ResetBusy) == 0)
1686 break;
1687 mdelay(1);
1690 #ifdef __i386__
1691 if (netif_msg_hw(np)) {
1692 printk(KERN_DEBUG " Tx ring at %8.8x:\n",
1693 (int)(np->tx_ring_dma));
1694 for (i = 0; i < TX_RING_SIZE; i++)
1695 printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1696 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1697 np->tx_ring[i].frag[0].length);
1698 printk(KERN_DEBUG " Rx ring %8.8x:\n",
1699 (int)(np->rx_ring_dma));
1700 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1701 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1702 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1703 np->rx_ring[i].frag[0].length);
1706 #endif /* __i386__ debugging only */
1708 free_irq(dev->irq, dev);
1710 del_timer_sync(&np->timer);
1712 /* Free all the skbuffs in the Rx queue. */
1713 for (i = 0; i < RX_RING_SIZE; i++) {
1714 np->rx_ring[i].status = 0;
1715 skb = np->rx_skbuff[i];
1716 if (skb) {
1717 pci_unmap_single(np->pci_dev,
1718 le32_to_cpu(np->rx_ring[i].frag[0].addr),
1719 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1720 dev_kfree_skb(skb);
1721 np->rx_skbuff[i] = NULL;
1723 np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1725 for (i = 0; i < TX_RING_SIZE; i++) {
1726 np->tx_ring[i].next_desc = 0;
1727 skb = np->tx_skbuff[i];
1728 if (skb) {
1729 pci_unmap_single(np->pci_dev,
1730 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1731 skb->len, PCI_DMA_TODEVICE);
1732 dev_kfree_skb(skb);
1733 np->tx_skbuff[i] = NULL;
1737 return 0;
1740 static void __devexit sundance_remove1 (struct pci_dev *pdev)
1742 struct net_device *dev = pci_get_drvdata(pdev);
1744 if (dev) {
1745 struct netdev_private *np = netdev_priv(dev);
1747 unregister_netdev(dev);
1748 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
1749 np->rx_ring_dma);
1750 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
1751 np->tx_ring_dma);
1752 pci_iounmap(pdev, np->base);
1753 pci_release_regions(pdev);
1754 free_netdev(dev);
1755 pci_set_drvdata(pdev, NULL);
1759 static struct pci_driver sundance_driver = {
1760 .name = DRV_NAME,
1761 .id_table = sundance_pci_tbl,
1762 .probe = sundance_probe1,
	.remove		= __devexit_p(sundance_remove1),
};
1766 static int __init sundance_init(void)
1768 /* when a module, this is printed whether or not devices are found in probe */
1769 #ifdef MODULE
1770 printk(version);
1771 #endif
1772 return pci_register_driver(&sundance_driver);
1775 static void __exit sundance_exit(void)
1777 pci_unregister_driver(&sundance_driver);
1780 module_init(sundance_init);
1781 module_exit(sundance_exit);