/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
/*
	Written 1999-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/sundance.html

	Version LK1.01a (jgarzik):
	- Replace some MII-related magic numbers with constants

	Version LK1.02 (D-Link):
	- Add new board to PCI ID list
	- Fix multicast bug

	Version LK1.03 (D-Link):
	- New Rx scheme, reduce Rx congestion
	- Option to disable flow control

	Version LK1.04 (D-Link):
	- Tx timeout recovery
	- More support for ethtool.

	Version LK1.04a:
	- Remove unused/constant members from struct pci_id_info
	  (which then allows removal of 'drv_flags' from private struct)
	  (jgarzik)
	- If no phy is found, fail to load that board (jgarzik)
	- Always start phy id scan at id 1 to avoid problems (Donald Becker)
	- Autodetect where mii_preamble_required is needed,
	  default to not needed. (Donald Becker)

	Version LK1.04b:
	- Remove mii_preamble_required module parameter (Donald Becker)
	- Add per-interface mii_preamble_required (setting is autodetected)
	  (Donald Becker)
	- Remove unnecessary cast from void pointer (jgarzik)
	- Re-align comments in private struct (jgarzik)

	Version LK1.04c (jgarzik):
	- Support bitmapped message levels (NETIF_MSG_xxx), and the
	  two ethtool ioctls that get/set them
	- Don't hand-code MII ethtool support, use standard API/lib

	Version LK1.04d:
	- Merge from Donald Becker's sundance.c: (Jason Lunz)
	  * proper support for variably-sized MTUs
	  * default to PIO, to fix chip bugs
	- Add missing unregister_netdev (Jason Lunz)
	- Add CONFIG_SUNDANCE_MMIO config option (jgarzik)
	- Better rx buf size calculation (Donald Becker)

	Version LK1.05 (D-Link):
	- Fix DFE-580TX packet drop issue (for DL10050C)
	- Fix reset_tx logic

	Version LK1.06 (D-Link):
	- Fix crash while unloading driver

	Version LK1.06b (D-Link):
	- New tx scheme, adaptive tx_coalesce

	Version LK1.07 (D-Link):
	- Fix tx bugs on big-endian machines
	- Remove the unused max_interrupt_work module parameter; the new
	  NAPI-like rx scheme doesn't need it.
	- Remove the redundant get_stats() call in intr_handler(); those
	  I/O accesses could hurt performance on ARM-based systems
	- Add Linux software VLAN support

	Version LK1.08 (Philippe De Muyter phdm@macqel.be):
	- Fix custom MAC address bug
	  (the StationAddr register only accepts word writes)

	Version LK1.09 (D-Link):
	- Fix the flowctrl bug.
	- Set the Pause bit in the MII ANAR if flow control is enabled.

	Version LK1.09a (ICPlus):
	- Add a delay when reading the contents of the EEPROM

	Version LK1.10 (Philippe De Muyter phdm@macqel.be):
	- Make 'unblock interface after Tx underrun' work

*/

#define DRV_NAME	"sundance"
#define DRV_VERSION	"1.01+LK1.10"
#define DRV_RELDATE	"28-Oct-2005"
/* The user-configurable values.
   These may be modified when a driver module is loaded.*/
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC.  */
static const int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl=1;

/* media[] specifies the media type the NIC operates at.
		 autosense	Autosensing active media.
		 10mbps_hd	10Mbps half duplex.
		 10mbps_fd	10Mbps full duplex.
		 100mbps_hd	100Mbps half duplex.
		 100mbps_fd	100Mbps full duplex.
		 0		Autosensing active media.
		 1		10Mbps half duplex.
		 2		10Mbps full duplex.
		 3		100Mbps half duplex.
		 4		100Mbps full duplex.
*/
#define MAX_UNITS 8
static char *media[MAX_UNITS];
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
#define RX_RING_SIZE	64
#define RX_BUDGET	32
#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
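
/* Illustration of the power-of-two point above: with TX_RING_SIZE = 32,
   an expression such as  np->cur_tx % TX_RING_SIZE  compiles down to
   np->cur_tx & 31  for the unsigned ring indices used throughout this
   driver, so ring wrap-around costs no division. */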

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#ifndef _COMPAT_WITH_OLD_KERNEL
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#else
#include "crc32.h"
#include "ethtool.h"
#include "mii.h"
#include "compat.h"
#endif

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "  Written by Donald Becker\n"
KERN_INFO "  http://www.scyld.com/network/sundance.html\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
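
/* Example module load (illustrative, not from the original source): the
   parameters declared above can be set at modprobe time, e.g.

	modprobe sundance debug=3 rx_copybreak=200 flowctrl=1 media=100mbps_fd

   media= takes one comma-separated entry per board, using the names or
   numbers listed in the media[] comment above. */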

/*
				Theory of Operation

I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.

IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.

A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.
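
The copy-vs-pass decision described above reduces to a few lines; a
minimal sketch of the logic (the real code lives in rx_poll() below):

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	<- align the IP header
		... copy the small frame, keep the ring skbuff mapped ...
	} else {
		... pass the full-sized ring skbuff up the stack and
		    replace it later in refill_rx() ...
	}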

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and the
'dev->tbusy' flag.  It sets the tbusy flag whenever it's queuing a Tx packet.
If the next queue slot is empty, it clears the tbusy flag when finished;
otherwise it sets the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.

IV. Notes

IVb. References

The Sundance ST201 datasheet, preliminary version.
The Kendin KS8723 datasheet, preliminary version.
The ICplus IP100 datasheet, preliminary version.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

*/

/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif

static struct pci_device_id sundance_pci_tbl[] = {
	{0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0},
	{0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1},
	{0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2},
	{0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3},
	{0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
	{0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
	{0,}
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);

enum {
	netdev_io_size = 128
};

struct pci_id_info {
	const char *name;
};
static const struct pci_id_info pci_id_tbl[] = {
	{"D-Link DFE-550TX FAST Ethernet Adapter"},
	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
	{"D-Link DFE-580TX 4 port Server Adapter"},
	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
	{"D-Link DL10050-based FAST Ethernet Adapter"},
	{"Sundance Technology Alta"},
	{NULL,},	/* 0 terminated list. */
};

/* This driver was written to use PCI memory space, however x86-oriented
   hardware often uses I/O space accesses. */

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The names can only partially document the semantics and would make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
	DMACtrl = 0x00,
	TxListPtr = 0x04,
	TxDMABurstThresh = 0x08,
	TxDMAUrgentThresh = 0x09,
	TxDMAPollPeriod = 0x0a,
	RxDMAStatus = 0x0c,
	RxListPtr = 0x10,
	DebugCtrl0 = 0x1a,
	DebugCtrl1 = 0x1c,
	RxDMABurstThresh = 0x14,
	RxDMAUrgentThresh = 0x15,
	RxDMAPollPeriod = 0x16,
	LEDCtrl = 0x1a,
	ASICCtrl = 0x30,
	EEData = 0x34,
	EECtrl = 0x36,
	TxStartThresh = 0x3c,
	RxEarlyThresh = 0x3e,
	FlashAddr = 0x40,
	FlashData = 0x44,
	TxStatus = 0x46,
	TxFrameId = 0x47,
	DownCounter = 0x18,
	IntrClear = 0x4a,
	IntrEnable = 0x4c,
	IntrStatus = 0x4e,
	MACCtrl0 = 0x50,
	MACCtrl1 = 0x52,
	StationAddr = 0x54,
	MaxFrameSize = 0x5A,
	RxMode = 0x5c,
	MIICtrl = 0x5e,
	MulticastFilter0 = 0x60,
	MulticastFilter1 = 0x64,
	RxOctetsLow = 0x68,
	RxOctetsHigh = 0x6a,
	TxOctetsLow = 0x6c,
	TxOctetsHigh = 0x6e,
	TxFramesOK = 0x70,
	RxFramesOK = 0x72,
	StatsCarrierError = 0x74,
	StatsLateColl = 0x75,
	StatsMultiColl = 0x76,
	StatsOneColl = 0x77,
	StatsTxDefer = 0x78,
	RxMissed = 0x79,
	StatsTxXSDefer = 0x7a,
	StatsTxAbort = 0x7b,
	StatsBcastTx = 0x7c,
	StatsBcastRx = 0x7d,
	StatsMcastTx = 0x7e,
	StatsMcastRx = 0x7f,
	/* Aliased and bogus values! */
	RxStatus = 0x0c,
};
enum ASICCtrl_HiWord_bit {
	GlobalReset = 0x0001,
	RxReset = 0x0002,
	TxReset = 0x0004,
	DMAReset = 0x0008,
	FIFOReset = 0x0010,
	NetworkReset = 0x0020,
	HostReset = 0x0040,
	ResetBusy = 0x0400,
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
	IntrDrvRqst=0x0040,
	StatsMax=0x0080, LinkChange=0x0100,
	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};

/* Bits in the RxMode register. */
enum rx_mode_bits {
	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
	StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};

/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	u32 next_desc;
	u32 status;
	struct desc_frag { u32 addr, length; } frag[1];
};

/* Bits in netdev_desc.status */
enum desc_status_bits {
	DescOwn=0x8000,
	DescEndPacket=0x4000,
	DescEndRing=0x2000,
	LastFrag=0x80000000,
	DescIntrOnTx=0x8000,
	DescIntrOnDMADone=0x80000000,
	DisableAlign = 0x00000001,
};

#define PRIV_ALIGN	15	/* Required alignment mask */
/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
   within the structure. */
#define MII_CNT		4
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
	struct net_device_stats stats;
	struct timer_list timer;		/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	spinlock_t rx_lock;			/* Group with Tx control cache line. */
	int msg_enable;
	int chip_id;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
	unsigned int flowctrl:1;
	unsigned int default_port:4;		/* Last dev->if_port value. */
	unsigned int an_enable:1;
	unsigned int speed;
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int budget;
	int cur_task;
	/* Multicast and receive mode. */
	spinlock_t mcastlock;			/* SMP lock multicast updates. */
	u16 mcast_filter[4];
	/* MII transceiver section. */
	struct mii_if_info mii_if;
	int mii_preamble_required;
	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
	struct pci_dev *pci_dev;
	void __iomem *base;
	unsigned char pci_rev_id;
};

/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET	0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
			IntrDrvRqst | IntrTxDone | StatsMax | \
			LinkChange)

static int  change_mtu(struct net_device *dev, int new_mtu);
static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int  start_tx(struct sk_buff *skb, struct net_device *dev);
static int  reset_tx (struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void rx_poll(unsigned long data);
static void tx_poll(unsigned long data);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int __set_mac_addr(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int  netdev_close(struct net_device *dev);
static struct ethtool_ops ethtool_ops;

static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base + ASICCtrl;
	int countdown;

	/* ST201 documentation states ASICCtrl is a 32bit register */
	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
	/* ST201 documentation states reset can take up to 1 ms */
	countdown = 10 + 1;
	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
		if (--countdown == 0) {
			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
			break;
		}
		udelay(100);
	}
}

static int __devinit sundance_probe1 (struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int irq;
	int i;
	void __iomem *ioaddr;
	u16 mii_ctl;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif
	int phy, phy_idx = 0;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);

	irq = pdev->irq;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
	if (!ioaddr)
		goto err_out_res;

	for (i = 0; i < 3; i++)
		((u16 *)dev->dev_addr)[i] =
			le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->base = ioaddr;
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->msg_enable = (1 << debug) - 1;
	spin_lock_init(&np->lock);
	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = (struct netdev_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
	dev->tx_timeout = &tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->change_mtu = &change_mtu;
	pci_set_drvdata(pdev, dev);

	pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %p, ",
		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

	np->phys[0] = 1;		/* Default setting */
	np->mii_preamble_required++;
	/*
	 * It seems some PHYs don't deal well with address 0 being accessed
	 * first, so leave address zero to the end of the loop (32 & 31).
	 */
	for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) {
		int phyx = phy & 0x1f;
		int mii_status = mdio_read(dev, phyx, MII_BMSR);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phys[phy_idx++] = phyx;
			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
			if ((mii_status & 0x0040) == 0)
				np->mii_preamble_required++;
			printk(KERN_INFO "%s: MII PHY found at address %d, status "
				   "0x%4.4x advertising %4.4x.\n",
				   dev->name, phyx, mii_status, np->mii_if.advertising);
		}
	}
	np->mii_preamble_required--;

	if (phy_idx == 0) {
		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
			   dev->name, ioread32(ioaddr + ASICCtrl));
		goto err_out_unregister;
	}

	np->mii_if.phy_id = np->phys[0];

	/* Parse override configuration */
	np->an_enable = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0
				   || strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (flowctrl == 1)
			np->flowctrl = 1;
	}

	/* Fibre PHY? */
	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
		/* Default 100Mbps Full */
		if (np->an_enable) {
			np->speed = 100;
			np->mii_if.full_duplex = 1;
			np->an_enable = 0;
		}
	}
	/* Reset PHY */
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
	mdelay (300);
	/* If flow control enabled, we need to advertise it. */
	if (np->flowctrl)
		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
	/* Force media type */
	if (!np->an_enable) {
		mii_ctl = 0;
		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
		printk (KERN_INFO "Override speed=%d, %s duplex\n",
			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
	}

	/* Perhaps move the reset here? */
	/* Reset the chip to erase previous misconfiguration. */
	if (netif_msg_hw(np))
		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
	iowrite16(0x00ff, ioaddr + ASICCtrl + 2);
	if (netif_msg_hw(np))
		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));

	card_idx++;
	return 0;

err_out_unregister:
	unregister_netdev(dev);
err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}

static int change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
		return -EINVAL;
	if (netif_running(dev))
		return -EBUSY;
	dev->mtu = new_mtu;
	return 0;
}

#define eeprom_delay(ee_addr)	ioread32(ee_addr)
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int __devinit eeprom_read(void __iomem *ioaddr, int location)
{
	int boguscnt = 10000;		/* Typical 1900 ticks. */
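	/* Register semantics below are inferred from this code's usage, not
	   from a datasheet: 0x0200 appears to be the EEPROM read opcode in
	   EECtrl and bit 0x8000 the busy flag, which is polled until the
	   read completes or the bogus counter expires. */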
	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
	do {
		eeprom_delay(ioaddr + EECtrl);
		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
			return ioread16(ioaddr + EEData);
		}
	} while (--boguscnt > 0);
	return 0;
}

/*  MII transceiver control section.
	Read and write the MII registers using software-generated serial
	MDIO protocol.  See the MII specifications or DP83840A data sheet
	for details.

	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33 MHz PCI cycles. */
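
/* For reference (IEEE 802.3 clause 22; not stated in the original
   comment): after the preamble of 32 ones sent by mdio_sync(), each
   management frame is ST(01), OP(10 = read, 01 = write), PHYAD(5 bits),
   REGAD(5 bits), TA(2 bits), DATA(16 bits).  The mii_cmd constants built
   in mdio_read()/mdio_write() below encode exactly these fields. */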
#define mdio_delay() ioread8(mdio_addr)

enum mii_reg_bits {
	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite8(MDIO_WRITE1, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return;
}

static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	/* Do we need to reset the chip??? */

	i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
	if (i)
		return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			   dev->name, dev->irq);
	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
	/* The Tx list pointer is written as packets are queued. */

	/* Initialize other registers. */
	__set_mac_addr(dev);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
#endif
	if (dev->mtu > 2047)
		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);

	/* Configure the PCI bus bursts and FIFO thresholds. */

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	spin_lock_init(&np->mcastlock);

	set_rx_mode(dev);
	iowrite16(0, ioaddr + IntrEnable);
	iowrite16(0, ioaddr + DownCounter);
	/* Set the chip to poll every N*320nsec. */
	iowrite8(100, ioaddr + RxDMAPollPeriod);
	iowrite8(127, ioaddr + TxDMAPollPeriod);
	/* Fix DFE-580TX packet drop issue */
	if (np->pci_rev_id >= 0x14)
		iowrite8(0x01, ioaddr + DebugCtrl1);
	netif_start_queue(dev);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
			   "MAC Control %x, %4.4x %4.4x.\n",
			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + MACCtrl0),
			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 3*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;	/* timer handler */
	add_timer(&np->timer);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);

	return 0;
}

static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	int negotiated = mii_lpa & np->mii_if.advertising;
	int duplex;

	/* Force media */
	if (!np->an_enable || mii_lpa == 0xffff) {
		if (np->mii_if.full_duplex)
			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
				ioaddr + MACCtrl0);
		return;
	}

	/* Autonegotiation */
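	/* Decoding of the LPA/ADVERTISE intersection below (standard MII
	   bits, noted here for clarity): 0x0100 is 100BASE-TX full duplex;
	   the 0x01C0 mask covers 100-full/100-half/10-full, so "== 0x0040"
	   means only 10BASE-T full duplex remains common to both ends. */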
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		if (netif_msg_link(np))
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
				   "negotiated capability %4.4x.\n", dev->name,
				   duplex ? "full" : "half", np->phys[0], negotiated);
		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
	}
}

static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int next_tick = 10*HZ;

	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
			   "Tx %x Rx %x.\n",
			   dev->name, ioread16(ioaddr + IntrEnable),
			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
	}
	check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flag;

	netif_stop_queue(dev);
	tasklet_disable(&np->tx_tasklet);
	iowrite16(0, ioaddr + IntrEnable);
	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
		   "TxFrameId %2.2x,"
		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
		   ioread8(ioaddr + TxFrameId));

	{
		int i;
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
	}
	spin_lock_irqsave(&np->lock, flag);

	/* Stop and restart the chip's Tx processes. */
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flag);

	dev->if_port = 0;

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	tasklet_enable(&np->tx_tasklet);
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].length = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		np->rx_ring[i].frag[0].addr = cpu_to_le32(
			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE));
		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	}
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	return;
}

static void tx_poll (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned head = np->cur_task % TX_RING_SIZE;
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;
		txdesc = &np->tx_ring[entry];
		if (np->last_tx) {
			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
				entry*sizeof(struct netdev_desc));
		}
		np->last_tx = txdesc;
	}
	/* Indicate the latest descriptor of tx ring */
	txdesc->status |= cpu_to_le32(DescIntrOnTx);

	if (ioread32 (np->base + TxListPtr) == 0)
		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
			np->base + TxListPtr);
	return;
}

static int
start_tx (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);

	/* Increment cur_tx before tasklet_schedule() */
	np->cur_tx++;
	mb();
	/* Schedule a tx_poll() task */
	tasklet_schedule(&np->tx_tasklet);

	/* On some architectures: explicitly flush cache lines here. */
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
			&& !netif_queue_stopped(dev)) {
		/* do nothing */
	} else {
		netif_stop_queue (dev);
	}
	dev->trans_start = jiffies;
	if (netif_msg_tx_queued(np)) {
		printk (KERN_DEBUG
			"%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return 0;
}

/* Reset hardware tx and free all of tx buffers */
static int
reset_tx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;
	int irq = in_interrupt();

	/* Reset tx logic, TxListPtr will be cleaned */
	iowrite16 (TxDisable, ioaddr + MACCtrl1);
	iowrite16 (TxReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASICCtrl + 2);
	for (i=50; i > 0; i--) {
		if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
			break;
		mdelay(1);
	}
	/* free all tx skbuff */
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->tx_ring[i].frag[0].addr, skb->len,
				PCI_DMA_TODEVICE);
			if (irq)
				dev_kfree_skb_irq (skb);
			else
				dev_kfree_skb (skb);
			np->tx_skbuff[i] = NULL;
			np->stats.tx_dropped++;
		}
	}
	np->cur_tx = np->dirty_tx = 0;
	np->cur_task = 0;
	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
	return 0;
}

/* The interrupt handler cleans up after the Tx thread
   and schedules Rx work. */
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int hw_frame_id;
	int tx_cnt;
	int tx_status;
	int handled = 0;

	do {
		int intr_status = ioread16(ioaddr + IntrStatus);
		iowrite16(intr_status, ioaddr + IntrStatus);

		if (netif_msg_intr(np))
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (!(intr_status & DEFAULT_INTR))
			break;

		handled = 1;
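
		/* Rx work is deferred to the rx_tasklet: the Rx interrupt
		   sources are masked here and re-enabled by rx_poll() once
		   it has processed up to RX_BUDGET frames. */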
		if (intr_status & (IntrRxDMADone)) {
			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
					ioaddr + IntrEnable);
			if (np->budget < 0)
				np->budget = RX_BUDGET;
			tasklet_schedule(&np->rx_tasklet);
		}
		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
			tx_status = ioread16 (ioaddr + TxStatus);
			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
				if (netif_msg_tx_done(np))
					printk
					    ("%s: Transmit status is %2.2x.\n",
					     dev->name, tx_status);
				if (tx_status & 0x1e) {
					if (netif_msg_tx_err(np))
						printk("%s: Transmit error status %4.4x.\n",
							   dev->name, tx_status);
					np->stats.tx_errors++;
					if (tx_status & 0x10)
						np->stats.tx_fifo_errors++;
					if (tx_status & 0x08)
						np->stats.collisions++;
					if (tx_status & 0x04)
						np->stats.tx_fifo_errors++;
					if (tx_status & 0x02)
						np->stats.tx_window_errors++;
					/*
					** This reset has been verified on
					** DFE-580TX boards ! phdm@macqel.be.
					*/
					if (tx_status & 0x10) {	/* TxUnderrun */
						unsigned short txthreshold;

						txthreshold = ioread16 (ioaddr + TxStartThresh);
						/* Restart Tx FIFO and transmitter */
						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
						iowrite16 (txthreshold, ioaddr + TxStartThresh);
						/* No need to reset the Tx pointer here */
					}
					/* Restart the Tx. */
					iowrite16 (TxEnable, ioaddr + MACCtrl1);
				}
				/* Yup, this is a documentation bug.  It cost me *hours*. */
				iowrite16 (0, ioaddr + TxStatus);
				if (tx_cnt < 0) {
					iowrite32(5000, ioaddr + DownCounter);
					break;
				}
				tx_status = ioread16 (ioaddr + TxStatus);
			}
			hw_frame_id = (tx_status >> 8) & 0xff;
		} else {
			hw_frame_id = ioread8(ioaddr + TxFrameId);
		}

		if (np->pci_rev_id >= 0x14) {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				int sw_frame_id;
				sw_frame_id = (le32_to_cpu(
					np->tx_ring[entry].status) >> 2) & 0xff;
				if (sw_frame_id == hw_frame_id &&
					!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				if (sw_frame_id == (hw_frame_id + 1) %
					TX_RING_SIZE)
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					np->tx_ring[entry].frag[0].addr,
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		} else {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				if (!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					np->tx_ring[entry].frag[0].addr,
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		}

		if (netif_queue_stopped(dev) &&
			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear busy flag. */
			netif_wake_queue (dev);
		}
		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
			netdev_error(dev, intr_status);
	} while (0);
	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

static void rx_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->budget;
	void __iomem *ioaddr = np->base;
	int received = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status = le32_to_cpu(desc->status);
		int pkt_len;

		if (--boguscnt < 0) {
			goto not_done;
		}
		if (!(frame_status & DescOwn))
			break;
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   frame_status);
		if (frame_status & 0x001f4000) {
			/* There was an error. */
			if (netif_msg_rx_err(np))
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
					   frame_status);
			np->stats.rx_errors++;
			if (frame_status & 0x00100000) np->stats.rx_length_errors++;
			if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
			if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
					   " status %8.8x.\n",
					   dev->name, frame_status);
			}
		} else {
			struct sk_buff *skb;
#ifndef final_version
			if (netif_msg_rx_status(np))
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   ", bogus_cnt %d.\n",
					   pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,
							    desc->frag[0].addr,
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
				pci_dma_sync_single_for_device(np->pci_dev,
							       desc->frag[0].addr,
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
				skb_put(skb, pkt_len);
			} else {
				pci_unmap_single(np->pci_dev,
					desc->frag[0].addr,
					np->rx_buf_sz,
					PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
			dev->last_rx = jiffies;
		}
		entry = (entry + 1) % RX_RING_SIZE;
		received++;
	}
	np->cur_rx = entry;
	refill_rx (dev);
	np->budget -= received;
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	return;

not_done:
	np->cur_rx = entry;
	refill_rx (dev);
	if (!received)
		received = 1;
	np->budget -= received;
	if (np->budget <= 0)
		np->budget = RX_BUDGET;
	tasklet_schedule(&np->rx_tasklet);
	return;
}

static void refill_rx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry;
	int cnt = 0;

	/* Refill the Rx ring buffers. */
	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb->dev = dev;		/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				pci_map_single(np->pci_dev, skb->data,
					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
		cnt++;
	}
	return;
}
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mii_ctl, mii_advertise, mii_lpa;
	int speed;

	if (intr_status & LinkChange) {
		if (np->an_enable) {
			mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
			mii_lpa = mdio_read (dev, np->phys[0], MII_LPA);
			mii_advertise &= mii_lpa;
			printk (KERN_INFO "%s: Link changed: ", dev->name);
			if (mii_advertise & ADVERTISE_100FULL) {
				np->speed = 100;
				printk ("100Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_100HALF) {
				np->speed = 100;
				printk ("100Mbps, half duplex\n");
			} else if (mii_advertise & ADVERTISE_10FULL) {
				np->speed = 10;
				printk ("10Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_10HALF) {
				np->speed = 10;
				printk ("10Mbps, half duplex\n");
			} else
				printk ("\n");
		} else {
			mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
			speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
			np->speed = speed;
			printk (KERN_INFO "%s: Link changed: %dMbps, ",
				dev->name, speed);
			printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
				"full" : "half");
		}
		check_duplex (dev);
		if (np->flowctrl && np->mii_if.full_duplex) {
			iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
				ioaddr + MulticastFilter1+2);
			iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
				ioaddr + MACCtrl0);
		}
	}
	if (intr_status & StatsMax) {
		get_stats(dev);
	}
	if (intr_status & IntrPCIErr) {
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			   dev->name, intr_status);
		/* We must do a global reset of DMA to continue. */
	}
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	/* We should lock this segment of code for SMP eventually, although
	   the vulnerability window is very small and statistics are
	   non-critical. */
	/* The chip need only report frames that were silently dropped. */
	np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
	np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
	np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
	np->stats.collisions += ioread8(ioaddr + StatsLateColl);
	np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
	np->stats.collisions += ioread8(ioaddr + StatsOneColl);
	np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
	ioread8(ioaddr + StatsTxDefer);
	for (i = StatsTxDefer; i <= StatsMcastRx; i++)
		ioread8(ioaddr + i);
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;

	return &np->stats;
}

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mc_filter[4];			/* Multicast hash filter */
	u32 rx_mode;
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((dev->mc_count > multicast_filter_limit)
			   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (dev->mc_count) {
		struct dev_mc_list *mclist;
		int bit;
		int index;
		int crc;
		memset (mc_filter, 0, sizeof (mc_filter));
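		/* Hash each multicast address into the 64-bit filter: the
		   six most-significant bits of ether_crc_le() of the MAC
		   address (gathered MSB-first below) select one of the 64
		   filter bits spread across the four 16-bit filter words. */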
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
			crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000) index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		}
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
	} else {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		return;
	}
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;

	for (i = 0; i < 4; i++)
		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	iowrite8(rx_mode, ioaddr + RxMode);
}

static int __set_mac_addr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u16 addr16;

	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
	iowrite16(addr16, np->base + StationAddr);
	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
	iowrite16(addr16, np->base + StationAddr+2);
	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
	iowrite16(addr16, np->base + StationAddr+4);
	return 0;
}

static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_gset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_sset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return res;
}

static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}

static struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_perm_addr = ethtool_op_get_perm_addr,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int rc;
	int i;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);
	switch (cmd) {
	case SIOCDEVPRIVATE:
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2)
					& 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
		printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
		return 0;
	}

	return rc;
}

static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

	/* Wait and kill tasklet */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk("\n" KERN_DEBUG "  Tx ring at %8.8x:\n",
			   (int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
				   np->tx_ring[i].frag[0].length);
		printk("\n" KERN_DEBUG "  Rx ring %8.8x:\n",
			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
				   np->rx_ring[i].frag[0].length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	del_timer_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].addr = 0xBADF00D0;	/* An invalid address. */
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->tx_ring[i].frag[0].addr, skb->len,
				PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}

static void __devexit sundance_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev(dev);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
			np->rx_ring_dma);
		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
			np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
		pci_set_drvdata(pdev, NULL);
	}
}

static struct pci_driver sundance_driver = {
	.name		= DRV_NAME,
	.id_table	= sundance_pci_tbl,
	.probe		= sundance_probe1,
	.remove		= __devexit_p(sundance_remove1),
};

static int __init sundance_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_module_init(&sundance_driver);
}

static void __exit sundance_exit(void)
{
	pci_unregister_driver(&sundance_driver);
}

module_init(sundance_init);
module_exit(sundance_exit);