MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / drivers / net / sundance.c
blob e0ddf2be907dc74185f257fdc105a6edf0fb1d1f
1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2 /*
3 Written 1999-2000 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
15 Annapolis MD 21403
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
21 Version LK1.01a (jgarzik):
22 - Replace some MII-related magic numbers with constants
24 Version LK1.02 (D-Link):
25 - Add new board to PCI ID list
26 - Fix multicast bug
28 Version LK1.03 (D-Link):
29 - New Rx scheme, reduce Rx congestion
30 - Option to disable flow control
32 Version LK1.04 (D-Link):
33 - Tx timeout recovery
34 - More support for ethtool.
36 Version LK1.04a:
37 - Remove unused/constant members from struct pci_id_info
38 (which then allows removal of 'drv_flags' from private struct)
39 (jgarzik)
40 - If no phy is found, fail to load that board (jgarzik)
41 - Always start phy id scan at id 1 to avoid problems (Donald Becker)
42 - Autodetect where mii_preamble_required is needed,
43 default to not needed. (Donald Becker)
45 Version LK1.04b:
46 - Remove mii_preamble_required module parameter (Donald Becker)
47 - Add per-interface mii_preamble_required (setting is autodetected)
48 (Donald Becker)
49 - Remove unnecessary cast from void pointer (jgarzik)
50 - Re-align comments in private struct (jgarzik)
52 Version LK1.04c (jgarzik):
53 - Support bitmapped message levels (NETIF_MSG_xxx), and the
54 two ethtool ioctls that get/set them
55 - Don't hand-code MII ethtool support, use standard API/lib
57 Version LK1.04d:
58 - Merge from Donald Becker's sundance.c: (Jason Lunz)
59 * proper support for variably-sized MTUs
60 * default to PIO, to fix chip bugs
61 - Add missing unregister_netdev (Jason Lunz)
62 - Add CONFIG_SUNDANCE_MMIO config option (jgarzik)
63 - Better rx buf size calculation (Donald Becker)
65 Version LK1.05 (D-Link):
66 - Fix DFE-580TX packet drop issue (for DL10050C)
67 - Fix reset_tx logic
69 Version LK1.06 (D-Link):
70 - Fix crash while unloading driver
72 Version LK1.06b (D-Link):
73 - New tx scheme, adaptive tx_coalesce
75 Version LK1.07 (D-Link):
76 - Fix tx bugs in big-endian machines
77 - Remove unused max_interrupt_work module parameter, the new
78 NAPI-like rx scheme doesn't need it.
79 - Remove redundant get_stats() in intr_handler(); those
80 I/O accesses could affect performance on ARM-based systems
81 - Add Linux software VLAN support
83 Version LK1.08 (D-Link):
84 - Fix custom MAC address bug
85 (the StationAddr register only accepts word writes)
87 Version LK1.09 (D-Link):
88 - Fix the flowctrl bug.
89 - Set Pause bit in MII ANAR if flow control enabled.
91 Version LK1.09a (ICPlus):
92 - Add a delay when reading the contents of the EEPROM
93 */
96 #define DRV_NAME "sundance"
97 #define DRV_VERSION "1.01+LK1.09a"
98 #define DRV_RELDATE "10-Jul-2003"
101 /* The user-configurable values.
102 These may be modified when a driver module is loaded.*/
103 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
104 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
105 Typical is a 64 element hash table based on the Ethernet CRC. */
106 static int multicast_filter_limit = 32;
108 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
109 Setting to > 1518 effectively disables this feature.
110 This chip can receive into offset buffers, so the Alpha does not
111 need a copy-align. */
112 static int rx_copybreak;
113 static int flowctrl=1;
115 /* media[] specifies the media type the NIC operates at.
116 autosense Autosensing active media.
117 10mbps_hd 10Mbps half duplex.
118 10mbps_fd 10Mbps full duplex.
119 100mbps_hd 100Mbps half duplex.
120 100mbps_fd 100Mbps full duplex.
121 0 Autosensing active media.
122 1 10Mbps half duplex.
123 2 10Mbps full duplex.
124 3 100Mbps half duplex.
125 4 100Mbps full duplex.
126 */
127 #define MAX_UNITS 8
128 static char *media[MAX_UNITS];
131 /* Operational parameters that are set at compile time. */
133 /* Keep the ring sizes a power of two for compile efficiency.
134 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
135 Making the Tx ring too large decreases the effectiveness of channel
136 bonding and packet priority, and more than 128 requires modifying the
137 Tx error recovery.
138 Large receive rings merely waste memory. */
139 #define TX_RING_SIZE 32
140 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
141 #define RX_RING_SIZE 64
142 #define RX_BUDGET 32
143 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
144 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
146 /* Operational parameters that usually are not changed. */
147 /* Time in jiffies before concluding the transmitter is hung. */
148 #define TX_TIMEOUT (4*HZ)
149 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
151 /* Include files, designed to support most kernel versions 2.0.0 and later. */
152 #include <linux/module.h>
153 #include <linux/kernel.h>
154 #include <linux/string.h>
155 #include <linux/timer.h>
156 #include <linux/errno.h>
157 #include <linux/ioport.h>
158 #include <linux/slab.h>
159 #include <linux/interrupt.h>
160 #include <linux/pci.h>
161 #include <linux/netdevice.h>
162 #include <linux/etherdevice.h>
163 #include <linux/skbuff.h>
164 #include <linux/init.h>
165 #include <asm/uaccess.h>
166 #include <asm/processor.h> /* Processor type for cache alignment. */
167 #include <asm/bitops.h>
168 #include <asm/io.h>
169 #include <linux/delay.h>
170 #include <linux/spinlock.h>
171 #ifndef _COMPAT_WITH_OLD_KERNEL
172 #include <linux/crc32.h>
173 #include <linux/ethtool.h>
174 #include <linux/mii.h>
175 #else
176 #include "crc32.h"
177 #include "ethtool.h"
178 #include "mii.h"
179 #include "compat.h"
180 #endif
182 /* These identify the driver base version and may not be removed. */
183 static char version[] __devinitdata =
184 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
185 KERN_INFO " http://www.scyld.com/network/sundance.html\n";
187 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
188 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
189 MODULE_LICENSE("GPL");
191 MODULE_PARM(debug, "i");
192 MODULE_PARM(rx_copybreak, "i");
193 MODULE_PARM(media, "1-" __MODULE_STRING(MAX_UNITS) "s");
194 MODULE_PARM(flowctrl, "i");
195 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
196 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
197 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
199 /*
200 Theory of Operation
202 I. Board Compatibility
204 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
206 II. Board-specific settings
208 III. Driver operation
210 IIIa. Ring buffers
212 This driver uses two statically allocated fixed-size descriptor lists
213 formed into rings by a branch from the final descriptor to the beginning of
214 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
215 Some chips explicitly use only 2^N sized rings, while others use a
216 'next descriptor' pointer that the driver forms into rings.
218 IIIb/c. Transmit/Receive Structure
220 This driver uses a zero-copy receive and transmit scheme.
221 The driver allocates full frame size skbuffs for the Rx ring buffers at
222 open() time and passes the skb->data field to the chip as receive data
223 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
224 a fresh skbuff is allocated and the frame is copied to the new skbuff.
225 When the incoming frame is larger, the skbuff is passed directly up the
226 protocol stack. Buffers consumed this way are replaced by newly allocated
227 skbuffs in a later phase of receives.
229 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
230 using a full-sized skbuff for small frames vs. the copying costs of larger
231 frames. New boards are typically used in generously configured machines
232 and the underfilled buffers have negligible impact compared to the benefit of
233 a single allocation size, so the default value of zero results in never
234 copying packets. When copying is done, the cost is usually mitigated by using
235 a combined copy/checksum routine. Copying also preloads the cache, which is
236 most useful with small frames.
238 A subtle aspect of the operation is that the IP header at offset 14 in an
239 ethernet frame isn't longword aligned for further processing.
240 Unaligned buffers are permitted by the Sundance hardware, so
241 frames are received into the skbuff at an offset of "+2", 16-byte aligning
242 the IP header.
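As a minimal sketch (the working code is in init_ring() and rx_poll()
below), each receive buffer is prepared with a 2-byte pad so that the
14-byte Ethernet header ends, and the IP header begins, at offset 16:

	skb = dev_alloc_skb(np->rx_buf_sz);
	skb_reserve(skb, 2);
	np->rx_ring[i].frag[0].addr = cpu_to_le32(
		pci_map_single(np->pci_dev, skb->tail,
			       np->rx_buf_sz, PCI_DMA_FROMDEVICE));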
244 IIId. Synchronization
246 The driver runs as two independent, single-threaded flows of control. One
247 is the send-packet routine, which enforces single-threaded use by the
248 dev->tbusy flag. The other thread is the interrupt handler, which is single
249 threaded by the hardware and interrupt handling software.
251 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
252 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
253 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
254 the 'lp->tx_full' flag.
256 The interrupt handler has exclusive control over the Rx ring and records stats
257 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
258 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
259 clears both the tx_full and tbusy flags.
261 IV. Notes
263 IVb. References
265 The Sundance ST201 datasheet, preliminary version.
266 http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
267 http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
269 IVc. Errata
271 */
273 /* Work-around for Kendin chip bugs. */
274 #ifndef CONFIG_SUNDANCE_MMIO
275 #define USE_IO_OPS 1
276 #endif
278 static struct pci_device_id sundance_pci_tbl[] = {
279 {0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0},
280 {0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1},
281 {0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2},
282 {0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3},
283 {0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
284 {0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
285 {0,}
286 };
287 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
289 enum {
290 netdev_io_size = 128
291 };
293 struct pci_id_info {
294 const char *name;
295 };
296 static struct pci_id_info pci_id_tbl[] = {
297 {"D-Link DFE-550TX FAST Ethernet Adapter"},
298 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
299 {"D-Link DFE-580TX 4 port Server Adapter"},
300 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
301 {"D-Link DL10050-based FAST Ethernet Adapter"},
302 {"Sundance Technology Alta"},
303 {NULL,}, /* 0 terminated list. */
304 };
306 /* This driver was written to use PCI memory space, however x86-oriented
307 hardware often uses I/O space accesses. */
308 #ifdef USE_IO_OPS
309 #undef readb
310 #undef readw
311 #undef readl
312 #undef writeb
313 #undef writew
314 #undef writel
315 #define readb inb
316 #define readw inw
317 #define readl inl
318 #define writeb outb
319 #define writew outw
320 #define writel outl
321 #endif
323 /* Offsets to the device registers.
324 Unlike software-only systems, device drivers interact with complex hardware.
325 It's not useful to define symbolic names for every register bit in the
326 device. The name can only partially document the semantics and make
327 the driver longer and more difficult to read.
328 In general, only the important configuration values or bits changed
329 multiple times should be defined symbolically.
330 */
331 enum alta_offsets {
332 DMACtrl = 0x00,
333 TxListPtr = 0x04,
334 TxDMABurstThresh = 0x08,
335 TxDMAUrgentThresh = 0x09,
336 TxDMAPollPeriod = 0x0a,
337 RxDMAStatus = 0x0c,
338 RxListPtr = 0x10,
339 DebugCtrl0 = 0x1a,
340 DebugCtrl1 = 0x1c,
341 RxDMABurstThresh = 0x14,
342 RxDMAUrgentThresh = 0x15,
343 RxDMAPollPeriod = 0x16,
344 LEDCtrl = 0x1a,
345 ASICCtrl = 0x30,
346 EEData = 0x34,
347 EECtrl = 0x36,
348 TxStartThresh = 0x3c,
349 RxEarlyThresh = 0x3e,
350 FlashAddr = 0x40,
351 FlashData = 0x44,
352 TxStatus = 0x46,
353 TxFrameId = 0x47,
354 DownCounter = 0x18,
355 IntrClear = 0x4a,
356 IntrEnable = 0x4c,
357 IntrStatus = 0x4e,
358 MACCtrl0 = 0x50,
359 MACCtrl1 = 0x52,
360 StationAddr = 0x54,
361 MaxFrameSize = 0x5A,
362 RxMode = 0x5c,
363 MIICtrl = 0x5e,
364 MulticastFilter0 = 0x60,
365 MulticastFilter1 = 0x64,
366 RxOctetsLow = 0x68,
367 RxOctetsHigh = 0x6a,
368 TxOctetsLow = 0x6c,
369 TxOctetsHigh = 0x6e,
370 TxFramesOK = 0x70,
371 RxFramesOK = 0x72,
372 StatsCarrierError = 0x74,
373 StatsLateColl = 0x75,
374 StatsMultiColl = 0x76,
375 StatsOneColl = 0x77,
376 StatsTxDefer = 0x78,
377 RxMissed = 0x79,
378 StatsTxXSDefer = 0x7a,
379 StatsTxAbort = 0x7b,
380 StatsBcastTx = 0x7c,
381 StatsBcastRx = 0x7d,
382 StatsMcastTx = 0x7e,
383 StatsMcastRx = 0x7f,
384 /* Aliased and bogus values! */
385 RxStatus = 0x0c,
386 };
387 enum ASICCtrl_HiWord_bit {
388 GlobalReset = 0x0001,
389 RxReset = 0x0002,
390 TxReset = 0x0004,
391 DMAReset = 0x0008,
392 FIFOReset = 0x0010,
393 NetworkReset = 0x0020,
394 HostReset = 0x0040,
395 ResetBusy = 0x0400,
396 };
398 /* Bits in the interrupt status/mask registers. */
399 enum intr_status_bits {
400 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
401 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
402 IntrDrvRqst=0x0040,
403 StatsMax=0x0080, LinkChange=0x0100,
404 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
405 };
407 /* Bits in the RxMode register. */
408 enum rx_mode_bits {
409 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
410 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
411 };
412 /* Bits in MACCtrl. */
413 enum mac_ctrl0_bits {
414 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
415 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
416 };
417 enum mac_ctrl1_bits {
418 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
419 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
420 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
421 };
423 /* The Rx and Tx buffer descriptors. */
424 /* Note that using only 32 bit fields simplifies conversion to big-endian
425 architectures. */
426 struct netdev_desc {
427 u32 next_desc;
428 u32 status;
429 struct desc_frag { u32 addr, length; } frag[1];
430 };
432 /* Bits in netdev_desc.status */
433 enum desc_status_bits {
434 DescOwn=0x8000,
435 DescEndPacket=0x4000,
436 DescEndRing=0x2000,
437 LastFrag=0x80000000,
438 DescIntrOnTx=0x8000,
439 DescIntrOnDMADone=0x80000000,
440 DisableAlign = 0x00000001,
441 };
443 #define PRIV_ALIGN 15 /* Required alignment mask */
444 /* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
445 within the structure. */
446 #define MII_CNT 4
447 struct netdev_private {
448 /* Descriptor rings first for alignment. */
449 struct netdev_desc *rx_ring;
450 struct netdev_desc *tx_ring;
451 struct sk_buff* rx_skbuff[RX_RING_SIZE];
452 struct sk_buff* tx_skbuff[TX_RING_SIZE];
453 dma_addr_t tx_ring_dma;
454 dma_addr_t rx_ring_dma;
455 struct net_device_stats stats;
456 struct timer_list timer; /* Media monitoring timer. */
457 /* Frequently used values: keep some adjacent for cache effect. */
458 spinlock_t lock;
459 spinlock_t rx_lock; /* Group with Tx control cache line. */
460 int msg_enable;
461 int chip_id;
462 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
463 unsigned int rx_buf_sz; /* Based on MTU+slack. */
464 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
465 unsigned int cur_tx, dirty_tx;
466 /* These values keep track of the transceiver/media in use. */
467 unsigned int flowctrl:1;
468 unsigned int default_port:4; /* Last dev->if_port value. */
469 unsigned int an_enable:1;
470 unsigned int speed;
471 struct tasklet_struct rx_tasklet;
472 struct tasklet_struct tx_tasklet;
473 int budget;
474 int cur_task;
475 /* Multicast and receive mode. */
476 spinlock_t mcastlock; /* SMP lock multicast updates. */
477 u16 mcast_filter[4];
478 /* MII transceiver section. */
479 struct mii_if_info mii_if;
480 int mii_preamble_required;
481 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
482 struct pci_dev *pci_dev;
483 unsigned char pci_rev_id;
484 };
486 /* The station address location in the EEPROM. */
487 #define EEPROM_SA_OFFSET 0x10
488 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
489 IntrDrvRqst | IntrTxDone | StatsMax | \
490 LinkChange)
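/* Note that IntrRxDone is deliberately absent from DEFAULT_INTR: receive
   completion is signalled by IntrRxDMADone, and intr_handler() masks both
   Rx sources while the rx_poll() tasklet drains the ring; rx_poll()
   rewrites DEFAULT_INTR when it finishes. */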
492 static int change_mtu(struct net_device *dev, int new_mtu);
493 static int eeprom_read(long ioaddr, int location);
494 static int mdio_read(struct net_device *dev, int phy_id, int location);
495 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
496 static int netdev_open(struct net_device *dev);
497 static void check_duplex(struct net_device *dev);
498 static void netdev_timer(unsigned long data);
499 static void tx_timeout(struct net_device *dev);
500 static void init_ring(struct net_device *dev);
501 static int start_tx(struct sk_buff *skb, struct net_device *dev);
502 static int reset_tx (struct net_device *dev);
503 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
504 static void rx_poll(unsigned long data);
505 static void tx_poll(unsigned long data);
506 static void refill_rx (struct net_device *dev);
507 static void netdev_error(struct net_device *dev, int intr_status);
509 static void set_rx_mode(struct net_device *dev);
510 static int __set_mac_addr(struct net_device *dev);
511 static struct net_device_stats *get_stats(struct net_device *dev);
512 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
513 static int netdev_close(struct net_device *dev);
517 static int __devinit sundance_probe1 (struct pci_dev *pdev,
518 const struct pci_device_id *ent)
519 {
520 struct net_device *dev;
521 struct netdev_private *np;
522 static int card_idx;
523 int chip_idx = ent->driver_data;
524 int irq;
525 int i;
526 long ioaddr;
527 u16 mii_ctl;
528 void *ring_space;
529 dma_addr_t ring_dma;
532 /* when built into the kernel, we only print version if device is found */
533 #ifndef MODULE
534 static int printed_version;
535 if (!printed_version++)
536 printk(version);
537 #endif
539 if (pci_enable_device(pdev))
540 return -EIO;
541 pci_set_master(pdev);
543 irq = pdev->irq;
545 dev = alloc_etherdev(sizeof(*np));
546 if (!dev)
547 return -ENOMEM;
548 SET_MODULE_OWNER(dev);
549 SET_NETDEV_DEV(dev, &pdev->dev);
551 if (pci_request_regions(pdev, DRV_NAME))
552 goto err_out_netdev;
554 #ifdef USE_IO_OPS
555 ioaddr = pci_resource_start(pdev, 0);
556 #else
557 ioaddr = pci_resource_start(pdev, 1);
558 ioaddr = (long) ioremap (ioaddr, netdev_io_size);
559 if (!ioaddr)
560 goto err_out_res;
561 #endif
563 for (i = 0; i < 3; i++)
564 ((u16 *)dev->dev_addr)[i] =
565 le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
567 dev->base_addr = ioaddr;
568 dev->irq = irq;
570 np = dev->priv;
571 np->pci_dev = pdev;
572 np->chip_id = chip_idx;
573 np->msg_enable = (1 << debug) - 1;
574 spin_lock_init(&np->lock);
575 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
576 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
578 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
579 if (!ring_space)
580 goto err_out_cleardev;
581 np->tx_ring = (struct netdev_desc *)ring_space;
582 np->tx_ring_dma = ring_dma;
584 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
585 if (!ring_space)
586 goto err_out_unmap_tx;
587 np->rx_ring = (struct netdev_desc *)ring_space;
588 np->rx_ring_dma = ring_dma;
590 np->mii_if.dev = dev;
591 np->mii_if.mdio_read = mdio_read;
592 np->mii_if.mdio_write = mdio_write;
593 np->mii_if.phy_id_mask = 0x1f;
594 np->mii_if.reg_num_mask = 0x1f;
596 /* The chip-specific entries in the device structure. */
597 dev->open = &netdev_open;
598 dev->hard_start_xmit = &start_tx;
599 dev->stop = &netdev_close;
600 dev->get_stats = &get_stats;
601 dev->set_multicast_list = &set_rx_mode;
602 dev->do_ioctl = &netdev_ioctl;
603 dev->tx_timeout = &tx_timeout;
604 dev->watchdog_timeo = TX_TIMEOUT;
605 dev->change_mtu = &change_mtu;
606 pci_set_drvdata(pdev, dev);
608 pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
610 i = register_netdev(dev);
611 if (i)
612 goto err_out_unmap_rx;
614 printk(KERN_INFO "%s: %s at 0x%lx, ",
615 dev->name, pci_id_tbl[chip_idx].name, ioaddr);
616 for (i = 0; i < 5; i++)
617 printk("%2.2x:", dev->dev_addr[i]);
618 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
620 if (1) {
621 int phy, phy_idx = 0;
622 np->phys[0] = 1; /* Default setting */
623 np->mii_preamble_required++;
624 for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
625 int mii_status = mdio_read(dev, phy, MII_BMSR);
626 if (mii_status != 0xffff && mii_status != 0x0000) {
627 np->phys[phy_idx++] = phy;
628 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
629 if ((mii_status & 0x0040) == 0)
630 np->mii_preamble_required++;
631 printk(KERN_INFO "%s: MII PHY found at address %d, status "
632 "0x%4.4x advertising %4.4x.\n",
633 dev->name, phy, mii_status, np->mii_if.advertising);
634 }
635 }
636 np->mii_preamble_required--;
638 if (phy_idx == 0) {
639 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
640 dev->name, readl(ioaddr + ASICCtrl));
641 goto err_out_unregister;
642 }
644 np->mii_if.phy_id = np->phys[0];
645 }
647 /* Parse override configuration */
648 np->an_enable = 1;
649 if (card_idx < MAX_UNITS) {
650 if (media[card_idx] != NULL) {
651 np->an_enable = 0;
652 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
653 strcmp (media[card_idx], "4") == 0) {
654 np->speed = 100;
655 np->mii_if.full_duplex = 1;
656 } else if (strcmp (media[card_idx], "100mbps_hd") == 0
657 || strcmp (media[card_idx], "3") == 0) {
658 np->speed = 100;
659 np->mii_if.full_duplex = 0;
660 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
661 strcmp (media[card_idx], "2") == 0) {
662 np->speed = 10;
663 np->mii_if.full_duplex = 1;
664 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
665 strcmp (media[card_idx], "1") == 0) {
666 np->speed = 10;
667 np->mii_if.full_duplex = 0;
668 } else {
669 np->an_enable = 1;
670 }
671 }
672 if (flowctrl == 1)
673 np->flowctrl = 1;
674 }
676 /* Fibre PHY? */
677 if (readl (ioaddr + ASICCtrl) & 0x80) {
678 /* Default 100Mbps Full */
679 if (np->an_enable) {
680 np->speed = 100;
681 np->mii_if.full_duplex = 1;
682 np->an_enable = 0;
683 }
684 }
685 /* Reset PHY */
686 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
687 mdelay (300);
688 /* If flow control enabled, we need to advertise it.*/
689 if (np->flowctrl)
690 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
691 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
692 /* Force media type */
693 if (!np->an_enable) {
694 mii_ctl = 0;
695 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
696 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
697 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
698 printk (KERN_INFO "Override speed=%d, %s duplex\n",
699 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
700 }
703 /* Perhaps move the reset here? */
704 /* Reset the chip to erase previous misconfiguration. */
705 if (netif_msg_hw(np))
706 printk("ASIC Control is %x.\n", readl(ioaddr + ASICCtrl));
707 writew(0x007f, ioaddr + ASICCtrl + 2);
708 if (netif_msg_hw(np))
709 printk("ASIC Control is now %x.\n", readl(ioaddr + ASICCtrl));
711 card_idx++;
712 return 0;
714 err_out_unregister:
715 unregister_netdev(dev);
716 err_out_unmap_rx:
717 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
718 err_out_unmap_tx:
719 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
720 err_out_cleardev:
721 pci_set_drvdata(pdev, NULL);
722 #ifndef USE_IO_OPS
723 iounmap((void *)ioaddr);
724 err_out_res:
725 #endif
726 pci_release_regions(pdev);
727 err_out_netdev:
728 free_netdev (dev);
729 return -ENODEV;
730 }
732 static int change_mtu(struct net_device *dev, int new_mtu)
733 {
734 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
735 return -EINVAL;
736 if (netif_running(dev))
737 return -EBUSY;
738 dev->mtu = new_mtu;
739 return 0;
740 }
742 #define eeprom_delay(ee_addr) readl(ee_addr)
743 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
744 static int __devinit eeprom_read(long ioaddr, int location)
745 {
746 int boguscnt = 10000; /* Typical 1900 ticks. */
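/* Writing 0x0200 | location starts an EEPROM read; poll EECtrl until the
   busy bit (0x8000) clears, then fetch the result from EEData. */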
747 writew(0x0200 | (location & 0xff), ioaddr + EECtrl);
748 do {
749 eeprom_delay(ioaddr + EECtrl);
750 if (! (readw(ioaddr + EECtrl) & 0x8000)) {
751 return readw(ioaddr + EEData);
752 }
753 } while (--boguscnt > 0);
754 return 0;
755 }
757 /* MII transceiver control section.
758 Read and write the MII registers using software-generated serial
759 MDIO protocol. See the MII specifications or DP83840A data sheet
760 for details.
762 The maximum data clock rate is 2.5 MHz. The minimum timing is usually
763 met by back-to-back 33 MHz PCI cycles. */
764 #define mdio_delay() readb(mdio_addr)
766 enum mii_reg_bits {
767 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
768 };
769 #define MDIO_EnbIn (0)
770 #define MDIO_WRITE0 (MDIO_EnbOutput)
771 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
773 /* Generate the preamble required for initial synchronization and
774 a few older transceivers. */
775 static void mdio_sync(long mdio_addr)
776 {
777 int bits = 32;
779 /* Establish sync by sending at least 32 logic ones. */
780 while (--bits >= 0) {
781 writeb(MDIO_WRITE1, mdio_addr);
782 mdio_delay();
783 writeb(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
784 mdio_delay();
785 }
786 }
788 static int mdio_read(struct net_device *dev, int phy_id, int location)
789 {
790 struct netdev_private *np = dev->priv;
791 long mdio_addr = dev->base_addr + MIICtrl;
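/* The 16 bits shifted out below are two further preamble ones, the 01
   start pattern, the 10 read opcode, the 5-bit PHY address and the
   5-bit register number; 0xf6 packs the preamble/start/opcode bits. */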
792 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
793 int i, retval = 0;
795 if (np->mii_preamble_required)
796 mdio_sync(mdio_addr);
798 /* Shift the read command bits out. */
799 for (i = 15; i >= 0; i--) {
800 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
802 writeb(dataval, mdio_addr);
803 mdio_delay();
804 writeb(dataval | MDIO_ShiftClk, mdio_addr);
805 mdio_delay();
806 }
807 /* Read the two transition, 16 data, and wire-idle bits. */
808 for (i = 19; i > 0; i--) {
809 writeb(MDIO_EnbIn, mdio_addr);
810 mdio_delay();
811 retval = (retval << 1) | ((readb(mdio_addr) & MDIO_Data) ? 1 : 0);
812 writeb(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
813 mdio_delay();
814 }
815 return (retval>>1) & 0xffff;
816 }
818 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
819 {
820 struct netdev_private *np = dev->priv;
821 long mdio_addr = dev->base_addr + MIICtrl;
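/* 32-bit write frame: 01 start, 01 write opcode, 5-bit PHY address,
   5-bit register number, 10 turnaround, then 16 data bits; the
   0x5002 << 16 constant supplies the start, opcode and turnaround bits. */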
822 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
823 int i;
825 if (np->mii_preamble_required)
826 mdio_sync(mdio_addr);
828 /* Shift the command bits out. */
829 for (i = 31; i >= 0; i--) {
830 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
832 writeb(dataval, mdio_addr);
833 mdio_delay();
834 writeb(dataval | MDIO_ShiftClk, mdio_addr);
835 mdio_delay();
836 }
837 /* Clear out extra bits. */
838 for (i = 2; i > 0; i--) {
839 writeb(MDIO_EnbIn, mdio_addr);
840 mdio_delay();
841 writeb(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
842 mdio_delay();
843 }
844 return;
845 }
847 static int netdev_open(struct net_device *dev)
848 {
849 struct netdev_private *np = dev->priv;
850 long ioaddr = dev->base_addr;
851 int i;
853 /* Do we need to reset the chip??? */
855 i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
856 if (i)
857 return i;
859 if (netif_msg_ifup(np))
860 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
861 dev->name, dev->irq);
862 init_ring(dev);
864 writel(np->rx_ring_dma, ioaddr + RxListPtr);
865 /* The Tx list pointer is written as packets are queued. */
867 /* Initialize other registers. */
868 __set_mac_addr(dev);
869 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
870 writew(dev->mtu + 18, ioaddr + MaxFrameSize);
871 #else
872 writew(dev->mtu + 14, ioaddr + MaxFrameSize);
873 #endif
874 if (dev->mtu > 2047)
875 writel(readl(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
877 /* Configure the PCI bus bursts and FIFO thresholds. */
879 if (dev->if_port == 0)
880 dev->if_port = np->default_port;
882 np->mcastlock = SPIN_LOCK_UNLOCKED;
884 set_rx_mode(dev);
885 writew(0, ioaddr + IntrEnable);
886 writew(0, ioaddr + DownCounter);
887 /* Set the chip to poll every N*320nsec. */
888 writeb(100, ioaddr + RxDMAPollPeriod);
889 writeb(127, ioaddr + TxDMAPollPeriod);
890 /* Fix DFE-580TX packet drop issue */
891 if (np->pci_rev_id >= 0x14)
892 writeb(0x01, ioaddr + DebugCtrl1);
893 netif_start_queue(dev);
895 writew (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
897 if (netif_msg_ifup(np))
898 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
899 "MAC Control %x, %4.4x %4.4x.\n",
900 dev->name, readl(ioaddr + RxStatus), readb(ioaddr + TxStatus),
901 readl(ioaddr + MACCtrl0),
902 readw(ioaddr + MACCtrl1), readw(ioaddr + MACCtrl0));
904 /* Set the timer to check for link beat. */
905 init_timer(&np->timer);
906 np->timer.expires = jiffies + 3*HZ;
907 np->timer.data = (unsigned long)dev;
908 np->timer.function = &netdev_timer; /* timer handler */
909 add_timer(&np->timer);
911 /* Enable interrupts by setting the interrupt mask. */
912 writew(DEFAULT_INTR, ioaddr + IntrEnable);
914 return 0;
915 }
917 static void check_duplex(struct net_device *dev)
918 {
919 struct netdev_private *np = dev->priv;
920 long ioaddr = dev->base_addr;
921 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
922 int negotiated = mii_lpa & np->mii_if.advertising;
923 int duplex;
925 /* Force media */
926 if (!np->an_enable || mii_lpa == 0xffff) {
927 if (np->mii_if.full_duplex)
928 writew (readw (ioaddr + MACCtrl0) | EnbFullDuplex,
929 ioaddr + MACCtrl0);
930 return;
931 }
933 /* Autonegotiation */
934 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
935 if (np->mii_if.full_duplex != duplex) {
936 np->mii_if.full_duplex = duplex;
937 if (netif_msg_link(np))
938 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
939 "negotiated capability %4.4x.\n", dev->name,
940 duplex ? "full" : "half", np->phys[0], negotiated);
941 writew(readw(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
942 }
943 }
945 static void netdev_timer(unsigned long data)
946 {
947 struct net_device *dev = (struct net_device *)data;
948 struct netdev_private *np = dev->priv;
949 long ioaddr = dev->base_addr;
950 int next_tick = 10*HZ;
952 if (netif_msg_timer(np)) {
953 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
954 "Tx %x Rx %x.\n",
955 dev->name, readw(ioaddr + IntrEnable),
956 readb(ioaddr + TxStatus), readl(ioaddr + RxStatus));
957 }
958 check_duplex(dev);
959 np->timer.expires = jiffies + next_tick;
960 add_timer(&np->timer);
961 }
963 static void tx_timeout(struct net_device *dev)
964 {
965 struct netdev_private *np = dev->priv;
966 long ioaddr = dev->base_addr;
967 unsigned long flag;
969 netif_stop_queue(dev);
970 tasklet_disable(&np->tx_tasklet);
971 writew(0, ioaddr + IntrEnable);
972 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
973 "TxFrameId %2.2x,"
974 " resetting...\n", dev->name, readb(ioaddr + TxStatus),
975 readb(ioaddr + TxFrameId));
977 {
978 int i;
979 for (i=0; i<TX_RING_SIZE; i++) {
980 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
981 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
982 le32_to_cpu(np->tx_ring[i].next_desc),
983 le32_to_cpu(np->tx_ring[i].status),
984 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
985 le32_to_cpu(np->tx_ring[i].frag[0].addr),
986 le32_to_cpu(np->tx_ring[i].frag[0].length));
987 }
988 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
989 readl(dev->base_addr + TxListPtr),
990 netif_queue_stopped(dev));
991 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
992 np->cur_tx, np->cur_tx % TX_RING_SIZE,
993 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
994 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
995 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
996 }
997 spin_lock_irqsave(&np->lock, flag);
999 /* Stop and restart the chip's Tx processes. */
1000 reset_tx(dev);
1001 spin_unlock_irqrestore(&np->lock, flag);
1003 dev->if_port = 0;
1005 dev->trans_start = jiffies;
1006 np->stats.tx_errors++;
1007 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1008 netif_wake_queue(dev);
1009 }
1010 writew(DEFAULT_INTR, ioaddr + IntrEnable);
1011 tasklet_enable(&np->tx_tasklet);
1012 }
1015 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1016 static void init_ring(struct net_device *dev)
1017 {
1018 struct netdev_private *np = dev->priv;
1019 int i;
1021 np->cur_rx = np->cur_tx = 0;
1022 np->dirty_rx = np->dirty_tx = 0;
1023 np->cur_task = 0;
1025 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1027 /* Initialize all Rx descriptors. */
1028 for (i = 0; i < RX_RING_SIZE; i++) {
1029 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1030 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1031 np->rx_ring[i].status = 0;
1032 np->rx_ring[i].frag[0].length = 0;
1033 np->rx_skbuff[i] = NULL;
1034 }
1036 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1037 for (i = 0; i < RX_RING_SIZE; i++) {
1038 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1039 np->rx_skbuff[i] = skb;
1040 if (skb == NULL)
1041 break;
1042 skb->dev = dev; /* Mark as being used by this device. */
1043 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1044 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1045 pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz,
1046 PCI_DMA_FROMDEVICE));
1047 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1048 }
1049 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1051 for (i = 0; i < TX_RING_SIZE; i++) {
1052 np->tx_skbuff[i] = NULL;
1053 np->tx_ring[i].status = 0;
1054 }
1055 return;
1056 }
1058 static void tx_poll (unsigned long data)
1059 {
1060 struct net_device *dev = (struct net_device *)data;
1061 struct netdev_private *np = dev->priv;
1062 unsigned head = np->cur_task % TX_RING_SIZE;
1063 struct netdev_desc *txdesc =
1064 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1066 /* Chain the next pointer */
1067 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1068 int entry = np->cur_task % TX_RING_SIZE;
1069 txdesc = &np->tx_ring[entry];
1070 if (np->last_tx) {
1071 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1072 entry*sizeof(struct netdev_desc));
1073 }
1074 np->last_tx = txdesc;
1075 }
1076 /* Indicate the latest descriptor of tx ring */
1077 txdesc->status |= cpu_to_le32(DescIntrOnTx);
1079 if (readl (dev->base_addr + TxListPtr) == 0)
1080 writel (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1081 dev->base_addr + TxListPtr);
1082 return;
1083 }
1085 static int
1086 start_tx (struct sk_buff *skb, struct net_device *dev)
1087 {
1088 struct netdev_private *np = dev->priv;
1089 struct netdev_desc *txdesc;
1090 unsigned entry;
1092 /* Calculate the next Tx descriptor entry. */
1093 entry = np->cur_tx % TX_RING_SIZE;
1094 np->tx_skbuff[entry] = skb;
1095 txdesc = &np->tx_ring[entry];
1097 txdesc->next_desc = 0;
1098 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1099 txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
1100 skb->len,
1101 PCI_DMA_TODEVICE));
1102 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
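/* A single fragment covers the whole linear skb; LastFrag in the length
   word above terminates the descriptor's fragment list. */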
1104 /* Increment cur_tx before tasklet_schedule() */
1105 np->cur_tx++;
1106 mb();
1107 /* Schedule a tx_poll() task */
1108 tasklet_schedule(&np->tx_tasklet);
1110 /* On some architectures: explicitly flush cache lines here. */
1111 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
1112 && !netif_queue_stopped(dev)) {
1113 /* do nothing */
1114 } else {
1115 netif_stop_queue (dev);
1116 }
1117 dev->trans_start = jiffies;
1118 if (netif_msg_tx_queued(np)) {
1119 printk (KERN_DEBUG
1120 "%s: Transmit frame #%d queued in slot %d.\n",
1121 dev->name, np->cur_tx, entry);
1122 }
1123 return 0;
1124 }
1126 /* Reset hardware tx and free all of tx buffers */
1127 static int
1128 reset_tx (struct net_device *dev)
1129 {
1130 struct netdev_private *np = dev->priv;
1131 long ioaddr = dev->base_addr;
1132 struct sk_buff *skb;
1133 int i;
1134 int irq = in_interrupt();
1136 /* Reset tx logic, TxListPtr will be cleaned */
1137 writew (TxDisable, ioaddr + MACCtrl1);
1138 writew (TxReset | DMAReset | FIFOReset | NetworkReset,
1139 ioaddr + ASICCtrl + 2);
1140 for (i=50; i > 0; i--) {
1141 if ((readw(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
1142 break;
1143 mdelay(1);
1144 }
1145 /* free all tx skbuff */
1146 for (i = 0; i < TX_RING_SIZE; i++) {
1147 skb = np->tx_skbuff[i];
1148 if (skb) {
1149 pci_unmap_single(np->pci_dev,
1150 np->tx_ring[i].frag[0].addr, skb->len,
1151 PCI_DMA_TODEVICE);
1152 if (irq)
1153 dev_kfree_skb_irq (skb);
1154 else
1155 dev_kfree_skb (skb);
1156 np->tx_skbuff[i] = NULL;
1157 np->stats.tx_dropped++;
1158 }
1159 }
1160 np->cur_tx = np->dirty_tx = 0;
1161 np->cur_task = 0;
1162 writew (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1163 return 0;
1164 }
1166 /* The interrupt handler cleans up after the Tx thread,
1167 and schedules Rx work via a tasklet. */
1168 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
1169 {
1170 struct net_device *dev = (struct net_device *)dev_instance;
1171 struct netdev_private *np;
1172 long ioaddr;
1173 int hw_frame_id;
1174 int tx_cnt;
1175 int tx_status;
1176 int handled = 0;
1178 ioaddr = dev->base_addr;
1179 np = dev->priv;
1181 do {
1182 int intr_status = readw(ioaddr + IntrStatus);
1183 writew(intr_status, ioaddr + IntrStatus);
1185 if (netif_msg_intr(np))
1186 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1187 dev->name, intr_status);
1189 if (!(intr_status & DEFAULT_INTR))
1190 break;
1192 handled = 1;
1194 if (intr_status & (IntrRxDMADone)) {
1195 writew(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1196 ioaddr + IntrEnable);
1197 if (np->budget < 0)
1198 np->budget = RX_BUDGET;
1199 tasklet_schedule(&np->rx_tasklet);
1200 }
1201 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1202 tx_status = readw (ioaddr + TxStatus);
1203 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1204 if (netif_msg_tx_done(np))
1205 printk
1206 ("%s: Transmit status is %2.2x.\n",
1207 dev->name, tx_status);
1208 if (tx_status & 0x1e) {
1209 np->stats.tx_errors++;
1210 if (tx_status & 0x10)
1211 np->stats.tx_fifo_errors++;
1212 if (tx_status & 0x08)
1213 np->stats.collisions++;
1214 if (tx_status & 0x02)
1215 np->stats.tx_window_errors++;
1216 /* This reset has not been verified!. */
1217 if (tx_status & 0x10) { /* Reset the Tx. */
1218 np->stats.tx_fifo_errors++;
1219 spin_lock(&np->lock);
1220 reset_tx(dev);
1221 spin_unlock(&np->lock);
1222 }
1223 if (tx_status & 0x1e) /* Restart the Tx. */
1224 writew (TxEnable,
1225 ioaddr + MACCtrl1);
1226 }
1227 /* Yup, this is a documentation bug. It cost me *hours*. */
1228 writew (0, ioaddr + TxStatus);
1229 tx_status = readw (ioaddr + TxStatus);
1230 if (tx_cnt < 0)
1231 break;
1232 }
1233 hw_frame_id = (tx_status >> 8) & 0xff;
1234 } else {
1235 hw_frame_id = readb(ioaddr + TxFrameId);
1236 }
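/* On rev >= 0x14 parts (the DFE-580TX fix, see netdev_open()), reclaim
   descriptors only up to the frame id the hardware reports complete;
   older revisions just test each descriptor's done bit (0x00010000). */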
1238 if (np->pci_rev_id >= 0x14) {
1239 spin_lock(&np->lock);
1240 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1241 int entry = np->dirty_tx % TX_RING_SIZE;
1242 struct sk_buff *skb;
1243 int sw_frame_id;
1244 sw_frame_id = (le32_to_cpu(
1245 np->tx_ring[entry].status) >> 2) & 0xff;
1246 if (sw_frame_id == hw_frame_id &&
1247 !(le32_to_cpu(np->tx_ring[entry].status)
1248 & 0x00010000))
1249 break;
1250 if (sw_frame_id == (hw_frame_id + 1) %
1251 TX_RING_SIZE)
1252 break;
1253 skb = np->tx_skbuff[entry];
1254 /* Free the original skb. */
1255 pci_unmap_single(np->pci_dev,
1256 np->tx_ring[entry].frag[0].addr,
1257 skb->len, PCI_DMA_TODEVICE);
1258 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1259 np->tx_skbuff[entry] = NULL;
1260 np->tx_ring[entry].frag[0].addr = 0;
1261 np->tx_ring[entry].frag[0].length = 0;
1262 }
1263 spin_unlock(&np->lock);
1264 } else {
1265 spin_lock(&np->lock);
1266 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1267 int entry = np->dirty_tx % TX_RING_SIZE;
1268 struct sk_buff *skb;
1269 if (!(le32_to_cpu(np->tx_ring[entry].status)
1270 & 0x00010000))
1271 break;
1272 skb = np->tx_skbuff[entry];
1273 /* Free the original skb. */
1274 pci_unmap_single(np->pci_dev,
1275 np->tx_ring[entry].frag[0].addr,
1276 skb->len, PCI_DMA_TODEVICE);
1277 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1278 np->tx_skbuff[entry] = NULL;
1279 np->tx_ring[entry].frag[0].addr = 0;
1280 np->tx_ring[entry].frag[0].length = 0;
1281 }
1282 spin_unlock(&np->lock);
1283 }
1285 if (netif_queue_stopped(dev) &&
1286 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1287 /* The ring is no longer full, clear busy flag. */
1288 netif_wake_queue (dev);
1289 }
1290 /* Abnormal error summary/uncommon events handlers. */
1291 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1292 netdev_error(dev, intr_status);
1293 } while (0);
1294 if (netif_msg_intr(np))
1295 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1296 dev->name, readw(ioaddr + IntrStatus));
1297 writel(5000, ioaddr + DownCounter);
1298 return IRQ_RETVAL(handled);
1299 }
1301 static void rx_poll(unsigned long data)
1302 {
1303 struct net_device *dev = (struct net_device *)data;
1304 struct netdev_private *np = dev->priv;
1305 int entry = np->cur_rx % RX_RING_SIZE;
1306 int boguscnt = np->budget;
1307 long ioaddr = dev->base_addr;
1308 int received = 0;
1310 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1311 while (1) {
1312 struct netdev_desc *desc = &(np->rx_ring[entry]);
1313 u32 frame_status = le32_to_cpu(desc->status);
1314 int pkt_len;
1316 if (--boguscnt < 0) {
1317 goto not_done;
1318 }
1319 if (!(frame_status & DescOwn))
1320 break;
1321 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1322 if (netif_msg_rx_status(np))
1323 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1324 frame_status);
1325 if (frame_status & 0x001f4000) {
1326 /* There was an error. */
1327 if (netif_msg_rx_err(np))
1328 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1329 frame_status);
1330 np->stats.rx_errors++;
1331 if (frame_status & 0x00100000) np->stats.rx_length_errors++;
1332 if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
1333 if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
1334 if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
1335 if (frame_status & 0x00100000) {
1336 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1337 " status %8.8x.\n",
1338 dev->name, frame_status);
1339 }
1340 } else {
1341 struct sk_buff *skb;
1342 #ifndef final_version
1343 if (netif_msg_rx_status(np))
1344 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1345 ", bogus_cnt %d.\n",
1346 pkt_len, boguscnt);
1347 #endif
1348 /* Check if the packet is long enough to accept without copying
1349 to a minimally-sized skbuff. */
1350 if (pkt_len < rx_copybreak
1351 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1352 skb->dev = dev;
1353 skb_reserve(skb, 2); /* 16 byte align the IP header */
1354 pci_dma_sync_single_for_cpu(np->pci_dev,
1355 desc->frag[0].addr,
1356 np->rx_buf_sz,
1357 PCI_DMA_FROMDEVICE);
1359 eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
1360 pci_dma_sync_single_for_device(np->pci_dev,
1361 desc->frag[0].addr,
1362 np->rx_buf_sz,
1363 PCI_DMA_FROMDEVICE);
1364 skb_put(skb, pkt_len);
1365 } else {
1366 pci_unmap_single(np->pci_dev,
1367 desc->frag[0].addr,
1368 np->rx_buf_sz,
1369 PCI_DMA_FROMDEVICE);
1370 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1371 np->rx_skbuff[entry] = NULL;
1372 }
1373 skb->protocol = eth_type_trans(skb, dev);
1374 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1375 netif_rx(skb);
1376 dev->last_rx = jiffies;
1377 }
1378 entry = (entry + 1) % RX_RING_SIZE;
1379 received++;
1380 }
1381 np->cur_rx = entry;
1382 refill_rx (dev);
1383 np->budget -= received;
1384 writew(DEFAULT_INTR, ioaddr + IntrEnable);
1385 return;
1387 not_done:
1388 np->cur_rx = entry;
1389 refill_rx (dev);
1390 if (!received)
1391 received = 1;
1392 np->budget -= received;
1393 if (np->budget <= 0)
1394 np->budget = RX_BUDGET;
1395 tasklet_schedule(&np->rx_tasklet);
1396 return;
1397 }
1399 static void refill_rx (struct net_device *dev)
1400 {
1401 struct netdev_private *np = dev->priv;
1402 int entry;
1403 int cnt = 0;
1405 /* Refill the Rx ring buffers. */
1406 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1407 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1408 struct sk_buff *skb;
1409 entry = np->dirty_rx % RX_RING_SIZE;
1410 if (np->rx_skbuff[entry] == NULL) {
1411 skb = dev_alloc_skb(np->rx_buf_sz);
1412 np->rx_skbuff[entry] = skb;
1413 if (skb == NULL)
1414 break; /* Better luck next round. */
1415 skb->dev = dev; /* Mark as being used by this device. */
1416 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1417 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1418 pci_map_single(np->pci_dev, skb->tail,
1419 np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1420 }
1421 /* Perhaps we need not reset this field. */
1422 np->rx_ring[entry].frag[0].length =
1423 cpu_to_le32(np->rx_buf_sz | LastFrag);
1424 np->rx_ring[entry].status = 0;
1425 cnt++;
1426 }
1427 return;
1428 }
1429 static void netdev_error(struct net_device *dev, int intr_status)
1430 {
1431 long ioaddr = dev->base_addr;
1432 struct netdev_private *np = dev->priv;
1433 u16 mii_ctl, mii_advertise, mii_lpa;
1434 int speed;
1436 if (intr_status & LinkChange) {
1437 if (np->an_enable) {
1438 mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
1439 mii_lpa= mdio_read (dev, np->phys[0], MII_LPA);
1440 mii_advertise &= mii_lpa;
1441 printk (KERN_INFO "%s: Link changed: ", dev->name);
1442 if (mii_advertise & ADVERTISE_100FULL) {
1443 np->speed = 100;
1444 printk ("100Mbps, full duplex\n");
1445 } else if (mii_advertise & ADVERTISE_100HALF) {
1446 np->speed = 100;
1447 printk ("100Mbps, half duplex\n");
1448 } else if (mii_advertise & ADVERTISE_10FULL) {
1449 np->speed = 10;
1450 printk ("10Mbps, full duplex\n");
1451 } else if (mii_advertise & ADVERTISE_10HALF) {
1452 np->speed = 10;
1453 printk ("10Mbps, half duplex\n");
1454 } else
1455 printk ("\n");
1457 } else {
1458 mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
1459 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1460 np->speed = speed;
1461 printk (KERN_INFO "%s: Link changed: %dMbps ,",
1462 dev->name, speed);
1463 printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
1464 "full" : "half");
1465 }
1466 check_duplex (dev);
1467 if (np->flowctrl && np->mii_if.full_duplex) {
1468 writew(readw(ioaddr + MulticastFilter1+2) | 0x0200,
1469 ioaddr + MulticastFilter1+2);
1470 writew(readw(ioaddr + MACCtrl0) | EnbFlowCtrl,
1471 ioaddr + MACCtrl0);
1472 }
1473 }
1474 if (intr_status & StatsMax) {
1475 get_stats(dev);
1476 }
1477 if (intr_status & IntrPCIErr) {
1478 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1479 dev->name, intr_status);
1480 /* We must do a global reset of DMA to continue. */
1481 }
1482 }
1484 static struct net_device_stats *get_stats(struct net_device *dev)
1485 {
1486 struct netdev_private *np = dev->priv;
1487 long ioaddr = dev->base_addr;
1488 int i;
1490 /* We should lock this segment of code for SMP eventually, although
1491 the vulnerability window is very small and statistics are
1492 non-critical. */
1493 /* The chip only needs to report frames it silently dropped. */
1494 np->stats.rx_missed_errors += readb(ioaddr + RxMissed);
1495 np->stats.tx_packets += readw(ioaddr + TxFramesOK);
1496 np->stats.rx_packets += readw(ioaddr + RxFramesOK);
1497 np->stats.collisions += readb(ioaddr + StatsLateColl);
1498 np->stats.collisions += readb(ioaddr + StatsMultiColl);
1499 np->stats.collisions += readb(ioaddr + StatsOneColl);
1500 np->stats.tx_carrier_errors += readb(ioaddr + StatsCarrierError);
1501 readb(ioaddr + StatsTxDefer);
1502 for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1503 readb(ioaddr + i);
1504 np->stats.tx_bytes += readw(ioaddr + TxOctetsLow);
1505 np->stats.tx_bytes += readw(ioaddr + TxOctetsHigh) << 16;
1506 np->stats.rx_bytes += readw(ioaddr + RxOctetsLow);
1507 np->stats.rx_bytes += readw(ioaddr + RxOctetsHigh) << 16;
1509 return &np->stats;
1510 }
1512 static void set_rx_mode(struct net_device *dev)
1513 {
1514 long ioaddr = dev->base_addr;
1515 struct netdev_private *np = dev->priv;
1516 u16 mc_filter[4]; /* Multicast hash filter */
1517 u32 rx_mode;
1518 int i;
1520 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1521 /* Unconditionally log net taps. */
1522 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1523 memset(mc_filter, 0xff, sizeof(mc_filter));
1524 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1525 } else if ((dev->mc_count > multicast_filter_limit)
1526 || (dev->flags & IFF_ALLMULTI)) {
1527 /* Too many to match, or accept all multicasts. */
1528 memset(mc_filter, 0xff, sizeof(mc_filter));
1529 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1530 } else if (dev->mc_count) {
1531 struct dev_mc_list *mclist;
1532 int bit;
1533 int index;
1534 int crc;
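/* Hash on the six most-significant bits of the little-endian CRC of the
   address, bit-reversed into an index: each of the 64 hash bits selects
   one bit of the four 16-bit MulticastFilter registers. */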
1535 memset (mc_filter, 0, sizeof (mc_filter));
1536 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1537 i++, mclist = mclist->next) {
1538 crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
1539 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1540 if (crc & 0x80000000) index |= 1 << bit;
1541 mc_filter[index/16] |= (1 << (index % 16));
1542 }
1543 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1544 } else {
1545 writeb(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1546 return;
1547 }
1548 if (np->mii_if.full_duplex && np->flowctrl)
1549 mc_filter[3] |= 0x0200;
1551 for (i = 0; i < 4; i++)
1552 writew(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1553 writeb(rx_mode, ioaddr + RxMode);
1554 }
1556 static int __set_mac_addr(struct net_device *dev)
1557 {
1558 u16 addr16;
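/* The StationAddr register only accepts word writes (see the LK1.08 note
   above), so the 6-byte address is written as three 16-bit words. */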
1560 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1561 writew(addr16, dev->base_addr + StationAddr);
1562 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1563 writew(addr16, dev->base_addr + StationAddr+2);
1564 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1565 writew(addr16, dev->base_addr + StationAddr+4);
1566 return 0;
1567 }
1570 static int netdev_ethtool_ioctl(struct net_device *dev, void __user *useraddr)
1571 {
1572 struct netdev_private *np = dev->priv;
1573 u32 ethcmd;
1575 if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
1576 return -EFAULT;
1578 switch (ethcmd) {
1579 /* get constant driver settings/info */
1580 case ETHTOOL_GDRVINFO: {
1581 struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
1582 strcpy(info.driver, DRV_NAME);
1583 strcpy(info.version, DRV_VERSION);
1584 strcpy(info.bus_info, pci_name(np->pci_dev));
1585 memset(&info.fw_version, 0, sizeof(info.fw_version));
1586 if (copy_to_user(useraddr, &info, sizeof(info)))
1587 return -EFAULT;
1588 return 0;
1589 }
1591 /* get media settings */
1592 case ETHTOOL_GSET: {
1593 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1594 spin_lock_irq(&np->lock);
1595 mii_ethtool_gset(&np->mii_if, &ecmd);
1596 spin_unlock_irq(&np->lock);
1597 if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
1598 return -EFAULT;
1599 return 0;
1600 }
1601 /* set media settings */
1602 case ETHTOOL_SSET: {
1603 int r;
1604 struct ethtool_cmd ecmd;
1605 if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
1606 return -EFAULT;
1607 spin_lock_irq(&np->lock);
1608 r = mii_ethtool_sset(&np->mii_if, &ecmd);
1609 spin_unlock_irq(&np->lock);
1610 return r;
1611 }
1613 /* restart autonegotiation */
1614 case ETHTOOL_NWAY_RST: {
1615 return mii_nway_restart(&np->mii_if);
1616 }
1618 /* get link status */
1619 case ETHTOOL_GLINK: {
1620 struct ethtool_value edata = {ETHTOOL_GLINK};
1621 edata.data = mii_link_ok(&np->mii_if);
1622 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1623 return -EFAULT;
1624 return 0;
1625 }
1627 /* get message-level */
1628 case ETHTOOL_GMSGLVL: {
1629 struct ethtool_value edata = {ETHTOOL_GMSGLVL};
1630 edata.data = np->msg_enable;
1631 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1632 return -EFAULT;
1633 return 0;
1634 }
1635 /* set message-level */
1636 case ETHTOOL_SMSGLVL: {
1637 struct ethtool_value edata;
1638 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1639 return -EFAULT;
1640 np->msg_enable = edata.data;
1641 return 0;
1642 }
1644 default:
1645 return -EOPNOTSUPP;
1646 }
1647 }
1650 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1651 {
1652 struct netdev_private *np = dev->priv;
1653 int rc;
1654 int i;
1655 long ioaddr = dev->base_addr;
1657 if (!netif_running(dev))
1658 return -EINVAL;
1660 if (cmd == SIOCETHTOOL)
1661 rc = netdev_ethtool_ioctl(dev, rq->ifr_data);
1663 else {
1664 spin_lock_irq(&np->lock);
1665 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1666 spin_unlock_irq(&np->lock);
1668 switch (cmd) {
1669 case SIOCDEVPRIVATE:
1670 for (i=0; i<TX_RING_SIZE; i++) {
1671 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
1672 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
1673 le32_to_cpu(np->tx_ring[i].next_desc),
1674 le32_to_cpu(np->tx_ring[i].status),
1675 (le32_to_cpu(np->tx_ring[i].status) >> 2)
1676 & 0xff,
1677 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1678 le32_to_cpu(np->tx_ring[i].frag[0].length));
1679 }
1680 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
1681 readl(dev->base_addr + TxListPtr),
1682 netif_queue_stopped(dev));
1683 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
1684 np->cur_tx, np->cur_tx % TX_RING_SIZE,
1685 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1686 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
1687 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
1688 printk(KERN_DEBUG "TxStatus=%04x\n", readw(ioaddr + TxStatus));
1689 return 0;
1690 }
1691 }
1693 return rc;
1694 }
1696 static int netdev_close(struct net_device *dev)
1697 {
1698 long ioaddr = dev->base_addr;
1699 struct netdev_private *np = dev->priv;
1700 struct sk_buff *skb;
1701 int i;
1703 netif_stop_queue(dev);
1705 if (netif_msg_ifdown(np)) {
1706 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1707 "Rx %4.4x Int %2.2x.\n",
1708 dev->name, readb(ioaddr + TxStatus),
1709 readl(ioaddr + RxStatus), readw(ioaddr + IntrStatus));
1710 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1711 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1712 }
1714 /* Disable interrupts by clearing the interrupt mask. */
1715 writew(0x0000, ioaddr + IntrEnable);
1717 /* Stop the chip's Tx and Rx processes. */
1718 writew(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1720 /* Wait and kill tasklet */
1721 tasklet_kill(&np->rx_tasklet);
1722 tasklet_kill(&np->tx_tasklet);
1724 #ifdef __i386__
1725 if (netif_msg_hw(np)) {
1726 printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
1727 (int)(np->tx_ring_dma));
1728 for (i = 0; i < TX_RING_SIZE; i++)
1729 printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
1730 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1731 np->tx_ring[i].frag[0].length);
1732 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
1733 (int)(np->rx_ring_dma));
1734 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1735 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1736 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1737 np->rx_ring[i].frag[0].length);
1738 }
1739 }
1740 #endif /* __i386__ debugging only */
1742 free_irq(dev->irq, dev);
1744 del_timer_sync(&np->timer);
1746 /* Free all the skbuffs in the Rx queue. */
1747 for (i = 0; i < RX_RING_SIZE; i++) {
1748 np->rx_ring[i].status = 0;
1749 np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
1750 skb = np->rx_skbuff[i];
1751 if (skb) {
1752 pci_unmap_single(np->pci_dev,
1753 np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
1754 PCI_DMA_FROMDEVICE);
1755 dev_kfree_skb(skb);
1756 np->rx_skbuff[i] = NULL;
1757 }
1758 }
1759 for (i = 0; i < TX_RING_SIZE; i++) {
1760 skb = np->tx_skbuff[i];
1761 if (skb) {
1762 pci_unmap_single(np->pci_dev,
1763 np->tx_ring[i].frag[0].addr, skb->len,
1764 PCI_DMA_TODEVICE);
1765 dev_kfree_skb(skb);
1766 np->tx_skbuff[i] = NULL;
1767 }
1768 }
1770 return 0;
1771 }
1773 static void __devexit sundance_remove1 (struct pci_dev *pdev)
1774 {
1775 struct net_device *dev = pci_get_drvdata(pdev);
1777 if (dev) {
1778 struct netdev_private *np = dev->priv;
1780 unregister_netdev(dev);
1781 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
1782 np->rx_ring_dma);
1783 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
1784 np->tx_ring_dma);
1785 pci_release_regions(pdev);
1786 #ifndef USE_IO_OPS
1787 iounmap((char *)(dev->base_addr));
1788 #endif
1789 free_netdev(dev);
1790 pci_set_drvdata(pdev, NULL);
1791 }
1792 }
1794 static struct pci_driver sundance_driver = {
1795 .name = DRV_NAME,
1796 .id_table = sundance_pci_tbl,
1797 .probe = sundance_probe1,
1798 .remove = __devexit_p(sundance_remove1),
1799 };
1801 static int __init sundance_init(void)
1802 {
1803 /* when a module, this is printed whether or not devices are found in probe */
1804 #ifdef MODULE
1805 printk(version);
1806 #endif
1807 return pci_module_init(&sundance_driver);
1808 }
1810 static void __exit sundance_exit(void)
1811 {
1812 pci_unregister_driver(&sundance_driver);
1813 }
1815 module_init(sundance_init);
1816 module_exit(sundance_exit);