/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */

	Written 1999-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210

	Support and updates available at
	http://www.scyld.com/network/sundance.html
	Version LK1.01a (jgarzik):
	- Replace some MII-related magic numbers with constants

	Version LK1.02 (D-Link):
	- Add new board to PCI ID list

	Version LK1.03 (D-Link):
	- New Rx scheme, reduce Rx congestion
	- Option to disable flow control

	Version LK1.04 (D-Link):
	- More support for ethtool.

	- Remove unused/constant members from struct pci_id_info
	  (which then allows removal of 'drv_flags' from private struct)
	- If no phy is found, fail to load that board (jgarzik)
	- Always start phy id scan at id 1 to avoid problems (Donald Becker)
	- Autodetect where mii_preamble_required is needed,
	  default to not needed. (Donald Becker)

	- Remove mii_preamble_required module parameter (Donald Becker)
	- Add per-interface mii_preamble_required (setting is autodetected)
	- Remove unnecessary cast from void pointer (jgarzik)
	- Re-align comments in private struct (jgarzik)

	Version LK1.04c (jgarzik):
	- Support bitmapped message levels (NETIF_MSG_xxx), and the
	  two ethtool ioctls that get/set them
	- Don't hand-code MII ethtool support, use standard API/lib

	- Merge from Donald Becker's sundance.c: (Jason Lunz)
	  * proper support for variably-sized MTUs
	  * default to PIO, to fix chip bugs
	- Add missing unregister_netdev (Jason Lunz)
	- Add CONFIG_SUNDANCE_MMIO config option (jgarzik)
	- Better rx buf size calculation (Donald Becker)

	Version LK1.05 (D-Link):
	- Fix DFE-580TX packet drop issue (for DL10050C)

	Version LK1.06 (D-Link):
	- Fix crash while unloading driver

	Version LK1.06b (D-Link):
	- New tx scheme, adaptive tx_coalesce

	Version LK1.07 (D-Link):
	- Fix tx bugs on big-endian machines
	- Remove unused max_interrupt_work module parameter; the new
	  NAPI-like rx scheme doesn't need it.
	- Remove redundant get_stats() in intr_handler(); those
	  I/O accesses could affect performance on ARM-based systems
	- Add Linux software VLAN support

	Version LK1.08 (D-Link):
	- Fix custom MAC address bug
	  (the StationAddr register only accepts word writes)

	Version LK1.09 (D-Link):
	- Fix the flow control bug.
	- Set the Pause bit in the MII ANAR if flow control is enabled.
#define DRV_NAME	"sundance"
#define DRV_VERSION	"1.01+LK1.09a"
#define DRV_RELDATE	"16-May-2003"
/* The user-configurable values.
   These may be modified when a driver module is loaded.*/
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC. */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl = 1;
/* media[] specifies the media type the NIC operates at.
		 autosense	Autosensing active media.
		 10mbps_hd	10Mbps half duplex.
		 10mbps_fd	10Mbps full duplex.
		 100mbps_hd	100Mbps half duplex.
		 100mbps_fd	100Mbps full duplex.
		 0		Autosensing active media.
		 1		10Mbps half duplex.
		 2		10Mbps full duplex.
		 3		100Mbps half duplex.
		 4		100Mbps full duplex.
*/
static char *media[MAX_UNITS];
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used. */
#define RX_RING_SIZE	64

#define TX_TOTAL_SIZE	(TX_RING_SIZE*sizeof(struct netdev_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE*sizeof(struct netdev_desc))
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */
#if !defined(__OPTIMIZE__)
#warning  You must compile this file with the correct options!
#warning  See the last lines of the source file.
#error You must compile this driver with "-O".
#endif
/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/bitops.h>

#include <linux/delay.h>
#include <linux/spinlock.h>
#ifndef _COMPAT_WITH_OLD_KERNEL
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#endif
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "  Written by Donald Becker\n"
	KERN_INFO "  http://www.scyld.com/network/sundance.html\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(media, "1-" __MODULE_STRING(MAX_UNITS) "s");
MODULE_PARM(flowctrl, "i");
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
				I. Board Compatibility

	This driver is designed for the Sundance Technologies "Alta" ST201 chip.

				II. Board-specific settings

				III. Driver operation

	This driver uses two statically allocated fixed-size descriptor lists
	formed into rings by a branch from the final descriptor to the beginning of
	the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
	Some chips explicitly use only 2^N sized rings, while others use a
	'next descriptor' pointer that the driver forms into rings.
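	As a minimal sketch of that chaining (mirroring init_ring() further
	below; 'ring' and 'ring_dma' stand for either ring and its DMA base):

		for (i = 0; i < RX_RING_SIZE; i++)
			ring[i].next_desc = cpu_to_le32(ring_dma +
				((i + 1) % RX_RING_SIZE) * sizeof(struct netdev_desc));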
				IIIb/c. Transmit/Receive Structure

	This driver uses a zero-copy receive and transmit scheme.
	The driver allocates full frame size skbuffs for the Rx ring buffers at
	open() time and passes the skb->data field to the chip as receive data
	buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
	a fresh skbuff is allocated and the frame is copied to the new skbuff.
	When the incoming frame is larger, the skbuff is passed directly up the
	protocol stack.  Buffers consumed this way are replaced by newly allocated
	skbuffs in a later phase of receives.

	The RX_COPYBREAK value is chosen to trade off the memory wasted by
	using a full-sized skbuff for small frames vs. the copying costs of larger
	frames.  New boards are typically used in generously configured machines
	and the underfilled buffers have negligible impact compared to the benefit of
	a single allocation size, so the default value of zero results in never
	copying packets.  When copying is done, the cost is usually mitigated by using
	a combined copy/checksum routine.  Copying also preloads the cache, which is
	most useful with small frames.
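	A minimal sketch of the copybreak decision (this mirrors the receive
	path in rx_poll() below; 'entry' is the current Rx ring index):

		if (pkt_len < rx_copybreak &&
		    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			skb_reserve(skb, 2);		/* align the IP header */
			eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
			skb_put(skb, pkt_len);		/* copy into the small skb */
		} else {
			skb_put(skb = np->rx_skbuff[entry], pkt_len);
			np->rx_skbuff[entry] = NULL;	/* slot is refilled later */
		}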
	A subtle aspect of the operation is that the IP header at offset 14 in an
	ethernet frame isn't longword aligned for further processing.
	Unaligned buffers are permitted by the Sundance hardware, so
	frames are received into the skbuff at an offset of "+2", 16-byte aligning
	the IP header.
				IIId. Synchronization

	The driver runs as two independent, single-threaded flows of control.  One
	is the send-packet routine, which enforces single-threaded use by the
	dev->tbusy flag.  The other thread is the interrupt handler, which is single
	threaded by the hardware and interrupt handling software.

	The send packet thread has partial control over the Tx ring and the
	'dev->tbusy' flag.  It sets the tbusy flag whenever it's queuing a Tx packet.
	If the next queue slot is empty, it clears the tbusy flag when finished;
	otherwise it sets the 'lp->tx_full' flag.

	The interrupt handler has exclusive control over the Rx ring and records stats
	from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
	empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
	clears both the tx_full and tbusy flags.
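
	In the current code the legacy tbusy/tx_full flags are expressed with
	the netif_* queue API; a minimal sketch of the reaping rule above
	(mirroring intr_handler() below):

		for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
			int entry = np->dirty_tx % TX_RING_SIZE;
			/* ... unmap and free np->tx_skbuff[entry] ... */
		}
		if (netif_queue_stopped(dev) &&
		    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4)
			netif_wake_queue(dev);	/* ring no longer full */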
	The Sundance ST201 datasheet, preliminary version.
	http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
	http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
*/

/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif
static struct pci_device_id sundance_pci_tbl[] __devinitdata = {
	{0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0},
	{0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1},
	{0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2},
	{0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3},
	{0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
	{0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
	{0,}
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
static struct pci_id_info pci_id_tbl[] = {
	{"D-Link DFE-550TX FAST Ethernet Adapter"},
	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
	{"D-Link DFE-580TX 4 port Server Adapter"},
	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
	{"D-Link DL10050-based FAST Ethernet Adapter"},
	{"Sundance Technology Alta"},
	{0,},	/* 0 terminated list. */
};
/* This driver was written to use PCI memory space, however x86-oriented
   hardware often uses I/O space accesses. */

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
	TxDMABurstThresh = 0x08,
	TxDMAUrgentThresh = 0x09,
	TxDMAPollPeriod = 0x0a,
	RxDMABurstThresh = 0x14,
	RxDMAUrgentThresh = 0x15,
	RxDMAPollPeriod = 0x16,
	TxStartThresh = 0x3c,
	RxEarlyThresh = 0x3e,
	MulticastFilter0 = 0x60,
	MulticastFilter1 = 0x64,
	StatsCarrierError = 0x74,
	StatsLateColl = 0x75,
	StatsMultiColl = 0x76,
	StatsTxXSDefer = 0x7a,
	/* Aliased and bogus values! */
};

enum ASICCtrl_HiWord_bit {
	GlobalReset = 0x0001,
	RxReset = 0x0002,
	TxReset = 0x0004,
	DMAReset = 0x0008,
	FIFOReset = 0x0010,
	NetworkReset = 0x0020,
	HostReset = 0x0040,
	ResetBusy = 0x0400,
};
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
	IntrDrvRqst=0x0040,
	StatsMax=0x0080, LinkChange=0x0100,
	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};
/* Bits in the RxMode register. */
enum rx_mode_bits {
	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
	StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};
/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	u32 next_desc;
	u32 status;
	struct desc_frag { u32 addr, length; } frag[1];
};

/* Bits in netdev_desc.status */
enum desc_status_bits {
	DescOwn=0x8000,
	DescEndPacket=0x4000,
	LastFrag=0x80000000,
	DescIntrOnTx=0x8000,
	DescIntrOnDMADone=0x80000000,
	DisableAlign = 0x00000001,
};
#define PRIV_ALIGN	15	/* Required alignment mask */
/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
   within the structure. */
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
	struct net_device_stats stats;
	struct timer_list timer;		/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	spinlock_t rx_lock;			/* Group with Tx control cache line. */
	int msg_enable;
	int chip_id;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
	unsigned int flowctrl:1;
	unsigned int default_port:4;		/* Last dev->if_port value. */
	unsigned int an_enable:1;
	unsigned int speed;
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int budget;
	int cur_task;
	/* Multicast and receive mode. */
	spinlock_t mcastlock;			/* SMP lock multicast updates. */
	/* MII transceiver section. */
	struct mii_if_info mii_if;
	int mii_preamble_required;
	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
	struct pci_dev *pci_dev;
	unsigned char pci_rev_id;
};
/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET	0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
			IntrDrvRqst | IntrTxDone | StatsMax | \
			LinkChange)
static int  change_mtu(struct net_device *dev, int new_mtu);
static int  eeprom_read(long ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int  start_tx(struct sk_buff *skb, struct net_device *dev);
static int  reset_tx (struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void rx_poll(unsigned long data);
static void tx_poll(unsigned long data);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int __set_mac_addr(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
static int __devinit sundance_probe1 (struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int irq;
	int i;
	long ioaddr;
	u16 mii_ctl;
	void *ring_space;
	dma_addr_t ring_dma;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);

	irq = pdev->irq;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

#ifdef USE_IO_OPS
	ioaddr = pci_resource_start(pdev, 0);
#else
	ioaddr = pci_resource_start(pdev, 1);
	ioaddr = (long) ioremap (ioaddr, netdev_io_size);
	if (!ioaddr)
		goto err_out_res;
#endif
	for (i = 0; i < 3; i++)
		((u16 *)dev->dev_addr)[i] =
			le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));

	dev->base_addr = ioaddr;
	dev->irq = irq;

	np = dev->priv;
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->msg_enable = (1 << debug) - 1;
	spin_lock_init(&np->lock);
	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = (struct netdev_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;
	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	dev->tx_timeout = &tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->change_mtu = &change_mtu;
	pci_set_drvdata(pdev, dev);

	pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_rx;
	printk(KERN_INFO "%s: %s at 0x%lx, ",
		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
	{
		int phy, phy_idx = 0;
		np->phys[0] = 1;		/* Default setting */
		np->mii_preamble_required++;
		for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				if ((mii_status & 0x0040) == 0)
					np->mii_preamble_required++;
				printk(KERN_INFO "%s: MII PHY found at address %d, status "
					   "0x%4.4x advertising %4.4x.\n",
					   dev->name, phy, mii_status, np->mii_if.advertising);
			}
		}
		np->mii_preamble_required--;

		if (phy_idx == 0) {
			printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
				   dev->name, readl(ioaddr + ASICCtrl));
			goto err_out_unregister;
		}

		np->mii_if.phy_id = np->phys[0];
	}
	/* Parse override configuration */
	np->an_enable = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0
				   || strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (flowctrl == 1)
			np->flowctrl = 1;
	}
	/* Fibre PHY? */
	if (readl (ioaddr + ASICCtrl) & 0x80) {
		/* Default 100Mbps Full */
		if (np->an_enable) {
			np->speed = 100;
			np->mii_if.full_duplex = 1;
			np->an_enable = 0;
		}
	}
	/* Reset PHY */
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
	mdelay (300);
	/* If flow control enabled, we need to advertise it.*/
	if (np->flowctrl)
		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
	/* Force media type */
	if (!np->an_enable) {
		mii_ctl = 0;
		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
		printk (KERN_INFO "Override speed=%d, %s duplex\n",
			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
	}
	/* Perhaps move the reset here? */
	/* Reset the chip to erase previous misconfiguration. */
	if (netif_msg_hw(np))
		printk("ASIC Control is %x.\n", readl(ioaddr + ASICCtrl));
	writew(0x007f, ioaddr + ASICCtrl + 2);
	if (netif_msg_hw(np))
		printk("ASIC Control is now %x.\n", readl(ioaddr + ASICCtrl));

	card_idx++;
	return 0;

err_out_unregister:
	unregister_netdev(dev);
err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
#ifndef USE_IO_OPS
	iounmap((void *)ioaddr);
err_out_res:
#endif
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}
static int change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
		return -EINVAL;
	if (netif_running(dev))
		return -EBUSY;
	dev->mtu = new_mtu;
	return 0;
}
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int __devinit eeprom_read(long ioaddr, int location)
{
	int boguscnt = 1000;		/* Typical 190 ticks. */
	writew(0x0200 | (location & 0xff), ioaddr + EECtrl);
	do {
		if (! (readw(ioaddr + EECtrl) & 0x8000)) {
			return readw(ioaddr + EEData);
		}
	} while (--boguscnt > 0);
	return 0;
}
/* MII transceiver control section.
   Read and write the MII registers using software-generated serial
   MDIO protocol.  See the MII specifications or DP83840A data sheet
   for details.

   The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
   met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay() readb(mdio_addr)

enum mii_reg_bits {
	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(long mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		writeb(MDIO_WRITE1, mdio_addr);
		mdio_delay();
		writeb(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}
*dev
, int phy_id
, int location
)
793 struct netdev_private
*np
= dev
->priv
;
794 long mdio_addr
= dev
->base_addr
+ MIICtrl
;
795 int mii_cmd
= (0xf6 << 10) | (phy_id
<< 5) | location
;
798 if (np
->mii_preamble_required
)
799 mdio_sync(mdio_addr
);
801 /* Shift the read command bits out. */
802 for (i
= 15; i
>= 0; i
--) {
803 int dataval
= (mii_cmd
& (1 << i
)) ? MDIO_WRITE1
: MDIO_WRITE0
;
805 writeb(dataval
, mdio_addr
);
807 writeb(dataval
| MDIO_ShiftClk
, mdio_addr
);
810 /* Read the two transition, 16 data, and wire-idle bits. */
811 for (i
= 19; i
> 0; i
--) {
812 writeb(MDIO_EnbIn
, mdio_addr
);
814 retval
= (retval
<< 1) | ((readb(mdio_addr
) & MDIO_Data
) ? 1 : 0);
815 writeb(MDIO_EnbIn
| MDIO_ShiftClk
, mdio_addr
);
818 return (retval
>>1) & 0xffff;
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = dev->priv;
	long mdio_addr = dev->base_addr + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
	int i;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		writeb(dataval, mdio_addr);
		mdio_delay();
		writeb(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		writeb(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		writeb(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return;
}
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	/* Do we need to reset the chip??? */

	i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
	if (i)
		return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			   dev->name, dev->irq);

	init_ring(dev);

	writel(np->rx_ring_dma, ioaddr + RxListPtr);
	/* The Tx list pointer is written as packets are queued. */

	/* Initialize other registers. */
	__set_mac_addr(dev);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	writew(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
	writew(dev->mtu + 14, ioaddr + MaxFrameSize);
#endif
	if (dev->mtu > 2047)
		writel(readl(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);

	/* Configure the PCI bus bursts and FIFO thresholds. */

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	np->mcastlock = (spinlock_t) SPIN_LOCK_UNLOCKED;

	set_rx_mode(dev);
	writew(0, ioaddr + IntrEnable);
	writew(0, ioaddr + DownCounter);
	/* Set the chip to poll every N*320nsec. */
	writeb(100, ioaddr + RxDMAPollPeriod);
	writeb(127, ioaddr + TxDMAPollPeriod);
	/* Fix DFE-580TX packet drop issue */
	if (np->pci_rev_id >= 0x14)
		writeb(0x01, ioaddr + DebugCtrl1);
	netif_start_queue(dev);

	writew (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
			   "MAC Control %x, %4.4x %4.4x.\n",
			   dev->name, readl(ioaddr + RxStatus), readb(ioaddr + TxStatus),
			   readl(ioaddr + MACCtrl0),
			   readw(ioaddr + MACCtrl1), readw(ioaddr + MACCtrl0));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 3*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;	/* timer handler */
	add_timer(&np->timer);

	/* Enable interrupts by setting the interrupt mask. */
	writew(DEFAULT_INTR, ioaddr + IntrEnable);

	return 0;
}
static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	int negotiated = mii_lpa & np->mii_if.advertising;
	int duplex;

	/* Force media */
	if (!np->an_enable || mii_lpa == 0xffff) {
		if (np->mii_if.full_duplex)
			writew (readw (ioaddr + MACCtrl0) | EnbFullDuplex,
				ioaddr + MACCtrl0);
		return;
	}

	/* Autonegotiation */
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		if (netif_msg_link(np))
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
				   "negotiated capability %4.4x.\n", dev->name,
				   duplex ? "full" : "half", np->phys[0], negotiated);
		writew(readw(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0),
			ioaddr + MACCtrl0);
	}
}
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 10*HZ;

	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
			   "Tx %x Rx %x.\n",
			   dev->name, readw(ioaddr + IntrEnable),
			   readb(ioaddr + TxStatus), readl(ioaddr + RxStatus));
	}
	check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	unsigned long flag;

	netif_stop_queue(dev);
	tasklet_disable(&np->tx_tasklet);
	writew(0, ioaddr + IntrEnable);
	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
		   "TxFrameId %2.2x,"
		   " resetting...\n", dev->name, readb(ioaddr + TxStatus),
		   readb(ioaddr + TxFrameId));

	{
		int i;
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08Zx %08x %08x(%02x) %08x %08x\n", i,
				np->tx_ring_dma + i*sizeof(*np->tx_ring),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			readl(dev->base_addr + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
	}
	spin_lock_irqsave(&np->lock, flag);

	/* Stop and restart the chip's Tx processes. */
	reset_tx(dev);

	spin_unlock_irqrestore(&np->lock, flag);

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
	writew(DEFAULT_INTR, ioaddr + IntrEnable);
	tasklet_enable(&np->tx_tasklet);
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].length = 0;
		np->rx_skbuff[i] = 0;
	}

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;			/* Mark as being used by this device. */
		skb_reserve(skb, 2);		/* 16 byte align the IP header. */
		np->rx_ring[i].frag[0].addr = cpu_to_le32(
			pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE));
		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	}
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = 0;
		np->tx_ring[i].status = 0;
	}
}
static void tx_poll (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = dev->priv;
	unsigned head = np->cur_task % TX_RING_SIZE;
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;
		txdesc = &np->tx_ring[entry];
		if (np->last_tx) {
			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
				entry*sizeof(struct netdev_desc));
		}
		np->last_tx = txdesc;
	}
	/* Indicate the latest descriptor of tx ring */
	txdesc->status |= cpu_to_le32(DescIntrOnTx);

	if (readl (dev->base_addr + TxListPtr) == 0)
		writel (np->tx_ring_dma + head * sizeof(struct netdev_desc),
			dev->base_addr + TxListPtr);

	return;
}
static int
start_tx (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	struct netdev_desc *txdesc;
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
							skb->len, PCI_DMA_TODEVICE));
	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);

	/* Increment cur_tx before tasklet_schedule() */
	np->cur_tx++;
	mb();

	/* Schedule a tx_poll() task */
	tasklet_schedule(&np->tx_tasklet);

	/* On some architectures: explicitly flush cache lines here. */
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
		&& !netif_queue_stopped(dev)) {
		/* do nothing */
	} else {
		netif_stop_queue (dev);
	}

	dev->trans_start = jiffies;
	if (netif_msg_tx_queued(np)) {
		printk (KERN_DEBUG
			"%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return 0;
}
/* Reset hardware Tx and free all Tx buffers */
static int
reset_tx (struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *) dev->priv;
	long ioaddr = dev->base_addr;
	struct sk_buff *skb;
	int i;
	int irq = in_interrupt();

	/* Reset tx logic, TxListPtr will be cleaned */
	writew (TxDisable, ioaddr + MACCtrl1);
	writew (TxReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASICCtrl + 2);
	for (i=50; i > 0; i--) {
		if ((readw(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
			break;
		mdelay(1);
	}
	/* free all tx skbuff */
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->tx_ring[i].frag[0].addr, skb->len,
				PCI_DMA_TODEVICE);
			if (irq)
				dev_kfree_skb_irq (skb);
			else
				dev_kfree_skb (skb);
			np->tx_skbuff[i] = 0;
			np->stats.tx_dropped++;
		}
	}
	np->cur_tx = np->dirty_tx = 0;

	writew (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
	return 0;
}
/* The interrupt handler cleans up after the Tx thread,
   and schedules an Rx tasklet to run. */
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np;
	long ioaddr;
	int hw_frame_id;
	int tx_cnt;
	int tx_status;
	int handled = 0;

	ioaddr = dev->base_addr;
	np = dev->priv;

	do {
		int intr_status = readw(ioaddr + IntrStatus);
		writew(intr_status, ioaddr + IntrStatus);

		if (netif_msg_intr(np))
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (!(intr_status & DEFAULT_INTR))
			break;

		handled = 1;

		if (intr_status & (IntrRxDMADone)) {
			writew(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
				   ioaddr + IntrEnable);
			np->budget = RX_BUDGET;
			tasklet_schedule(&np->rx_tasklet);
		}
		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
			tx_status = readw (ioaddr + TxStatus);
			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
				if (netif_msg_tx_done(np))
					printk
					    ("%s: Transmit status is %2.2x.\n",
					     dev->name, tx_status);
				if (tx_status & 0x1e) {
					np->stats.tx_errors++;
					if (tx_status & 0x10)
						np->stats.tx_fifo_errors++;
					if (tx_status & 0x08)
						np->stats.collisions++;
					if (tx_status & 0x02)
						np->stats.tx_window_errors++;
					/* This reset has not been verified! */
					if (tx_status & 0x10) {	/* Reset the Tx. */
						np->stats.tx_fifo_errors++;
						spin_lock(&np->lock);
						reset_tx(dev);
						spin_unlock(&np->lock);
					}
					if (tx_status & 0x1e)	/* Restart the Tx. */
						writew (TxEnable, ioaddr + MACCtrl1);
				}
				/* Yup, this is a documentation bug.  It cost me *hours*. */
				writew (0, ioaddr + TxStatus);
				tx_status = readw (ioaddr + TxStatus);
			}
			hw_frame_id = (tx_status >> 8) & 0xff;
		} else {
			hw_frame_id = readb(ioaddr + TxFrameId);
		}

		if (np->pci_rev_id >= 0x14) {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				int sw_frame_id;
				sw_frame_id = (le32_to_cpu(
					np->tx_ring[entry].status) >> 2) & 0xff;
				if (sw_frame_id == hw_frame_id &&
					!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				if (sw_frame_id == (hw_frame_id + 1) %
					TX_RING_SIZE)
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					np->tx_ring[entry].frag[0].addr,
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = 0;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		} else {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				if (!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					np->tx_ring[entry].frag[0].addr,
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = 0;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		}

		if (netif_queue_stopped(dev) &&
			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear busy flag. */
			netif_wake_queue (dev);
		}
		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
			netdev_error(dev, intr_status);
	} while (0);
	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, readw(ioaddr + IntrStatus));
	writel(5000, ioaddr + DownCounter);
	return IRQ_RETVAL(handled);
}
static void rx_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = dev->priv;
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->budget;
	long ioaddr = dev->base_addr;
	int received = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status = le32_to_cpu(desc->status);
		int pkt_len;

		if (--boguscnt < 0) {
			goto not_done;
		}
		if (!(frame_status & DescOwn))
			break;
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   frame_status);
		pci_dma_sync_single(np->pci_dev, desc->frag[0].addr,
			np->rx_buf_sz, PCI_DMA_FROMDEVICE);

		if (frame_status & 0x001f4000) {
			/* There was an error. */
			if (netif_msg_rx_err(np))
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
					   frame_status);
			np->stats.rx_errors++;
			if (frame_status & 0x00100000) np->stats.rx_length_errors++;
			if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
			if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
					   " status %8.8x.\n",
					   dev->name, frame_status);
			}
		} else {
			struct sk_buff *skb;
#ifndef final_version
			if (netif_msg_rx_status(np))
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   ", bogus_cnt %d.\n",
					   pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
			} else {
				pci_unmap_single(np->pci_dev,
					desc->frag[0].addr,
					np->rx_buf_sz,
					PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
			dev->last_rx = jiffies;
		}
		entry = (entry + 1) % RX_RING_SIZE;
		received++;
	}
	np->cur_rx = entry;
	refill_rx (dev);
	np->budget -= received;
	writew(DEFAULT_INTR, ioaddr + IntrEnable);
	return;

not_done:
	np->cur_rx = entry;
	refill_rx (dev);
	np->budget -= received;
	if (np->budget <= 0)
		np->budget = RX_BUDGET;
	tasklet_schedule(&np->rx_tasklet);
	return;
}
static void refill_rx (struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	int entry;

	/* Refill the Rx ring buffers. */
	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb->dev = dev;		/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				pci_map_single(np->pci_dev, skb->tail,
					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
	}
	return;
}
static void netdev_error(struct net_device *dev, int intr_status)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;
	u16 mii_ctl, mii_advertise, mii_lpa;
	int speed;

	if (intr_status & LinkChange) {
		if (np->an_enable) {
			mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
			mii_lpa = mdio_read (dev, np->phys[0], MII_LPA);
			mii_advertise &= mii_lpa;
			printk (KERN_INFO "%s: Link changed: ", dev->name);
			if (mii_advertise & ADVERTISE_100FULL) {
				np->speed = 100;
				printk ("100Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_100HALF) {
				np->speed = 100;
				printk ("100Mbps, half duplex\n");
			} else if (mii_advertise & ADVERTISE_10FULL) {
				np->speed = 10;
				printk ("10Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_10HALF) {
				np->speed = 10;
				printk ("10Mbps, half duplex\n");
			} else
				printk ("\n");
		} else {
			mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
			speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
			np->speed = speed;
			printk (KERN_INFO "%s: Link changed: %dMbps, ",
				dev->name, speed);
			printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
				"full" : "half");
		}
		check_duplex (dev);
		if (np->flowctrl && np->mii_if.full_duplex) {
			writew(readw(ioaddr + MulticastFilter1+2) | 0x0200,
				ioaddr + MulticastFilter1+2);
			writew(readw(ioaddr + MACCtrl0) | EnbFlowCtrl,
				ioaddr + MACCtrl0);
		}
	}
	if (intr_status & StatsMax) {
		get_stats(dev);
	}
	if (intr_status & IntrPCIErr) {
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			   dev->name, intr_status);
		/* We must do a global reset of DMA to continue. */
	}
}
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	/* We should lock this segment of code for SMP eventually, although
	   the vulnerability window is very small and statistics are
	   non-critical. */
	/* The chip only needs to report frames silently dropped. */
	np->stats.rx_missed_errors += readb(ioaddr + RxMissed);
	np->stats.tx_packets += readw(ioaddr + TxFramesOK);
	np->stats.rx_packets += readw(ioaddr + RxFramesOK);
	np->stats.collisions += readb(ioaddr + StatsLateColl);
	np->stats.collisions += readb(ioaddr + StatsMultiColl);
	np->stats.collisions += readb(ioaddr + StatsOneColl);
	np->stats.tx_carrier_errors += readb(ioaddr + StatsCarrierError);
	readb(ioaddr + StatsTxDefer);
	for (i = StatsTxDefer; i <= StatsMcastRx; i++)
		readb(ioaddr + i);
	np->stats.tx_bytes += readw(ioaddr + TxOctetsLow);
	np->stats.tx_bytes += readw(ioaddr + TxOctetsHigh) << 16;
	np->stats.rx_bytes += readw(ioaddr + RxOctetsLow);
	np->stats.rx_bytes += readw(ioaddr + RxOctetsHigh) << 16;

	return &np->stats;
}
static void set_rx_mode(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;
	u16 mc_filter[4];			/* Multicast hash filter */
	u32 rx_mode;
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((dev->mc_count > multicast_filter_limit)
			   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (dev->mc_count) {
		struct dev_mc_list *mclist;
		int bit;
		int index;
		u32 crc;
		memset (mc_filter, 0, sizeof (mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
			crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000) index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		}
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
	} else {
		writeb(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		return;
	}
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;

	for (i = 0; i < 4; i++)
		writew(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	writeb(rx_mode, ioaddr + RxMode);
}
static int __set_mac_addr(struct net_device *dev)
{
	u16 addr16;

	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
	writew(addr16, dev->base_addr + StationAddr);
	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
	writew(addr16, dev->base_addr + StationAddr+2);
	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
	writew(addr16, dev->base_addr + StationAddr+4);
	return 0;
}
static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
{
	struct netdev_private *np = dev->priv;
	u32 ethcmd;

	if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
		return -EFAULT;

	switch (ethcmd) {
	/* get constant driver settings/info */
	case ETHTOOL_GDRVINFO: {
		struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
		strcpy(info.driver, DRV_NAME);
		strcpy(info.version, DRV_VERSION);
		strcpy(info.bus_info, np->pci_dev->slot_name);
		memset(&info.fw_version, 0, sizeof(info.fw_version));
		if (copy_to_user(useraddr, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}

	/* get media settings */
	case ETHTOOL_GSET: {
		struct ethtool_cmd ecmd = { ETHTOOL_GSET };
		spin_lock_irq(&np->lock);
		mii_ethtool_gset(&np->mii_if, &ecmd);
		spin_unlock_irq(&np->lock);
		if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
			return -EFAULT;
		return 0;
	}
	/* set media settings */
	case ETHTOOL_SSET: {
		int r;
		struct ethtool_cmd ecmd;
		if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
			return -EFAULT;
		spin_lock_irq(&np->lock);
		r = mii_ethtool_sset(&np->mii_if, &ecmd);
		spin_unlock_irq(&np->lock);
		return r;
	}

	/* restart autonegotiation */
	case ETHTOOL_NWAY_RST: {
		return mii_nway_restart(&np->mii_if);
	}

	/* get link status */
	case ETHTOOL_GLINK: {
		struct ethtool_value edata = {ETHTOOL_GLINK};
		edata.data = mii_link_ok(&np->mii_if);
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}

	/* get message-level */
	case ETHTOOL_GMSGLVL: {
		struct ethtool_value edata = {ETHTOOL_GMSGLVL};
		edata.data = np->msg_enable;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}
	/* set message-level */
	case ETHTOOL_SMSGLVL: {
		struct ethtool_value edata;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		np->msg_enable = edata.data;
		return 0;
	}

	default:
		return -EOPNOTSUPP;
	}
}
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = dev->priv;
	struct mii_ioctl_data *data = (struct mii_ioctl_data *) &rq->ifr_data;
	int rc;
	int i;
	long ioaddr = dev->base_addr;

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCETHTOOL)
		rc = netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
	else {
		spin_lock_irq(&np->lock);
		rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
		spin_unlock_irq(&np->lock);
	}
	switch (cmd) {
	case SIOCDEVPRIVATE:
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08Zx %08x %08x(%02x) %08x %08x\n", i,
				np->tx_ring_dma + i*sizeof(*np->tx_ring),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			readl(dev->base_addr + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
		printk(KERN_DEBUG "TxStatus=%04x\n", readw(ioaddr + TxStatus));
		break;
	default:
		break;
	}

	return rc;
}
static int netdev_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, readb(ioaddr + TxStatus),
			   readl(ioaddr + RxStatus), readw(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	writew(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	writew(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

	/* Wait and kill tasklet */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n",
			   (int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
				   np->tx_ring[i].frag[0].length);
		printk("\n"KERN_DEBUG"  Rx ring %8.8x:\n",
			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
				   np->rx_ring[i].frag[0].length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	del_timer_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].addr = 0xBADF00D0;	/* An invalid address. */
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = 0;
		}
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->tx_ring[i].frag[0].addr, skb->len,
				PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = 0;
		}
	}

	return 0;
}
static void __devexit sundance_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = dev->priv;

		unregister_netdev(dev);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
			np->rx_ring_dma);
		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
			np->tx_ring_dma);
		pci_release_regions(pdev);
#ifndef USE_IO_OPS
		iounmap((char *)(dev->base_addr));
#endif
		free_netdev(dev);
		pci_set_drvdata(pdev, NULL);
	}
}
static struct pci_driver sundance_driver = {
	.name		= DRV_NAME,
	.id_table	= sundance_pci_tbl,
	.probe		= sundance_probe1,
	.remove		= __devexit_p(sundance_remove1),
};

static int __init sundance_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_module_init(&sundance_driver);
}

static void __exit sundance_exit(void)
{
	pci_unregister_driver(&sundance_driver);
}

module_init(sundance_init);
module_exit(sundance_exit);