/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
/*
	Written 1999-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210

	Support and updates available at
	http://www.scyld.com/network/sundance.html
	Version LK1.01a (jgarzik):
	- Replace some MII-related magic numbers with constants

	Version LK1.02 (D-Link):
	- Add new board to PCI ID list

	Version LK1.03 (D-Link):
	- New Rx scheme, reduce Rx congestion
	- Option to disable flow control

	Version LK1.04 (D-Link):
	- More support for ethtool.

	- Remove unused/constant members from struct pci_id_info
	  (which then allows removal of 'drv_flags' from private struct)
	- If no phy is found, fail to load that board (jgarzik)
	- Always start phy id scan at id 1 to avoid problems (Donald Becker)
	- Autodetect where mii_preamble_required is needed,
	  default to not needed.  (Donald Becker)

	- Remove mii_preamble_required module parameter (Donald Becker)
	- Add per-interface mii_preamble_required (setting is autodetected)
	- Remove unnecessary cast from void pointer (jgarzik)
	- Re-align comments in private struct (jgarzik)

	Version LK1.04c (jgarzik):
	- Support bitmapped message levels (NETIF_MSG_xxx), and the
	  two ethtool ioctls that get/set them
	- Don't hand-code MII ethtool support, use standard API/lib

	- Merge from Donald Becker's sundance.c: (Jason Lunz)
	  * proper support for variably-sized MTUs
	  * default to PIO, to fix chip bugs
	- Add missing unregister_netdev (Jason Lunz)
	- Add CONFIG_SUNDANCE_MMIO config option (jgarzik)
	- Better rx buf size calculation (Donald Becker)

	Version LK1.05 (D-Link):
	- Fix DFE-580TX packet drop issue (for DL10050C)

	Version LK1.06 (D-Link):
	- Fix crash while unloading driver

	Version LK1.06b (D-Link):
	- New tx scheme, adaptive tx_coalesce

	Version LK1.07 (D-Link):
	- Fix tx bugs on big-endian machines
	- Remove unused max_interrupt_work module parameter, the new
	  NAPI-like rx scheme doesn't need it.
	- Remove redundant get_stats() call in intr_handler(); those
	  I/O accesses could affect performance on ARM-based systems
	- Add Linux software VLAN support

	Version LK1.08 (Philippe De Muyter phdm@macqel.be):
	- Fix bug of custom mac address
	  (StationAddr register only accepts word writes)

	Version LK1.09 (D-Link):
	- Fix the flowctrl bug.
	- Set Pause bit in MII ANAR if flow control enabled.

	Version LK1.09a (ICPlus):
	- Add the delay time in reading the contents of EEPROM

	Version LK1.10 (Philippe De Muyter phdm@macqel.be):
	- Make 'unblock interface after Tx underrun' work

*/
#define DRV_NAME	"sundance"
#define DRV_VERSION	"1.01+LK1.10"
#define DRV_RELDATE	"28-Oct-2005"
/* The user-configurable values.
   These may be modified when a driver module is loaded.*/
static int debug = 1;		/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC. */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl = 1;

/* media[] specifies the media type the NIC operates at.
		 autosense	Autosensing active media.
		 10mbps_hd	10Mbps half duplex.
		 10mbps_fd	10Mbps full duplex.
		 100mbps_hd	100Mbps half duplex.
		 100mbps_fd	100Mbps full duplex.
		 0		Autosensing active media.
		 1		10Mbps half duplex.
		 2		10Mbps full duplex.
		 3		100Mbps half duplex.
		 4		100Mbps full duplex.
*/
#define MAX_UNITS 8
static char *media[MAX_UNITS];
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used. */
#define RX_RING_SIZE	64

#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
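
/* Illustrative note (added; not part of the original sources): because the
   ring sizes above are powers of two, index arithmetic of the form

	entry = np->cur_tx % TX_RING_SIZE;

   compiles down to a simple mask, equivalent to

	entry = np->cur_tx & (TX_RING_SIZE - 1);

   which is the "bit mask" conversion the comment above relies on. */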
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/
/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <linux/delay.h>
#include <linux/spinlock.h>
#ifndef _COMPAT_WITH_OLD_KERNEL
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#endif
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "  Written by Donald Becker\n"
KERN_INFO "  http://www.scyld.com/network/sundance.html\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
/*
I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.

IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.

A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.
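As a condensed sketch of the copybreak decision in the receive path (added
for clarity; not a verbatim excerpt -- see rx_poll() below for the real code):

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	<- 16-byte aligns the IP header
		... copy the frame; the full-sized buffer stays in the ring ...
	} else {
		... hand the original full-sized skb up the stack ...
	}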
IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.
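
The current code expresses this scheme with the cur_tx/dirty_tx counters and
the netif queue API rather than dev->tbusy/lp->tx_full; as a condensed sketch
(not a verbatim excerpt):

	entry = np->cur_tx % TX_RING_SIZE;	<- producer, in start_tx()
	np->cur_tx++;
	...
	np->dirty_tx++;				<- consumer, in intr_handler()
	if (netif_queue_stopped(dev) &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);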
IV. References

The Sundance ST201 datasheet, preliminary version.
The Kendin KS8723 datasheet, preliminary version.
The ICplus IP100 datasheet, preliminary version.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

*/
/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif

static struct pci_device_id sundance_pci_tbl[] = {
	{0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0},
	{0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1},
	{0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2},
	{0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3},
	{0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
	{0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
	{0,}
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
enum {
	netdev_io_size = 128
};

struct pci_id_info {
	const char *name;
};
static struct pci_id_info pci_id_tbl[] = {
	{"D-Link DFE-550TX FAST Ethernet Adapter"},
	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
	{"D-Link DFE-580TX 4 port Server Adapter"},
	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
	{"D-Link DL10050-based FAST Ethernet Adapter"},
	{"Sundance Technology Alta"},
	{NULL,},	/* 0 terminated list. */
};
/* This driver was written to use PCI memory space, however x86-oriented
   hardware often uses I/O space accesses. */
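
/* (Added note: the ioread/iowrite accessors used throughout, combined with
   pci_iomap() at probe time, work with either access method, so the
   MMIO-vs-I/O choice stays localized to the USE_IO_OPS selection above.) */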
/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
	TxDMABurstThresh = 0x08,
	TxDMAUrgentThresh = 0x09,
	TxDMAPollPeriod = 0x0a,
	RxDMABurstThresh = 0x14,
	RxDMAUrgentThresh = 0x15,
	RxDMAPollPeriod = 0x16,
	TxStartThresh = 0x3c,
	RxEarlyThresh = 0x3e,
	MulticastFilter0 = 0x60,
	MulticastFilter1 = 0x64,
	StatsCarrierError = 0x74,
	StatsLateColl = 0x75,
	StatsMultiColl = 0x76,
	StatsTxXSDefer = 0x7a,
	/* Aliased and bogus values! */
};
enum ASICCtrl_HiWord_bit {
	GlobalReset = 0x0001,
	RxReset = 0x0002,
	TxReset = 0x0004,
	DMAReset = 0x0008,
	FIFOReset = 0x0010,
	NetworkReset = 0x0020,
	HostReset = 0x0040,
	ResetBusy = 0x0400,
};
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
	IntrDrvRqst=0x0040,
	StatsMax=0x0080, LinkChange=0x0100,
	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};
/* Bits in the RxMode register. */
enum rx_mode_bits {
	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
	StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};
/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	u32 next_desc;
	u32 status;
	struct desc_frag { u32 addr, length; } frag[1];
};
/* Bits in netdev_desc.status */
enum desc_status_bits {
	DescOwn=0x8000,
	DescEndPacket=0x4000,
	DescEndRing=0x2000,
	LastFrag=0x80000000,
	DescIntrOnTx=0x8000,
	DescIntrOnDMADone=0x80000000,
	DisableAlign = 0x00000001,
};
#define PRIV_ALIGN	15	/* Required alignment mask */
/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
   within the structure. */
#define MII_CNT		4
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	spinlock_t rx_lock;		/* Group with Tx control cache line. */
	int msg_enable;
	int chip_id;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	struct netdev_desc *last_tx;	/* Last Tx descriptor used. */
	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
	unsigned int flowctrl:1;
	unsigned int default_port:4;	/* Last dev->if_port value. */
	unsigned int an_enable:1;
	unsigned int speed;
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int budget;
	int cur_task;
	/* Multicast and receive mode. */
	spinlock_t mcastlock;		/* SMP lock multicast updates. */
	/* MII transceiver section. */
	struct mii_if_info mii_if;
	int mii_preamble_required;
	unsigned char phys[MII_CNT];	/* MII device addresses, only first one used. */
	struct pci_dev *pci_dev;
	void __iomem *base;
	unsigned char pci_rev_id;
};
/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET	0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
			IntrDrvRqst | IntrTxDone | StatsMax | \
			LinkChange)
static int change_mtu(struct net_device *dev, int new_mtu);
static int eeprom_read(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
static int reset_tx (struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void rx_poll(unsigned long data);
static void tx_poll(unsigned long data);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int __set_mac_addr(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
static struct ethtool_ops ethtool_ops;
static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base + ASICCtrl;
	int countdown;

	/* ST201 documentation states ASICCtrl is a 32bit register */
	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
	/* ST201 documentation states reset can take up to 1 ms */
	countdown = 10 + 1;
	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
		if (--countdown == 0) {
			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
			break;
		}
		udelay(100);
	}
}
static int __devinit sundance_probe1 (struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int irq;
	int i;
	void __iomem *ioaddr;
	u16 mii_ctl;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif
	int phy, phy_idx = 0;
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);

	irq = pdev->irq;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
	if (!ioaddr)
		goto err_out_res;
	for (i = 0; i < 3; i++)
		((u16 *)dev->dev_addr)[i] =
			le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->base = ioaddr;
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->msg_enable = (1 << debug) - 1;
	spin_lock_init(&np->lock);
	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = (struct netdev_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;
	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
	dev->tx_timeout = &tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->change_mtu = &change_mtu;
	pci_set_drvdata(pdev, dev);

	pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_rx;
	printk(KERN_INFO "%s: %s at %p, ",
		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

	np->phys[0] = 1;	/* Default setting */
	np->mii_preamble_required++;
	for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) {
		int mii_status = mdio_read(dev, phy, MII_BMSR);
		int phyx = phy & 0x1f;
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phys[phy_idx++] = phyx;
			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
			if ((mii_status & 0x0040) == 0)
				np->mii_preamble_required++;
			printk(KERN_INFO "%s: MII PHY found at address %d, status "
				   "0x%4.4x advertising %4.4x.\n",
				   dev->name, phyx, mii_status, np->mii_if.advertising);
		}
	}
	np->mii_preamble_required--;

	if (phy_idx == 0) {
		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
			   dev->name, ioread32(ioaddr + ASICCtrl));
		goto err_out_unregister;
	}

	np->mii_if.phy_id = np->phys[0];
	/* Parse override configuration */
	np->an_enable = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0
				   || strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
	}
	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
		/* Default 100Mbps Full */
		if (np->an_enable) {
			np->speed = 100;
			np->mii_if.full_duplex = 1;
			np->an_enable = 0;
		}
	}
	/* Reset PHY */
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
	mdelay (300);
	/* If flow control enabled, we need to advertise it.*/
	if (np->flowctrl)
		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
	/* Force media type */
	if (!np->an_enable) {
		mii_ctl = 0;
		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
		printk (KERN_INFO "Override speed=%d, %s duplex\n",
			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
	}
	/* Perhaps move the reset here? */
	/* Reset the chip to erase previous misconfiguration. */
	if (netif_msg_hw(np))
		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
	iowrite16(0x00ff, ioaddr + ASICCtrl + 2);
	if (netif_msg_hw(np))
		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));

	return 0;

err_out_unregister:
	unregister_netdev(dev);
err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}
static int change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
		return -EINVAL;
	if (netif_running(dev))
		return -EBUSY;
	dev->mtu = new_mtu;
	return 0;
}
#define eeprom_delay(ee_addr)	ioread32(ee_addr)
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int __devinit eeprom_read(void __iomem *ioaddr, int location)
{
	int boguscnt = 10000;	/* Typical 1900 ticks. */
	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
	do {
		eeprom_delay(ioaddr + EECtrl);
		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
			return ioread16(ioaddr + EEData);
		}
	} while (--boguscnt > 0);
	return 0;
}
/* MII transceiver control section.
   Read and write the MII registers using software-generated serial
   MDIO protocol.  See the MII specifications or DP83840A data sheet
   for details.

   The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
   met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay() ioread8(mdio_addr)

enum mii_reg_bits {
	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
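
/* Added note on the command words built below: mdio_read()'s

	mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;

   shifts out, MSB first, two extra idle ones, the start bits (01), the
   read opcode (10), then the 5-bit PHY address and 5-bit register number.
   mdio_write()'s 32-bit frame is analogous: start (01), write opcode (01),
   PHY address, register number, turnaround (10), then the 16 data bits. */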
/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite8(MDIO_WRITE1, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return (retval>>1) & 0xffff;
}
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return;
}
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	/* Do we need to reset the chip??? */

	i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
	if (i)
		return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			   dev->name, dev->irq);
	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
	/* The Tx list pointer is written as packets are queued. */

	/* Initialize other registers. */
	__set_mac_addr(dev);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
#endif
	if (dev->mtu > 2047)
		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);

	/* Configure the PCI bus bursts and FIFO thresholds. */

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	spin_lock_init(&np->mcastlock);

	set_rx_mode(dev);
	iowrite16(0, ioaddr + IntrEnable);
	iowrite16(0, ioaddr + DownCounter);
	/* Set the chip to poll every N*320nsec. */
	iowrite8(100, ioaddr + RxDMAPollPeriod);
	iowrite8(127, ioaddr + TxDMAPollPeriod);
	/* Fix DFE-580TX packet drop issue */
	if (np->pci_rev_id >= 0x14)
		iowrite8(0x01, ioaddr + DebugCtrl1);
	netif_start_queue(dev);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
			   "MAC Control %x, %4.4x %4.4x.\n",
			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + MACCtrl0),
			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 3*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;	/* timer handler */
	add_timer(&np->timer);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);

	return 0;
}
static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	int negotiated = mii_lpa & np->mii_if.advertising;
	int duplex;

	/* Force media */
	if (!np->an_enable || mii_lpa == 0xffff) {
		if (np->mii_if.full_duplex)
			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
				ioaddr + MACCtrl0);
		return;
	}

	/* Autonegotiation */
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		if (netif_msg_link(np))
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
				   "negotiated capability %4.4x.\n", dev->name,
				   duplex ? "full" : "half", np->phys[0], negotiated);
		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0),
			ioaddr + MACCtrl0);
	}
}
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int next_tick = 10*HZ;

	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
			   "Tx %x Rx %x.\n",
			   dev->name, ioread16(ioaddr + IntrEnable),
			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
	}
	check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flag;

	netif_stop_queue(dev);
	tasklet_disable(&np->tx_tasklet);
	iowrite16(0, ioaddr + IntrEnable);
	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
		   "TxFrameId %2.2x,"
		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
		   ioread8(ioaddr + TxFrameId));

	{
		int i;
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
	}
	spin_lock_irqsave(&np->lock, flag);

	/* Stop and restart the chip's Tx processes . */
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flag);

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	tasklet_enable(&np->tx_tasklet);
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].length = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		np->rx_ring[i].frag[0].addr = cpu_to_le32(
			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE));
		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	}
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
}
static void tx_poll (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned head = np->cur_task % TX_RING_SIZE;
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;
		txdesc = &np->tx_ring[entry];
		if (np->last_tx) {
			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
				entry*sizeof(struct netdev_desc));
		}
		np->last_tx = txdesc;
	}
	/* Indicate the latest descriptor of tx ring */
	txdesc->status |= cpu_to_le32(DescIntrOnTx);

	if (ioread32 (np->base + TxListPtr) == 0)
		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
			np->base + TxListPtr);

	return;
}
static int
start_tx (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);

	/* Increment cur_tx before tasklet_schedule() */
	np->cur_tx++;
	mb();
	/* Schedule a tx_poll() task */
	tasklet_schedule(&np->tx_tasklet);

	/* On some architectures: explicitly flush cache lines here. */
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
			&& !netif_queue_stopped(dev)) {
		/* do nothing */
	} else {
		netif_stop_queue (dev);
	}
	dev->trans_start = jiffies;
	if (netif_msg_tx_queued(np)) {
		printk (KERN_DEBUG
			"%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return 0;
}
/* Reset hardware tx and free all of tx buffers */
static int
reset_tx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;
	int irq = in_interrupt();

	/* Reset tx logic, TxListPtr will be cleaned */
	iowrite16 (TxDisable, ioaddr + MACCtrl1);
	iowrite16 (TxReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASICCtrl + 2);
	for (i=50; i > 0; i--) {
		if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
			break;
		mdelay(1);
	}
	/* free all tx skbuff */
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->tx_ring[i].frag[0].addr, skb->len,
				PCI_DMA_TODEVICE);
			if (irq)
				dev_kfree_skb_irq (skb);
			else
				dev_kfree_skb (skb);
			np->tx_skbuff[i] = NULL;
			np->stats.tx_dropped++;
		}
	}
	np->cur_tx = np->dirty_tx = 0;

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
	return 0;
}
/* The interrupt handler cleans up after the Tx thread,
   and schedules Rx work. */
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int hw_frame_id;
	int tx_cnt;
	int tx_status;
	int handled = 0;

	do {
		int intr_status = ioread16(ioaddr + IntrStatus);
		iowrite16(intr_status, ioaddr + IntrStatus);

		if (netif_msg_intr(np))
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (!(intr_status & DEFAULT_INTR))
			break;

		handled = 1;

		if (intr_status & (IntrRxDMADone)) {
			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
					ioaddr + IntrEnable);
			if (np->budget < 0)
				np->budget = RX_BUDGET;
			tasklet_schedule(&np->rx_tasklet);
		}
		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
			tx_status = ioread16 (ioaddr + TxStatus);
			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
				if (netif_msg_tx_done(np))
					printk
						("%s: Transmit status is %2.2x.\n",
						 dev->name, tx_status);
				if (tx_status & 0x1e) {
					if (netif_msg_tx_err(np))
						printk("%s: Transmit error status %4.4x.\n",
							   dev->name, tx_status);
					np->stats.tx_errors++;
					if (tx_status & 0x10)
						np->stats.tx_fifo_errors++;
					if (tx_status & 0x08)
						np->stats.collisions++;
					if (tx_status & 0x04)
						np->stats.tx_fifo_errors++;
					if (tx_status & 0x02)
						np->stats.tx_window_errors++;
					/*
					** This reset has been verified on
					** DFE-580TX boards ! phdm@macqel.be.
					*/
					if (tx_status & 0x10) {	/* TxUnderrun */
						unsigned short txthreshold;

						txthreshold = ioread16 (ioaddr + TxStartThresh);
						/* Restart Tx FIFO and transmitter */
						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
						iowrite16 (txthreshold, ioaddr + TxStartThresh);
						/* No need to reset the Tx pointer here */
					}
					/* Restart the Tx. */
					iowrite16 (TxEnable, ioaddr + MACCtrl1);
				}
				/* Yup, this is a documentation bug.  It cost me *hours*. */
				iowrite16 (0, ioaddr + TxStatus);
				if (tx_cnt < 0) {
					iowrite32(5000, ioaddr + DownCounter);
					break;
				}
				tx_status = ioread16 (ioaddr + TxStatus);
			}
			hw_frame_id = (tx_status >> 8) & 0xff;
		} else {
			hw_frame_id = ioread8(ioaddr + TxFrameId);
		}

		if (np->pci_rev_id >= 0x14) {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				int sw_frame_id;
				sw_frame_id = (le32_to_cpu(
					np->tx_ring[entry].status) >> 2) & 0xff;
				if (sw_frame_id == hw_frame_id &&
					!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				if (sw_frame_id == (hw_frame_id + 1) %
					TX_RING_SIZE)
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					np->tx_ring[entry].frag[0].addr,
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		} else {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				if (!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					np->tx_ring[entry].frag[0].addr,
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		}

		if (netif_queue_stopped(dev) &&
			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear busy flag. */
			netif_wake_queue (dev);
		}
		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
			netdev_error(dev, intr_status);
	} while (0);
	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
static void rx_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->budget;
	void __iomem *ioaddr = np->base;
	int received = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status = le32_to_cpu(desc->status);
		int pkt_len;

		if (--boguscnt < 0) {
			goto not_done;
		}
		if (!(frame_status & DescOwn))
			break;
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   frame_status);
		if (frame_status & 0x001f4000) {
			/* There was an error. */
			if (netif_msg_rx_err(np))
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
					   frame_status);
			np->stats.rx_errors++;
			if (frame_status & 0x00100000) np->stats.rx_length_errors++;
			if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
			if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
					   " status %8.8x.\n",
					   dev->name, frame_status);
			}
		} else {
			struct sk_buff *skb;
#ifndef final_version
			if (netif_msg_rx_status(np))
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   ", bogus_cnt %d.\n",
					   pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,
					np->rx_ring[entry].frag[0].addr,
					np->rx_buf_sz,
					PCI_DMA_FROMDEVICE);

				eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
				pci_dma_sync_single_for_device(np->pci_dev,
					np->rx_ring[entry].frag[0].addr,
					np->rx_buf_sz,
					PCI_DMA_FROMDEVICE);
				skb_put(skb, pkt_len);
			} else {
				pci_unmap_single(np->pci_dev,
					np->rx_ring[entry].frag[0].addr,
					np->rx_buf_sz,
					PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
			dev->last_rx = jiffies;
		}
		entry = (entry + 1) % RX_RING_SIZE;
		received++;
	}
	np->cur_rx = entry;
	refill_rx (dev);
	np->budget -= received;
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	return;

not_done:
	np->cur_rx = entry;
	refill_rx (dev);
	if (!received)
		received = 1;
	np->budget -= received;
	if (np->budget <= 0)
		np->budget = RX_BUDGET;
	tasklet_schedule(&np->rx_tasklet);
	return;
}
static void refill_rx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry;

	/* Refill the Rx ring buffers. */
	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb->dev = dev;		/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				pci_map_single(np->pci_dev, skb->data,
					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
	}
	return;
}
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mii_ctl, mii_advertise, mii_lpa;
	int speed;

	if (intr_status & LinkChange) {
		if (np->an_enable) {
			mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
			mii_lpa = mdio_read (dev, np->phys[0], MII_LPA);
			mii_advertise &= mii_lpa;
			printk (KERN_INFO "%s: Link changed: ", dev->name);
			if (mii_advertise & ADVERTISE_100FULL) {
				np->speed = 100;
				printk ("100Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_100HALF) {
				np->speed = 100;
				printk ("100Mbps, half duplex\n");
			} else if (mii_advertise & ADVERTISE_10FULL) {
				np->speed = 10;
				printk ("10Mbps, full duplex\n");
			} else if (mii_advertise & ADVERTISE_10HALF) {
				np->speed = 10;
				printk ("10Mbps, half duplex\n");
			} else
				printk ("\n");
		} else {
			mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
			speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
			np->speed = speed;
			printk (KERN_INFO "%s: Link changed: %dMbps ,",
				dev->name, speed);
			printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
				"full" : "half");
		}
		check_duplex (dev);
		if (np->flowctrl && np->mii_if.full_duplex) {
			iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
				ioaddr + MulticastFilter1+2);
			iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
				ioaddr + MACCtrl0);
		}
	}
	if (intr_status & StatsMax) {
		get_stats(dev);
	}
	if (intr_status & IntrPCIErr) {
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			   dev->name, intr_status);
		/* We must do a global reset of DMA to continue. */
	}
}
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i;

	/* We should lock this segment of code for SMP eventually, although
	   the vulnerability window is very small and statistics are
	   non-critical. */
	/* The chip only needs to report frames silently dropped. */
	np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
	np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
	np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
	np->stats.collisions += ioread8(ioaddr + StatsLateColl);
	np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
	np->stats.collisions += ioread8(ioaddr + StatsOneColl);
	np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
	ioread8(ioaddr + StatsTxDefer);
	for (i = StatsTxDefer; i <= StatsMcastRx; i++)
		ioread8(ioaddr + i);
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;

	return &np->stats;
}
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mc_filter[4];	/* Multicast hash filter */
	u32 rx_mode;
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((dev->mc_count > multicast_filter_limit)
			   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (dev->mc_count) {
		struct dev_mc_list *mclist;
		int bit;
		int index;
		int crc;
		memset (mc_filter, 0, sizeof (mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
			crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000) index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		}
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
	} else {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		return;
	}
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;

	for (i = 0; i < 4; i++)
		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	iowrite8(rx_mode, ioaddr + RxMode);
}
static int __set_mac_addr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u16 addr16;

	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
	iowrite16(addr16, np->base + StationAddr);
	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
	iowrite16(addr16, np->base + StationAddr+2);
	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
	iowrite16(addr16, np->base + StationAddr+4);
	return 0;
}
static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_gset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_sset(&np->mii_if, ecmd);
	spin_unlock_irq(&np->lock);
	return res;
}

static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}

static struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.get_settings = get_settings,
	.set_settings = set_settings,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_perm_addr = ethtool_op_get_perm_addr,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int rc;
	int i;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);
	switch (cmd) {
	case SIOCDEVPRIVATE:
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2)
					& 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
		printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
		return 0;
	}

	return rc;
}
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

	/* Wait and kill tasklet */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n",
			   (int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
				   np->tx_ring[i].frag[0].length);
		printk("\n"KERN_DEBUG "  Rx ring %8.8x:\n",
			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
				   np->rx_ring[i].frag[0].length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	del_timer_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				np->tx_ring[i].frag[0].addr, skb->len,
				PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}
static void __devexit sundance_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev(dev);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
			np->rx_ring_dma);
		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
			np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
		pci_set_drvdata(pdev, NULL);
	}
}
static struct pci_driver sundance_driver = {
	.name		= DRV_NAME,
	.id_table	= sundance_pci_tbl,
	.probe		= sundance_probe1,
	.remove		= __devexit_p(sundance_remove1),
};

static int __init sundance_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_module_init(&sundance_driver);
}

static void __exit sundance_exit(void)
{
	pci_unregister_driver(&sundance_driver);
}

module_init(sundance_init);
module_exit(sundance_exit);