MOXA linux-2.6.x / linux-2.6.19-uc1 from UC-7110-LX-BOOTLOADER-1.9_VERSION-4.2.tgz
[linux-2.6.19-moxart.git] / drivers / net / 8139cp.c
1 /* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
2 /*
3 Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
5 Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
6 Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
7 Copyright 2001 Manfred Spraul [natsemi.c]
8 Copyright 1999-2001 by Donald Becker. [natsemi.c]
9 Written 1997-2001 by Donald Becker. [8139too.c]
10 Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
12 This software may be used and distributed according to the terms of
13 the GNU General Public License (GPL), incorporated herein by reference.
14 Drivers based on or derived from this code fall under the GPL and must
15 retain the authorship, copyright and license notice. This file is not
16 a complete program and may only be used when the entire operating
17 system is licensed under the GPL.
19 See the file COPYING in this distribution for more information.
21 Contributors:
23 Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
24 PCI suspend/resume - Felipe Damasio <felipewd@terra.com.br>
25 LinkChg interrupt - Felipe Damasio <felipewd@terra.com.br>
27 TODO:
28 * Test Tx checksumming thoroughly
29 * Implement dev->tx_timeout
31 Low priority TODO:
32 * Complete reset on PciErr
33 * Consider Rx interrupt mitigation using TimerIntr
34 * Investigate using skb->priority with h/w VLAN priority
35 * Investigate using High Priority Tx Queue with skb->priority
36 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
37 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
38 * Implement Tx software interrupt mitigation via
39 Tx descriptor bit
40 * The real minimum of CP_MIN_MTU is 4 bytes. However,
41 for this to be supported, one must(?) turn on packet padding.
42 * Support external MII transceivers (patch available)
44 NOTES:
45 * TX checksumming is considered experimental. It is off by
46 		default; use ethtool to turn it on.
50 #define DRV_NAME "8139cp"
51 #define DRV_VERSION "1.3"
52 #define DRV_RELDATE "Mar 22, 2004"
55 #include <linux/module.h>
56 #include <linux/moduleparam.h>
57 #include <linux/kernel.h>
58 #include <linux/compiler.h>
59 #include <linux/netdevice.h>
60 #include <linux/etherdevice.h>
61 #include <linux/init.h>
62 #include <linux/pci.h>
63 #include <linux/dma-mapping.h>
64 #include <linux/delay.h>
65 #include <linux/ethtool.h>
66 #include <linux/mii.h>
67 #include <linux/if_vlan.h>
68 #include <linux/crc32.h>
69 #include <linux/in.h>
70 #include <linux/ip.h>
71 #include <linux/tcp.h>
72 #include <linux/udp.h>
73 #ifdef CONFIG_LEDMAN
74 #include <linux/ledman.h>
75 #endif
76 #include <linux/cache.h>
77 #include <asm/io.h>
78 #include <asm/irq.h>
79 #include <asm/uaccess.h>
81 /* do we want fast poll operation instead of interrupts */
82 #ifdef CONFIG_FAST_TIMER
83 #define FAST_POLL 1
84 #include <linux/fast_timer.h>
85 #endif
87 /* VLAN tagging feature enable/disable */
88 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
89 #define CP_VLAN_TAG_USED 1
90 #define CP_VLAN_TX_TAG(tx_desc, vlan_tag_value, do_vlan) \
91 do { (tx_desc)->opts2 = (do_vlan ? cpu_to_le16(TxVlanTag) : 0); \
92 		(tx_desc)->vtag = cpu_to_be16(vlan_tag_value); } while (0)
93 #else
94 #define CP_VLAN_TAG_USED 0
95 #define CP_VLAN_TX_TAG(tx_desc, vlan_tag_value, do_vlan) \
96 do { (tx_desc)->opts2 = 0; } while (0)
97 #endif
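/* A note on the tagging macro above: CP_VLAN_TX_TAG() stores the TxVlanTag
   flag in the little-endian opts2 half of the Tx descriptor and the 802.1Q
   tag itself in the big-endian vtag half (see struct cp_desc below); with
   VLAN support compiled out it simply clears opts2. */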
99 /* These identify the driver base version and may not be removed. */
100 static char version[] =
101 KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
103 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
104 MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
105 MODULE_VERSION(DRV_VERSION);
106 MODULE_LICENSE("GPL");
108 static int debug = -1;
109 module_param(debug, int, 0);
110 MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
112 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
113 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
114 static int multicast_filter_limit = 32;
115 module_param(multicast_filter_limit, int, 0);
116 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
118 #define PFX DRV_NAME ": "
120 #ifndef TRUE
121 #define FALSE 0
122 #define TRUE (!FALSE)
123 #endif
125 #define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
126 NETIF_MSG_PROBE | \
127 NETIF_MSG_LINK)
128 #define CP_NUM_STATS 14 /* struct cp_dma_stats, plus one */
129 #define CP_STATS_SIZE 64 /* size in bytes of DMA stats block */
130 #define CP_REGS_SIZE (0xff + 1)
131 #define CP_REGS_VER 1 /* version 1 */
132 #define CP_RX_RING_SIZE 64
133 #define CP_TX_RING_SIZE 64
134 #define CP_RING_BYTES \
135 ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) + \
136 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) + \
137 CP_STATS_SIZE)
138 #define NEXT_TX(N) (((N) + 1) & (CP_TX_RING_SIZE - 1))
139 #define NEXT_RX(N) (((N) + 1) & (CP_RX_RING_SIZE - 1))
140 #define TX_BUFFS_AVAIL(CP) \
141 (((CP)->tx_tail <= (CP)->tx_head) ? \
142 (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
143 (CP)->tx_tail - (CP)->tx_head - 1)
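/* TX_BUFFS_AVAIL() keeps one descriptor permanently unused so a full ring
   can be told apart from an empty one: with tx_head == tx_tail the ring is
   empty and 63 of the 64 slots are reported free, and it reports 0 once
   NEXT_TX(tx_head) catches up with tx_tail. */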
145 /* Used to calculate the size of the temporary rx buffer */
146 #define CP_RX_FIFO_SZ 2048
147 #define CP_RX_DMA_MARGIN 500
149 #define RX_OFFSET 2
150 #define CP_INTERNAL_PHY 32
152 /* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
153 #define RX_FIFO_THRESH 5 /* Rx buffer level before first PCI xfer. */
154 #define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 */
155 #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
156 #define TX_EARLY_THRESH 256 /* Early Tx threshold, in bytes */
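/* With the log2 encoding above (16 << n bytes), the defaults decode to a
   512-byte Rx FIFO threshold (5), 256-byte Rx DMA bursts (4) and 1024-byte
   Tx DMA bursts (6); TX_EARLY_THRESH is already expressed in bytes. */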
158 /* Time in jiffies before concluding the transmitter is hung. */
159 #define TX_TIMEOUT (6*HZ)
161 /* hardware minimum and maximum for a single frame's data payload */
162 #define CP_MIN_MTU 60 /* TODO: allow lower, but pad */
163 #define CP_MAX_MTU 4096
165 enum {
166 /* NIC register offsets */
167 MAC0 = 0x00, /* Ethernet hardware address. */
168 MAR0 = 0x08, /* Multicast filter. */
169 StatsAddr = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
170 TxRingAddr = 0x20, /* 64-bit start addr of Tx ring */
171 HiTxRingAddr = 0x28, /* 64-bit start addr of high priority Tx ring */
172 Cmd = 0x37, /* Command register */
173 IntrMask = 0x3C, /* Interrupt mask */
174 IntrStatus = 0x3E, /* Interrupt status */
175 TxConfig = 0x40, /* Tx configuration */
176 ChipVersion = 0x43, /* 8-bit chip version, inside TxConfig */
177 RxConfig = 0x44, /* Rx configuration */
178 RxMissed = 0x4C, /* 24 bits valid, write clears */
179 Cfg9346 = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
180 Config1 = 0x52, /* Config1 */
181 Config3 = 0x59, /* Config3 */
182 Config4 = 0x5A, /* Config4 */
183 MultiIntr = 0x5C, /* Multiple interrupt select */
184 BasicModeCtrl = 0x62, /* MII BMCR */
185 BasicModeStatus = 0x64, /* MII BMSR */
186 NWayAdvert = 0x66, /* MII ADVERTISE */
187 NWayLPAR = 0x68, /* MII LPA */
188 NWayExpansion = 0x6A, /* MII Expansion */
189 Config5 = 0xD8, /* Config5 */
190 TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
191 RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
192 CpCmd = 0xE0, /* C+ Command register (C+ mode only) */
193 IntrMitigate = 0xE2, /* rx/tx interrupt mitigation control */
194 RxRingAddr = 0xE4, /* 64-bit start addr of Rx ring */
195 TxThresh = 0xEC, /* Early Tx threshold */
196 OldRxBufAddr = 0x30, /* DMA address of Rx ring buffer (C mode) */
197 OldTSD0 = 0x10, /* DMA address of first Tx desc (C mode) */
198 	MIIRegister	= 0xFC, /* The MII register */
200 /* Tx and Rx status descriptors */
201 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
202 RingEnd = (1 << 30), /* End of descriptor ring */
203 FirstFrag = (1 << 29), /* First segment of a packet */
204 LastFrag = (1 << 28), /* Final segment of a packet */
205 LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
206 MSSShift = 16, /* MSS value position */
207 MSSMask = 0xfff, /* MSS value: 11 bits */
208 TxError = (1 << 23), /* Tx error summary */
209 RxError = (1 << 20), /* Rx error summary */
210 IPCS = (1 << 18), /* Calculate IP checksum */
211 UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
212 TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
213 TxVlanTag = (1 << 1), /* Add VLAN tag */
214 RxVlanTagged = (1 << 0), /* Rx VLAN tag available */
215 IPFail = (1 << 15), /* IP checksum failed */
216 UDPFail = (1 << 14), /* UDP/IP checksum failed */
217 TCPFail = (1 << 13), /* TCP/IP checksum failed */
218 NormalTxPoll = (1 << 6), /* One or more normal Tx packets to send */
219 PID1 = (1 << 17), /* 2 protocol id bits: 0==non-IP, */
220 PID0 = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
221 RxProtoTCP = 1,
222 RxProtoUDP = 2,
223 RxProtoIP = 3,
224 TxFIFOUnder = (1 << 25), /* Tx FIFO underrun */
225 TxOWC = (1 << 22), /* Tx Out-of-window collision */
226 TxLinkFail = (1 << 21), /* Link failed during Tx of packet */
227 TxMaxCol = (1 << 20), /* Tx aborted due to excessive collisions */
228 TxColCntShift = 16, /* Shift, to get 4-bit Tx collision cnt */
229 TxColCntMask = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
230 RxErrFrame = (1 << 27), /* Rx frame alignment error */
231 RxMcast = (1 << 26), /* Rx multicast packet rcv'd */
232 RxErrCRC = (1 << 18), /* Rx CRC error */
233 RxErrRunt = (1 << 19), /* Rx error, packet < 64 bytes */
234 RxErrLong = (1 << 21), /* Rx error, packet > 4096 bytes */
235 RxErrFIFO = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
237 /* StatsAddr register */
238 DumpStats = (1 << 3), /* Begin stats dump */
240 /* RxConfig register */
241 RxCfgFIFOShift = 13, /* Shift, to get Rx FIFO thresh value */
242 RxCfgDMAShift = 8, /* Shift, to get Rx Max DMA value */
243 AcceptErr = 0x20, /* Accept packets with CRC errors */
244 AcceptRunt = 0x10, /* Accept runt (<64 bytes) packets */
245 AcceptBroadcast = 0x08, /* Accept broadcast packets */
246 AcceptMulticast = 0x04, /* Accept multicast packets */
247 AcceptMyPhys = 0x02, /* Accept pkts with our MAC as dest */
248 AcceptAllPhys = 0x01, /* Accept all pkts w/ physical dest */
250 /* IntrMask / IntrStatus registers */
251 PciErr = (1 << 15), /* System error on the PCI bus */
252 TimerIntr = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
253 LenChg = (1 << 13), /* Cable length change */
254 SWInt = (1 << 8), /* Software-requested interrupt */
255 TxEmpty = (1 << 7), /* No Tx descriptors available */
256 RxFIFOOvr = (1 << 6), /* Rx FIFO Overflow */
257 LinkChg = (1 << 5), /* Packet underrun, or link change */
258 RxEmpty = (1 << 4), /* No Rx descriptors available */
259 TxErr = (1 << 3), /* Tx error */
260 TxOK = (1 << 2), /* Tx packet sent */
261 RxErr = (1 << 1), /* Rx error */
262 RxOK = (1 << 0), /* Rx packet received */
263 IntrResvd = (1 << 10), /* reserved, according to RealTek engineers,
264 but hardware likes to raise it */
266 IntrAll = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
267 RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
268 RxErr | RxOK | IntrResvd,
270 /* C mode command register */
271 CmdReset = (1 << 4), /* Enable to reset; self-clearing */
272 RxOn = (1 << 3), /* Rx mode enable */
273 TxOn = (1 << 2), /* Tx mode enable */
275 /* C+ mode command register */
276 RxVlanOn = (1 << 6), /* Rx VLAN de-tagging enable */
277 RxChkSum = (1 << 5), /* Rx checksum offload enable */
278 PCIDAC = (1 << 4), /* PCI Dual Address Cycle (64-bit PCI) */
279 PCIMulRW = (1 << 3), /* Enable PCI read/write multiple */
280 CpRxOn = (1 << 1), /* Rx mode enable */
281 CpTxOn = (1 << 0), /* Tx mode enable */
283 	/* Cfg9346 EEPROM control register */
284 Cfg9346_Lock = 0x00, /* Lock ConfigX/MII register access */
285 Cfg9346_Unlock = 0xC0, /* Unlock ConfigX/MII register access */
287 /* TxConfig register */
288 IFG = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
289 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
291 /* Early Tx Threshold register */
292 TxThreshMask = 0x3f, /* Mask bits 5-0 */
293 TxThreshMax = 2048, /* Max early Tx threshold */
295 /* Config1 register */
296 DriverLoaded = (1 << 5), /* Software marker, driver is loaded */
297 LWACT = (1 << 4), /* LWAKE active mode */
298 PMEnable = (1 << 0), /* Enable various PM features of chip */
300 /* Config3 register */
301 PARMEnable = (1 << 6), /* Enable auto-loading of PHY parms */
302 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
303 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
305 /* Config4 register */
306 LWPTN = (1 << 1), /* LWAKE Pattern */
307 LWPME = (1 << 4), /* LANWAKE vs PMEB */
309 /* Config5 register */
310 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
311 MWF = (1 << 5), /* Accept Multicast wakeup frame */
312 UWF = (1 << 4), /* Accept Unicast wakeup frame */
313 LANWake = (1 << 1), /* Enable LANWake signal */
314 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
316 /* mii register */
317 	MDO		= (1 << 26), /* MDIO data output */
318 	MDI		= (1 << 25), /* MDIO data input */
319 	MDC		= (1 << 24), /* MDIO clock */
320 MDM = (1 << 27),
322 cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
323 cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
324 cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
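	/* When NAPI is used (FAST_POLL not defined), cp_interrupt() narrows the
	   mask to cp_norx_intr_mask while Rx polling is scheduled, and
	   cp_rx_poll() restores cp_intr_mask once a poll round finishes under
	   quota. */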
327 static const unsigned int cp_rx_config =
328 (RX_FIFO_THRESH << RxCfgFIFOShift) |
329 (RX_DMA_BURST << RxCfgDMAShift);
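/* With the defaults above this evaluates to (5 << 13) | (4 << 8) = 0xa400;
   the Accept* bits from the RxConfig enum are OR'd in at runtime by
   __cp_set_rx_mode(). */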
331 struct cp_desc {
332 u32 opts1;
334 	/* We break this field into two 16-bit fields to get around endianness issues */
336 u16 vtag;
337 u16 opts2;
338 u64 addr;
341 struct cp_dma_stats {
342 u64 tx_ok;
343 u64 rx_ok;
344 u64 tx_err;
345 u32 rx_err;
346 u16 rx_fifo;
347 u16 frame_align;
348 u32 tx_ok_1col;
349 u32 tx_ok_mcol;
350 u64 rx_ok_phys;
351 u64 rx_ok_bcast;
352 u32 rx_ok_mcast;
353 u16 tx_abort;
354 u16 tx_underrun;
355 } __attribute__((packed));
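/* The fields above add up to exactly CP_STATS_SIZE (64) bytes:
   5 x u64 + 4 x u32 + 4 x u16 = 40 + 16 + 8 = 64, matching the DMA stats
   block the chip fills in after DumpStats is written to StatsAddr
   (see cp_get_ethtool_stats()). */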
357 struct cp_extra_stats {
358 unsigned long rx_frags;
361 struct cp_private {
362 void __iomem *regs;
363 struct net_device *dev;
364 spinlock_t lock;
365 u32 msg_enable;
367 struct pci_dev *pdev;
368 u32 rx_config;
369 u16 cpcmd;
371 struct net_device_stats net_stats;
372 struct cp_extra_stats cp_stats;
374 unsigned rx_head ____cacheline_aligned;
375 unsigned rx_tail;
376 struct cp_desc *rx_ring;
377 struct sk_buff *rx_skb[CP_RX_RING_SIZE];
379 unsigned tx_head ____cacheline_aligned;
380 unsigned tx_tail;
381 struct cp_desc *tx_ring;
382 struct sk_buff *tx_skb[CP_TX_RING_SIZE];
384 unsigned rx_buf_sz;
385 unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
387 #if CP_VLAN_TAG_USED
388 struct vlan_group *vlgrp;
389 #endif
390 dma_addr_t ring_dma;
392 struct mii_if_info mii_if;
395 #define cpr8(reg) readb(cp->regs + (reg))
396 #define cpr16(reg) readw(cp->regs + (reg))
397 #define cpr32(reg) readl(cp->regs + (reg))
398 #define cpw8(reg,val) writeb((val), cp->regs + (reg))
399 #define cpw16(reg,val) writew((val), cp->regs + (reg))
400 #define cpw32(reg,val) writel((val), cp->regs + (reg))
401 #define cpw8_f(reg,val) do { \
402 writeb((val), cp->regs + (reg)); \
403 readb(cp->regs + (reg)); \
404 } while (0)
405 #define cpw16_f(reg,val) do { \
406 writew((val), cp->regs + (reg)); \
407 readw(cp->regs + (reg)); \
408 } while (0)
409 #define cpw32_f(reg,val) do { \
410 writel((val), cp->regs + (reg)); \
411 readl(cp->regs + (reg)); \
412 } while (0)
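/* The _f ("flush") variants read the register straight back after writing
   it; on most platforms this forces the posted MMIO write out to the chip
   before the caller continues. */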
415 static void __cp_set_rx_mode (struct net_device *dev);
416 static void cp_tx (struct cp_private *cp);
417 static void cp_clean_rings (struct cp_private *cp);
418 #ifdef CONFIG_NET_POLL_CONTROLLER
419 static void cp_poll_controller(struct net_device *dev);
420 #endif
421 static int cp_get_eeprom_len(struct net_device *dev);
422 static int cp_get_eeprom(struct net_device *dev,
423 struct ethtool_eeprom *eeprom, u8 *data);
424 static int cp_set_eeprom(struct net_device *dev,
425 struct ethtool_eeprom *eeprom, u8 *data);
427 static struct pci_device_id cp_pci_tbl[] = {
428 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
429 #if defined(CONFIG_MTD_NETtel) || defined(CONFIG_SH_SECUREEDGE5410)
430 /* Bogus 8139 silicon reports 8129 without external PROM :-( */
431 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8129), },
432 #endif
433 { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
434 { },
436 MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
438 static struct {
439 const char str[ETH_GSTRING_LEN];
440 } ethtool_stats_keys[] = {
441 { "tx_ok" },
442 { "rx_ok" },
443 { "tx_err" },
444 { "rx_err" },
445 { "rx_fifo" },
446 { "frame_align" },
447 { "tx_ok_1col" },
448 { "tx_ok_mcol" },
449 { "rx_ok_phys" },
450 { "rx_ok_bcast" },
451 { "rx_ok_mcast" },
452 { "tx_abort" },
453 { "tx_underrun" },
454 { "rx_frags" },
458 #if CP_VLAN_TAG_USED
459 static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
461 struct cp_private *cp = netdev_priv(dev);
462 unsigned long flags;
464 spin_lock_irqsave(&cp->lock, flags);
465 cp->vlgrp = grp;
466 cp->cpcmd |= RxVlanOn;
467 cpw16(CpCmd, cp->cpcmd);
468 spin_unlock_irqrestore(&cp->lock, flags);
471 static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
473 struct cp_private *cp = netdev_priv(dev);
474 unsigned long flags;
476 spin_lock_irqsave(&cp->lock, flags);
477 cp->cpcmd &= ~RxVlanOn;
478 cpw16(CpCmd, cp->cpcmd);
479 if (cp->vlgrp)
480 cp->vlgrp->vlan_devices[vid] = NULL;
481 spin_unlock_irqrestore(&cp->lock, flags);
483 #endif /* CP_VLAN_TAG_USED */
485 static inline void cp_set_rxbufsize (struct cp_private *cp)
487 unsigned int mtu = cp->dev->mtu;
490 We need to ensure that the DMA buffers are bigger
491 than the rx fifo of the 8139C+, otherwise the DMA engine on
492 the chip can get confused and we cease to be able to receive
493 packets :-(.
495 It seems to get confused if the size of the packet we are receiving
496 	   is the same size as the DMA buffer.
498 /* MTU + ethernet header + FCS + optional VLAN tag */
499 if (mtu > (CP_RX_FIFO_SZ + CP_RX_DMA_MARGIN))
500 cp->rx_buf_sz = mtu + ETH_HLEN + 8;
501 else
502 cp->rx_buf_sz = (CP_RX_FIFO_SZ + CP_RX_DMA_MARGIN);
505 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
506 struct cp_desc *desc)
508 skb->protocol = eth_type_trans (skb, cp->dev);
510 cp->net_stats.rx_packets++;
511 cp->net_stats.rx_bytes += skb->len;
512 cp->dev->last_rx = jiffies;
514 #ifdef FAST_POLL
515 #if CP_VLAN_TAG_USED
516 if (cp->vlgrp && (le16_to_cpu(desc->opts2) & RxVlanTagged)) {
517 vlan_hwaccel_rx(skb, cp->vlgrp, be16_to_cpu(desc->vtag));
518 } else
519 #endif
520 netif_rx(skb);
521 #else /* FAST POLL */
522 #if CP_VLAN_TAG_USED
523 if (cp->vlgrp && (le16_to_cpu(desc->opts2) & RxVlanTagged)) {
524 vlan_hwaccel_receive_skb(skb, cp->vlgrp,
525 be16_to_cpu(desc->vtag));
526 } else
527 #endif
528 netif_receive_skb(skb);
529 #endif /* FAST POLL */
532 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
533 u32 status, u32 len)
535 if (netif_msg_rx_err (cp))
536 printk (KERN_DEBUG
537 "%s: rx err, slot %d status 0x%x len %d\n",
538 cp->dev->name, rx_tail, status, len);
539 cp->net_stats.rx_errors++;
540 if (status & RxErrFrame)
541 cp->net_stats.rx_frame_errors++;
542 if (status & RxErrCRC)
543 cp->net_stats.rx_crc_errors++;
544 if ((status & RxErrRunt) || (status & RxErrLong))
545 cp->net_stats.rx_length_errors++;
546 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
547 cp->net_stats.rx_length_errors++;
548 if (status & RxErrFIFO)
549 cp->net_stats.rx_fifo_errors++;
552 static inline unsigned int cp_rx_csum_ok (u32 status)
554 unsigned int protocol = (status >> 16) & 0x3;
556 if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
557 return 1;
558 else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
559 return 1;
560 else if ((protocol == RxProtoIP) && (!(status & IPFail)))
561 return 1;
562 return 0;
565 #ifdef FAST_POLL
566 static void cp_rx (struct cp_private *cp)
567 #else
568 static int cp_rx_poll (struct net_device *dev, int *budget)
569 #endif
571 #ifdef FAST_POLL
572 unsigned rx_work = 16;
573 struct net_device *dev = cp->dev;
574 #else
575 struct cp_private *cp = netdev_priv(dev);
576 unsigned rx_work = dev->quota;
577 unsigned rx;
578 #endif
579 unsigned rx_tail = cp->rx_tail;
580 int cng_level = 0;
582 #ifdef CONFIG_LEDMAN
583 ledman_cmd(LEDMAN_CMD_SET, (cp->dev->name[3] == '0') ? LEDMAN_LAN1_RX :
584 ((cp->dev->name[3] == '1') ? LEDMAN_LAN2_RX : LEDMAN_LAN3_RX));
585 #endif
587 #ifndef FAST_POLL
588 rx_status_loop:
589 rx = 0;
590 cpw16(IntrStatus, cp_rx_intr_mask);
591 #endif
593 while (1) {
594 u32 status, len;
595 dma_addr_t mapping;
596 struct sk_buff *skb, *new_skb;
597 struct cp_desc *desc;
598 unsigned buflen;
600 skb = cp->rx_skb[rx_tail];
601 BUG_ON(!skb);
603 desc = &cp->rx_ring[rx_tail];
604 status = le32_to_cpu(desc->opts1);
605 if (status & DescOwn)
606 break;
608 len = (status & 0x1fff) - 4;
609 mapping = le64_to_cpu(desc->addr);
611 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
612 /* we don't support incoming fragmented frames.
613 * instead, we attempt to ensure that the
614 * pre-allocated RX skbs are properly sized such
615 * that RX fragments are never encountered
617 cp_rx_err_acct(cp, rx_tail, status, len);
618 cp->net_stats.rx_dropped++;
619 cp->cp_stats.rx_frags++;
620 cng_level = NET_RX_SUCCESS;
621 goto rx_next;
624 if (status & (RxError | RxErrFIFO)) {
625 cp_rx_err_acct(cp, rx_tail, status, len);
626 cng_level = NET_RX_SUCCESS;
627 goto rx_next;
630 if (netif_msg_rx_status(cp))
631 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
632 dev->name, rx_tail, status, len);
634 buflen = cp->rx_buf_sz + RX_OFFSET;
635 new_skb = dev_alloc_skb (buflen);
636 if (!new_skb) {
637 cp->net_stats.rx_dropped++;
638 cng_level = NET_RX_DROP;
639 goto rx_next;
642 skb_reserve(new_skb, RX_OFFSET);
643 new_skb->dev = dev;
645 pci_unmap_single(cp->pdev, mapping,
646 buflen, PCI_DMA_FROMDEVICE);
648 /* Handle checksum offloading for incoming packets. */
649 if (cp_rx_csum_ok(status))
650 skb->ip_summed = CHECKSUM_UNNECESSARY;
651 else
652 skb->ip_summed = CHECKSUM_NONE;
654 skb_put(skb, len);
656 mapping = pci_map_single(cp->pdev, new_skb->data, buflen,
657 PCI_DMA_FROMDEVICE);
658 cp->rx_skb[rx_tail] = new_skb;
660 cp_rx_skb(cp, skb, desc);
661 #ifndef FAST_POLL
662 rx++;
663 #endif
665 rx_next:
666 cp->rx_ring[rx_tail].opts2 = 0;
667 cp->rx_ring[rx_tail].vtag = 0;
668 cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
669 if (rx_tail == (CP_RX_RING_SIZE - 1))
670 desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
671 cp->rx_buf_sz);
672 else
673 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
674 rx_tail = NEXT_RX(rx_tail);
676 if (cng_level == NET_RX_DROP || cng_level == NET_RX_CN_HIGH
677 || cng_level == NET_RX_CN_MOD) {
678 rx_work = 0;
679 break;
682 if (!rx_work--)
683 break;
686 cp->rx_tail = rx_tail;
688 #ifndef FAST_POLL
689 dev->quota -= rx;
690 *budget -= rx;
692 /* if we did not reach work limit, then we're done with
693 * this round of polling
695 if (rx_work) {
696 if (cpr16(IntrStatus) & cp_rx_intr_mask)
697 goto rx_status_loop;
699 local_irq_disable();
700 cpw16_f(IntrMask, cp_intr_mask);
701 __netif_rx_complete(dev);
702 local_irq_enable();
704 return 0; /* done */
707 return 1; /* not done */
708 #endif
711 static irqreturn_t cp_interrupt (int irq, void *dev_instance)
713 struct net_device *dev = dev_instance;
714 struct cp_private *cp;
715 u16 status;
717 if (unlikely(dev == NULL))
718 return IRQ_NONE;
719 cp = netdev_priv(dev);
721 status = cpr16(IntrStatus);
722 if (!status || (status == 0xFFFF))
723 return IRQ_NONE;
725 if (netif_msg_intr(cp))
726 printk(KERN_DEBUG "%s: intr, status %04x cmd %02lx cpcmd %04lx\n",
727 dev->name, status, (unsigned long)cpr8(Cmd),
728 (unsigned long)cpr16(CpCmd));
730 #ifdef FAST_POLL
731 cpw16_f(IntrStatus, status);
732 #else
733 cpw16(IntrStatus, status & ~cp_rx_intr_mask);
734 #endif
736 spin_lock(&cp->lock);
738 	/* close possible races with dev_close */
739 if (unlikely(!netif_running(dev))) {
740 cpw16(IntrMask, 0);
741 spin_unlock(&cp->lock);
742 return IRQ_HANDLED;
745 if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
746 #ifdef FAST_POLL
747 cp_rx(cp);
748 #else
749 if (netif_rx_schedule_prep(dev)) {
750 cpw16_f(IntrMask, cp_norx_intr_mask);
751 __netif_rx_schedule(dev);
753 #endif
755 if (status & (TxOK | TxErr | TxEmpty | SWInt))
756 cp_tx(cp);
757 if (status & LinkChg)
758 mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
760 spin_unlock(&cp->lock);
762 if (status & PciErr) {
763 u16 pci_status;
765 pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
766 pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
767 printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
768 dev->name, status, pci_status);
770 /* TODO: reset hardware */
773 return IRQ_HANDLED;
776 #ifdef CONFIG_NET_POLL_CONTROLLER
778 * Polling receive - used by netconsole and other diagnostic tools
779 * to allow network i/o with interrupts disabled.
781 static void cp_poll_controller(struct net_device *dev)
783 disable_irq(dev->irq);
784 cp_interrupt(dev->irq, dev);
785 enable_irq(dev->irq);
787 #endif
789 #ifdef FAST_POLL
790 static void fast_poll_8139cp(void *arg)
792 cp_interrupt (-1, arg);
794 #endif
796 static void cp_tx (struct cp_private *cp)
798 unsigned tx_head = cp->tx_head;
799 unsigned tx_tail = cp->tx_tail;
801 #ifdef CONFIG_LEDMAN
802 ledman_cmd(LEDMAN_CMD_SET, (cp->dev->name[3] == '0') ? LEDMAN_LAN1_TX :
803 ((cp->dev->name[3] == '1') ? LEDMAN_LAN2_TX : LEDMAN_LAN3_TX));
804 #endif
806 while (tx_tail != tx_head) {
807 struct cp_desc *txd = cp->tx_ring + tx_tail;
808 struct sk_buff *skb;
809 u32 status;
811 rmb();
812 status = le32_to_cpu(txd->opts1);
813 if (status & DescOwn)
814 break;
816 skb = cp->tx_skb[tx_tail];
817 BUG_ON(!skb);
819 pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr),
820 le32_to_cpu(txd->opts1) & 0xffff,
821 PCI_DMA_TODEVICE);
823 if (status & LastFrag) {
824 if (status & (TxError | TxFIFOUnder)) {
825 if (netif_msg_tx_err(cp))
826 printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
827 cp->dev->name, status);
828 cp->net_stats.tx_errors++;
829 if (status & TxOWC)
830 cp->net_stats.tx_window_errors++;
831 if (status & TxMaxCol)
832 cp->net_stats.tx_aborted_errors++;
833 if (status & TxLinkFail)
834 cp->net_stats.tx_carrier_errors++;
835 if (status & TxFIFOUnder)
836 cp->net_stats.tx_fifo_errors++;
837 } else {
838 cp->net_stats.collisions +=
839 ((status >> TxColCntShift) & TxColCntMask);
840 cp->net_stats.tx_packets++;
841 cp->net_stats.tx_bytes += skb->len;
842 if (netif_msg_tx_done(cp))
843 printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
845 dev_kfree_skb_irq(skb);
848 cp->tx_skb[tx_tail] = NULL;
850 tx_tail = NEXT_TX(tx_tail);
853 cp->tx_tail = tx_tail;
855 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
856 netif_wake_queue(cp->dev);
859 static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
861 struct cp_private *cp = netdev_priv(dev);
862 unsigned entry;
863 u32 eor, flags;
864 #if CP_VLAN_TAG_USED
865 int do_vlan = 0;
866 u32 vlan_tag = 0;
867 #endif
868 int mss = 0;
870 spin_lock_irq(&cp->lock);
872 /* This is a hard error, log it. */
873 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
874 netif_stop_queue(dev);
875 spin_unlock_irq(&cp->lock);
876 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
877 dev->name);
878 return 1;
881 #if CP_VLAN_TAG_USED
882 if (cp->vlgrp && vlan_tx_tag_present(skb)) {
883 vlan_tag = vlan_tx_tag_get(skb);
884 do_vlan = 1;
886 #endif
888 entry = cp->tx_head;
889 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
890 if (dev->features & NETIF_F_TSO)
891 mss = skb_shinfo(skb)->gso_size;
893 if (skb_shinfo(skb)->nr_frags == 0) {
894 struct cp_desc *txd = &cp->tx_ring[entry];
895 u32 len;
896 dma_addr_t mapping;
898 len = skb->len;
899 mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
900 CP_VLAN_TX_TAG(txd, vlan_tag, do_vlan);
901 txd->addr = cpu_to_le64(mapping);
902 wmb();
904 flags = eor | len | DescOwn | FirstFrag | LastFrag;
906 if (mss)
907 flags |= LargeSend | ((mss & MSSMask) << MSSShift);
908 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
909 const struct iphdr *ip = skb->nh.iph;
910 if (ip->protocol == IPPROTO_TCP)
911 flags |= IPCS | TCPCS;
912 else if (ip->protocol == IPPROTO_UDP)
913 flags |= IPCS | UDPCS;
914 else
915 WARN_ON(1); /* we need a WARN() */
918 txd->opts1 = cpu_to_le32(flags);
919 wmb();
921 cp->tx_skb[entry] = skb;
922 entry = NEXT_TX(entry);
923 } else {
924 struct cp_desc *txd;
925 u32 first_len, first_eor;
926 dma_addr_t first_mapping;
927 int frag, first_entry = entry;
928 const struct iphdr *ip = skb->nh.iph;
930 /* We must give this initial chunk to the device last.
931 * Otherwise we could race with the device.
933 first_eor = eor;
934 first_len = skb_headlen(skb);
935 first_mapping = pci_map_single(cp->pdev, skb->data,
936 first_len, PCI_DMA_TODEVICE);
937 cp->tx_skb[entry] = skb;
938 entry = NEXT_TX(entry);
940 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
941 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
942 u32 len;
943 u32 ctrl;
944 dma_addr_t mapping;
946 len = this_frag->size;
947 mapping = pci_map_single(cp->pdev,
948 ((void *) page_address(this_frag->page) +
949 this_frag->page_offset),
950 len, PCI_DMA_TODEVICE);
951 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
953 ctrl = eor | len | DescOwn;
955 if (mss)
956 ctrl |= LargeSend |
957 ((mss & MSSMask) << MSSShift);
958 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
959 if (ip->protocol == IPPROTO_TCP)
960 ctrl |= IPCS | TCPCS;
961 else if (ip->protocol == IPPROTO_UDP)
962 ctrl |= IPCS | UDPCS;
963 else
964 BUG();
967 if (frag == skb_shinfo(skb)->nr_frags - 1)
968 ctrl |= LastFrag;
970 txd = &cp->tx_ring[entry];
971 CP_VLAN_TX_TAG(txd, vlan_tag, do_vlan);
972 txd->addr = cpu_to_le64(mapping);
973 wmb();
975 txd->opts1 = cpu_to_le32(ctrl);
976 wmb();
978 cp->tx_skb[entry] = skb;
979 entry = NEXT_TX(entry);
982 txd = &cp->tx_ring[first_entry];
983 CP_VLAN_TX_TAG(txd, vlan_tag, do_vlan);
984 txd->addr = cpu_to_le64(first_mapping);
985 wmb();
987 if (skb->ip_summed == CHECKSUM_PARTIAL) {
988 if (ip->protocol == IPPROTO_TCP)
989 txd->opts1 = cpu_to_le32(first_eor | first_len |
990 FirstFrag | DescOwn |
991 IPCS | TCPCS);
992 else if (ip->protocol == IPPROTO_UDP)
993 txd->opts1 = cpu_to_le32(first_eor | first_len |
994 FirstFrag | DescOwn |
995 IPCS | UDPCS);
996 else
997 BUG();
998 } else
999 txd->opts1 = cpu_to_le32(first_eor | first_len |
1000 FirstFrag | DescOwn);
1001 wmb();
1003 cp->tx_head = entry;
1004 if (netif_msg_tx_queued(cp))
1005 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
1006 dev->name, entry, skb->len);
1007 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
1008 netif_stop_queue(dev);
1010 spin_unlock_irq(&cp->lock);
1012 cpw8(TxPoll, NormalTxPoll);
1013 dev->trans_start = jiffies;
1015 return 0;
1018 /* Set or clear the multicast filter for this adaptor.
1019 This routine is not state sensitive and need not be SMP locked. */
1021 static void __cp_set_rx_mode (struct net_device *dev)
1023 struct cp_private *cp = netdev_priv(dev);
1024 u32 mc_filter[2]; /* Multicast hash filter */
1025 int i, rx_mode;
1026 u32 tmp;
1028 /* Note: do not reorder, GCC is clever about common statements. */
1029 if (dev->flags & IFF_PROMISC) {
1030 /* Unconditionally log net taps. */
1031 rx_mode =
1032 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
1033 AcceptAllPhys;
1034 mc_filter[1] = mc_filter[0] = 0xffffffff;
1035 } else if ((dev->mc_count > multicast_filter_limit)
1036 || (dev->flags & IFF_ALLMULTI)) {
1037 /* Too many to filter perfectly -- accept all multicasts. */
1038 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1039 mc_filter[1] = mc_filter[0] = 0xffffffff;
1040 } else {
1041 struct dev_mc_list *mclist;
1042 rx_mode = AcceptBroadcast | AcceptMyPhys;
1043 mc_filter[1] = mc_filter[0] = 0;
1044 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1045 i++, mclist = mclist->next) {
1046 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1048 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1049 rx_mode |= AcceptMulticast;
1053 /* We can safely update without stopping the chip. */
1054 tmp = cp_rx_config | rx_mode;
1055 if (cp->rx_config != tmp) {
1056 cpw32_f (RxConfig, tmp);
1057 cp->rx_config = tmp;
1059 cpw32_f (MAR0 + 0, mc_filter[0]);
1060 cpw32_f (MAR0 + 4, mc_filter[1]);
1063 static void cp_set_rx_mode (struct net_device *dev)
1065 unsigned long flags;
1066 struct cp_private *cp = netdev_priv(dev);
1068 spin_lock_irqsave (&cp->lock, flags);
1069 __cp_set_rx_mode(dev);
1070 spin_unlock_irqrestore (&cp->lock, flags);
1073 static void __cp_get_stats(struct cp_private *cp)
1075 /* only lower 24 bits valid; write any value to clear */
1076 cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
1077 cpw32 (RxMissed, 0);
1080 static struct net_device_stats *cp_get_stats(struct net_device *dev)
1082 struct cp_private *cp = netdev_priv(dev);
1083 unsigned long flags;
1085 	/* The chip only needs to report frames it silently dropped. */
1086 spin_lock_irqsave(&cp->lock, flags);
1087 if (netif_running(dev) && netif_device_present(dev))
1088 __cp_get_stats(cp);
1089 spin_unlock_irqrestore(&cp->lock, flags);
1091 return &cp->net_stats;
1094 static void cp_stop_hw (struct cp_private *cp)
1096 cpw16(IntrStatus, ~(cpr16(IntrStatus)));
1097 cpw16_f(IntrMask, 0);
1098 cpw8(Cmd, 0);
1099 cpw16_f(CpCmd, 0);
1100 cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
1102 cp->rx_tail = 0;
1103 cp->tx_head = cp->tx_tail = 0;
1106 static void cp_reset_hw (struct cp_private *cp)
1108 unsigned work = 1000;
1110 cpw8(Cmd, CmdReset);
1112 while (work--) {
1113 if (!(cpr8(Cmd) & CmdReset))
1114 return;
1116 schedule_timeout_uninterruptible(10);
1119 printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
1122 static inline void cp_start_hw (struct cp_private *cp)
1124 cpw16(CpCmd, cp->cpcmd);
1125 cpw8(Cmd, RxOn | TxOn);
1128 static void cp_init_hw (struct cp_private *cp)
1130 struct net_device *dev = cp->dev;
1131 dma_addr_t ring_dma;
1133 cp_reset_hw(cp);
1135 cpw8_f (Cfg9346, Cfg9346_Unlock);
1137 /* Restore our idea of the MAC address. */
1138 cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
1139 cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
1141 cp_start_hw(cp);
1142 cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
1144 __cp_set_rx_mode(dev);
1145 cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
1147 cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
1148 /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
1149 cpw8(Config3, PARMEnable);
1150 cp->wol_enabled = 0;
1152 cpw8(Config5, cpr8(Config5) & PMEStatus);
1154 cpw32_f(HiTxRingAddr, 0);
1155 cpw32_f(HiTxRingAddr + 4, 0);
1157 ring_dma = cp->ring_dma;
1158 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
1159 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
1161 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
1162 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
1163 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
1165 cpw16(MultiIntr, 0);
1167 #ifndef FAST_POLL
1168 cpw16_f(IntrMask, cp_intr_mask);
1169 #endif
1171 cpw8_f(Cfg9346, Cfg9346_Lock);
1174 static int cp_refill_rx (struct cp_private *cp)
1176 unsigned i;
1178 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1179 struct sk_buff *skb;
1180 dma_addr_t mapping;
1182 skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
1183 if (!skb)
1184 goto err_out;
1186 skb->dev = cp->dev;
1187 skb_reserve(skb, RX_OFFSET);
1189 mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz,
1190 PCI_DMA_FROMDEVICE);
1191 cp->rx_skb[i] = skb;
1193 cp->rx_ring[i].opts2 = 0;
1194 cp->rx_ring[i].vtag = 0;
1195 cp->rx_ring[i].addr = cpu_to_le64(mapping);
1196 if (i == (CP_RX_RING_SIZE - 1))
1197 cp->rx_ring[i].opts1 =
1198 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1199 else
1200 cp->rx_ring[i].opts1 =
1201 cpu_to_le32(DescOwn | cp->rx_buf_sz);
1204 return 0;
1206 err_out:
1207 cp_clean_rings(cp);
1208 return -ENOMEM;
1211 static void cp_init_rings_index (struct cp_private *cp)
1213 cp->rx_tail = 0;
1214 cp->tx_head = cp->tx_tail = 0;
1217 static int cp_init_rings (struct cp_private *cp)
1219 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1220 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1222 cp_init_rings_index(cp);
1224 return cp_refill_rx (cp);
1227 static int cp_alloc_rings (struct cp_private *cp)
1229 void *mem;
1231 mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
1232 if (!mem)
1233 return -ENOMEM;
1235 cp->rx_ring = mem;
1236 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1238 return cp_init_rings(cp);
1241 static void cp_clean_rings (struct cp_private *cp)
1243 struct cp_desc *desc;
1244 unsigned i;
1246 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1247 if (cp->rx_skb[i]) {
1248 desc = cp->rx_ring + i;
1249 pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
1250 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1251 dev_kfree_skb(cp->rx_skb[i]);
1255 for (i = 0; i < CP_TX_RING_SIZE; i++) {
1256 if (cp->tx_skb[i]) {
1257 struct sk_buff *skb = cp->tx_skb[i];
1259 desc = cp->tx_ring + i;
1260 pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
1261 le32_to_cpu(desc->opts1) & 0xffff,
1262 PCI_DMA_TODEVICE);
1263 if (le32_to_cpu(desc->opts1) & LastFrag)
1264 dev_kfree_skb(skb);
1265 cp->net_stats.tx_dropped++;
1269 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1270 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1272 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1273 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
1276 static void cp_free_rings (struct cp_private *cp)
1278 cp_clean_rings(cp);
1279 pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
1280 cp->rx_ring = NULL;
1281 cp->tx_ring = NULL;
1284 static int cp_open (struct net_device *dev)
1286 struct cp_private *cp = netdev_priv(dev);
1287 int rc;
1289 if (netif_msg_ifup(cp))
1290 printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
1292 rc = cp_alloc_rings(cp);
1293 if (rc)
1294 return rc;
1296 cp_init_hw(cp);
1298 #ifndef FAST_POLL
1299 rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1300 if (rc)
1301 goto err_out_hw;
1302 #else
1303 fast_timer_add(fast_poll_8139cp, dev);
1304 #endif
1306 netif_carrier_off(dev);
1307 mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE);
1308 netif_start_queue(dev);
1310 return 0;
1312 #ifndef FAST_POLL
1313 err_out_hw:
1314 cp_stop_hw(cp);
1315 cp_free_rings(cp);
1316 return rc;
1317 #endif
1320 static int cp_close (struct net_device *dev)
1322 struct cp_private *cp = netdev_priv(dev);
1323 unsigned long flags;
1325 if (netif_msg_ifdown(cp))
1326 printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
1328 spin_lock_irqsave(&cp->lock, flags);
1330 netif_stop_queue(dev);
1331 netif_carrier_off(dev);
1333 cp_stop_hw(cp);
1335 spin_unlock_irqrestore(&cp->lock, flags);
1337 #ifndef FAST_POLL
1338 synchronize_irq(dev->irq);
1339 free_irq(dev->irq, dev);
1340 #else
1341 fast_timer_remove(fast_poll_8139cp, dev);
1342 #endif
1344 cp_free_rings(cp);
1345 return 0;
1348 #ifdef BROKEN
1349 static int cp_change_mtu(struct net_device *dev, int new_mtu)
1351 struct cp_private *cp = netdev_priv(dev);
1352 int rc;
1353 unsigned long flags;
1355 /* check for invalid MTU, according to hardware limits */
1356 if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
1357 return -EINVAL;
1359 /* if network interface not up, no need for complexity */
1360 if (!netif_running(dev)) {
1361 dev->mtu = new_mtu;
1362 cp_set_rxbufsize(cp); /* set new rx buf size */
1363 return 0;
1366 spin_lock_irqsave(&cp->lock, flags);
1368 cp_stop_hw(cp); /* stop h/w and free rings */
1369 cp_clean_rings(cp);
1371 dev->mtu = new_mtu;
1372 cp_set_rxbufsize(cp); /* set new rx buf size */
1374 rc = cp_init_rings(cp); /* realloc and restart h/w */
1375 cp_start_hw(cp);
1377 spin_unlock_irqrestore(&cp->lock, flags);
1379 return rc;
1381 #endif /* BROKEN */
1383 static const char mii_2_8139_map[8] = {
1384 BasicModeCtrl,
1385 BasicModeStatus,
1388 NWayAdvert,
1389 NWayLPAR,
1390 NWayExpansion,
1395 #ifdef CONFIG_8139CP_EXTERNAL_PHY
1397 /* MII serial management: mostly bogus for now. */
1398 /* Read and write the MII management registers using software-generated
1399 serial MDIO protocol.
1400    The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
1401 met by back-to-back PCI I/O cycles, but we insert a delay to avoid
1402 "overclocking" issues. */
1403 #define mdio_delay() cpr32(MIIRegister)
1405 #define MAX_PHYID (31)
1407 /* Synchronize the MII management interface by shifting 32 one bits out. */
1408 static void mdio_cp_sync (struct cp_private *cp) {
1409 int i;
1411 for (i = 32; i >= 0; i--) {
1412 cpw32 (MIIRegister, MDO|MDM);
1413 mdio_delay ();
1414 cpw32 (MIIRegister, MDO | MDC | MDM);
1415 mdio_delay ();
1418 #endif
1421 static int mdio_read(struct net_device *dev, int phy_id, int location)
1423 struct cp_private *cp = netdev_priv(dev);
1425 #ifdef CONFIG_8139CP_EXTERNAL_PHY
1426 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
1427 int i;
1428 int retval = 0;
1430 /* The CP can only use 32 external PHYs so try the internal */
1431 if (phy_id <= MAX_PHYID) {
1432 mdio_cp_sync (cp);
1433 /* Shift the read command bits out. */
1434 for (i = 15; i >= 0; i--) {
1435 int dataval = (mii_cmd & (1 << i)) ? MDO : 0;
1437 cpw32 (MIIRegister, dataval | MDM);
1438 mdio_delay ();
1439 cpw32 (MIIRegister, dataval | MDC | MDM);
1440 mdio_delay ();
1443 /* Read the two transition, 16 data, and wire-idle bits. */
1444 for (i = 19; i > 0; i--) {
1445 cpw32 (MIIRegister, 0);
1446 mdio_delay ();
1447 retval = (retval << 1) | ((cpr32 (MIIRegister) & MDI) ? 1 : 0);
1448 cpw32 (MIIRegister, MDC);
1449 mdio_delay ();
1451 return (retval >> 1) & 0xffff;
1453 #endif
1455 return location < 8 && mii_2_8139_map[location] ?
1456 readw(cp->regs + mii_2_8139_map[location]) : 0;
1460 static void mdio_write(struct net_device *dev, int phy_id, int location,
1461 int value)
1463 struct cp_private *cp = netdev_priv(dev);
1464 #ifdef CONFIG_8139CP_EXTERNAL_PHY
1465 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
1466 int i;
1468 	/* If we select a valid external phy */
1469 if (phy_id <= MAX_PHYID) {
1470 mdio_cp_sync (cp);
1472 /* Shift the command bits out. */
1473 for (i = 31; i >= 0; i--) {
1474 int dataval =
1475 (mii_cmd & (1 << i)) ? MDO : 0;
1476 cpw32 (MIIRegister, dataval | MDM);
1477 mdio_delay ();
1478 cpw32 (MIIRegister, dataval | MDC | MDM);
1479 mdio_delay ();
1481 /* Clear out extra bits. */
1482 for (i = 2; i > 0; i--) {
1483 cpw32 (MIIRegister, 0|MDM);
1484 mdio_delay ();
1485 cpw32 (MIIRegister, MDC|MDM);
1486 mdio_delay ();
1488 } else
1489 #endif
1490 if (location == 0) {
1491 cpw8(Cfg9346, Cfg9346_Unlock);
1492 cpw16(BasicModeCtrl, value);
1493 cpw8(Cfg9346, Cfg9346_Lock);
1494 } else if (location < 8 && mii_2_8139_map[location])
1495 cpw16(mii_2_8139_map[location], value);
1498 /* Set the ethtool Wake-on-LAN settings */
1499 static int netdev_set_wol (struct cp_private *cp,
1500 const struct ethtool_wolinfo *wol)
1502 u8 options;
1504 options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1505 /* If WOL is being disabled, no need for complexity */
1506 if (wol->wolopts) {
1507 if (wol->wolopts & WAKE_PHY) options |= LinkUp;
1508 if (wol->wolopts & WAKE_MAGIC) options |= MagicPacket;
1511 cpw8 (Cfg9346, Cfg9346_Unlock);
1512 cpw8 (Config3, options);
1513 cpw8 (Cfg9346, Cfg9346_Lock);
1515 options = 0; /* Paranoia setting */
1516 options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1517 /* If WOL is being disabled, no need for complexity */
1518 if (wol->wolopts) {
1519 if (wol->wolopts & WAKE_UCAST) options |= UWF;
1520 if (wol->wolopts & WAKE_BCAST) options |= BWF;
1521 if (wol->wolopts & WAKE_MCAST) options |= MWF;
1524 cpw8 (Config5, options);
1526 cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1528 return 0;
1531 /* Get the ethtool Wake-on-LAN settings */
1532 static void netdev_get_wol (struct cp_private *cp,
1533 struct ethtool_wolinfo *wol)
1535 u8 options;
1537 wol->wolopts = 0; /* Start from scratch */
1538 wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
1539 WAKE_MCAST | WAKE_UCAST;
1540 /* We don't need to go on if WOL is disabled */
1541 if (!cp->wol_enabled) return;
1543 options = cpr8 (Config3);
1544 if (options & LinkUp) wol->wolopts |= WAKE_PHY;
1545 if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
1547 options = 0; /* Paranoia setting */
1548 options = cpr8 (Config5);
1549 if (options & UWF) wol->wolopts |= WAKE_UCAST;
1550 if (options & BWF) wol->wolopts |= WAKE_BCAST;
1551 if (options & MWF) wol->wolopts |= WAKE_MCAST;
1554 static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1556 struct cp_private *cp = netdev_priv(dev);
1558 strcpy (info->driver, DRV_NAME);
1559 strcpy (info->version, DRV_VERSION);
1560 strcpy (info->bus_info, pci_name(cp->pdev));
1563 static int cp_get_regs_len(struct net_device *dev)
1565 return CP_REGS_SIZE;
1568 static int cp_get_stats_count (struct net_device *dev)
1570 return CP_NUM_STATS;
1573 static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1575 struct cp_private *cp = netdev_priv(dev);
1576 int rc;
1577 unsigned long flags;
1579 spin_lock_irqsave(&cp->lock, flags);
1580 rc = mii_ethtool_gset(&cp->mii_if, cmd);
1581 spin_unlock_irqrestore(&cp->lock, flags);
1583 return rc;
1586 static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1588 struct cp_private *cp = netdev_priv(dev);
1589 int rc;
1590 unsigned long flags;
1592 spin_lock_irqsave(&cp->lock, flags);
1593 rc = mii_ethtool_sset(&cp->mii_if, cmd);
1594 spin_unlock_irqrestore(&cp->lock, flags);
1596 return rc;
1599 static int cp_nway_reset(struct net_device *dev)
1601 struct cp_private *cp = netdev_priv(dev);
1602 return mii_nway_restart(&cp->mii_if);
1605 static u32 cp_get_msglevel(struct net_device *dev)
1607 struct cp_private *cp = netdev_priv(dev);
1608 return cp->msg_enable;
1611 static void cp_set_msglevel(struct net_device *dev, u32 value)
1613 struct cp_private *cp = netdev_priv(dev);
1614 cp->msg_enable = value;
1617 static u32 cp_get_rx_csum(struct net_device *dev)
1619 struct cp_private *cp = netdev_priv(dev);
1620 return (cpr16(CpCmd) & RxChkSum) ? 1 : 0;
1623 static int cp_set_rx_csum(struct net_device *dev, u32 data)
1625 struct cp_private *cp = netdev_priv(dev);
1626 u16 cmd = cp->cpcmd, newcmd;
1628 newcmd = cmd;
1630 if (data)
1631 newcmd |= RxChkSum;
1632 else
1633 newcmd &= ~RxChkSum;
1635 if (newcmd != cmd) {
1636 unsigned long flags;
1638 spin_lock_irqsave(&cp->lock, flags);
1639 cp->cpcmd = newcmd;
1640 cpw16_f(CpCmd, newcmd);
1641 spin_unlock_irqrestore(&cp->lock, flags);
1644 return 0;
1647 static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1648 void *p)
1650 struct cp_private *cp = netdev_priv(dev);
1651 unsigned long flags;
1653 if (regs->len < CP_REGS_SIZE)
1654 return /* -EINVAL */;
1656 regs->version = CP_REGS_VER;
1658 spin_lock_irqsave(&cp->lock, flags);
1659 memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1660 spin_unlock_irqrestore(&cp->lock, flags);
1663 static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1665 struct cp_private *cp = netdev_priv(dev);
1666 unsigned long flags;
1668 spin_lock_irqsave (&cp->lock, flags);
1669 netdev_get_wol (cp, wol);
1670 spin_unlock_irqrestore (&cp->lock, flags);
1673 static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1675 struct cp_private *cp = netdev_priv(dev);
1676 unsigned long flags;
1677 int rc;
1679 spin_lock_irqsave (&cp->lock, flags);
1680 rc = netdev_set_wol (cp, wol);
1681 spin_unlock_irqrestore (&cp->lock, flags);
1683 return rc;
1686 static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1688 switch (stringset) {
1689 case ETH_SS_STATS:
1690 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1691 break;
1692 default:
1693 BUG();
1694 break;
1698 static void cp_get_ethtool_stats (struct net_device *dev,
1699 struct ethtool_stats *estats, u64 *tmp_stats)
1701 struct cp_private *cp = netdev_priv(dev);
1702 struct cp_dma_stats *nic_stats;
1703 dma_addr_t dma;
1704 int i;
1706 nic_stats = pci_alloc_consistent(cp->pdev, sizeof(*nic_stats), &dma);
1707 if (!nic_stats)
1708 return;
1710 /* begin NIC statistics dump */
1711 cpw32(StatsAddr + 4, (u64)dma >> 32);
1712 cpw32(StatsAddr, ((u64)dma & DMA_32BIT_MASK) | DumpStats);
1713 cpr32(StatsAddr);
1715 for (i = 0; i < 1000; i++) {
1716 if ((cpr32(StatsAddr) & DumpStats) == 0)
1717 break;
1718 udelay(10);
1720 cpw32(StatsAddr, 0);
1721 cpw32(StatsAddr + 4, 0);
1722 cpr32(StatsAddr);
1724 i = 0;
1725 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1726 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1727 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1728 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1729 tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1730 tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1731 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1732 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1733 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1734 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1735 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1736 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1737 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1738 tmp_stats[i++] = cp->cp_stats.rx_frags;
1739 BUG_ON(i != CP_NUM_STATS);
1741 pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
1744 static const struct ethtool_ops cp_ethtool_ops = {
1745 .get_drvinfo = cp_get_drvinfo,
1746 .get_regs_len = cp_get_regs_len,
1747 .get_stats_count = cp_get_stats_count,
1748 .get_settings = cp_get_settings,
1749 .set_settings = cp_set_settings,
1750 .nway_reset = cp_nway_reset,
1751 .get_link = ethtool_op_get_link,
1752 .get_msglevel = cp_get_msglevel,
1753 .set_msglevel = cp_set_msglevel,
1754 .get_rx_csum = cp_get_rx_csum,
1755 .set_rx_csum = cp_set_rx_csum,
1756 .get_tx_csum = ethtool_op_get_tx_csum,
1757 .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
1758 .get_sg = ethtool_op_get_sg,
1759 .set_sg = ethtool_op_set_sg,
1760 .get_tso = ethtool_op_get_tso,
1761 .set_tso = ethtool_op_set_tso,
1762 .get_regs = cp_get_regs,
1763 .get_wol = cp_get_wol,
1764 .set_wol = cp_set_wol,
1765 .get_strings = cp_get_strings,
1766 .get_ethtool_stats = cp_get_ethtool_stats,
1767 .get_perm_addr = ethtool_op_get_perm_addr,
1768 .get_eeprom_len = cp_get_eeprom_len,
1769 .get_eeprom = cp_get_eeprom,
1770 .set_eeprom = cp_set_eeprom,
1773 static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1775 struct cp_private *cp = netdev_priv(dev);
1776 int rc;
1777 unsigned long flags;
1779 if (!netif_running(dev))
1780 return -EINVAL;
1782 spin_lock_irqsave(&cp->lock, flags);
1783 rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1784 spin_unlock_irqrestore(&cp->lock, flags);
1785 return rc;
1788 /* Serial EEPROM section. */
1790 /* EEPROM_Ctrl bits. */
1791 #define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
1792 #define EE_CS 0x08 /* EEPROM chip select. */
1793 #define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
1794 #define EE_WRITE_0 0x00
1795 #define EE_WRITE_1 0x02
1796 #define EE_DATA_READ 0x01 /* EEPROM chip data out. */
1797 #define EE_ENB (0x80 | EE_CS)
1799 /* Delay between EEPROM clock transitions.
1800    No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
1803 #define eeprom_delay() readl(ee_addr)
1805 /* The EEPROM commands include the always-set leading bit. */
1806 #define EE_EXTEND_CMD (4)
1807 #define EE_WRITE_CMD (5)
1808 #define EE_READ_CMD (6)
1809 #define EE_ERASE_CMD (7)
1811 #define EE_EWDS_ADDR (0)
1812 #define EE_WRAL_ADDR (1)
1813 #define EE_ERAL_ADDR (2)
1814 #define EE_EWEN_ADDR (3)
1816 #define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
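/* Worked example of the command encoding used by read_eeprom() below: with
   a 6-bit address width, reading word 7 (part of the MAC address) shifts
   out (EE_READ_CMD << 6) | 7 = 0x187 over 3 + 6 = 9 bits, MSB first:
   start bit 1, opcode 10, address 000111. */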
1818 static void eeprom_cmd_start(void __iomem *ee_addr)
1820 writeb (EE_ENB & ~EE_CS, ee_addr);
1821 writeb (EE_ENB, ee_addr);
1822 eeprom_delay ();
1825 static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1827 int i;
1829 /* Shift the command bits out. */
1830 for (i = cmd_len - 1; i >= 0; i--) {
1831 int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1832 writeb (EE_ENB | dataval, ee_addr);
1833 eeprom_delay ();
1834 writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1835 eeprom_delay ();
1837 writeb (EE_ENB, ee_addr);
1838 eeprom_delay ();
1841 static void eeprom_cmd_end(void __iomem *ee_addr)
1843 writeb (~EE_CS, ee_addr);
1844 eeprom_delay ();
1847 static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1848 int addr_len)
1850 int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1852 eeprom_cmd_start(ee_addr);
1853 eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1854 eeprom_cmd_end(ee_addr);
1857 static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1859 int i;
1860 u16 retval = 0;
1861 void __iomem *ee_addr = ioaddr + Cfg9346;
1862 int read_cmd = location | (EE_READ_CMD << addr_len);
1864 eeprom_cmd_start(ee_addr);
1865 eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1867 for (i = 16; i > 0; i--) {
1868 writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1869 eeprom_delay ();
1870 		retval =
1871 		    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 : 0);
1873 writeb (EE_ENB, ee_addr);
1874 eeprom_delay ();
1877 eeprom_cmd_end(ee_addr);
1879 return retval;
1883 static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1884 int addr_len)
1886 int i;
1887 void __iomem *ee_addr = ioaddr + Cfg9346;
1888 int write_cmd = location | (EE_WRITE_CMD << addr_len);
1890 eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1892 eeprom_cmd_start(ee_addr);
1893 eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1894 eeprom_cmd(ee_addr, val, 16);
1895 eeprom_cmd_end(ee_addr);
1897 eeprom_cmd_start(ee_addr);
1898 for (i = 0; i < 20000; i++)
1899 if (readb(ee_addr) & EE_DATA_READ)
1900 break;
1901 eeprom_cmd_end(ee_addr);
1903 eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1906 static int cp_get_eeprom_len(struct net_device *dev)
1908 struct cp_private *cp = netdev_priv(dev);
1909 int size;
1911 spin_lock_irq(&cp->lock);
1912 size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1913 spin_unlock_irq(&cp->lock);
1915 return size;
1918 static int cp_get_eeprom(struct net_device *dev,
1919 struct ethtool_eeprom *eeprom, u8 *data)
1921 struct cp_private *cp = netdev_priv(dev);
1922 unsigned int addr_len;
1923 u16 val;
1924 u32 offset = eeprom->offset >> 1;
1925 u32 len = eeprom->len;
1926 u32 i = 0;
1928 eeprom->magic = CP_EEPROM_MAGIC;
1930 spin_lock_irq(&cp->lock);
1932 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1934 if (eeprom->offset & 1) {
1935 val = read_eeprom(cp->regs, offset, addr_len);
1936 data[i++] = (u8)(val >> 8);
1937 offset++;
1940 while (i < len - 1) {
1941 val = read_eeprom(cp->regs, offset, addr_len);
1942 data[i++] = (u8)val;
1943 data[i++] = (u8)(val >> 8);
1944 offset++;
1947 if (i < len) {
1948 val = read_eeprom(cp->regs, offset, addr_len);
1949 data[i] = (u8)val;
1952 spin_unlock_irq(&cp->lock);
1953 return 0;
1956 static int cp_set_eeprom(struct net_device *dev,
1957 struct ethtool_eeprom *eeprom, u8 *data)
1959 struct cp_private *cp = netdev_priv(dev);
1960 unsigned int addr_len;
1961 u16 val;
1962 u32 offset = eeprom->offset >> 1;
1963 u32 len = eeprom->len;
1964 u32 i = 0;
1966 if (eeprom->magic != CP_EEPROM_MAGIC)
1967 return -EINVAL;
1969 spin_lock_irq(&cp->lock);
1971 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1973 if (eeprom->offset & 1) {
1974 val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1975 val |= (u16)data[i++] << 8;
1976 write_eeprom(cp->regs, offset, val, addr_len);
1977 offset++;
1980 while (i < len - 1) {
1981 val = (u16)data[i++];
1982 val |= (u16)data[i++] << 8;
1983 write_eeprom(cp->regs, offset, val, addr_len);
1984 offset++;
1987 if (i < len) {
1988 val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1989 val |= (u16)data[i];
1990 write_eeprom(cp->regs, offset, val, addr_len);
1993 spin_unlock_irq(&cp->lock);
1994 return 0;
1997 /* Put the board into D3cold state and wait for WakeUp signal */
1998 static void cp_set_d3_state (struct cp_private *cp)
2000 pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
2001 pci_set_power_state (cp->pdev, PCI_D3hot);
static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct cp_private *cp;
	int rc;
	void __iomem *regs;
	resource_size_t pciaddr;
	unsigned int i, pci_using_dac;
	u8 pci_rev;

#ifndef MODULE
	static int version_printed;
	if (version_printed++ == 0)
		printk("%s", version);
#endif

	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);

	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
	    (pdev->device == PCI_DEVICE_ID_REALTEK_8139 ||
	     pdev->device == PCI_DEVICE_ID_REALTEK_8129)
	    && pci_rev < 0x20) {
		dev_err(&pdev->dev,
			"This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
			pdev->vendor, pdev->device, pci_rev);
		dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct cp_private));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	cp = netdev_priv(dev);
	cp->pdev = pdev;
	cp->dev = dev;
	cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
	spin_lock_init (&cp->lock);
	cp->mii_if.dev = dev;
	cp->mii_if.mdio_read = mdio_read;
	cp->mii_if.mdio_write = mdio_write;
	cp->mii_if.phy_id = CP_INTERNAL_PHY;
#ifdef CONFIG_8139CP_EXTERNAL_PHY
	cp->mii_if.phy_id_mask = 0x3f;
#else
	cp->mii_if.phy_id_mask = 0x1f;
#endif
	cp->mii_if.reg_num_mask = 0x1f;
	cp_set_rxbufsize(cp);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_free;

	rc = pci_set_mwi(pdev);
	if (rc)
		goto err_out_disable;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_mwi;

	pciaddr = pci_resource_start(pdev, 1);
	if (!pciaddr) {
		rc = -EIO;
		dev_err(&pdev->dev, "no MMIO resource\n");
		goto err_out_res;
	}
	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
		rc = -EIO;
		dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
			(unsigned long long)pci_resource_len(pdev, 1));
		goto err_out_res;
	}

	/* Configure DMA attributes. */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
	} else {
		pci_using_dac = 0;

		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting.\n");
			goto err_out_res;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_err(&pdev->dev,
				"No usable consistent DMA configuration, "
				"aborting.\n");
			goto err_out_res;
		}
	}

	cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
		    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;

	regs = ioremap(pciaddr, CP_REGS_SIZE);
	if (!regs) {
		rc = -EIO;
		dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
			(unsigned long long)pci_resource_len(pdev, 1),
			(unsigned long long)pciaddr);
		goto err_out_res;
	}
	dev->base_addr = (unsigned long) regs;
	cp->regs = regs;

	cp_stop_hw(cp);

#if defined(CONFIG_MTD_NETtel) || defined(CONFIG_SH_SECUREEDGE5410) || defined(CONFIG_MTD_SNAPGEODE)
	/* Don't rely on the eeprom, get MAC from chip. */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = readb(regs + MAC0 + i);
#else
	{
		unsigned int addr_len;

		/* read MAC address from EEPROM */
		addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
		for (i = 0; i < 3; i++)
			((u16 *) (dev->dev_addr))[i] =
				le16_to_cpu (read_eeprom (regs, i + 7, addr_len));
	}
#endif
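	/*
	 * The station address is stored in the EEPROM as three
	 * little-endian 16-bit words starting at word offset 7; the copy
	 * below records it as the permanent (factory) address as well.
	 */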
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->open = cp_open;
	dev->stop = cp_close;
	dev->set_multicast_list = cp_set_rx_mode;
	dev->hard_start_xmit = cp_start_xmit;
	dev->get_stats = cp_get_stats;
	dev->do_ioctl = cp_ioctl;
#ifndef FAST_POLL
	dev->poll = cp_rx_poll;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = cp_poll_controller;
#endif
	dev->weight = 16;	/* arbitrary? from NAPI_HOWTO.txt. */
#endif
#ifdef BROKEN
	dev->change_mtu = cp_change_mtu;
#endif
	dev->ethtool_ops = &cp_ethtool_ops;
#if 0
	dev->tx_timeout = cp_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#endif

#if CP_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = cp_vlan_rx_register;
	dev->vlan_rx_kill_vid = cp_vlan_rx_kill_vid;
#endif

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

#if 0 /* disabled by default until verified */
	dev->features |= NETIF_F_TSO;
#endif

	dev->irq = pdev->irq;

	rc = register_netdev(dev);
	if (rc)
		goto err_out_iomap;

	printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
		"%02x:%02x:%02x:%02x:%02x:%02x, "
		"IRQ %d\n",
		dev->name,
		dev->base_addr,
		dev->dev_addr[0], dev->dev_addr[1],
		dev->dev_addr[2], dev->dev_addr[3],
		dev->dev_addr[4], dev->dev_addr[5],
		dev->irq);

	pci_set_drvdata(pdev, dev);

	/* enable busmastering and memory-write-invalidate */
	pci_set_master(pdev);

#ifdef CONFIG_8139CP_EXTERNAL_PHY
	{
		/* Check if external phy exists. */
		int mii_status = mdio_read(dev, CONFIG_8139CP_PHY_NUM, 1);

		if (mii_status != 0xffff && mii_status != 0x0000)
			cp->mii_if.phy_id = CONFIG_8139CP_PHY_NUM;
	}
#endif

	if (cp->wol_enabled)
		cp_set_d3_state (cp);

	return 0;

err_out_iomap:
	iounmap(regs);
err_out_res:
	pci_release_regions(pdev);
err_out_mwi:
	pci_clear_mwi(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);
	return rc;
}
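
/*
 * cp_remove_one tears things down in roughly the reverse order of
 * cp_init_one: unregister the interface, unmap the registers, undo any
 * WOL power state, release the PCI regions and disable the device.
 */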
static void cp_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cp_private *cp = netdev_priv(dev);

	unregister_netdev(dev);
	iounmap(cp->regs);
	if (cp->wol_enabled)
		pci_set_power_state (pdev, PCI_D0);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
#ifdef CONFIG_PM
static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_device_detach (dev);
	netif_stop_queue (dev);

	spin_lock_irqsave (&cp->lock, flags);
	/* Disable Rx and Tx */
	cpw16 (IntrMask, 0);
	cpw8 (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));	/* clear both enable bits */

	spin_unlock_irqrestore (&cp->lock, flags);
	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
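
/*
 * Resume path: bring the device back to D0, restore its PCI config space
 * and disable PME, then rebuild the ring indices and hardware state
 * before restarting the queue and rechecking the link.
 */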
static int cp_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_device_attach (dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	/* FIXME: things may go wrong if the Rx ring buffer is depleted */
	cp_init_rings_index (cp);
	cp_init_hw (cp);
	netif_start_queue (dev);

	spin_lock_irqsave (&cp->lock, flags);

	mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);

	spin_unlock_irqrestore (&cp->lock, flags);

	return 0;
}
#endif /* CONFIG_PM */
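
/*
 * Driver glue: the pci_driver below ties the probe/remove (and, with
 * CONFIG_PM, suspend/resume) callbacks to the device IDs in cp_pci_tbl;
 * cp_init/cp_exit simply register and unregister it with the PCI core.
 */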
static struct pci_driver cp_driver = {
	.name		= DRV_NAME,
	.id_table	= cp_pci_tbl,
	.probe		= cp_init_one,
	.remove		= cp_remove_one,
#ifdef CONFIG_PM
	.resume		= cp_resume,
	.suspend	= cp_suspend,
#endif
};
static int __init cp_init (void)
{
#ifdef MODULE
	printk("%s", version);
#endif
	return pci_register_driver(&cp_driver);
}

static void __exit cp_exit (void)
{
	pci_unregister_driver (&cp_driver);
}

module_init(cp_init);
module_exit(cp_exit);