/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
/*
	Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>

	Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
	Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
	Copyright 2001 Manfred Spraul [natsemi.c]
	Copyright 1999-2001 by Donald Becker. [natsemi.c]
	Written 1997-2001 by Donald Becker. [8139too.c]
	Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	See the file COPYING in this distribution for more information.

	Contributors:

		Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
		PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
		LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>

	TODO:
	* Test Tx checksumming thoroughly
	* Implement dev->tx_timeout

	Low priority TODO:
	* Complete reset on PciErr
	* Consider Rx interrupt mitigation using TimerIntr
	* Investigate using skb->priority with h/w VLAN priority
	* Investigate using High Priority Tx Queue with skb->priority
	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
	* Implement Tx software interrupt mitigation via
	  Tx descriptor bit
	* The real minimum of CP_MIN_MTU is 4 bytes.  However,
	  for this to be supported, one must(?) turn on packet padding.
	* Support external MII transceivers (patch available)

	NOTES:
	* TX checksumming is considered experimental.  It is off by
	  default, use ethtool to turn it on.
*/
#define DRV_NAME		"8139cp"
#define DRV_VERSION		"1.2"
#define DRV_RELDATE		"Mar 22, 2004"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/cache.h>
#include <asm/uaccess.h>
/* VLAN tagging feature enable/disable */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define CP_VLAN_TAG_USED 1
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
	do { (tx_desc)->opts2 = (vlan_tag_value); } while (0)
#else
#define CP_VLAN_TAG_USED 0
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
	do { (tx_desc)->opts2 = 0; } while (0)
#endif
/* These identify the driver base version and may not be removed. */
static char version[] =
KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static int multicast_filter_limit = 32;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
#define PFX			DRV_NAME ": "

#define FALSE 0
#define TRUE (!FALSE)

#define CP_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
				 NETIF_MSG_PROBE	| \
				 NETIF_MSG_LINK)
#define CP_NUM_STATS		14	/* struct cp_dma_stats, plus one */
#define CP_STATS_SIZE		64	/* size in bytes of DMA stats block */
#define CP_REGS_SIZE		(0xff + 1)
#define CP_REGS_VER		1	/* version 1 */
#define CP_RX_RING_SIZE		64
#define CP_TX_RING_SIZE		64
#define CP_RING_BYTES		\
		((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +	\
		 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +	\
		 CP_STATS_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (CP_TX_RING_SIZE - 1))
#define NEXT_RX(N)		(((N) + 1) & (CP_RX_RING_SIZE - 1))
#define TX_BUFFS_AVAIL(CP)					\
	(((CP)->tx_tail <= (CP)->tx_head) ?			\
	  (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
	  (CP)->tx_tail - (CP)->tx_head - 1)
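
/* Note: NEXT_TX/NEXT_RX rely on the ring sizes being powers of two, and
 * TX_BUFFS_AVAIL deliberately reports one slot less than the ring holds, so
 * tx_head == tx_tail always means "empty" rather than "full". */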
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer. */
#define CP_INTERNAL_PHY		32

/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH		5	/* Rx buffer level before first PCI xfer. */
#define RX_DMA_BURST		4	/* Maximum PCI burst, '4' is 256 */
#define TX_DMA_BURST		6	/* Maximum PCI burst, '6' is 1024 */
#define TX_EARLY_THRESH		256	/* Early Tx threshold, in bytes */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(6*HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define CP_MIN_MTU		60	/* TODO: allow lower, but pad */
#define CP_MAX_MTU		4096
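
/* Raising the MTU above ETH_DATA_LEN enlarges the receive buffers (see
 * cp_set_rxbufsize() below), which is why cp_change_mtu() must tear down and
 * refill the descriptor rings when the interface is running. */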
enum {
	/* NIC register offsets */
	MAC0		= 0x00,	/* Ethernet hardware address. */
	MAR0		= 0x08,	/* Multicast filter. */
	StatsAddr	= 0x10,	/* 64-bit start addr of 64-byte DMA stats blk */
	TxRingAddr	= 0x20,	/* 64-bit start addr of Tx ring */
	HiTxRingAddr	= 0x28,	/* 64-bit start addr of high priority Tx ring */
	Cmd		= 0x37,	/* Command register */
	IntrMask	= 0x3C,	/* Interrupt mask */
	IntrStatus	= 0x3E,	/* Interrupt status */
	TxConfig	= 0x40,	/* Tx configuration */
	ChipVersion	= 0x43,	/* 8-bit chip version, inside TxConfig */
	RxConfig	= 0x44,	/* Rx configuration */
	RxMissed	= 0x4C,	/* 24 bits valid, write clears */
	Cfg9346		= 0x50,	/* EEPROM select/control; Cfg reg [un]lock */
	Config1		= 0x52,	/* Config1 */
	Config3		= 0x59,	/* Config3 */
	Config4		= 0x5A,	/* Config4 */
	MultiIntr	= 0x5C,	/* Multiple interrupt select */
	BasicModeCtrl	= 0x62,	/* MII BMCR */
	BasicModeStatus	= 0x64,	/* MII BMSR */
	NWayAdvert	= 0x66,	/* MII ADVERTISE */
	NWayLPAR	= 0x68,	/* MII LPA */
	NWayExpansion	= 0x6A,	/* MII Expansion */
	Config5		= 0xD8,	/* Config5 */
	TxPoll		= 0xD9,	/* Tell chip to check Tx descriptors for work */
	RxMaxSize	= 0xDA,	/* Max size of an Rx packet (8169 only) */
	CpCmd		= 0xE0,	/* C+ Command register (C+ mode only) */
	IntrMitigate	= 0xE2,	/* rx/tx interrupt mitigation control */
	RxRingAddr	= 0xE4,	/* 64-bit start addr of Rx ring */
	TxThresh	= 0xEC,	/* Early Tx threshold */
	OldRxBufAddr	= 0x30,	/* DMA address of Rx ring buffer (C mode) */
	OldTSD0		= 0x10,	/* DMA address of first Tx desc (C mode) */

	/* Tx and Rx status descriptors */
	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
	RingEnd		= (1 << 30), /* End of descriptor ring */
	FirstFrag	= (1 << 29), /* First segment of a packet */
	LastFrag	= (1 << 28), /* Final segment of a packet */
	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
	MSSShift	= 16,	     /* MSS value position */
	MSSMask		= 0xfff,     /* MSS value: 11 bits */
	TxError		= (1 << 23), /* Tx error summary */
	RxError		= (1 << 20), /* Rx error summary */
	IPCS		= (1 << 18), /* Calculate IP checksum */
	UDPCS		= (1 << 17), /* Calculate UDP/IP checksum */
	TCPCS		= (1 << 16), /* Calculate TCP/IP checksum */
	TxVlanTag	= (1 << 17), /* Add VLAN tag */
	RxVlanTagged	= (1 << 16), /* Rx VLAN tag available */
	IPFail		= (1 << 15), /* IP checksum failed */
	UDPFail		= (1 << 14), /* UDP/IP checksum failed */
	TCPFail		= (1 << 13), /* TCP/IP checksum failed */
	NormalTxPoll	= (1 << 6),  /* One or more normal Tx packets to send */
	PID1		= (1 << 17), /* 2 protocol id bits:  0==non-IP, */
	PID0		= (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
	RxProtoUDP	= 1,
	RxProtoTCP	= 2,
	RxProtoIP	= 3,
	TxFIFOUnder	= (1 << 25), /* Tx FIFO underrun */
	TxOWC		= (1 << 22), /* Tx Out-of-window collision */
	TxLinkFail	= (1 << 21), /* Link failed during Tx of packet */
	TxMaxCol	= (1 << 20), /* Tx aborted due to excessive collisions */
	TxColCntShift	= 16,	     /* Shift, to get 4-bit Tx collision cnt */
	TxColCntMask	= 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
	RxErrFrame	= (1 << 27), /* Rx frame alignment error */
	RxMcast		= (1 << 26), /* Rx multicast packet rcv'd */
	RxErrCRC	= (1 << 18), /* Rx CRC error */
	RxErrRunt	= (1 << 19), /* Rx error, packet < 64 bytes */
	RxErrLong	= (1 << 21), /* Rx error, packet > 4096 bytes */
	RxErrFIFO	= (1 << 22), /* Rx error, FIFO overflowed, pkt bad */

	/* StatsAddr register */
	DumpStats	= (1 << 3),  /* Begin stats dump */

	/* RxConfig register */
	RxCfgFIFOShift	= 13,	     /* Shift, to get Rx FIFO thresh value */
	RxCfgDMAShift	= 8,	     /* Shift, to get Rx Max DMA value */
	AcceptErr	= 0x20,	     /* Accept packets with CRC errors */
	AcceptRunt	= 0x10,	     /* Accept runt (<64 bytes) packets */
	AcceptBroadcast	= 0x08,	     /* Accept broadcast packets */
	AcceptMulticast	= 0x04,	     /* Accept multicast packets */
	AcceptMyPhys	= 0x02,	     /* Accept pkts with our MAC as dest */
	AcceptAllPhys	= 0x01,	     /* Accept all pkts w/ physical dest */

	/* IntrMask / IntrStatus registers */
	PciErr		= (1 << 15), /* System error on the PCI bus */
	TimerIntr	= (1 << 14), /* Asserted when TCTR reaches TimerInt value */
	LenChg		= (1 << 13), /* Cable length change */
	SWInt		= (1 << 8),  /* Software-requested interrupt */
	TxEmpty		= (1 << 7),  /* No Tx descriptors available */
	RxFIFOOvr	= (1 << 6),  /* Rx FIFO Overflow */
	LinkChg		= (1 << 5),  /* Packet underrun, or link change */
	RxEmpty		= (1 << 4),  /* No Rx descriptors available */
	TxErr		= (1 << 3),  /* Tx error */
	TxOK		= (1 << 2),  /* Tx packet sent */
	RxErr		= (1 << 1),  /* Rx error */
	RxOK		= (1 << 0),  /* Rx packet received */
	IntrResvd	= (1 << 10), /* reserved, according to RealTek engineers,
					but hardware likes to raise it */

	IntrAll		= PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
			  RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
			  RxErr | RxOK | IntrResvd,
	/* C mode command register */
	CmdReset	= (1 << 4),  /* Enable to reset; self-clearing */
	RxOn		= (1 << 3),  /* Rx mode enable */
	TxOn		= (1 << 2),  /* Tx mode enable */

	/* C+ mode command register */
	RxVlanOn	= (1 << 6),  /* Rx VLAN de-tagging enable */
	RxChkSum	= (1 << 5),  /* Rx checksum offload enable */
	PCIDAC		= (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
	PCIMulRW	= (1 << 3),  /* Enable PCI read/write multiple */
	CpRxOn		= (1 << 1),  /* Rx mode enable */
	CpTxOn		= (1 << 0),  /* Tx mode enable */

	/* Cfg9346 EEPROM control register */
	Cfg9346_Lock	= 0x00,	     /* Lock ConfigX/MII register access */
	Cfg9346_Unlock	= 0xC0,	     /* Unlock ConfigX/MII register access */

	/* TxConfig register */
	IFG		= (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
	TxDMAShift	= 8,	     /* DMA burst value (0-7) is shift this many bits */

	/* Early Tx Threshold register */
	TxThreshMask	= 0x3f,	     /* Mask bits 5-0 */
	TxThreshMax	= 2048,	     /* Max early Tx threshold */

	/* Config1 register */
	DriverLoaded	= (1 << 5),  /* Software marker, driver is loaded */
	LWACT		= (1 << 4),  /* LWAKE active mode */
	PMEnable	= (1 << 0),  /* Enable various PM features of chip */

	/* Config3 register */
	PARMEnable	= (1 << 6),  /* Enable auto-loading of PHY parms */
	MagicPacket	= (1 << 5),  /* Wake up when receives a Magic Packet */
	LinkUp		= (1 << 4),  /* Wake up when the cable connection is re-established */

	/* Config4 register */
	LWPTN		= (1 << 1),  /* LWAKE Pattern */
	LWPME		= (1 << 4),  /* LANWAKE vs PMEB */

	/* Config5 register */
	BWF		= (1 << 6),  /* Accept Broadcast wakeup frame */
	MWF		= (1 << 5),  /* Accept Multicast wakeup frame */
	UWF		= (1 << 4),  /* Accept Unicast wakeup frame */
	LANWake		= (1 << 1),  /* Enable LANWake signal */
	PMEStatus	= (1 << 0),  /* PME status can be reset by PCI RST# */

	cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
	cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
	cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
};
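
/* cp_rx_intr_mask covers the receive events handled by the NAPI poll loop;
 * cp_norx_intr_mask is what stays enabled while a poll is scheduled, and the
 * union of the two is the normal operating mask programmed into IntrMask. */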
static const unsigned int cp_rx_config =
	  (RX_FIFO_THRESH << RxCfgFIFOShift) |
	  (RX_DMA_BURST << RxCfgDMAShift);
struct cp_desc {
	u32		opts1;
	u32		opts2;
	u64		addr;
};

struct ring_info {
	struct sk_buff		*skb;
	u32			len;
};

struct cp_dma_stats {
	/* ... */
} __attribute__((packed));

struct cp_extra_stats {
	unsigned long		rx_frags;
};

struct cp_private {
	void			__iomem *regs;
	struct net_device	*dev;
	spinlock_t		lock;
	u32			msg_enable;

	struct pci_dev		*pdev;
	u32			rx_config;
	u16			cpcmd;

	struct net_device_stats net_stats;
	struct cp_extra_stats	cp_stats;

	unsigned		rx_tail ____cacheline_aligned;
	struct cp_desc		*rx_ring;
	struct ring_info	rx_skb[CP_RX_RING_SIZE];
	unsigned		rx_buf_sz;

	unsigned		tx_head ____cacheline_aligned;
	unsigned		tx_tail;
	struct cp_desc		*tx_ring;
	struct ring_info	tx_skb[CP_TX_RING_SIZE];
	dma_addr_t		ring_dma;

	struct vlan_group	*vlgrp;

	unsigned int		wol_enabled : 1; /* Is Wake-on-LAN enabled? */

	struct mii_if_info	mii_if;
};
#define cpr8(reg)	readb(cp->regs + (reg))
#define cpr16(reg)	readw(cp->regs + (reg))
#define cpr32(reg)	readl(cp->regs + (reg))
#define cpw8(reg,val)	writeb((val), cp->regs + (reg))
#define cpw16(reg,val)	writew((val), cp->regs + (reg))
#define cpw32(reg,val)	writel((val), cp->regs + (reg))
#define cpw8_f(reg,val) do {			\
	writeb((val), cp->regs + (reg));	\
	readb(cp->regs + (reg));		\
	} while (0)
#define cpw16_f(reg,val) do {			\
	writew((val), cp->regs + (reg));	\
	readw(cp->regs + (reg));		\
	} while (0)
#define cpw32_f(reg,val) do {			\
	writel((val), cp->regs + (reg));	\
	readl(cp->regs + (reg));		\
	} while (0)
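
/* The *_f ("flush") variants read the register straight back after writing
 * it, forcing the posted MMIO write out to the chip before the caller
 * continues. */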
static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev);
#endif
static int cp_get_eeprom_len(struct net_device *dev);
static int cp_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data);
static int cp_set_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data);

static struct pci_device_id cp_pci_tbl[] = {
	{ PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ },
};
MODULE_DEVICE_TABLE(pci, cp_pci_tbl);

static struct {
	const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	/* ... */
};
#if CP_VLAN_TAG_USED
static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	cp->vlgrp = grp;
	cp->cpcmd |= RxVlanOn;
	cpw16(CpCmd, cp->cpcmd);
	spin_unlock_irqrestore(&cp->lock, flags);
}

static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	cp->cpcmd &= ~RxVlanOn;
	cpw16(CpCmd, cp->cpcmd);
	if (cp->vlgrp)
		cp->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock_irqrestore(&cp->lock, flags);
}
#endif /* CP_VLAN_TAG_USED */
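
/* With CP_VLAN_TAG_USED the chip strips VLAN tags on receive (RxVlanOn) and
 * hands the tag back in the descriptor's opts2 field, while on transmit
 * CP_VLAN_TX_TAG() places the outgoing tag in opts2 for the hardware to
 * insert. */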
static inline void cp_set_rxbufsize (struct cp_private *cp)
{
	unsigned int mtu = cp->dev->mtu;

	if (mtu > ETH_DATA_LEN)
		/* MTU + ethernet header + FCS + optional VLAN tag */
		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
	else
		cp->rx_buf_sz = PKT_BUF_SZ;
}
static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
			      struct cp_desc *desc)
{
	skb->protocol = eth_type_trans (skb, cp->dev);

	cp->net_stats.rx_packets++;
	cp->net_stats.rx_bytes += skb->len;
	cp->dev->last_rx = jiffies;

	if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) {
		vlan_hwaccel_receive_skb(skb, cp->vlgrp,
					 be16_to_cpu(desc->opts2 & 0xffff));
	} else
		netif_receive_skb(skb);
}
static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
			    u32 status, u32 len)
{
	if (netif_msg_rx_err (cp))
		printk (KERN_DEBUG
			"%s: rx err, slot %d status 0x%x len %d\n",
			cp->dev->name, rx_tail, status, len);
	cp->net_stats.rx_errors++;
	if (status & RxErrFrame)
		cp->net_stats.rx_frame_errors++;
	if (status & RxErrCRC)
		cp->net_stats.rx_crc_errors++;
	if ((status & RxErrRunt) || (status & RxErrLong))
		cp->net_stats.rx_length_errors++;
	if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
		cp->net_stats.rx_length_errors++;
	if (status & RxErrFIFO)
		cp->net_stats.rx_fifo_errors++;
}
static inline unsigned int cp_rx_csum_ok (u32 status)
{
	unsigned int protocol = (status >> 16) & 0x3;

	if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
		return 1;
	else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
		return 1;
	else if ((protocol == RxProtoIP) && (!(status & IPFail)))
		return 1;
	return 0;
}
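
/* The helper above reads the two protocol-ID bits from the Rx status word
 * (see PID0/PID1) and cross-checks the matching *Fail bit, so
 * CHECKSUM_UNNECESSARY is only reported for IP/TCP/UDP frames the chip
 * actually verified. */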
static int cp_rx_poll (struct net_device *dev, int *budget)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned rx_tail = cp->rx_tail;
	unsigned rx_work = dev->quota;

rx_status_loop:
	cpw16(IntrStatus, cp_rx_intr_mask);

	while (1) {
		u32 status, len;
		dma_addr_t mapping;
		unsigned buflen;
		struct sk_buff *skb, *new_skb;
		struct cp_desc *desc;

		skb = cp->rx_skb[rx_tail].skb;

		desc = &cp->rx_ring[rx_tail];
		status = le32_to_cpu(desc->opts1);
		if (status & DescOwn)
			break;

		len = (status & 0x1fff) - 4;
		mapping = le64_to_cpu(desc->addr);

		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
			/* we don't support incoming fragmented frames.
			 * instead, we attempt to ensure that the
			 * pre-allocated RX skbs are properly sized such
			 * that RX fragments are never encountered
			 */
			cp_rx_err_acct(cp, rx_tail, status, len);
			cp->net_stats.rx_dropped++;
			cp->cp_stats.rx_frags++;
			goto rx_next;
		}

		if (status & (RxError | RxErrFIFO)) {
			cp_rx_err_acct(cp, rx_tail, status, len);
			goto rx_next;
		}

		if (netif_msg_rx_status(cp))
			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
			       dev->name, rx_tail, status, len);

		buflen = cp->rx_buf_sz + RX_OFFSET;
		new_skb = dev_alloc_skb (buflen);
		if (!new_skb) {
			cp->net_stats.rx_dropped++;
			goto rx_next;
		}

		skb_reserve(new_skb, RX_OFFSET);

		pci_unmap_single(cp->pdev, mapping,
				 buflen, PCI_DMA_FROMDEVICE);

		/* Handle checksum offloading for incoming packets. */
		if (cp_rx_csum_ok(status))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		mapping = pci_map_single(cp->pdev, new_skb->data, buflen,
					 PCI_DMA_FROMDEVICE);
		cp->rx_skb[rx_tail].skb = new_skb;

		cp_rx_skb(cp, skb, desc);

rx_next:
		cp->rx_ring[rx_tail].opts2 = 0;
		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
		if (rx_tail == (CP_RX_RING_SIZE - 1))
			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
						  cp->rx_buf_sz);
		else
			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
		rx_tail = NEXT_RX(rx_tail);

		if (!rx_work--)
			break;
	}

	cp->rx_tail = rx_tail;

	/* if we did not reach work limit, then we're done with
	 * this round of polling
	 */
	if (rx_work) {
		if (cpr16(IntrStatus) & cp_rx_intr_mask)
			goto rx_status_loop;

		cpw16_f(IntrMask, cp_intr_mask);
		__netif_rx_complete(dev);

		return 0;	/* done */
	}

	return 1;	/* not done */
}
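
/* Ordering note: the poll routine acknowledges only the Rx interrupt sources
 * at its top, and the full interrupt mask is restored just before
 * __netif_rx_complete(), so the chip stays quiet for Rx while polling is in
 * progress. */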
static irqreturn_t
cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct cp_private *cp;
	u16 status;

	if (unlikely(dev == NULL))
		return IRQ_NONE;
	cp = netdev_priv(dev);

	status = cpr16(IntrStatus);
	if (!status || (status == 0xFFFF))
		return IRQ_NONE;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
		       dev->name, status, cpr8(Cmd), cpr16(CpCmd));

	cpw16(IntrStatus, status & ~cp_rx_intr_mask);

	spin_lock(&cp->lock);

	/* close possible races with dev_close */
	if (unlikely(!netif_running(dev))) {
		cpw16(IntrMask, 0);
		spin_unlock(&cp->lock);
		return IRQ_HANDLED;
	}

	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
		if (netif_rx_schedule_prep(dev)) {
			cpw16_f(IntrMask, cp_norx_intr_mask);
			__netif_rx_schedule(dev);
		}

	if (status & (TxOK | TxErr | TxEmpty | SWInt))
		cp_tx(cp);
	if (status & LinkChg)
		mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);

	spin_unlock(&cp->lock);

	if (status & PciErr) {
		u16 pci_status;

		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
		printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
		       dev->name, status, pci_status);

		/* TODO: reset hardware */
	}

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void cp_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	cp_interrupt(dev->irq, dev, NULL);
	enable_irq(dev->irq);
}
#endif
static void cp_tx (struct cp_private *cp)
{
	unsigned tx_head = cp->tx_head;
	unsigned tx_tail = cp->tx_tail;

	while (tx_tail != tx_head) {
		struct cp_desc *txd = cp->tx_ring + tx_tail;
		struct sk_buff *skb;
		u32 status;

		status = le32_to_cpu(txd->opts1);
		if (status & DescOwn)
			break;

		skb = cp->tx_skb[tx_tail].skb;

		pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr),
				 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & (TxError | TxFIFOUnder)) {
				if (netif_msg_tx_err(cp))
					printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
					       cp->dev->name, status);
				cp->net_stats.tx_errors++;
				if (status & TxOWC)
					cp->net_stats.tx_window_errors++;
				if (status & TxMaxCol)
					cp->net_stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					cp->net_stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					cp->net_stats.tx_fifo_errors++;
			} else {
				cp->net_stats.collisions +=
					((status >> TxColCntShift) & TxColCntMask);
				cp->net_stats.tx_packets++;
				cp->net_stats.tx_bytes += skb->len;
				if (netif_msg_tx_done(cp))
					printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
			}
			dev_kfree_skb_irq(skb);
		}

		cp->tx_skb[tx_tail].skb = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	cp->tx_tail = tx_tail;

	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(cp->dev);
}
static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned entry;
	u32 eor, flags;
	u32 vlan_tag = 0;
	int mss = 0;

	spin_lock_irq(&cp->lock);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&cp->lock);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return 1;
	}

	if (cp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb));

	entry = cp->tx_head;
	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
	if (dev->features & NETIF_F_TSO)
		mss = skb_shinfo(skb)->gso_size;

	if (skb_shinfo(skb)->nr_frags == 0) {
		struct cp_desc *txd = &cp->tx_ring[entry];
		u32 len;
		dma_addr_t mapping;

		len = skb->len;
		mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(mapping);

		flags = eor | len | DescOwn | FirstFrag | LastFrag;

		if (mss)
			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
		else if (skb->ip_summed == CHECKSUM_HW) {
			const struct iphdr *ip = skb->nh.iph;
			if (ip->protocol == IPPROTO_TCP)
				flags |= IPCS | TCPCS;
			else if (ip->protocol == IPPROTO_UDP)
				flags |= IPCS | UDPCS;
			else
				WARN_ON(1);	/* we need a WARN() */
		}

		txd->opts1 = cpu_to_le32(flags);

		cp->tx_skb[entry].skb = skb;
		cp->tx_skb[entry].len = len;
		entry = NEXT_TX(entry);
	} else {
		struct cp_desc *txd;
		u32 first_len, first_eor;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;
		const struct iphdr *ip = skb->nh.iph;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_eor = eor;
		first_len = skb_headlen(skb);
		first_mapping = pci_map_single(cp->pdev, skb->data,
					       first_len, PCI_DMA_TODEVICE);
		cp->tx_skb[entry].skb = skb;
		cp->tx_skb[entry].len = first_len;
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len, ctrl;
			dma_addr_t mapping;

			len = this_frag->size;
			mapping = pci_map_single(cp->pdev,
						 ((void *) page_address(this_frag->page) +
						  this_frag->page_offset),
						 len, PCI_DMA_TODEVICE);
			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

			ctrl = eor | len | DescOwn;

			if (mss)
				ctrl |= LargeSend |
					((mss & MSSMask) << MSSShift);
			else if (skb->ip_summed == CHECKSUM_HW) {
				if (ip->protocol == IPPROTO_TCP)
					ctrl |= IPCS | TCPCS;
				else if (ip->protocol == IPPROTO_UDP)
					ctrl |= IPCS | UDPCS;
			}

			if (frag == skb_shinfo(skb)->nr_frags - 1)
				ctrl |= LastFrag;

			txd = &cp->tx_ring[entry];
			CP_VLAN_TX_TAG(txd, vlan_tag);
			txd->addr = cpu_to_le64(mapping);

			txd->opts1 = cpu_to_le32(ctrl);

			cp->tx_skb[entry].skb = skb;
			cp->tx_skb[entry].len = len;
			entry = NEXT_TX(entry);
		}

		txd = &cp->tx_ring[first_entry];
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(first_mapping);

		if (skb->ip_summed == CHECKSUM_HW) {
			if (ip->protocol == IPPROTO_TCP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | TCPCS);
			else if (ip->protocol == IPPROTO_UDP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | UDPCS);
		} else
			txd->opts1 = cpu_to_le32(first_eor | first_len |
						 FirstFrag | DescOwn);
	}

	cp->tx_head = entry;
	if (netif_msg_tx_queued(cp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	spin_unlock_irq(&cp->lock);

	cpw8(TxPoll, NormalTxPoll);
	dev->trans_start = jiffies;

	return 0;
}
/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked. */

static void __cp_set_rx_mode (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	u32 mc_filter[2];	/* Multicast hash filter */
	int i, rx_mode;
	u32 tmp;

	/* Note: do not reorder, GCC is clever about common statements. */
	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
			dev->name);
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	/* We can safely update without stopping the chip. */
	tmp = cp_rx_config | rx_mode;
	if (cp->rx_config != tmp) {
		cpw32_f (RxConfig, tmp);
		cp->rx_config = tmp;
	}
	cpw32_f (MAR0 + 0, mc_filter[0]);
	cpw32_f (MAR0 + 4, mc_filter[1]);
}

static void cp_set_rx_mode (struct net_device *dev)
{
	unsigned long flags;
	struct cp_private *cp = netdev_priv(dev);

	spin_lock_irqsave (&cp->lock, flags);
	__cp_set_rx_mode(dev);
	spin_unlock_irqrestore (&cp->lock, flags);
}
static void __cp_get_stats(struct cp_private *cp)
{
	/* only lower 24 bits valid; write any value to clear */
	cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
	cpw32 (RxMissed, 0);
}

static struct net_device_stats *cp_get_stats(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	/* The chip only needs to report frames it silently dropped. */
	spin_lock_irqsave(&cp->lock, flags);
	if (netif_running(dev) && netif_device_present(dev))
		__cp_get_stats(cp);
	spin_unlock_irqrestore(&cp->lock, flags);

	return &cp->net_stats;
}

static void cp_stop_hw (struct cp_private *cp)
{
	cpw16(IntrStatus, ~(cpr16(IntrStatus)));
	cpw16_f(IntrMask, 0);
	cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));

	cp->tx_head = cp->tx_tail = 0;
}
static void cp_reset_hw (struct cp_private *cp)
{
	unsigned work = 1000;

	cpw8(Cmd, CmdReset);

	while (work--) {
		if (!(cpr8(Cmd) & CmdReset))
			return;

		schedule_timeout_uninterruptible(10);
	}

	printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
}

static inline void cp_start_hw (struct cp_private *cp)
{
	cpw16(CpCmd, cp->cpcmd);
	cpw8(Cmd, RxOn | TxOn);
}
static void cp_init_hw (struct cp_private *cp)
{
	struct net_device *dev = cp->dev;
	dma_addr_t ring_dma;

	cp_reset_hw(cp);

	cpw8_f (Cfg9346, Cfg9346_Unlock);

	/* Restore our idea of the MAC address. */
	cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
	cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));

	cp_start_hw(cp);
	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

	__cp_set_rx_mode(dev);
	cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));

	cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
	/* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
	cpw8(Config3, PARMEnable);
	cp->wol_enabled = 0;

	cpw8(Config5, cpr8(Config5) & PMEStatus);

	cpw32_f(HiTxRingAddr, 0);
	cpw32_f(HiTxRingAddr + 4, 0);

	ring_dma = cp->ring_dma;
	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);

	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);

	cpw16(MultiIntr, 0);

	cpw16_f(IntrMask, cp_intr_mask);

	cpw8_f(Cfg9346, Cfg9346_Lock);
}

static int cp_refill_rx (struct cp_private *cp)
{
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		dma_addr_t mapping;

		skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
		if (!skb)
			goto err_out;

		skb_reserve(skb, RX_OFFSET);

		mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz,
					 PCI_DMA_FROMDEVICE);
		cp->rx_skb[i].skb = skb;

		cp->rx_ring[i].opts2 = 0;
		cp->rx_ring[i].addr = cpu_to_le64(mapping);
		if (i == (CP_RX_RING_SIZE - 1))
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
		else
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | cp->rx_buf_sz);
	}

	return 0;

err_out:
	cp_clean_rings(cp);
	return -ENOMEM;
}

static void cp_init_rings_index (struct cp_private *cp)
{
	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;
}

static int cp_init_rings (struct cp_private *cp)
{
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);

	cp_init_rings_index(cp);

	return cp_refill_rx (cp);
}

static int cp_alloc_rings (struct cp_private *cp)
{
	void *mem;

	mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
	if (!mem)
		return -ENOMEM;

	cp->rx_ring = mem;
	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];

	return cp_init_rings(cp);
}
static void cp_clean_rings (struct cp_private *cp)
{
	struct cp_desc *desc;
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		if (cp->rx_skb[i].skb) {
			desc = cp->rx_ring + i;
			pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(cp->rx_skb[i].skb);
		}
	}

	for (i = 0; i < CP_TX_RING_SIZE; i++) {
		if (cp->tx_skb[i].skb) {
			struct sk_buff *skb = cp->tx_skb[i].skb;

			desc = cp->tx_ring + i;
			pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
					 cp->tx_skb[i].len, PCI_DMA_TODEVICE);
			if (le32_to_cpu(desc->opts1) & LastFrag)
				dev_kfree_skb(skb);
			cp->net_stats.tx_dropped++;
		}
	}

	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);

	memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE);
	memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
}

static void cp_free_rings (struct cp_private *cp)
{
	cp_clean_rings(cp);
	pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
	cp->rx_ring = NULL;
	cp->tx_ring = NULL;
}
static int cp_open (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;

	if (netif_msg_ifup(cp))
		printk(KERN_DEBUG "%s: enabling interface\n", dev->name);

	rc = cp_alloc_rings(cp);
	if (rc)
		return rc;

	cp_init_hw(cp);

	rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc)
		goto err_out_hw;

	netif_carrier_off(dev);
	mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE);
	netif_start_queue(dev);

	return 0;

err_out_hw:
	cp_stop_hw(cp);
	cp_free_rings(cp);
	return rc;
}

static int cp_close (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (netif_msg_ifdown(cp))
		printk(KERN_DEBUG "%s: disabling interface\n", dev->name);

	spin_lock_irqsave(&cp->lock, flags);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	cp_stop_hw(cp);

	spin_unlock_irqrestore(&cp->lock, flags);

	synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);

	cp_free_rings(cp);
	return 0;
}
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc = 0;
	unsigned long flags;

	/* check for invalid MTU, according to hardware limits */
	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
		return -EINVAL;

	/* if network interface not up, no need for complexity */
	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		cp_set_rxbufsize(cp);	/* set new rx buf size */
		return 0;
	}

	spin_lock_irqsave(&cp->lock, flags);

	cp_stop_hw(cp);			/* stop h/w and free rings */
	cp_clean_rings(cp);

	dev->mtu = new_mtu;
	cp_set_rxbufsize(cp);		/* set new rx buf size */

	rc = cp_init_rings(cp);		/* realloc and restart h/w */
	cp_start_hw(cp);

	spin_unlock_irqrestore(&cp->lock, flags);

	return rc;
}
static const char mii_2_8139_map[8] = {
	/* ... */
};

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct cp_private *cp = netdev_priv(dev);

	return location < 8 && mii_2_8139_map[location] ?
	       readw(cp->regs + mii_2_8139_map[location]) : 0;
}

static void mdio_write(struct net_device *dev, int phy_id, int location,
		       int value)
{
	struct cp_private *cp = netdev_priv(dev);

	if (location == 0) {
		cpw8(Cfg9346, Cfg9346_Unlock);
		cpw16(BasicModeCtrl, value);
		cpw8(Cfg9346, Cfg9346_Lock);
	} else if (location < 8 && mii_2_8139_map[location])
		cpw16(mii_2_8139_map[location], value);
}
/* Set the ethtool Wake-on-LAN settings */
static int netdev_set_wol (struct cp_private *cp,
			   const struct ethtool_wolinfo *wol)
{
	u8 options;

	options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
	/* If WOL is being disabled, no need for complexity */
	if (wol->wolopts) {
		if (wol->wolopts & WAKE_PHY)	options |= LinkUp;
		if (wol->wolopts & WAKE_MAGIC)	options |= MagicPacket;
	}

	cpw8 (Cfg9346, Cfg9346_Unlock);
	cpw8 (Config3, options);
	cpw8 (Cfg9346, Cfg9346_Lock);

	options = 0; /* Paranoia setting */
	options = cpr8 (Config5) & ~(UWF | MWF | BWF);
	/* If WOL is being disabled, no need for complexity */
	if (wol->wolopts) {
		if (wol->wolopts & WAKE_UCAST)	options |= UWF;
		if (wol->wolopts & WAKE_BCAST)	options |= BWF;
		if (wol->wolopts & WAKE_MCAST)	options |= MWF;
	}

	cpw8 (Config5, options);

	cp->wol_enabled = (wol->wolopts) ? 1 : 0;

	return 0;
}

/* Get the ethtool Wake-on-LAN settings */
static void netdev_get_wol (struct cp_private *cp,
			    struct ethtool_wolinfo *wol)
{
	u8 options;

	wol->wolopts   = 0; /* Start from scratch */
	wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
			 WAKE_MCAST | WAKE_UCAST;
	/* We don't need to go on if WOL is disabled */
	if (!cp->wol_enabled) return;

	options = cpr8 (Config3);
	if (options & LinkUp)		wol->wolopts |= WAKE_PHY;
	if (options & MagicPacket)	wol->wolopts |= WAKE_MAGIC;

	options = 0; /* Paranoia setting */
	options = cpr8 (Config5);
	if (options & UWF)		wol->wolopts |= WAKE_UCAST;
	if (options & BWF)		wol->wolopts |= WAKE_BCAST;
	if (options & MWF)		wol->wolopts |= WAKE_MCAST;
}
static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct cp_private *cp = netdev_priv(dev);

	strcpy (info->driver, DRV_NAME);
	strcpy (info->version, DRV_VERSION);
	strcpy (info->bus_info, pci_name(cp->pdev));
}

static int cp_get_regs_len(struct net_device *dev)
{
	return CP_REGS_SIZE;
}

static int cp_get_stats_count (struct net_device *dev)
{
	return CP_NUM_STATS;
}
static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	rc = mii_ethtool_gset(&cp->mii_if, cmd);
	spin_unlock_irqrestore(&cp->lock, flags);

	return rc;
}

static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	rc = mii_ethtool_sset(&cp->mii_if, cmd);
	spin_unlock_irqrestore(&cp->lock, flags);

	return rc;
}

static int cp_nway_reset(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return mii_nway_restart(&cp->mii_if);
}
static u32 cp_get_msglevel(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return cp->msg_enable;
}

static void cp_set_msglevel(struct net_device *dev, u32 value)
{
	struct cp_private *cp = netdev_priv(dev);
	cp->msg_enable = value;
}

static u32 cp_get_rx_csum(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	return (cpr16(CpCmd) & RxChkSum) ? 1 : 0;
}

static int cp_set_rx_csum(struct net_device *dev, u32 data)
{
	struct cp_private *cp = netdev_priv(dev);
	u16 cmd = cp->cpcmd, newcmd;

	newcmd = cmd;
	if (data)
		newcmd |= RxChkSum;
	else
		newcmd &= ~RxChkSum;

	if (newcmd != cmd) {
		unsigned long flags;

		spin_lock_irqsave(&cp->lock, flags);
		cp->cpcmd = newcmd;
		cpw16_f(CpCmd, newcmd);
		spin_unlock_irqrestore(&cp->lock, flags);
	}

	return 0;
}
static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			void *p)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len < CP_REGS_SIZE)
		return /* -EINVAL */;

	regs->version = CP_REGS_VER;

	spin_lock_irqsave(&cp->lock, flags);
	memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
	spin_unlock_irqrestore(&cp->lock, flags);
}

static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave (&cp->lock, flags);
	netdev_get_wol (cp, wol);
	spin_unlock_irqrestore (&cp->lock, flags);
}

static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;
	int rc;

	spin_lock_irqsave (&cp->lock, flags);
	rc = netdev_set_wol (cp, wol);
	spin_unlock_irqrestore (&cp->lock, flags);

	return rc;
}
static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	}
}

static void cp_get_ethtool_stats (struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct cp_private *cp = netdev_priv(dev);
	struct cp_dma_stats *nic_stats;
	dma_addr_t dma;
	int i;

	nic_stats = pci_alloc_consistent(cp->pdev, sizeof(*nic_stats), &dma);
	if (!nic_stats)
		return;

	/* begin NIC statistics dump */
	cpw32(StatsAddr + 4, (u64)dma >> 32);
	cpw32(StatsAddr, ((u64)dma & DMA_32BIT_MASK) | DumpStats);

	for (i = 0; i < 1000; i++) {
		if ((cpr32(StatsAddr) & DumpStats) == 0)
			break;
		udelay(10);
	}
	cpw32(StatsAddr, 0);
	cpw32(StatsAddr + 4, 0);

	i = 0;
	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
	tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
	tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
	tmp_stats[i++] = cp->cp_stats.rx_frags;
	BUG_ON(i != CP_NUM_STATS);

	pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
}
static struct ethtool_ops cp_ethtool_ops = {
	.get_drvinfo		= cp_get_drvinfo,
	.get_regs_len		= cp_get_regs_len,
	.get_stats_count	= cp_get_stats_count,
	.get_settings		= cp_get_settings,
	.set_settings		= cp_set_settings,
	.nway_reset		= cp_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= cp_get_msglevel,
	.set_msglevel		= cp_set_msglevel,
	.get_rx_csum		= cp_get_rx_csum,
	.set_rx_csum		= cp_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum, /* local! */
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
	.get_regs		= cp_get_regs,
	.get_wol		= cp_get_wol,
	.set_wol		= cp_set_wol,
	.get_strings		= cp_get_strings,
	.get_ethtool_stats	= cp_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
	.get_eeprom_len		= cp_get_eeprom_len,
	.get_eeprom		= cp_get_eeprom,
	.set_eeprom		= cp_set_eeprom,
};
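
/* This ethtool_ops table is what connects userspace ethtool to the driver;
 * for instance the experimental Tx checksumming mentioned in the NOTES above
 * is toggled through the generic ethtool_op_set_tx_csum hook. */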
static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct cp_private *cp = netdev_priv(dev);
	int rc;
	unsigned long flags;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irqsave(&cp->lock, flags);
	rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irqrestore(&cp->lock, flags);
	return rc;
}
/* Serial EEPROM section. */

/* EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS		0x08	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
#define EE_WRITE_0	0x00
#define EE_WRITE_1	0x02
#define EE_DATA_READ	0x01	/* EEPROM chip data out. */
#define EE_ENB		(0x80 | EE_CS)

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33MHz PCI, but 66MHz may change this. */
#define eeprom_delay()	readl(ee_addr)

/* The EEPROM commands include the always-set leading bit. */
#define EE_EXTEND_CMD	(4)
#define EE_WRITE_CMD	(5)
#define EE_READ_CMD	(6)
#define EE_ERASE_CMD	(7)

#define EE_EWDS_ADDR	(0)
#define EE_WRAL_ADDR	(1)
#define EE_ERAL_ADDR	(2)
#define EE_EWEN_ADDR	(3)

#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139

static void eeprom_cmd_start(void __iomem *ee_addr)
{
	writeb (EE_ENB & ~EE_CS, ee_addr);
	writeb (EE_ENB, ee_addr);
	eeprom_delay ();
}

static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
{
	int i;

	/* Shift the command bits out. */
	for (i = cmd_len - 1; i >= 0; i--) {
		int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		writeb (EE_ENB | dataval, ee_addr);
		eeprom_delay ();
		writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		eeprom_delay ();
	}
	writeb (EE_ENB, ee_addr);
	eeprom_delay ();
}

static void eeprom_cmd_end(void __iomem *ee_addr)
{
	writeb (~EE_CS, ee_addr);
	eeprom_delay ();
}

static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
			      int addr_len)
{
	int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, cmd, 3 + addr_len);
	eeprom_cmd_end(ee_addr);
}
static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
{
	int i;
	u16 retval = 0;
	void __iomem *ee_addr = ioaddr + Cfg9346;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);

	for (i = 16; i > 0; i--) {
		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
		eeprom_delay ();
		retval =
		    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
				     0);
		writeb (EE_ENB, ee_addr);
		eeprom_delay ();
	}

	eeprom_cmd_end(ee_addr);

	return retval;
}

static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
			 int addr_len)
{
	int i;
	void __iomem *ee_addr = ioaddr + Cfg9346;
	int write_cmd = location | (EE_WRITE_CMD << addr_len);

	eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
	eeprom_cmd(ee_addr, val, 16);
	eeprom_cmd_end(ee_addr);

	eeprom_cmd_start(ee_addr);
	for (i = 0; i < 20000; i++)
		if (readb(ee_addr) & EE_DATA_READ)
			break;
	eeprom_cmd_end(ee_addr);

	eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
}
static int cp_get_eeprom_len(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	int size;

	spin_lock_irq(&cp->lock);
	size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
	spin_unlock_irq(&cp->lock);

	return size;
}

static int cp_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned int addr_len;
	u16 val;
	u32 offset = eeprom->offset >> 1;
	u32 len = eeprom->len;
	u32 i = 0;

	eeprom->magic = CP_EEPROM_MAGIC;

	spin_lock_irq(&cp->lock);

	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;

	if (eeprom->offset & 1) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i++] = (u8)(val >> 8);
		offset++;
	}

	while (i < len - 1) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i++] = (u8)val;
		data[i++] = (u8)(val >> 8);
		offset++;
	}

	if (i < len) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i] = (u8)val;
	}

	spin_unlock_irq(&cp->lock);
	return 0;
}
static int cp_set_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned int addr_len;
	u16 val;
	u32 offset = eeprom->offset >> 1;
	u32 len = eeprom->len;
	u32 i = 0;

	if (eeprom->magic != CP_EEPROM_MAGIC)
		return -EINVAL;

	spin_lock_irq(&cp->lock);

	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;

	if (eeprom->offset & 1) {
		val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
		val |= (u16)data[i++] << 8;
		write_eeprom(cp->regs, offset, val, addr_len);
		offset++;
	}

	while (i < len - 1) {
		val = (u16)data[i++];
		val |= (u16)data[i++] << 8;
		write_eeprom(cp->regs, offset, val, addr_len);
		offset++;
	}

	if (i < len) {
		val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
		val |= (u16)data[i];
		write_eeprom(cp->regs, offset, val, addr_len);
	}

	spin_unlock_irq(&cp->lock);
	return 0;
}
/* Put the board into D3cold state and wait for WakeUp signal */
static void cp_set_d3_state (struct cp_private *cp)
{
	pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
	pci_set_power_state (cp->pdev, PCI_D3hot);
}

static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct cp_private *cp;
	int rc;
	void __iomem *regs;
	resource_size_t pciaddr;
	unsigned int addr_len, i, pci_using_dac;
	u8 pci_rev;

	static int version_printed;
	if (version_printed++ == 0)
		printk("%s", version);

	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);

	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
		dev_err(&pdev->dev,
			"This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
			pdev->vendor, pdev->device, pci_rev);
		dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct cp_private));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	cp = netdev_priv(dev);
	cp->pdev = pdev;
	cp->dev = dev;
	cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
	spin_lock_init (&cp->lock);
	cp->mii_if.dev = dev;
	cp->mii_if.mdio_read = mdio_read;
	cp->mii_if.mdio_write = mdio_write;
	cp->mii_if.phy_id = CP_INTERNAL_PHY;
	cp->mii_if.phy_id_mask = 0x1f;
	cp->mii_if.reg_num_mask = 0x1f;
	cp_set_rxbufsize(cp);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_free;

	rc = pci_set_mwi(pdev);
	if (rc)
		goto err_out_disable;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_mwi;

	pciaddr = pci_resource_start(pdev, 1);
	if (!pciaddr) {
		rc = -EIO;
		dev_err(&pdev->dev, "no MMIO resource\n");
		goto err_out_res;
	}
	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
		rc = -EIO;
		dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
			(unsigned long long)pci_resource_len(pdev, 1));
		goto err_out_res;
	}

	/* Configure DMA attributes. */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
	} else {
		pci_using_dac = 0;

		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting.\n");
			goto err_out_res;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_err(&pdev->dev,
				"No usable consistent DMA configuration, "
				"aborting.\n");
			goto err_out_res;
		}
	}

	cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
		    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;

	regs = ioremap(pciaddr, CP_REGS_SIZE);
	if (!regs) {
		rc = -EIO;
		dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
			(unsigned long long)pci_resource_len(pdev, 1),
			(unsigned long long)pciaddr);
		goto err_out_res;
	}
	dev->base_addr = (unsigned long) regs;
	cp->regs = regs;

	/* read MAC address from EEPROM */
	addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
	for (i = 0; i < 3; i++)
		((u16 *) (dev->dev_addr))[i] =
		    le16_to_cpu (read_eeprom (regs, i + 7, addr_len));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->open = cp_open;
	dev->stop = cp_close;
	dev->set_multicast_list = cp_set_rx_mode;
	dev->hard_start_xmit = cp_start_xmit;
	dev->get_stats = cp_get_stats;
	dev->do_ioctl = cp_ioctl;
	dev->poll = cp_rx_poll;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = cp_poll_controller;
#endif
	dev->weight = 16;	/* arbitrary? from NAPI_HOWTO.txt. */
	dev->change_mtu = cp_change_mtu;
	dev->ethtool_ops = &cp_ethtool_ops;
#if 0
	dev->tx_timeout = cp_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#endif

#if CP_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = cp_vlan_rx_register;
	dev->vlan_rx_kill_vid = cp_vlan_rx_kill_vid;
#endif

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

#if 0 /* disabled by default until verified */
	dev->features |= NETIF_F_TSO;
#endif

	dev->irq = pdev->irq;

	rc = register_netdev(dev);
	if (rc)
		goto err_out_iomap;

	printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
		"%02x:%02x:%02x:%02x:%02x:%02x, "
		"IRQ %d\n",
		dev->name,
		dev->base_addr,
		dev->dev_addr[0], dev->dev_addr[1],
		dev->dev_addr[2], dev->dev_addr[3],
		dev->dev_addr[4], dev->dev_addr[5],
		dev->irq);

	pci_set_drvdata(pdev, dev);

	/* enable busmastering and memory-write-invalidate */
	pci_set_master(pdev);

	if (cp->wol_enabled)
		cp_set_d3_state (cp);

	return 0;

err_out_iomap:
	iounmap(regs);
err_out_res:
	pci_release_regions(pdev);
err_out_mwi:
	pci_clear_mwi(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);
	return rc;
}
static void cp_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cp_private *cp = netdev_priv(dev);

	unregister_netdev(dev);
	iounmap(cp->regs);
	if (cp->wol_enabled)
		pci_set_power_state (pdev, PCI_D0);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}

#ifdef CONFIG_PM
static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev;
	struct cp_private *cp;
	unsigned long flags;

	dev = pci_get_drvdata (pdev);
	cp  = netdev_priv(dev);

	if (!dev || !netif_running (dev)) return 0;

	netif_device_detach (dev);
	netif_stop_queue (dev);

	spin_lock_irqsave (&cp->lock, flags);

	/* Disable Rx and Tx */
	cpw16 (IntrMask, 0);
	cpw8  (Cmd, cpr8 (Cmd) & (~RxOn | ~TxOn));

	spin_unlock_irqrestore (&cp->lock, flags);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int cp_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_device_attach (dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	/* FIXME: sh*t may happen if the Rx ring buffer is depleted */
	cp_init_rings_index (cp);
	cp_init_hw (cp);
	netif_start_queue (dev);

	spin_lock_irqsave (&cp->lock, flags);

	mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);

	spin_unlock_irqrestore (&cp->lock, flags);

	return 0;
}
#endif /* CONFIG_PM */
static struct pci_driver cp_driver = {
	.name		= DRV_NAME,
	.id_table	= cp_pci_tbl,
	.probe		= cp_init_one,
	.remove		= cp_remove_one,
#ifdef CONFIG_PM
	.resume		= cp_resume,
	.suspend	= cp_suspend,
#endif
};

static int __init cp_init (void)
{
	printk("%s", version);
	return pci_module_init (&cp_driver);
}

static void __exit cp_exit (void)
{
	pci_unregister_driver (&cp_driver);
}

module_init(cp_init);
module_exit(cp_exit);