/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
/*
	Copyright 2001,2002 Jeff Garzik <jgarzik@pobox.com>

	Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
	Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
	Copyright 2001 Manfred Spraul [natsemi.c]
	Copyright 1999-2001 by Donald Becker. [natsemi.c]
	Written 1997-2001 by Donald Becker. [8139too.c]
	Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	See the file COPYING in this distribution for more information.

	Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
	PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
	LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>

	TODO, in rough priority order:
	* Test Tx checksumming thoroughly
	* Constants (module parms?) for Rx work limit
	* Complete reset on PciErr
	* Consider Rx interrupt mitigation using TimerIntr
	* Implement 8139C+ statistics dump; maybe not...
	  h/w stats can be reset only by software reset
	* Handle netif_rx return value
	* Investigate using skb->priority with h/w VLAN priority
	* Investigate using High Priority Tx Queue with skb->priority
	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
	* Implement Tx software interrupt mitigation via
	  Tx descriptor bit
	* The real minimum of CP_MIN_MTU is 4 bytes.  However,
	  for this to be supported, one must(?) turn on packet padding.
	* Support external MII transceivers

 */
#define DRV_NAME		"8139cp"
#define DRV_VERSION		"0.3.0"
#define DRV_RELDATE		"Sep 29, 2002"

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <asm/uaccess.h>
/* experimental TX checksumming feature enable/disable */
#define CP_TX_CHECKSUM		1

/* VLAN tagging feature enable/disable */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define CP_VLAN_TAG_USED 1
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
	do { (tx_desc)->opts2 = (vlan_tag_value); } while (0)
#else
#define CP_VLAN_TAG_USED 0
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
	do { (tx_desc)->opts2 = 0; } while (0)
#endif
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
MODULE_LICENSE("GPL");
static int debug = -1;
MODULE_PARM (debug, "i");
MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;
MODULE_PARM (multicast_filter_limit, "i");
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
#define PFX			DRV_NAME ": "

#define FALSE 0
#define TRUE (!FALSE)

#define CP_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
				 NETIF_MSG_PROBE	| \
				 NETIF_MSG_LINK)
#define CP_NUM_STATS		14	/* struct cp_dma_stats, plus one */
#define CP_STATS_SIZE		64	/* size in bytes of DMA stats block */
#define CP_REGS_SIZE		(0xff + 1)
#define CP_REGS_VER		1	/* version 1 */
#define CP_RX_RING_SIZE		64
#define CP_TX_RING_SIZE		64
#define CP_RING_BYTES		\
		((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +	\
		 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +	\
		 CP_STATS_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (CP_TX_RING_SIZE - 1))
#define NEXT_RX(N)		(((N) + 1) & (CP_RX_RING_SIZE - 1))
#define TX_BUFFS_AVAIL(CP)					\
	(((CP)->tx_tail <= (CP)->tx_head) ?			\
	  (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
	  (CP)->tx_tail - (CP)->tx_head - 1)
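
/*
 * Worked example of the ring arithmetic above: NEXT_TX/NEXT_RX rely on
 * the ring sizes being powers of two, so "+1 then mask" wraps 63 back
 * to 0.  For TX_BUFFS_AVAIL with CP_TX_RING_SIZE == 64:
 *
 *   tx_head == tx_tail == 0   (ring empty)   ->  0 + 63 - 0 == 63
 *   tx_head == 5, tx_tail == 3 (2 in flight) ->  3 + 63 - 5 == 61
 *
 * One slot is deliberately never used, so a completely full ring (head
 * catching up to tail) stays distinguishable from an empty one.
 */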
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
#define RX_OFFSET		2
#define CP_INTERNAL_PHY		32

/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH		5	/* Rx buffer level before first PCI xfer.  */
#define RX_DMA_BURST		4	/* Maximum PCI burst, '4' is 256 */
#define TX_DMA_BURST		6	/* Maximum PCI burst, '6' is 1024 */
#define TX_EARLY_THRESH		256	/* Early Tx threshold, in bytes */
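
/*
 * Example of the log_2(bytes)-4 encoding above: a field value v selects
 * 16 << v bytes, so RX_DMA_BURST == 4 means 16 << 4 == 256-byte bursts,
 * TX_DMA_BURST == 6 means 16 << 6 == 1024-byte bursts, and
 * RX_FIFO_THRESH == 5 starts Rx DMA once 16 << 5 == 512 bytes have
 * accumulated in the chip's FIFO.
 */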
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(6*HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define CP_MIN_MTU		60	/* TODO: allow lower, but pad */
#define CP_MAX_MTU		4096
151 /* NIC register offsets */
152 MAC0
= 0x00, /* Ethernet hardware address. */
153 MAR0
= 0x08, /* Multicast filter. */
154 StatsAddr
= 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
155 TxRingAddr
= 0x20, /* 64-bit start addr of Tx ring */
156 HiTxRingAddr
= 0x28, /* 64-bit start addr of high priority Tx ring */
157 Cmd
= 0x37, /* Command register */
158 IntrMask
= 0x3C, /* Interrupt mask */
159 IntrStatus
= 0x3E, /* Interrupt status */
160 TxConfig
= 0x40, /* Tx configuration */
161 ChipVersion
= 0x43, /* 8-bit chip version, inside TxConfig */
162 RxConfig
= 0x44, /* Rx configuration */
163 Cfg9346
= 0x50, /* EEPROM select/control; Cfg reg [un]lock */
164 Config1
= 0x52, /* Config1 */
165 Config3
= 0x59, /* Config3 */
166 Config4
= 0x5A, /* Config4 */
167 MultiIntr
= 0x5C, /* Multiple interrupt select */
168 BasicModeCtrl
= 0x62, /* MII BMCR */
169 BasicModeStatus
= 0x64, /* MII BMSR */
170 NWayAdvert
= 0x66, /* MII ADVERTISE */
171 NWayLPAR
= 0x68, /* MII LPA */
172 NWayExpansion
= 0x6A, /* MII Expansion */
173 Config5
= 0xD8, /* Config5 */
174 TxPoll
= 0xD9, /* Tell chip to check Tx descriptors for work */
175 RxMaxSize
= 0xDA, /* Max size of an Rx packet (8169 only) */
176 CpCmd
= 0xE0, /* C+ Command register (C+ mode only) */
177 IntrMitigate
= 0xE2, /* rx/tx interrupt mitigation control */
178 RxRingAddr
= 0xE4, /* 64-bit start addr of Rx ring */
179 TxThresh
= 0xEC, /* Early Tx threshold */
180 OldRxBufAddr
= 0x30, /* DMA address of Rx ring buffer (C mode) */
181 OldTSD0
= 0x10, /* DMA address of first Tx desc (C mode) */
	/* Tx and Rx status descriptors */
	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
	RingEnd		= (1 << 30), /* End of descriptor ring */
	FirstFrag	= (1 << 29), /* First segment of a packet */
	LastFrag	= (1 << 28), /* Final segment of a packet */
	TxError		= (1 << 23), /* Tx error summary */
	RxError		= (1 << 20), /* Rx error summary */
	IPCS		= (1 << 18), /* Calculate IP checksum */
	UDPCS		= (1 << 17), /* Calculate UDP/IP checksum */
	TCPCS		= (1 << 16), /* Calculate TCP/IP checksum */
	TxVlanTag	= (1 << 17), /* Add VLAN tag */
	RxVlanTagged	= (1 << 16), /* Rx VLAN tag available */
	IPFail		= (1 << 15), /* IP checksum failed */
	UDPFail		= (1 << 14), /* UDP/IP checksum failed */
	TCPFail		= (1 << 13), /* TCP/IP checksum failed */
	NormalTxPoll	= (1 << 6),  /* One or more normal Tx packets to send */
	PID1		= (1 << 17), /* 2 protocol id bits:  0==non-IP, */
	PID0		= (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
	RxProtoTCP	= 1,
	RxProtoUDP	= 2,
	RxProtoIP	= 3,
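
	/*
	 * Descriptor ownership hand-off: the driver fills in a
	 * descriptor and then sets DescOwn; the NIC clears DescOwn
	 * again once it is done with the buffer.  The Rx and Tx
	 * completion paths below therefore treat a set DescOwn bit
	 * as "hands off, still owned by the hardware".
	 */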
	TxFIFOUnder	= (1 << 25), /* Tx FIFO underrun */
	TxOWC		= (1 << 22), /* Tx Out-of-window collision */
	TxLinkFail	= (1 << 21), /* Link failed during Tx of packet */
	TxMaxCol	= (1 << 20), /* Tx aborted due to excessive collisions */
	TxColCntShift	= 16,	     /* Shift, to get 4-bit Tx collision cnt */
	TxColCntMask	= 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
	RxErrFrame	= (1 << 27), /* Rx frame alignment error */
	RxMcast		= (1 << 26), /* Rx multicast packet rcv'd */
	RxErrCRC	= (1 << 18), /* Rx CRC error */
	RxErrRunt	= (1 << 19), /* Rx error, packet < 64 bytes */
	RxErrLong	= (1 << 21), /* Rx error, packet > 4096 bytes */
	RxErrFIFO	= (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
	/* StatsAddr register */
	DumpStats	= (1 << 3),  /* Begin stats dump */

	/* RxConfig register */
	RxCfgFIFOShift	= 13,	     /* Shift, to get Rx FIFO thresh value */
	RxCfgDMAShift	= 8,	     /* Shift, to get Rx Max DMA value */
	AcceptErr	= 0x20,	     /* Accept packets with CRC errors */
	AcceptRunt	= 0x10,	     /* Accept runt (<64 bytes) packets */
	AcceptBroadcast	= 0x08,	     /* Accept broadcast packets */
	AcceptMulticast	= 0x04,	     /* Accept multicast packets */
	AcceptMyPhys	= 0x02,	     /* Accept pkts with our MAC as dest */
	AcceptAllPhys	= 0x01,	     /* Accept all pkts w/ physical dest */
	/* IntrMask / IntrStatus registers */
	PciErr		= (1 << 15), /* System error on the PCI bus */
	TimerIntr	= (1 << 14), /* Asserted when TCTR reaches TimerInt value */
	LenChg		= (1 << 13), /* Cable length change */
	SWInt		= (1 << 8),  /* Software-requested interrupt */
	TxEmpty		= (1 << 7),  /* No Tx descriptors available */
	RxFIFOOvr	= (1 << 6),  /* Rx FIFO Overflow */
	LinkChg		= (1 << 5),  /* Packet underrun, or link change */
	RxEmpty		= (1 << 4),  /* No Rx descriptors available */
	TxErr		= (1 << 3),  /* Tx error */
	TxOK		= (1 << 2),  /* Tx packet sent */
	RxErr		= (1 << 1),  /* Rx error */
	RxOK		= (1 << 0),  /* Rx packet received */
	IntrResvd	= (1 << 10), /* reserved, according to RealTek engineers,
					but hardware likes to raise it */

	IntrAll		= PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
			  RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
			  RxErr | RxOK | IntrResvd,
	/* C mode command register */
	CmdReset	= (1 << 4),  /* Enable to reset; self-clearing */
	RxOn		= (1 << 3),  /* Rx mode enable */
	TxOn		= (1 << 2),  /* Tx mode enable */

	/* C+ mode command register */
	RxVlanOn	= (1 << 6),  /* Rx VLAN de-tagging enable */
	RxChkSum	= (1 << 5),  /* Rx checksum offload enable */
	PCIDAC		= (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
	PCIMulRW	= (1 << 3),  /* Enable PCI read/write multiple */
	CpRxOn		= (1 << 1),  /* Rx mode enable */
	CpTxOn		= (1 << 0),  /* Tx mode enable */
	/* Cfg9346 EEPROM control register */
	Cfg9346_Lock	= 0x00,	     /* Lock ConfigX/MII register access */
	Cfg9346_Unlock	= 0xC0,	     /* Unlock ConfigX/MII register access */

	/* TxConfig register */
	IFG		= (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
	TxDMAShift	= 8,	     /* DMA burst value (0-7) is shift this many bits */

	/* Early Tx Threshold register */
	TxThreshMask	= 0x3f,	     /* Mask bits 5-0 */
	TxThreshMax	= 2048,	     /* Max early Tx threshold */

	/* Config1 register */
	DriverLoaded	= (1 << 5),  /* Software marker, driver is loaded */
	LWACT		= (1 << 4),  /* LWAKE active mode */
	PMEnable	= (1 << 0),  /* Enable various PM features of chip */

	/* Config3 register */
	PARMEnable	= (1 << 6),  /* Enable auto-loading of PHY parms */
	MagicPacket	= (1 << 5),  /* Wake up when receives a Magic Packet */
	LinkUp		= (1 << 4),  /* Wake up when the cable connection is re-established */

	/* Config4 register */
	LWPTN		= (1 << 1),  /* LWAKE Pattern */
	LWPME		= (1 << 4),  /* LANWAKE vs PMEB */

	/* Config5 register */
	BWF		= (1 << 6),  /* Accept Broadcast wakeup frame */
	MWF		= (1 << 5),  /* Accept Multicast wakeup frame */
	UWF		= (1 << 4),  /* Accept Unicast wakeup frame */
	LANWake		= (1 << 1),  /* Enable LANWake signal */
	PMEStatus	= (1 << 0),  /* PME status can be reset by PCI RST# */
};
static const unsigned int cp_intr_mask =
	PciErr | LinkChg |
	RxOK | RxErr | RxEmpty | RxFIFOOvr |
	TxOK | TxErr | TxEmpty;

static const unsigned int cp_rx_config =
	  (RX_FIFO_THRESH << RxCfgFIFOShift) |
	  (RX_DMA_BURST << RxCfgDMAShift);
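
/*
 * With the values above, cp_rx_config evaluates to
 * (5 << 13) | (4 << 8) == 0xa000 | 0x400 == 0xa400; the Accept*
 * bits from __cp_set_rx_mode() are OR-ed into the low byte at
 * runtime before the result is written to RxConfig.
 */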
struct cp_desc {
	u32		opts1;
	u32		opts2;
	u64		addr;
};

struct ring_info {
	struct sk_buff		*skb;
	dma_addr_t		mapping;
	unsigned		frag;
};

struct cp_dma_stats {
	u64			tx_ok;
	u64			rx_ok;
	u64			tx_err;
	u32			rx_err;
	u16			rx_fifo;
	u16			frame_align;
	u32			tx_ok_1col;
	u32			tx_ok_mcol;
	u64			rx_ok_phys;
	u64			rx_ok_bcast;
	u32			rx_ok_mcast;
	u16			tx_abort;
	u16			tx_underrun;
} __attribute__((packed));

struct cp_extra_stats {
	unsigned long		rx_frags;
};

struct cp_private {
	unsigned		tx_head;
	unsigned		tx_tail;
	unsigned		rx_tail;

	void			*regs;
	struct net_device	*dev;
	spinlock_t		lock;

	struct cp_desc		*rx_ring;
	struct cp_desc		*tx_ring;
	struct ring_info	tx_skb[CP_TX_RING_SIZE];
	struct ring_info	rx_skb[CP_RX_RING_SIZE];
	unsigned		rx_buf_sz;
	dma_addr_t		ring_dma;

#if CP_VLAN_TAG_USED
	struct vlan_group	*vlgrp;
#endif

	u32			msg_enable;

	struct net_device_stats net_stats;
	struct cp_extra_stats	cp_stats;
	struct cp_dma_stats	*nic_stats;
	dma_addr_t		nic_stats_dma;

	struct pci_dev		*pdev;
	u32			rx_config;

	struct sk_buff		*frag_skb;
	unsigned		dropping_frag : 1;
	unsigned		pci_using_dac : 1;
	unsigned int		board_type;

	unsigned int		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
	u32			power_state[16];

	struct mii_if_info	mii_if;
};
#define cpr8(reg)	readb(cp->regs + (reg))
#define cpr16(reg)	readw(cp->regs + (reg))
#define cpr32(reg)	readl(cp->regs + (reg))
#define cpw8(reg,val)	writeb((val), cp->regs + (reg))
#define cpw16(reg,val)	writew((val), cp->regs + (reg))
#define cpw32(reg,val)	writel((val), cp->regs + (reg))
#define cpw8_f(reg,val) do {			\
	writeb((val), cp->regs + (reg));	\
	readb(cp->regs + (reg));		\
	} while (0)
#define cpw16_f(reg,val) do {			\
	writew((val), cp->regs + (reg));	\
	readw(cp->regs + (reg));		\
	} while (0)
#define cpw32_f(reg,val) do {			\
	writel((val), cp->regs + (reg));	\
	readl(cp->regs + (reg));		\
	} while (0)
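
/*
 * The *_f ("flush") variants read the register straight back after
 * writing it.  MMIO writes may be posted (buffered) by the PCI bridge;
 * the read-back cannot complete until the write has actually reached
 * the chip, which makes these macros suitable for ordering-sensitive
 * operations such as unlocking Cfg9346 or acknowledging IntrStatus.
 */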
static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);
/* Card types */
enum {
	RTL8139Cp,
	RTL8169,
};

static struct cp_board_info {
	const char *name;
} cp_board_tbl[] __devinitdata = {
	/* RTL8139Cp */
	{ "RTL-8139C+" },

	/* RTL8169 */
	{ "RTL-8169" },
};

static struct pci_device_id cp_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139Cp },
	{ PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8169,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8169 },
	{ },
};
MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
static struct {
	const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "tx_ok" },
	{ "rx_ok" },
	{ "tx_err" },
	{ "rx_err" },
	{ "rx_fifo" },
	{ "frame_align" },
	{ "tx_ok_1col" },
	{ "tx_ok_mcol" },
	{ "rx_ok_phys" },
	{ "rx_ok_bcast" },
	{ "rx_ok_mcast" },
	{ "tx_abort" },
	{ "tx_underrun" },
	{ "rx_frags" },
};
static inline void cp_set_rxbufsize (struct cp_private *cp)
{
	unsigned int mtu = cp->dev->mtu;

	if (mtu > ETH_DATA_LEN)
		/* MTU + ethernet header + FCS + optional VLAN tag */
		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
	else
		cp->rx_buf_sz = PKT_BUF_SZ;
}
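
/*
 * Example: at the default MTU of 1500 (== ETH_DATA_LEN) the standard
 * PKT_BUF_SZ (1536) buffer is used; a jumbo MTU of 4000 instead gets
 * 4000 + 14 (Ethernet header) + 8 (FCS plus optional VLAN tag)
 * == 4022 bytes per Rx buffer.
 */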
static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
			      struct cp_desc *desc)
{
	skb->protocol = eth_type_trans (skb, cp->dev);

	cp->net_stats.rx_packets++;
	cp->net_stats.rx_bytes += skb->len;
	cp->dev->last_rx = jiffies;

#if CP_VLAN_TAG_USED
	if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) {
		vlan_hwaccel_rx(skb, cp->vlgrp, desc->opts2 & 0xffff);
	} else
#endif
		netif_rx(skb);
}
static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
			    u32 status, u32 len)
{
	if (netif_msg_rx_err (cp))
		printk (KERN_DEBUG
			"%s: rx err, slot %d status 0x%x len %d\n",
			cp->dev->name, rx_tail, status, len);
	cp->net_stats.rx_errors++;
	if (status & RxErrFrame)
		cp->net_stats.rx_frame_errors++;
	if (status & RxErrCRC)
		cp->net_stats.rx_crc_errors++;
	if (status & RxErrRunt)
		cp->net_stats.rx_length_errors++;
	if (status & RxErrLong)
		cp->net_stats.rx_length_errors++;
	if (status & RxErrFIFO)
		cp->net_stats.rx_fifo_errors++;
}
static void cp_rx_frag (struct cp_private *cp, unsigned rx_tail,
			struct sk_buff *skb, u32 status, u32 len)
{
	struct sk_buff *copy_skb, *frag_skb = cp->frag_skb;
	unsigned orig_len = frag_skb ? frag_skb->len : 0;
	unsigned target_len = orig_len + len;
	unsigned first_frag = status & FirstFrag;
	unsigned last_frag = status & LastFrag;

	if (netif_msg_rx_status (cp))
		printk (KERN_DEBUG "%s: rx %s%sfrag, slot %d status 0x%x len %d\n",
			cp->dev->name,
			cp->dropping_frag ? "dropping " : "",
			first_frag ? "first " :
			last_frag ? "last " : "",
			rx_tail, status, len);

	cp->cp_stats.rx_frags++;

	if (!frag_skb && !first_frag)
		cp->dropping_frag = 1;
	if (cp->dropping_frag)
		goto drop_frag;

	copy_skb = dev_alloc_skb (target_len + RX_OFFSET);
	if (!copy_skb) {
		printk(KERN_WARNING "%s: rx slot %d alloc failed\n",
		       cp->dev->name, rx_tail);

		cp->dropping_frag = 1;
drop_frag:
		if (frag_skb) {
			dev_kfree_skb_irq(frag_skb);
			cp->frag_skb = NULL;
		}
		if (last_frag) {
			cp->net_stats.rx_dropped++;
			cp->dropping_frag = 0;
		}
		return;
	}

	copy_skb->dev = cp->dev;
	skb_reserve(copy_skb, RX_OFFSET);
	skb_put(copy_skb, target_len);
	if (frag_skb) {
		memcpy(copy_skb->data, frag_skb->data, orig_len);
		dev_kfree_skb_irq(frag_skb);
	}
	pci_dma_sync_single(cp->pdev, cp->rx_skb[rx_tail].mapping,
			    len, PCI_DMA_FROMDEVICE);
	memcpy(copy_skb->data + orig_len, skb->data, len);

	copy_skb->ip_summed = CHECKSUM_NONE;

	if (last_frag) {
		if (status & (RxError | RxErrFIFO)) {
			cp_rx_err_acct(cp, rx_tail, status, len);
			dev_kfree_skb_irq(copy_skb);
		} else
			cp_rx_skb(cp, copy_skb, &cp->rx_ring[rx_tail]);
		cp->frag_skb = NULL;
	} else {
		cp->frag_skb = copy_skb;
	}
}
static inline unsigned int cp_rx_csum_ok (u32 status)
{
	unsigned int protocol = (status >> 16) & 0x3;

	if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
		return 1;
	else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
		return 1;
	else if ((protocol == RxProtoIP) && (!(status & IPFail)))
		return 1;
	return 0;
}
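
/*
 * Example: the PID1/PID0 bits defined earlier occupy bits 17:16 of the
 * Rx status word, so (status >> 16) & 0x3 recovers the protocol id that
 * cp_rx_csum_ok() compares against RxProtoTCP/RxProtoUDP/RxProtoIP.
 * A status of 0x00030000 with IPFail clear would thus be reported as a
 * good IP checksum.
 */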
static void cp_rx (struct cp_private *cp)
{
	unsigned rx_tail = cp->rx_tail;
	unsigned rx_work = 100;

	while (rx_work--) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *new_skb;
		struct cp_desc *desc;
		unsigned buflen;

		skb = cp->rx_skb[rx_tail].skb;
		if (!skb)
			BUG();

		desc = &cp->rx_ring[rx_tail];
		status = le32_to_cpu(desc->opts1);
		if (status & DescOwn)
			break;

		len = (status & 0x1fff) - 4;
		mapping = cp->rx_skb[rx_tail].mapping;

		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
			cp_rx_frag(cp, rx_tail, skb, status, len);
			goto rx_next;
		}

		if (status & (RxError | RxErrFIFO)) {
			cp_rx_err_acct(cp, rx_tail, status, len);
			goto rx_next;
		}

		if (netif_msg_rx_status(cp))
			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
			       cp->dev->name, rx_tail, status, len);

		buflen = cp->rx_buf_sz + RX_OFFSET;
		new_skb = dev_alloc_skb (buflen);
		if (!new_skb) {
			cp->net_stats.rx_dropped++;
			goto rx_next;
		}

		skb_reserve(new_skb, RX_OFFSET);
		new_skb->dev = cp->dev;

		pci_unmap_single(cp->pdev, mapping,
				 buflen, PCI_DMA_FROMDEVICE);

		/* Handle checksum offloading for incoming packets. */
		if (cp_rx_csum_ok(status))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb_put(skb, len);

		cp->rx_skb[rx_tail].mapping =
			pci_map_single(cp->pdev, new_skb->tail,
				       buflen, PCI_DMA_FROMDEVICE);
		cp->rx_skb[rx_tail].skb = new_skb;

		cp_rx_skb(cp, skb, desc);

rx_next:
		cp->rx_ring[rx_tail].opts2 = 0;
		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
		if (rx_tail == (CP_RX_RING_SIZE - 1))
			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
						  cp->rx_buf_sz);
		else
			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
		rx_tail = NEXT_RX(rx_tail);
	}

	if (!rx_work)
		printk(KERN_WARNING "%s: rx work limit reached\n", cp->dev->name);

	cp->rx_tail = rx_tail;
}
static void cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct cp_private *cp = dev->priv;
	u16 status;

	status = cpr16(IntrStatus);
	if (!status || (status == 0xFFFF))
		return;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
		       dev->name, status, cpr8(Cmd), cpr16(CpCmd));

	cpw16_f(IntrStatus, status);

	spin_lock(&cp->lock);

	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
		cp_rx(cp);
	if (status & (TxOK | TxErr | TxEmpty | SWInt))
		cp_tx(cp);
	if (status & LinkChg)
		mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);

	if (status & PciErr) {
		u16 pci_status;

		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
		printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
		       dev->name, status, pci_status);
	}

	spin_unlock(&cp->lock);
}
static void cp_tx (struct cp_private *cp)
{
	unsigned tx_head = cp->tx_head;
	unsigned tx_tail = cp->tx_tail;

	while (tx_tail != tx_head) {
		struct sk_buff *skb;
		u32 status;

		rmb();
		status = le32_to_cpu(cp->tx_ring[tx_tail].opts1);
		if (status & DescOwn)
			break;

		skb = cp->tx_skb[tx_tail].skb;
		if (!skb)
			BUG();

		pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
				 skb->len, PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & (TxError | TxFIFOUnder)) {
				if (netif_msg_tx_err(cp))
					printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
					       cp->dev->name, status);
				cp->net_stats.tx_errors++;
				if (status & TxOWC)
					cp->net_stats.tx_window_errors++;
				if (status & TxMaxCol)
					cp->net_stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					cp->net_stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					cp->net_stats.tx_fifo_errors++;
			} else {
				cp->net_stats.collisions +=
					((status >> TxColCntShift) & TxColCntMask);
				cp->net_stats.tx_packets++;
				cp->net_stats.tx_bytes += skb->len;
				if (netif_msg_tx_done(cp))
					printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
			}
			dev_kfree_skb_irq(skb);
		}

		cp->tx_skb[tx_tail].skb = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	cp->tx_tail = tx_tail;

	if (netif_queue_stopped(cp->dev) && (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1)))
		netif_wake_queue(cp->dev);
}
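
/*
 * The queue is only woken once more than MAX_SKB_FRAGS + 1 descriptors
 * are free: a worst-case scatter-gather packet needs one descriptor
 * for the linear head plus one per page fragment, so waking any
 * earlier could make the "Tx Ring full when queue awake" check in
 * cp_start_xmit() fire immediately.
 */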
static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct cp_private *cp = dev->priv;
	unsigned entry;
	u32 eor;
#if CP_VLAN_TAG_USED
	u32 vlan_tag = 0;
#endif

	spin_lock_irq(&cp->lock);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&cp->lock);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return 1;
	}

#if CP_VLAN_TAG_USED
	if (cp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = TxVlanTag | vlan_tx_tag_get(skb);
#endif

	entry = cp->tx_head;
	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
	if (skb_shinfo(skb)->nr_frags == 0) {
		struct cp_desc *txd = &cp->tx_ring[entry];
		u32 len;
		dma_addr_t mapping;

		len = skb->len;
		mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(mapping);
		wmb();

#ifdef CP_TX_CHECKSUM
		if (skb->ip_summed == CHECKSUM_HW) {
			const struct iphdr *ip = skb->nh.iph;
			if (ip->protocol == IPPROTO_TCP)
				txd->opts1 = cpu_to_le32(eor | len | DescOwn |
							 FirstFrag | LastFrag |
							 IPCS | TCPCS);
			else if (ip->protocol == IPPROTO_UDP)
				txd->opts1 = cpu_to_le32(eor | len | DescOwn |
							 FirstFrag | LastFrag |
							 IPCS | UDPCS);
			else
				BUG();
		} else
#endif
			txd->opts1 = cpu_to_le32(eor | len | DescOwn |
						 FirstFrag | LastFrag);
		wmb();

		cp->tx_skb[entry].skb = skb;
		cp->tx_skb[entry].mapping = mapping;
		cp->tx_skb[entry].frag = 0;
		entry = NEXT_TX(entry);
	} else {
		struct cp_desc *txd;
		u32 first_len, first_eor;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;
#ifdef CP_TX_CHECKSUM
		const struct iphdr *ip = skb->nh.iph;
#endif

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_eor = eor;
		first_len = skb_headlen(skb);
		first_mapping = pci_map_single(cp->pdev, skb->data,
					       first_len, PCI_DMA_TODEVICE);
		cp->tx_skb[entry].skb = skb;
		cp->tx_skb[entry].mapping = first_mapping;
		cp->tx_skb[entry].frag = 1;
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			u32 ctrl;
			dma_addr_t mapping;

			len = this_frag->size;
			mapping = pci_map_single(cp->pdev,
						 ((void *) page_address(this_frag->page) +
						  this_frag->page_offset),
						 len, PCI_DMA_TODEVICE);
			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
#ifdef CP_TX_CHECKSUM
			if (skb->ip_summed == CHECKSUM_HW) {
				ctrl = eor | len | DescOwn | IPCS;
				if (ip->protocol == IPPROTO_TCP)
					ctrl |= TCPCS;
				else if (ip->protocol == IPPROTO_UDP)
					ctrl |= UDPCS;
				else
					BUG();
			} else
#endif
				ctrl = eor | len | DescOwn;

			if (frag == skb_shinfo(skb)->nr_frags - 1)
				ctrl |= LastFrag;

			txd = &cp->tx_ring[entry];
			CP_VLAN_TX_TAG(txd, vlan_tag);
			txd->addr = cpu_to_le64(mapping);
			wmb();

			txd->opts1 = cpu_to_le32(ctrl);
			wmb();

			cp->tx_skb[entry].skb = skb;
			cp->tx_skb[entry].mapping = mapping;
			cp->tx_skb[entry].frag = frag + 2;
			entry = NEXT_TX(entry);
		}

		txd = &cp->tx_ring[first_entry];
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(first_mapping);
		wmb();

#ifdef CP_TX_CHECKSUM
		if (skb->ip_summed == CHECKSUM_HW) {
			if (ip->protocol == IPPROTO_TCP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | TCPCS);
			else if (ip->protocol == IPPROTO_UDP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | UDPCS);
			else
				BUG();
		} else
#endif
			txd->opts1 = cpu_to_le32(first_eor | first_len |
						 FirstFrag | DescOwn);
		wmb();
	}
	cp->tx_head = entry;
	if (netif_msg_tx_queued(cp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	spin_unlock_irq(&cp->lock);

	cpw8(TxPoll, NormalTxPoll);
	dev->trans_start = jiffies;

	return 0;
}
/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked. */

static void __cp_set_rx_mode (struct net_device *dev)
{
	struct cp_private *cp = dev->priv;
	u32 mc_filter[2];	/* Multicast hash filter */
	int i, rx_mode;
	u32 tmp;

	/* Note: do not reorder, GCC is clever about common statements. */
	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
			dev->name);
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	/* We can safely update without stopping the chip. */
	tmp = cp_rx_config | rx_mode;
	if (cp->rx_config != tmp) {
		cpw32_f (RxConfig, tmp);
		cp->rx_config = tmp;
	}
	cpw32_f (MAR0 + 0, mc_filter[0]);
	cpw32_f (MAR0 + 4, mc_filter[1]);
}
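
/*
 * Hash example: ether_crc() returns the 32-bit Ethernet CRC of the
 * 6-byte address; the top six bits (>> 26) index one of 64 filter
 * bits.  A bit_nr of, say, 40 sets bit 8 (40 & 31) of mc_filter[1]
 * (40 >> 5), i.e. in the MAR4 dword written just above.
 */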
static void cp_set_rx_mode (struct net_device *dev)
{
	unsigned long flags;
	struct cp_private *cp = dev->priv;

	spin_lock_irqsave (&cp->lock, flags);
	__cp_set_rx_mode(dev);
	spin_unlock_irqrestore (&cp->lock, flags);
}
static void __cp_get_stats(struct cp_private *cp)
{
	/* nothing to do yet; the h/w stats dump is on the TODO list above */
}

static struct net_device_stats *cp_get_stats(struct net_device *dev)
{
	struct cp_private *cp = dev->priv;

	/* The chip only needs to report frames it silently dropped. */
	spin_lock_irq(&cp->lock);
	if (netif_running(dev) && netif_device_present(dev))
		__cp_get_stats(cp);
	spin_unlock_irq(&cp->lock);

	return &cp->net_stats;
}
static void cp_stop_hw (struct cp_private *cp)
{
	struct net_device *dev = cp->dev;

	cpw16(IntrMask, 0);
	cpr16(IntrMask);
	cpw8(Cmd, 0);
	cpw16(CpCmd, 0);
	cpr16(CpCmd);
	cpw16(IntrStatus, ~(cpr16(IntrStatus)));
	synchronize_irq(dev->irq);
	udelay(10);

	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;

	(void) dev; /* avoid compiler warning when synchronize_irq()
		     * disappears during !CONFIG_SMP
		     */
}

static void cp_reset_hw (struct cp_private *cp)
{
	unsigned work = 1000;

	cpw8(Cmd, CmdReset);

	while (work--) {
		if (!(cpr8(Cmd) & CmdReset))
			return;

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(10);
	}

	printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
}
static inline void cp_start_hw (struct cp_private *cp)
{
	u16 pci_dac = cp->pci_using_dac ? PCIDAC : 0;

	if (cp->board_type == RTL8169)
		cpw16(CpCmd, pci_dac | PCIMulRW | RxChkSum);
	else
		cpw16(CpCmd, pci_dac | PCIMulRW | RxChkSum | CpRxOn | CpTxOn);
	cpw8(Cmd, RxOn | TxOn);
}
static void cp_init_hw (struct cp_private *cp)
{
	struct net_device *dev = cp->dev;

	cp_reset_hw(cp);

	cpw8_f (Cfg9346, Cfg9346_Unlock);

	/* Restore our idea of the MAC address. */
	cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
	cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));

	cp_start_hw(cp);
	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

	__cp_set_rx_mode(dev);
	cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));

	cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
	/* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
	if (cp->board_type == RTL8139Cp) {
		cpw8(Config3, PARMEnable);
		cp->wol_enabled = 0;
	}
	cpw8(Config5, cpr8(Config5) & PMEStatus);
	if (cp->board_type == RTL8169)
		cpw16(RxMaxSize, cp->rx_buf_sz);

	cpw32_f(HiTxRingAddr, 0);
	cpw32_f(HiTxRingAddr + 4, 0);

	cpw32_f(RxRingAddr, cp->ring_dma);
	cpw32_f(RxRingAddr + 4, 0); /* FIXME: 64-bit PCI */
	cpw32_f(TxRingAddr, cp->ring_dma + (sizeof(struct cp_desc) * CP_RX_RING_SIZE));
	cpw32_f(TxRingAddr + 4, 0); /* FIXME: 64-bit PCI */

	cpw16(MultiIntr, 0);

	cpw16_f(IntrMask, cp_intr_mask);

	cpw8_f(Cfg9346, Cfg9346_Lock);
}
static int cp_refill_rx (struct cp_private *cp)
{
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
		if (!skb)
			goto err_out;

		skb->dev = cp->dev;
		skb_reserve(skb, RX_OFFSET);

		cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
			skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		cp->rx_skb[i].skb = skb;
		cp->rx_skb[i].frag = 0;

		cp->rx_ring[i].opts2 = 0;
		cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping);
		if (i == (CP_RX_RING_SIZE - 1))
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
		else
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | cp->rx_buf_sz);
	}

	return 0;

err_out:
	cp_clean_rings(cp);
	return -ENOMEM;
}

static int cp_init_rings (struct cp_private *cp)
{
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);

	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;

	return cp_refill_rx (cp);
}

static int cp_alloc_rings (struct cp_private *cp)
{
	void *mem;

	mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
	if (!mem)
		return -ENOMEM;

	cp->rx_ring = mem;
	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];

	mem += (CP_RING_BYTES - CP_STATS_SIZE);
	cp->nic_stats = mem;
	cp->nic_stats_dma = cp->ring_dma + (CP_RING_BYTES - CP_STATS_SIZE);

	return cp_init_rings(cp);
}
static void cp_clean_rings (struct cp_private *cp)
{
	unsigned i;

	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		if (cp->rx_skb[i].skb) {
			pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping,
					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(cp->rx_skb[i].skb);
		}
	}

	for (i = 0; i < CP_TX_RING_SIZE; i++) {
		if (cp->tx_skb[i].skb) {
			struct sk_buff *skb = cp->tx_skb[i].skb;
			pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping,
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			cp->net_stats.tx_dropped++;
		}
	}

	memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE);
	memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
}

static void cp_free_rings (struct cp_private *cp)
{
	cp_clean_rings(cp);
	pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
	cp->rx_ring = NULL;
	cp->tx_ring = NULL;
	cp->nic_stats = NULL;
}
static int cp_open (struct net_device *dev)
{
	struct cp_private *cp = dev->priv;
	int rc;

	if (netif_msg_ifup(cp))
		printk(KERN_DEBUG "%s: enabling interface\n", dev->name);

	rc = cp_alloc_rings(cp);
	if (rc)
		return rc;

	cp_init_hw(cp);

	rc = request_irq(dev->irq, cp_interrupt, SA_SHIRQ, dev->name, dev);
	if (rc)
		goto err_out_hw;

	netif_carrier_off(dev);
	mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE);
	netif_start_queue(dev);

	return 0;

err_out_hw:
	cp_stop_hw(cp);
	cp_free_rings(cp);
	return rc;
}

static int cp_close (struct net_device *dev)
{
	struct cp_private *cp = dev->priv;

	if (netif_msg_ifdown(cp))
		printk(KERN_DEBUG "%s: disabling interface\n", dev->name);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	spin_lock_irq(&cp->lock);
	cp_stop_hw(cp);
	spin_unlock_irq(&cp->lock);

	free_irq(dev->irq, dev);
	cp_free_rings(cp);
	return 0;
}
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
	struct cp_private *cp = dev->priv;
	int rc;

	/* check for invalid MTU, according to hardware limits */
	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
		return -EINVAL;

	/* if network interface not up, no need for complexity */
	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		cp_set_rxbufsize(cp);	/* set new rx buf size */
		return 0;
	}

	spin_lock_irq(&cp->lock);

	cp_stop_hw(cp);			/* stop h/w and free rings */
	cp_clean_rings(cp);

	dev->mtu = new_mtu;
	cp_set_rxbufsize(cp);		/* set new rx buf size */
	if (cp->board_type == RTL8169)
		cpw16(RxMaxSize, cp->rx_buf_sz);

	rc = cp_init_rings(cp);		/* realloc and restart h/w */
	cp_start_hw(cp);

	spin_unlock_irq(&cp->lock);

	return rc;
}
static char mii_2_8139_map[8] = {
	BasicModeCtrl,
	BasicModeStatus,
	0,
	0,
	NWayAdvert,
	NWayLPAR,
	NWayExpansion,
	0
};

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct cp_private *cp = dev->priv;

	return location < 8 && mii_2_8139_map[location] ?
	       readw(cp->regs + mii_2_8139_map[location]) : 0;
}

static void mdio_write(struct net_device *dev, int phy_id, int location,
		       int value)
{
	struct cp_private *cp = dev->priv;

	if (location == 0) {
		cpw8(Cfg9346, Cfg9346_Unlock);
		cpw16(BasicModeCtrl, value);
		cpw8(Cfg9346, Cfg9346_Lock);
	} else if (location < 8 && mii_2_8139_map[location])
		cpw16(mii_2_8139_map[location], value);
}
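
/*
 * The internal PHY is not accessed over a serial MDIO bus here;
 * instead the standard MII registers (BMCR, BMSR, ADVERTISE, LPA,
 * EXPANSION) are shadowed at fixed chip offsets, which is what
 * mii_2_8139_map encodes.  Unmapped MII locations read back as 0
 * and writes to them are silently dropped.
 */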
/* Set the ethtool Wake-on-LAN settings */
static void netdev_set_wol (struct cp_private *cp,
			    const struct ethtool_wolinfo *wol)
{
	u8 options;

	options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
	/* If WOL is being disabled, no need for complexity */
	if (wol->wolopts) {
		if (wol->wolopts & WAKE_PHY)	options |= LinkUp;
		if (wol->wolopts & WAKE_MAGIC)	options |= MagicPacket;
	}

	cpw8 (Cfg9346, Cfg9346_Unlock);
	cpw8 (Config3, options);
	cpw8 (Cfg9346, Cfg9346_Lock);

	options = 0; /* Paranoia setting */
	options = cpr8 (Config5) & ~(UWF | MWF | BWF);
	/* If WOL is being disabled, no need for complexity */
	if (wol->wolopts) {
		if (wol->wolopts & WAKE_UCAST)	options |= UWF;
		if (wol->wolopts & WAKE_BCAST)	options |= BWF;
		if (wol->wolopts & WAKE_MCAST)	options |= MWF;
	}

	cpw8 (Config5, options);

	cp->wol_enabled = (wol->wolopts) ? 1 : 0;
}

/* Get the ethtool Wake-on-LAN settings */
static void netdev_get_wol (struct cp_private *cp,
			    struct ethtool_wolinfo *wol)
{
	u8 options;

	wol->wolopts   = 0; /* Start from scratch */
	wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
			 WAKE_MCAST | WAKE_UCAST;
	/* We don't need to go on if WOL is disabled */
	if (!cp->wol_enabled) return;

	options = cpr8 (Config3);
	if (options & LinkUp)		wol->wolopts |= WAKE_PHY;
	if (options & MagicPacket)	wol->wolopts |= WAKE_MAGIC;

	options = 0; /* Paranoia setting */
	options = cpr8 (Config5);
	if (options & UWF)		wol->wolopts |= WAKE_UCAST;
	if (options & BWF)		wol->wolopts |= WAKE_BCAST;
	if (options & MWF)		wol->wolopts |= WAKE_MCAST;
}
static int cp_ethtool_ioctl (struct cp_private *cp, void *useraddr)
{
	u32 ethcmd;

	/* dev_ioctl() in ../../net/core/dev.c has already checked
	   capable(CAP_NET_ADMIN), so don't bother with that here.  */

	if (get_user(ethcmd, (u32 *)useraddr))
		return -EFAULT;

	switch (ethcmd) {

	case ETHTOOL_GDRVINFO: {
		struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
		strcpy (info.driver, DRV_NAME);
		strcpy (info.version, DRV_VERSION);
		strcpy (info.bus_info, cp->pdev->slot_name);
		info.regdump_len = CP_REGS_SIZE;
		info.n_stats = CP_NUM_STATS;
		if (copy_to_user (useraddr, &info, sizeof (info)))
			return -EFAULT;
		return 0;
	}

	/* get settings */
	case ETHTOOL_GSET: {
		struct ethtool_cmd ecmd = { ETHTOOL_GSET };
		spin_lock_irq(&cp->lock);
		mii_ethtool_gset(&cp->mii_if, &ecmd);
		spin_unlock_irq(&cp->lock);
		if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
			return -EFAULT;
		return 0;
	}
	/* set settings */
	case ETHTOOL_SSET: {
		int r;
		struct ethtool_cmd ecmd;
		if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
			return -EFAULT;
		spin_lock_irq(&cp->lock);
		r = mii_ethtool_sset(&cp->mii_if, &ecmd);
		spin_unlock_irq(&cp->lock);
		return r;
	}
	/* restart autonegotiation */
	case ETHTOOL_NWAY_RST: {
		return mii_nway_restart(&cp->mii_if);
	}
	/* get link status */
	case ETHTOOL_GLINK: {
		struct ethtool_value edata = {ETHTOOL_GLINK};
		edata.data = mii_link_ok(&cp->mii_if);
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}

	/* get message-level */
	case ETHTOOL_GMSGLVL: {
		struct ethtool_value edata = {ETHTOOL_GMSGLVL};
		edata.data = cp->msg_enable;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}
	/* set message-level */
	case ETHTOOL_SMSGLVL: {
		struct ethtool_value edata;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		cp->msg_enable = edata.data;
		return 0;
	}

	/* NIC register dump */
	case ETHTOOL_GREGS: {
		struct ethtool_regs regs;
		u8 *regbuf = kmalloc(CP_REGS_SIZE, GFP_KERNEL);
		int rc;

		if (!regbuf)
			return -ENOMEM;
		memset(regbuf, 0, CP_REGS_SIZE);

		rc = copy_from_user(&regs, useraddr, sizeof(regs));
		if (rc) {
			rc = -EFAULT;
			goto err_out_gregs;
		}

		if (regs.len > CP_REGS_SIZE)
			regs.len = CP_REGS_SIZE;
		if (regs.len < CP_REGS_SIZE) {
			rc = -EINVAL;
			goto err_out_gregs;
		}

		regs.version = CP_REGS_VER;
		rc = copy_to_user(useraddr, &regs, sizeof(regs));
		if (rc) {
			rc = -EFAULT;
			goto err_out_gregs;
		}

		useraddr += offsetof(struct ethtool_regs, data);

		spin_lock_irq(&cp->lock);
		memcpy_fromio(regbuf, cp->regs, CP_REGS_SIZE);
		spin_unlock_irq(&cp->lock);

		if (copy_to_user(useraddr, regbuf, regs.len))
			rc = -EFAULT;

err_out_gregs:
		kfree(regbuf);
		return rc;
	}

	/* get/set RX checksumming */
	case ETHTOOL_GRXCSUM: {
		struct ethtool_value edata = { ETHTOOL_GRXCSUM };
		u16 cmd = cpr16(CpCmd) & RxChkSum;

		edata.data = cmd ? 1 : 0;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}
	case ETHTOOL_SRXCSUM: {
		struct ethtool_value edata;
		u16 cmd = cpr16(CpCmd), newcmd;

		newcmd = cmd;

		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;

		if (edata.data)
			newcmd |= RxChkSum;
		else
			newcmd &= ~RxChkSum;

		if (newcmd != cmd) {
			spin_lock_irq(&cp->lock);
			cpw16_f(CpCmd, newcmd);
			spin_unlock_irq(&cp->lock);
		}
		return 0;
	}

	/* get/set TX checksumming */
	case ETHTOOL_GTXCSUM: {
		struct ethtool_value edata = { ETHTOOL_GTXCSUM };

		edata.data = (cp->dev->features & NETIF_F_IP_CSUM) != 0;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}
	case ETHTOOL_STXCSUM: {
		struct ethtool_value edata;

		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;

		if (edata.data)
			cp->dev->features |= NETIF_F_IP_CSUM;
		else
			cp->dev->features &= ~NETIF_F_IP_CSUM;

		return 0;
	}

	/* get/set scatter-gather */
	case ETHTOOL_GSG: {
		struct ethtool_value edata = { ETHTOOL_GSG };

		edata.data = (cp->dev->features & NETIF_F_SG) != 0;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}
	case ETHTOOL_SSG: {
		struct ethtool_value edata;

		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;

		if (edata.data)
			cp->dev->features |= NETIF_F_SG;
		else
			cp->dev->features &= ~NETIF_F_SG;

		return 0;
	}

	/* get string list(s) */
	case ETHTOOL_GSTRINGS: {
		struct ethtool_gstrings estr = { ETHTOOL_GSTRINGS };

		if (copy_from_user(&estr, useraddr, sizeof(estr)))
			return -EFAULT;
		if (estr.string_set != ETH_SS_STATS)
			return -EINVAL;

		estr.len = CP_NUM_STATS;
		if (copy_to_user(useraddr, &estr, sizeof(estr)))
			return -EFAULT;
		if (copy_to_user(useraddr + sizeof(estr),
				 &ethtool_stats_keys,
				 sizeof(ethtool_stats_keys)))
			return -EFAULT;
		return 0;
	}

	/* get NIC-specific statistics */
	case ETHTOOL_GSTATS: {
		struct ethtool_stats estats = { ETHTOOL_GSTATS };
		u64 *tmp_stats;
		unsigned int work = 100;
		const unsigned int sz = sizeof(u64) * CP_NUM_STATS;
		int i;

		/* begin NIC statistics dump */
		cpw32(StatsAddr + 4, 0); /* FIXME: 64-bit PCI */
		cpw32(StatsAddr, cp->nic_stats_dma | DumpStats);

		estats.n_stats = CP_NUM_STATS;
		if (copy_to_user(useraddr, &estats, sizeof(estats)))
			return -EFAULT;

		while (work-- > 0) {
			if ((cpr32(StatsAddr) & DumpStats) == 0)
				break;
			cpu_relax();
		}

		if (cpr32(StatsAddr) & DumpStats)
			return -EIO;

		tmp_stats = kmalloc(sz, GFP_KERNEL);
		if (!tmp_stats)
			return -ENOMEM;
		memset(tmp_stats, 0, sz);

		i = 0;
		tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_ok);
		tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok);
		tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_err);
		tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_err);
		tmp_stats[i++] = le16_to_cpu(cp->nic_stats->rx_fifo);
		tmp_stats[i++] = le16_to_cpu(cp->nic_stats->frame_align);
		tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_1col);
		tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_mcol);
		tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_phys);
		tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_bcast);
		tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_ok_mcast);
		tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_abort);
		tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_underrun);
		tmp_stats[i++] = cp->cp_stats.rx_frags;
		if (i != CP_NUM_STATS)
			BUG();

		i = copy_to_user(useraddr + sizeof(estats),
				 tmp_stats, sz);
		kfree(tmp_stats);

		if (i)
			return -EFAULT;
		return 0;
	}

	/* get/set Wake-on-LAN settings */
	case ETHTOOL_GWOL: {
		struct ethtool_wolinfo wol = { ETHTOOL_GWOL };

		spin_lock_irq (&cp->lock);
		netdev_get_wol (cp, &wol);
		spin_unlock_irq (&cp->lock);
		return ((copy_to_user (useraddr, &wol, sizeof (wol)))? -EFAULT : 0);
	}

	case ETHTOOL_SWOL: {
		struct ethtool_wolinfo wol;

		if (copy_from_user (&wol, useraddr, sizeof (wol)))
			return -EFAULT;
		spin_lock_irq (&cp->lock);
		netdev_set_wol (cp, &wol);
		spin_unlock_irq (&cp->lock);
		return 0;
	}

	default:
		break;
	}

	return -EOPNOTSUPP;
}
static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct cp_private *cp = dev->priv;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *) &rq->ifr_data;
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCETHTOOL)
		return cp_ethtool_ioctl(cp, (void *) rq->ifr_data);

	spin_lock_irq(&cp->lock);
	rc = generic_mii_ioctl(&cp->mii_if, mii, cmd, NULL);
	spin_unlock_irq(&cp->lock);
	return rc;
}
#if CP_VLAN_TAG_USED
static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct cp_private *cp = dev->priv;

	spin_lock_irq(&cp->lock);
	cp->vlgrp = grp;
	cpw16(CpCmd, cpr16(CpCmd) | RxVlanOn);
	spin_unlock_irq(&cp->lock);
}

static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct cp_private *cp = dev->priv;

	spin_lock_irq(&cp->lock);
	cpw16(CpCmd, cpr16(CpCmd) & ~RxVlanOn);
	if (cp->vlgrp)
		cp->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock_irq(&cp->lock);
}
#endif /* CP_VLAN_TAG_USED */
/* Serial EEPROM section. */

/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS		0x08	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
#define EE_WRITE_0	0x00
#define EE_WRITE_1	0x02
#define EE_DATA_READ	0x01	/* EEPROM chip data out. */
#define EE_ENB		(0x80 | EE_CS)

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33MHz PCI, but 66MHz may change this.
 */

#define eeprom_delay()	readl(ee_addr)

/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD	(5)
#define EE_READ_CMD	(6)
#define EE_ERASE_CMD	(7)
static int __devinit read_eeprom (void *ioaddr, int location, int addr_len)
{
	int i;
	unsigned retval = 0;
	void *ee_addr = ioaddr + Cfg9346;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	writeb (EE_ENB & ~EE_CS, ee_addr);
	writeb (EE_ENB, ee_addr);
	eeprom_delay ();

	/* Shift the read command bits out. */
	for (i = 4 + addr_len; i >= 0; i--) {
		int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		writeb (EE_ENB | dataval, ee_addr);
		eeprom_delay ();
		writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		eeprom_delay ();
	}
	writeb (EE_ENB, ee_addr);
	eeprom_delay ();

	for (i = 16; i > 0; i--) {
		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
		eeprom_delay ();
		retval =
		    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
				     0);
		writeb (EE_ENB, ee_addr);
		eeprom_delay ();
	}

	/* Terminate the EEPROM access. */
	writeb (~EE_CS, ee_addr);

	return retval;
}
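
/*
 * Example 93C46-style read: with addr_len == 6, reading location 0
 * shifts out the start/opcode/address bits of
 * 0 | (EE_READ_CMD << 6) == 0x180 MSB-first (4 + addr_len + 1 == 11
 * bits in all), then clocks 16 data bits back in via EE_DATA_READ.
 */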
/* Put the board into D3cold state and wait for WakeUp signal */
static void cp_set_d3_state (struct cp_private *cp)
{
	pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
	pci_set_power_state (cp->pdev, 3);
}
static int __devinit cp_init_one (struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct cp_private *cp;
	int rc;
	void *regs;
	long pciaddr;
	unsigned int addr_len, i;
	u8 pci_rev, cache_size;
	unsigned int board_type = (unsigned int) ent->driver_data;

#ifndef MODULE
	static int version_printed;
	if (version_printed++ == 0)
		printk("%s", version);
#endif

	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);

	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
		printk(KERN_ERR PFX "pci dev %s (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
		       pdev->slot_name, pdev->vendor, pdev->device, pci_rev);
		printk(KERN_ERR PFX "Try the \"8139too\" driver instead.\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct cp_private));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	cp = dev->priv;
	cp->pdev = pdev;
	cp->board_type = board_type;
	cp->dev = dev;
	cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
	spin_lock_init (&cp->lock);
	cp->mii_if.dev = dev;
	cp->mii_if.mdio_read = mdio_read;
	cp->mii_if.mdio_write = mdio_write;
	cp->mii_if.phy_id = CP_INTERNAL_PHY;
	cp->mii_if.phy_id_mask = 0x1f;
	cp->mii_if.reg_num_mask = 0x1f;
	cp_set_rxbufsize(cp);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_free;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_disable;

	if (pdev->irq < 2) {
		rc = -EIO;
		printk(KERN_ERR PFX "invalid irq (%d) for pci dev %s\n",
		       pdev->irq, pdev->slot_name);
		goto err_out_res;
	}
	pciaddr = pci_resource_start(pdev, 1);
	if (!pciaddr) {
		rc = -EIO;
		printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
		       pdev->slot_name);
		goto err_out_res;
	}
	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
		rc = -EIO;
		printk(KERN_ERR PFX "MMIO resource (%lx) too small on pci dev %s\n",
		       pci_resource_len(pdev, 1), pdev->slot_name);
		goto err_out_res;
	}

	/* Configure DMA attributes. */
	if (!pci_set_dma_mask(pdev, (u64) 0xffffffffffffffffULL)) {
		cp->pci_using_dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, (u64) 0xffffffff);
		if (rc) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_res;
		}
		cp->pci_using_dac = 0;
	}

	regs = ioremap_nocache(pciaddr, CP_REGS_SIZE);
	if (!regs) {
		rc = -EIO;
		printk(KERN_ERR PFX "Cannot map PCI MMIO (%lx@%lx) on pci dev %s\n",
		       pci_resource_len(pdev, 1), pciaddr, pdev->slot_name);
		goto err_out_res;
	}
	dev->base_addr = (unsigned long) regs;
	cp->regs = regs;

	cp_stop_hw(cp);

	/* read MAC address from EEPROM */
	addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
	for (i = 0; i < 3; i++)
		((u16 *) (dev->dev_addr))[i] =
		    le16_to_cpu (read_eeprom (regs, i + 7, addr_len));

	dev->open = cp_open;
	dev->stop = cp_close;
	dev->set_multicast_list = cp_set_rx_mode;
	dev->hard_start_xmit = cp_start_xmit;
	dev->get_stats = cp_get_stats;
	dev->do_ioctl = cp_ioctl;
	dev->change_mtu = cp_change_mtu;
#if 0
	dev->tx_timeout = cp_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#endif
#ifdef CP_TX_CHECKSUM
	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
#endif
#if CP_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = cp_vlan_rx_register;
	dev->vlan_rx_kill_vid = cp_vlan_rx_kill_vid;
#endif

	dev->irq = pdev->irq;

	rc = register_netdev(dev);
	if (rc)
		goto err_out_iomap;

	printk (KERN_INFO "%s: %s at 0x%lx, "
		"%02x:%02x:%02x:%02x:%02x:%02x, "
		"IRQ %d\n",
		dev->name,
		cp_board_tbl[board_type].name,
		dev->base_addr,
		dev->dev_addr[0], dev->dev_addr[1],
		dev->dev_addr[2], dev->dev_addr[3],
		dev->dev_addr[4], dev->dev_addr[5],
		dev->irq);

	pci_set_drvdata(pdev, dev);

	/*
	 * Looks like this is necessary to deal with on all architectures,
	 * even this %$#%$# N440BX Intel based thing doesn't get it right.
	 * Ie. having two NICs in the machine, one will have the cache
	 * line set at boot time, the other will not.
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_size);
	cache_size <<= 2;
	if (cache_size != SMP_CACHE_BYTES) {
		printk(KERN_INFO "%s: PCI cache line size set incorrectly "
		       "(%i bytes) by BIOS/FW, ", dev->name, cache_size);
		if (cache_size > SMP_CACHE_BYTES)
			printk("expecting %i\n", SMP_CACHE_BYTES);
		else {
			printk("correcting to %i\n", SMP_CACHE_BYTES);
			pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
					      SMP_CACHE_BYTES >> 2);
		}
	}

	/* enable busmastering and memory-write-invalidate */
	pci_set_master(pdev);

	if (cp->wol_enabled) cp_set_d3_state (cp);

	return 0;

err_out_iomap:
	iounmap(regs);
err_out_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	kfree(dev);
	return rc;
}
static void __devexit cp_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cp_private *cp;

	if (!dev)
		BUG();
	cp = dev->priv;
	unregister_netdev(dev);
	iounmap(cp->regs);
	if (cp->wol_enabled) pci_set_power_state (pdev, 0);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	kfree(dev);
}
#ifdef CONFIG_PM
static int cp_suspend (struct pci_dev *pdev, u32 state)
{
	struct net_device *dev;
	struct cp_private *cp;
	unsigned long flags;

	dev = pci_get_drvdata (pdev);
	if (!dev || !netif_running (dev)) return 0;

	cp = dev->priv;

	netif_device_detach (dev);
	netif_stop_queue (dev);

	spin_lock_irqsave (&cp->lock, flags);

	/* Disable Rx and Tx */
	cpw16 (IntrMask, 0);
	cpw8  (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));

	spin_unlock_irqrestore (&cp->lock, flags);

	if (cp->pdev && cp->wol_enabled) {
		pci_save_state (cp->pdev, cp->power_state);
		cp_set_d3_state (cp);
	}

	return 0;
}

static int cp_resume (struct pci_dev *pdev)
{
	struct net_device *dev;
	struct cp_private *cp;

	dev = pci_get_drvdata (pdev);
	cp = dev->priv;

	netif_device_attach (dev);

	if (cp->pdev && cp->wol_enabled) {
		pci_set_power_state (cp->pdev, 0);
		pci_restore_state (cp->pdev, cp->power_state);
	}

	cp_init_hw (cp);
	netif_start_queue (dev);

	return 0;
}
#endif /* CONFIG_PM */
static struct pci_driver cp_driver = {
	.name		= DRV_NAME,
	.id_table	= cp_pci_tbl,
	.probe		= cp_init_one,
	.remove		= __devexit_p(cp_remove_one),
#ifdef CONFIG_PM
	.resume		= cp_resume,
	.suspend	= cp_suspend,
#endif
};

static int __init cp_init (void)
{
#ifdef MODULE
	printk("%s", version);
#endif
	return pci_module_init (&cp_driver);
}

static void __exit cp_exit (void)
{
	pci_unregister_driver (&cp_driver);
}

module_init(cp_init);
module_exit(cp_exit);