/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey. It's neither supported nor endorsed
 * by NVIDIA Corp. Use at your own risk.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Changelog:
 *	0.01: 05 Oct 2003: First release that compiles without warnings.
 *	0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
 *			   Check all PCI BARs for the register window.
 *			   udelay added to mii_rw.
 *	0.03: 06 Oct 2003: Initialize dev->irq.
 *	0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
 *	0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
 *	0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
 *			   irq mask updated
 *	0.07: 14 Oct 2003: Further irq mask updates.
 *	0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
 *			   added into irq handler, NULL check for drain_ring.
 *	0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
 *			   requested interrupt sources.
 *	0.10: 20 Oct 2003: First cleanup for release.
 *	0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
 *			   MAC Address init fix, set_multicast cleanup.
 *	0.12: 23 Oct 2003: Cleanups for release.
 *	0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
 *			   Set link speed correctly. start rx before starting
 *			   tx (nv_start_rx sets the link speed).
 *	0.14: 25 Oct 2003: Nic dependent irq mask.
 *	0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
 *			   open.
 *	0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
 *			   increased to 1628 bytes.
 *	0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
 *			   the tx length.
 *	0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
 *	0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
 *			   addresses, really stop rx if already running
 *			   in nv_start_rx, clean up a bit.
 *	0.20: 07 Dec 2003: alloc fixes
 *	0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
 *	0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
 *			   on close.
 *	0.23: 26 Jan 2004: various small cleanups
 *	0.24: 27 Feb 2004: make driver even less anonymous in backtraces
 *	0.25: 09 Mar 2004: wol support
 *	0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
 *	0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
 *			   added CK804/MCP04 device IDs, code fixes
 *			   for registers, link status and other minor fixes.
 *	0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
 *	0.29: 31 Aug 2004: Add backup timer for link change notification.
 *	0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
 *			   into nv_close, otherwise reenabling for wol can
 *			   cause DMA to kfree'd memory.
 *	0.31: 14 Nov 2004: ethtool support for getting/setting link
 *			   capabilities.
 *	0.32: 16 Apr 2005: RX_ERROR4 handling added.
 *	0.33: 16 May 2005: Support for MCP51 added.
 *	0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
 *	0.35: 26 Jun 2005: Support for MCP55 added.
 *	0.36: 28 Jun 2005: Add jumbo frame support.
 *	0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
 *	0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
 *			   per-packet flags.
 *	0.39: 18 Jul 2005: Add 64bit descriptor support.
 *	0.40: 19 Jul 2005: Add support for mac address change.
 *	0.41: 30 Jul 2005: Write back original MAC in nv_close instead
 *			   of writing it in nv_probe
 *	0.42: 06 Aug 2005: Fix lack of link speed initialization
 *			   in the second (and later) nv_open call
 *	0.43: 10 Aug 2005: Add support for tx checksum.
 *	0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION		"0.44"
#define DRV_NAME			"forcedeth"
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif
#define DEV_NEED_TIMERIRQ	0x0001	/* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x0002	/* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x0004	/* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x0008	/* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x0010	/* device supports tx and rx checksum offloads */
enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK		0x1ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_TX_ERROR		0x0080
#define NVREG_IRQ_TX1			0x0100
#define NVREG_IRQMASK_WANTED		0x00df

#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \
					NVREG_IRQ_TX1))

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL		3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT	970
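/*
 * Worked example (sketch derived from the comment above): one timer count
 * corresponds to roughly 1 ms / 97, i.e. ~10.3 us. The default of 970
 * therefore selects an interval of about 970 / 97 = 10 ms, which is the
 * "100 times/second" rate quoted for DEV_NEED_TIMERIRQ recovery in the
 * header comment.
 */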
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_ALWAYS	0x7F0008
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegRandomSeed = 0x9c,
#define NVREG_RNDSEED_MASK	0x00ff
#define NVREG_RNDSEED_FORCE	0x7f00
#define NVREG_RNDSEED_FORCE2	0x2d00
#define NVREG_RNDSEED_FORCE3	0x7400

	NvRegUnknownSetupReg1 = 0xA0,
#define NVREG_UNKSETUP1_VAL	0x16070f
	NvRegUnknownSetupReg2 = 0xA4,
#define NVREG_UNKSETUP2_VAL	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
	NvRegMulticastMaskB = 0xBC,

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegUnknownTransmitterReg = 0x10c,
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegUnknownSetupReg3 = 0x13c,
#define NVREG_UNKSETUP3_VAL1	0x200010
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x02100
#define NVREG_TXRXCTL_DESC_3	0x02200
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK		0x000f
#define NVREG_MIISTAT_MASK2		0x000f
	NvRegUnknownSetupReg4 = 0x184,
#define NVREG_UNKSETUP4_VAL	8

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
};
/* Big endian: should work, but is untested */
struct ring_desc {
	u32 PacketBuffer;
	u32 FlagLen;
};

struct ring_desc_ex {
	u32 PacketBufferHigh;
	u32 PacketBufferLow;
	u32 Reserved;
	u32 FlagLen;
};

typedef union _ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
} ring_type;
#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
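/*
 * Illustrative sketch (not from the original source): FlagLen packs the
 * buffer length into the low bits and the status flags into the high bits.
 * With the v1 masks, a FlagLen of 0x800005ea splits into
 *
 *	u32 flaglen = le32_to_cpu(desc->FlagLen);
 *	u32 len   = flaglen & LEN_MASK_V1;	// 0x05ea = 1514 bytes
 *	u32 flags = flaglen & FLAG_MASK_V1;	// 0x80000000 = NV_RX_AVAIL
 */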
#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUMOK1	(0x10000000)
#define NV_RX2_CHECKSUMOK2	(0x14000000)
#define NV_RX2_CHECKSUMOK3	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ		0x270
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY	10
#define NV_MIIPHY_DELAYMAX	10000

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)
#define RX_RING		128
#define TX_RING		64
/*
 * If your nic mysteriously hangs then try to reduce the limits
 * to 1/0: It might be required to set NV_TX_LASTPACKET in the
 * last valid ring entry. But this would be impossible to
 * implement - probably a disassembly error.
 */
#define TX_LIMIT_STOP	63
#define TX_LIMIT_START	62
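/*
 * Sketch of the resulting flow control (mirrors nv_start_xmit and
 * nv_tx_done below):
 *
 *	if ((np->next_tx - np->nic_tx + fragments) > TX_LIMIT_STOP)
 *		netif_stop_queue(dev);
 *	...
 *	if (np->next_tx - np->nic_tx < TX_LIMIT_START)
 *		netif_wake_queue(dev);
 *
 * The gap between TX_LIMIT_STOP and TX_LIMIT_START provides hysteresis, so
 * the queue is not stopped and woken again on every completed descriptor.
 */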
/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
/*
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	0x0
#define DESC_VER_2	0x02100
#define DESC_VER_3	0x02200
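/*
 * Illustrative sketch (not from the original source): every ring access in
 * the driver branches on np->desc_ver, using the 32-bit layout for the
 * first two versions and the 64-bit layout for DESC_VER_3:
 *
 *	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
 *		np->rx_ring.orig[i].FlagLen = 0;
 *	else
 *		np->rx_ring.ex[i].FlagLen = 0;
 */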
#define PHY_OUI_MARVELL	0x5043
#define PHY_OUI_CICADA	0x03f1
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHY_INIT1	0x0f000
#define PHY_INIT2	0x0e00
#define PHY_INIT3	0x01000
#define PHY_INIT4	0x0200
#define PHY_INIT5	0x0004
#define PHY_INIT6	0x02000
#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100	0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

/* FIXME: MII defines that should be added to <linux/mii.h> */
#define MII_1000BT_CR	0x09
#define MII_1000BT_SR	0x0a
#define ADVERTISE_1000FULL	0x0200
#define ADVERTISE_1000HALF	0x0100
#define LPA_1000FULL	0x0800
#define LPA_1000HALF	0x0400
/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
 *	needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
 */
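/*
 * Sketch of the full quiesce sequence implied by these rules (as used by
 * nv_change_mtu below) when the irq handler, the xmit path and the
 * reconfiguration must all be excluded at once:
 *
 *	disable_irq(dev->irq);
 *	spin_lock_bh(&dev->xmit_lock);
 *	spin_lock(&np->lock);
 *	... reconfigure the hardware ...
 *	spin_unlock(&np->lock);
 *	spin_unlock_bh(&dev->xmit_lock);
 *	enable_irq(dev->irq);
 */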
/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct net_device_stats stats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	u16 gigabit;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	ring_type rx_ring;
	unsigned int cur_rx, refill_rx;
	struct sk_buff *rx_skbuff[RX_RING];
	dma_addr_t rx_dma[RX_RING];
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	ring_type tx_ring;
	unsigned int next_tx, nic_tx;
	struct sk_buff *tx_skbuff[TX_RING];
	dma_addr_t tx_dma[TX_RING];
	u32 tx_flags;
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;
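/*
 * Usage note (assumption: the module_param() declaration for
 * max_interrupt_work lives in the module init section outside this
 * excerpt, as the comment above implies). The limit can then be raised at
 * load time, e.g.:
 *
 *	modprobe forcedeth max_interrupt_work=10
 */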
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}
static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}
static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->FlagLen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
}
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
				int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}
#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}
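/*
 * Usage sketch (mirrors nv_update_linkspeed below): pass MII_READ as the
 * value to request a read cycle. Latched registers such as BMSR must be
 * read twice to obtain the current state rather than the latched one:
 *
 *	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 *	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 */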
static int phy_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	miicontrol |= BMCR_RESET;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_1000BT_CR, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* reset the phy */
	if (phy_reset(dev)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
		phy_reserved |= (PHY_INIT3 | PHY_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		writel(0, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
				dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}
static void nv_stop_rx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	writel(0, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	writel(0, base + NvRegLinkSpeed);
}
static void nv_start_tx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
	pci_push(base);
}
static void nv_stop_tx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	writel(0, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	writel(0, base + NvRegUnknownTransmitterReg);
}
static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* It seems that the nic always generates interrupts and doesn't
	 * accumulate errors internally. Thus the current values in np->stats
	 * are already up to date.
	 */
	return &np->stats;
}
/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int refill_rx = np->refill_rx;

	while (np->cur_rx != refill_rx) {
		struct sk_buff *skb;
		unsigned int nr;

		nr = refill_rx % RX_RING;
		if (np->rx_skbuff[nr] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
			if (!skb)
				break;

			skb->dev = dev;
			np->rx_skbuff[nr] = skb;
		} else {
			skb = np->rx_skbuff[nr];
		}
		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
						PCI_DMA_FROMDEVICE);
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
			wmb();
			np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
		} else {
			np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
			np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
			wmb();
			np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
		}
		dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
					dev->name, refill_rx);
		refill_rx++;
	}
	np->refill_rx = refill_rx;
	if (np->cur_rx - refill_rx == RX_RING)
		return 1;
	return 0;
}
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	disable_irq(dev->irq);
	if (nv_alloc_rx(dev)) {
		spin_lock(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock(&np->lock);
	}
	enable_irq(dev->irq);
}
static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->cur_rx = RX_RING;
	np->refill_rx = 0;
	for (i = 0; i < RX_RING; i++)
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->rx_ring.orig[i].FlagLen = 0;
		else
			np->rx_ring.ex[i].FlagLen = 0;
}
static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->next_tx = np->nic_tx = 0;
	for (i = 0; i < TX_RING; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->tx_ring.orig[i].FlagLen = 0;
		else
			np->tx_ring.ex[i].FlagLen = 0;
		np->tx_skbuff[i] = NULL;
	}
}
static int nv_init_ring(struct net_device *dev)
{
	nv_init_tx(dev);
	nv_init_rx(dev);
	return nv_alloc_rx(dev);
}
static void nv_release_txskb(struct net_device *dev, unsigned int skbnr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sk_buff *skb = np->tx_skbuff[skbnr];
	unsigned int j, entry, fragments;

	dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d, skb %p\n",
		dev->name, skbnr, np->tx_skbuff[skbnr]);

	entry = skbnr;
	if ((fragments = skb_shinfo(skb)->nr_frags) != 0) {
		for (j = fragments; j >= 1; j--) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j-1];
			pci_unmap_page(np->pci_dev, np->tx_dma[entry],
				       frag->size,
				       PCI_DMA_TODEVICE);
			entry = (entry - 1) % TX_RING;
		}
	}
	pci_unmap_single(np->pci_dev, np->tx_dma[entry],
			 skb->len - skb->data_len,
			 PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(skb);
	np->tx_skbuff[skbnr] = NULL;
}
static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < TX_RING; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->tx_ring.orig[i].FlagLen = 0;
		else
			np->tx_ring.ex[i].FlagLen = 0;
		if (np->tx_skbuff[i]) {
			nv_release_txskb(dev, i);
			np->stats.tx_dropped++;
		}
	}
}
static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < RX_RING; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->rx_ring.orig[i].FlagLen = 0;
		else
			np->rx_ring.ex[i].FlagLen = 0;
		wmb();
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev, np->rx_dma[i],
						np->rx_skbuff[i]->len,
						PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
			np->rx_skbuff[i] = NULL;
		}
	}
}
static void drain_ring(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}
/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with dev->xmit_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int nr = (np->next_tx + fragments) % TX_RING;
	unsigned int i;

	spin_lock_irq(&np->lock);

	if ((np->next_tx - np->nic_tx + fragments) > TX_LIMIT_STOP) {
		spin_unlock_irq(&np->lock);
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	np->tx_skbuff[nr] = skb;

	if (fragments) {
		dprintk(KERN_DEBUG "%s: nv_start_xmit: buffer contains %d fragments\n", dev->name, fragments);
		/* setup descriptors in reverse order */
		for (i = fragments; i >= 1; i--) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset, frag->size,
							PCI_DMA_TODEVICE);

			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
				np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (frag->size-1) | np->tx_flags | tx_flags_extra);
			} else {
				np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
				np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
				np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (frag->size-1) | np->tx_flags | tx_flags_extra);
			}

			nr = (nr - 1) % TX_RING;

			if (np->desc_ver == DESC_VER_1)
				tx_flags_extra &= ~NV_TX_LASTPACKET;
			else
				tx_flags_extra &= ~NV_TX2_LASTPACKET;
		}
	}

#ifdef NETIF_F_TSO
	if (skb_shinfo(skb)->tso_size)
		tx_flags_extra |= NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
	else
#endif
	tx_flags_extra |= (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);

	np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len-skb->data_len,
					PCI_DMA_TODEVICE);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
		np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-skb->data_len-1) | np->tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
		np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
		np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-skb->data_len-1) | np->tx_flags | tx_flags_extra);
	}

	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d queued for transmission. tx_flags_extra: %x\n",
				dev->name, np->next_tx, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	np->next_tx += 1 + fragments;

	dev->trans_start = jiffies;
	spin_unlock_irq(&np->lock);
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));
	return NETDEV_TX_OK;
}
/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static void nv_tx_done(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 Flags;
	unsigned int i;
	struct sk_buff *skb;

	while (np->nic_tx != np->next_tx) {
		i = np->nic_tx % TX_RING;

		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
		else
			Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);

		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
					dev->name, np->nic_tx, Flags);
		if (Flags & NV_TX_VALID)
			break;
		if (np->desc_ver == DESC_VER_1) {
			if (Flags & NV_TX_LASTPACKET) {
				skb = np->tx_skbuff[i];
				if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
					     NV_TX_UNDERFLOW|NV_TX_ERROR)) {
					if (Flags & NV_TX_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (Flags & NV_TX_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += skb->len;
				}
				nv_release_txskb(dev, i);
			}
		} else {
			if (Flags & NV_TX2_LASTPACKET) {
				skb = np->tx_skbuff[i];
				if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
					     NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
					if (Flags & NV_TX2_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (Flags & NV_TX2_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += skb->len;
				}
				nv_release_txskb(dev, i);
			}
		}
		np->nic_tx++;
	}
	if (np->next_tx - np->nic_tx < TX_LIMIT_START)
		netif_wake_queue(dev);
}
/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with dev->xmit_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
			readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
				dev->name, (unsigned long)np->ring_addr,
				np->next_tx, np->nic_tx);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i=0;i<0x400;i+= 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
					i,
					readl(base + i + 0), readl(base + i + 4),
					readl(base + i + 8), readl(base + i + 12),
					readl(base + i + 16), readl(base + i + 20),
					readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i=0;i<TX_RING;i+= 4) {
			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i].FlagLen),
				       le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
				       le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
				       le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
				       le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i].FlagLen),
				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
				       le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) check that the packets were not sent already: */
	nv_tx_done(dev);

	/* 3) if there are dead entries: clear everything */
	if (np->next_tx != np->nic_tx) {
		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
		nv_drain_tx(dev);
		np->next_tx = np->nic_tx = 0;
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		else
			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
		netif_wake_queue(dev);
	}

	/* 4) restart tx engine */
	nv_start_tx(dev);
	spin_unlock_irq(&np->lock);
}
/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
		protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
				dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * the additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
					dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
					dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
					dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, datalen);
		return datalen;
	}
}
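/*
 * Worked example (sketch): a minimal 60 byte frame whose 802.3 length field
 * holds 46 gives protolen = 46 + ETH_HLEN = 60. datalen (60) is not above
 * ETH_ZLEN and neither is protolen, so the frame is accepted with the full
 * datalen. A proto field of 0x0800 (ETH_P_IP) exceeds ETH_DATA_LEN, is a
 * protocol type rather than a length, and disables the check entirely.
 */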
static void nv_rx_process(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 Flags;

	for (;;) {
		struct sk_buff *skb;
		int len;
		int i;
		if (np->cur_rx - np->refill_rx >= RX_RING)
			break;	/* we scanned the whole ring - do not continue */

		i = np->cur_rx % RX_RING;
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
			len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
		} else {
			Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
			len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
		}

		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
					dev->name, np->cur_rx, Flags);

		if (Flags & NV_RX_AVAIL)
			break;	/* still owned by hardware, */

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->rx_dma[i],
				np->rx_skbuff[i]->len,
				PCI_DMA_FROMDEVICE);

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (!(Flags & NV_RX_DESCRIPTORVALID))
				goto next_pkt;

			if (Flags & NV_RX_MISSEDFRAME) {
				np->stats.rx_missed_errors++;
				np->stats.rx_errors++;
				goto next_pkt;
			}
			if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
				np->stats.rx_errors++;
				goto next_pkt;
			}
			if (Flags & NV_RX_CRCERR) {
				np->stats.rx_crc_errors++;
				np->stats.rx_errors++;
				goto next_pkt;
			}
			if (Flags & NV_RX_OVERFLOW) {
				np->stats.rx_over_errors++;
				np->stats.rx_errors++;
				goto next_pkt;
			}
			if (Flags & NV_RX_ERROR4) {
				len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
				if (len < 0) {
					np->stats.rx_errors++;
					goto next_pkt;
				}
			}
			/* framing errors are soft errors. */
			if (Flags & NV_RX_FRAMINGERR) {
				if (Flags & NV_RX_SUBSTRACT1) {
					len--;
				}
			}
		} else {
			if (!(Flags & NV_RX2_DESCRIPTORVALID))
				goto next_pkt;

			if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
				np->stats.rx_errors++;
				goto next_pkt;
			}
			if (Flags & NV_RX2_CRCERR) {
				np->stats.rx_crc_errors++;
				np->stats.rx_errors++;
				goto next_pkt;
			}
			if (Flags & NV_RX2_OVERFLOW) {
				np->stats.rx_over_errors++;
				np->stats.rx_errors++;
				goto next_pkt;
			}
			if (Flags & NV_RX2_ERROR4) {
				len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
				if (len < 0) {
					np->stats.rx_errors++;
					goto next_pkt;
				}
			}
			/* framing errors are soft errors */
			if (Flags & NV_RX2_FRAMINGERR) {
				if (Flags & NV_RX2_SUBSTRACT1) {
					len--;
				}
			}
			Flags &= NV_RX2_CHECKSUMMASK;
			if (Flags == NV_RX2_CHECKSUMOK1 ||
					Flags == NV_RX2_CHECKSUMOK2 ||
					Flags == NV_RX2_CHECKSUMOK3) {
				dprintk(KERN_DEBUG "%s: hw checksum hit!\n", dev->name);
				np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				dprintk(KERN_DEBUG "%s: hw checksum miss!\n", dev->name);
			}
		}
		/* got a valid packet - forward it to the network core */
		skb = np->rx_skbuff[i];
		np->rx_skbuff[i] = NULL;

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
					dev->name, np->cur_rx, len, skb->protocol);
		netif_rx(skb);
		dev->last_rx = jiffies;
		np->stats.rx_packets++;
		np->stats.rx_bytes += len;
next_pkt:
		np->cur_rx++;
	}
}
static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}
/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *np = netdev_priv(dev);
	int old_mtu;

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed, there is probably a simpler approach.
		 * Changing the MTU is a rare event, it shouldn't matter.
		 */
		disable_irq(dev->irq);
		spin_lock_bh(&dev->xmit_lock);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		nv_txrx_reset(dev);
		/* drain rx queue */
		nv_drain_rx(dev);
		nv_drain_tx(dev);
		/* reinit driver view of the rx queue */
		nv_init_rx(dev);
		nv_init_tx(dev);
		/* alloc new rx buffers */
		set_bufsize(dev);
		if (nv_alloc_rx(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the rx queue */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		else
			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
		writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart rx engine */
		nv_start_rx(dev);
		nv_start_tx(dev);
		spin_unlock(&np->lock);
		spin_unlock_bh(&dev->xmit_lock);
		enable_irq(dev->irq);
	}
	return 0;
}
static void nv_copy_mac_to_hw(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 mac[2];

	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

	writel(mac[0], base + NvRegMacAddrA);
	writel(mac[1], base + NvRegMacAddrB);
}
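/*
 * Worked example (sketch): for the MAC address 00:50:8d:12:34:56 the words
 * written above are
 *	mac[0] = 0x128d5000	(dev_addr[0] in the low byte)
 *	mac[1] = 0x00005634
 * i.e. the address is packed little-endian across NvRegMacAddrA/B.
 */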
/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sockaddr *macaddr = (struct sockaddr *)addr;

	if(!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	/* synchronized against open : rtnl_lock() held by caller */
	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);

	if (netif_running(dev)) {
		spin_lock_bh(&dev->xmit_lock);
		spin_lock_irq(&np->lock);

		/* stop rx engine */
		nv_stop_rx(dev);

		/* set mac address */
		nv_copy_mac_to_hw(dev);

		/* restart rx engine */
		nv_start_rx(dev);
		spin_unlock_irq(&np->lock);
		spin_unlock_bh(&dev->xmit_lock);
	} else {
		nv_copy_mac_to_hw(dev);
	}
	return 0;
}
/*
 * nv_set_multicast: dev->set_multicast function
 * Called with dev->xmit_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 addr[2];
	u32 mask[2];
	u32 pff;

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

	if (dev->flags & IFF_PROMISC) {
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		pff = NVREG_PFF_PROMISC;
	} else {
		pff = NVREG_PFF_MYADDR;

		if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
			u32 alwaysOff[2];
			u32 alwaysOn[2];

			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
			if (dev->flags & IFF_ALLMULTI) {
				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
			} else {
				struct dev_mc_list *walk;

				walk = dev->mc_list;
				while (walk != NULL) {
					u32 a, b;
					a = le32_to_cpu(*(u32 *) walk->dmi_addr);
					b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4]));
					alwaysOn[0] &= a;
					alwaysOff[0] &= ~a;
					alwaysOn[1] &= b;
					alwaysOff[1] &= ~b;
					walk = walk->next;
				}
			}
			addr[0] = alwaysOn[0];
			addr[1] = alwaysOn[1];
			mask[0] = alwaysOn[0] | alwaysOff[0];
			mask[1] = alwaysOn[1] | alwaysOff[1];
		}
	}
	addr[0] |= NVREG_MCASTADDRA_FORCE;
	pff |= NVREG_PFF_ALWAYS;
	spin_lock_irq(&np->lock);
	nv_stop_rx(dev);
	writel(addr[0], base + NvRegMulticastAddrA);
	writel(addr[1], base + NvRegMulticastAddrB);
	writel(mask[0], base + NvRegMulticastMaskA);
	writel(mask[1], base + NvRegMulticastMaskB);
	writel(pff, base + NvRegPacketFilterFlags);
	dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
		dev->name);
	nv_start_rx(dev);
	spin_unlock_irq(&np->lock);
}
static int nv_update_linkspeed(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int adv, lpa;
	int newls = np->linkspeed;
	int newdup = np->duplex;
	int mii_status;
	int retval = 0;
	u32 control_1000, status_1000, phyreg;

	/* BMSR_LSTATUS is latched, read it twice:
	 * we want the current value.
	 */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	if (!(mii_status & BMSR_LSTATUS)) {
		dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
				dev->name);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		goto set_speed;
	}

	if (np->autoneg == 0) {
		dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
				dev->name, np->fixed_mode);
		if (np->fixed_mode & LPA_100FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 1;
		} else if (np->fixed_mode & LPA_100HALF) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 0;
		} else if (np->fixed_mode & LPA_10FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 1;
		} else {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 0;
		}
		retval = 1;
		goto set_speed;
	}
	/* check auto negotiation is complete */
	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
		goto set_speed;
	}

	retval = 1;
	if (np->gigabit == PHY_GIGABIT) {
		control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
		status_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_SR, MII_READ);

		if ((control_1000 & ADVERTISE_1000FULL) &&
			(status_1000 & LPA_1000FULL)) {
			dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
				dev->name);
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
			newdup = 1;
			goto set_speed;
		}
	}

	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
				dev->name, adv, lpa);

	/* FIXME: handle parallel detection properly */
	lpa = lpa & adv;
	if (lpa & LPA_100FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 1;
	} else if (lpa & LPA_100HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 0;
	} else if (lpa & LPA_10FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 1;
	} else if (lpa & LPA_10HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	} else {
		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, lpa);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	}

set_speed:
	if (np->duplex == newdup && np->linkspeed == newls)
		return retval;

	dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
			dev->name, np->linkspeed, np->duplex, newls, newdup);

	np->duplex = newdup;
	np->linkspeed = newls;

	if (np->gigabit == PHY_GIGABIT) {
		phyreg = readl(base + NvRegRandomSeed);
		phyreg &= ~(0x3FF00);
		if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
			phyreg |= NVREG_RNDSEED_FORCE3;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
			phyreg |= NVREG_RNDSEED_FORCE2;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_RNDSEED_FORCE;
		writel(phyreg, base + NvRegRandomSeed);
	}

	phyreg = readl(base + NvRegPhyInterface);
	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
		base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);

	return retval;
}
static void nv_linkchange(struct net_device *dev)
{
	if (nv_update_linkspeed(dev)) {
		if (netif_carrier_ok(dev)) {
			nv_stop_rx(dev);
		} else {
			netif_carrier_on(dev);
			printk(KERN_INFO "%s: link up.\n", dev->name);
		}
		nv_start_rx(dev);
	} else {
		if (netif_carrier_ok(dev)) {
			netif_carrier_off(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
			nv_stop_rx(dev);
		}
	}
}
static void nv_link_irq(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miistat;

	miistat = readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}
static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		pci_push(base);
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_ERROR|NVREG_IRQ_TX_ERR)) {
			spin_lock(&np->lock);
			nv_tx_done(dev);
			spin_unlock(&np->lock);
		}

		if (events & (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF)) {
			nv_rx_process(dev);
			if (nv_alloc_rx(dev)) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}

		if (events & NVREG_IRQ_LINK) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & (NVREG_IRQ_TX_ERR)) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
						dev->name, events);
		}
		if (events & (NVREG_IRQ_UNKNOWN)) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
						dev->name, events);
		}
		if (i > max_interrupt_work) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			writel(0, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown)
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
			spin_unlock(&np->lock);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static void nv_do_nic_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	disable_irq(dev->irq);
	/* FIXME: Do we need synchronize_irq(dev->irq) here? */
	/*
	 * reenable interrupts on the nic, we have to do this before calling
	 * nv_nic_irq because that may decide to do otherwise
	 */
	writel(np->irqmask, base + NvRegIrqMask);
	pci_push(base);
	nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
	enable_irq(dev->irq);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
	nv_do_nic_poll((unsigned long) dev);
}
#endif
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct fe_priv *np = netdev_priv(dev);
	strcpy(info->driver, "forcedeth");
	strcpy(info->version, FORCEDETH_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	wolinfo->supported = WAKE_MAGIC;

	spin_lock_irq(&np->lock);
	if (np->wolenabled)
		wolinfo->wolopts = WAKE_MAGIC;
	spin_unlock_irq(&np->lock);
}
static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	spin_lock_irq(&np->lock);
	if (wolinfo->wolopts == 0) {
		writel(0, base + NvRegWakeUpFlags);
		np->wolenabled = 0;
	}
	if (wolinfo->wolopts & WAKE_MAGIC) {
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
		np->wolenabled = 1;
	}
	spin_unlock_irq(&np->lock);
	return 0;
}
static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv;

	spin_lock_irq(&np->lock);
	ecmd->port = PORT_MII;
	if (!netif_running(dev)) {
		/* We do not track link speed / duplex setting if the
		 * interface is disabled. Force a link check */
		nv_update_linkspeed(dev);
	}
	switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
	case NVREG_LINKSPEED_10:
		ecmd->speed = SPEED_10;
		break;
	case NVREG_LINKSPEED_100:
		ecmd->speed = SPEED_100;
		break;
	case NVREG_LINKSPEED_1000:
		ecmd->speed = SPEED_1000;
		break;
	}
	ecmd->duplex = DUPLEX_HALF;
	if (np->duplex)
		ecmd->duplex = DUPLEX_FULL;

	ecmd->autoneg = np->autoneg;

	ecmd->advertising = ADVERTISED_MII;
	if (np->autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	} else {
		adv = np->fixed_mode;
	}
	if (adv & ADVERTISE_10HALF)
		ecmd->advertising |= ADVERTISED_10baseT_Half;
	if (adv & ADVERTISE_10FULL)
		ecmd->advertising |= ADVERTISED_10baseT_Full;
	if (adv & ADVERTISE_100HALF)
		ecmd->advertising |= ADVERTISED_100baseT_Half;
	if (adv & ADVERTISE_100FULL)
		ecmd->advertising |= ADVERTISED_100baseT_Full;
	if (np->autoneg && np->gigabit == PHY_GIGABIT) {
		adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
		if (adv & ADVERTISE_1000FULL)
			ecmd->advertising |= ADVERTISED_1000baseT_Full;
	}

	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_MII);
	if (np->gigabit == PHY_GIGABIT)
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->phy_address = np->phyaddr;
	ecmd->transceiver = XCVR_EXTERNAL;

	/* ignore maxtxpkt, maxrxpkt for now */
	spin_unlock_irq(&np->lock);
	return 0;
}

static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);

	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != np->phyaddr) {
		/* TODO: support switching between multiple phys. Should be
		 * trivial, but not enabled due to lack of test hardware. */
		return -EINVAL;
	}
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 mask;

		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT)
			mask |= ADVERTISED_1000baseT_Full;

		if ((ecmd->advertising & mask) == 0)
			return -EINVAL;
	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		/* Note: autonegotiation disable, speed 1000 intentionally
		 * forbidden - noone should need that. */

		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	spin_lock_irq(&np->lock);
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		int adv, bmcr;

		np->autoneg = 1;

		/* advertise only what has been requested */
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			adv |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			adv |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			adv |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			adv |= ADVERTISE_100FULL;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
				adv |= ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
		}

		/* restart autonegotiation with the new advertisement */
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		int adv, bmcr;

		np->autoneg = 0;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_10HALF;
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_10FULL;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_100HALF;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_100FULL;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
		np->fixed_mode = adv;

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX);
		if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);

		if (netif_running(dev)) {
			/* Wait a bit and then reconfigure the nic. */
			udelay(10);
			nv_linkchange(dev);
		}
	}
	spin_unlock_irq(&np->lock);

	return 0;
}

#define FORCEDETH_REGS_VER	1
#define FORCEDETH_REGS_SIZE	0x400 /* 256 32-bit registers */

static int nv_get_regs_len(struct net_device *dev)
{
	return FORCEDETH_REGS_SIZE;
}

static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	for (i = 0; i < FORCEDETH_REGS_SIZE/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}

static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int ret;

	spin_lock_irq(&np->lock);
	if (np->autoneg) {
		int bmcr;

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);

		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irq(&np->lock);

	return ret;
}

static struct ethtool_ops ops = {
	.get_drvinfo = nv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_wol = nv_get_wol,
	.set_wol = nv_set_wol,
	.get_settings = nv_get_settings,
	.set_settings = nv_set_settings,
	.get_regs_len = nv_get_regs_len,
	.get_regs = nv_get_regs,
	.nway_reset = nv_nway_reset,
	.get_perm_addr = ethtool_op_get_perm_addr,
};

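/* Bring the nic up: clear any stale configuration, set up the descriptor
 * rings, program the MAC address and ring addresses into the hardware, then
 * enable interrupts and do one manual link check. The numbered comments
 * mark the init sequence.
 */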
static int nv_open(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret, oom, i;

	dprintk(KERN_DEBUG "nv_open: begin\n");

	/* 1) erase previous misconfiguration */
	/* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(0, base + NvRegMulticastMaskA);
	writel(0, base + NvRegMulticastMaskB);
	writel(0, base + NvRegPacketFilterFlags);

	writel(0, base + NvRegTransmitterControl);
	writel(0, base + NvRegReceiverControl);

	writel(0, base + NvRegAdapterControl);

	/* 2) initialize descriptor rings */
	set_bufsize(dev);
	oom = nv_init_ring(dev);

	writel(0, base + NvRegLinkSpeed);
	writel(0, base + NvRegUnknownTransmitterReg);
	nv_txrx_reset(dev);
	writel(0, base + NvRegUnknownSetupReg6);

	np->in_shutdown = 0;

	/* 3) set mac address */
	nv_copy_mac_to_hw(dev);

	/* 4) give hw rings */
	writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
	else
		writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
	writel(((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);

	/* 5) continue setup */
	writel(np->linkspeed, base + NvRegLinkSpeed);
	writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
	writel(np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
	reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
			NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
			KERN_INFO "open: SetupReg5, Bit 31 remained off\n");

	writel(0, base + NvRegUnknownSetupReg4);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);

	/* 6) continue setup */
	writel(NVREG_MISC1_FORCE|NVREG_MISC1_HD, base + NvRegMisc1);
	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);

	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
	get_random_bytes(&i, sizeof(i));
	writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
	writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1);
	writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2);
	writel(NVREG_POLL_DEFAULT, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
			base + NvRegAdapterControl);
	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
	writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
	writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags);

	i = readl(base + NvRegPowerState);
	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

	pci_push(base);
	udelay(10);
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	writel(0, base + NvRegIrqMask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	ret = request_irq(dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev);
	if (ret)
		goto out_drain;

	/* ask for interrupts */
	writel(np->irqmask, base + NvRegIrqMask);

	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(0, base + NvRegMulticastMaskA);
	writel(0, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
	}
	/* set linkspeed to invalid value, thus force nv_update_linkspeed
	 * to init hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rx(dev);
	nv_start_tx(dev);
	netif_start_queue(dev);
	if (ret) {
		netif_carrier_on(dev);
	} else {
		printk("%s: no link during initialization.\n", dev->name);
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
	spin_unlock_irq(&np->lock);

	return 0;
out_drain:
	drain_ring(dev);
	return ret;
}

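/* Tear-down order matters: flag the shutdown first so the interrupt handler
 * stops rearming timers, wait for any handler still in flight, kill the
 * timers, and only then stop the hardware and release the irq.
 */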
static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
	synchronize_irq(dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_stop_tx(dev);
	nv_stop_rx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	writel(0, base + NvRegIrqMask);
	pci_push(base);
	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

	spin_unlock_irq(&np->lock);

	free_irq(dev->irq, dev);

	drain_ring(dev);

	if (np->wolenabled)
		nv_start_rx(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);

	/* FIXME: power down nic */

	return 0;
}

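/* PCI probe: pick the chip variant via the pci_device_id driver_data flags,
 * map the register window, allocate the rx/tx descriptor rings and recover
 * the MAC address, which the hardware stores in a misordered byte layout.
 */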
static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct fe_priv *np;
	unsigned long addr;
	u8 __iomem *base;
	int err, i;

	dev = alloc_etherdev(sizeof(struct fe_priv));
	err = -ENOMEM;
	if (!dev)
		goto out;

	np = netdev_priv(dev);
	np->pci_dev = pci_dev;
	spin_lock_init(&np->lock);
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pci_dev->dev);

	init_timer(&np->oom_kick);
	np->oom_kick.data = (unsigned long) dev;
	np->oom_kick.function = &nv_do_rx_refill;	/* timer handler */
	init_timer(&np->nic_poll);
	np->nic_poll.data = (unsigned long) dev;
	np->nic_poll.function = &nv_do_nic_poll;	/* timer handler */

	err = pci_enable_device(pci_dev);
	if (err) {
		printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n",
				err, pci_name(pci_dev));
		goto out_free;
	}

	pci_set_master(pci_dev);

	err = pci_request_regions(pci_dev, DRV_NAME);
	if (err < 0)
		goto out_disable;

	err = -EINVAL;
	addr = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
				pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
				pci_resource_len(pci_dev, i),
				pci_resource_flags(pci_dev, i));
		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
				pci_resource_len(pci_dev, i) >= NV_PCI_REGSZ) {
			addr = pci_resource_start(pci_dev, i);
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE) {
		printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n",
				pci_name(pci_dev));
		goto out_relreg;
	}

	/* handle different descriptor versions */
	if (id->driver_data & DEV_HAS_HIGH_DMA) {
		/* packet format 3: supports 40-bit addressing */
		np->desc_ver = DESC_VER_3;
		if (pci_set_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
			printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
					pci_name(pci_dev));
		} else {
			dev->features |= NETIF_F_HIGHDMA;
		}
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
		/* packet format 2: supports jumbo frames */
		np->desc_ver = DESC_VER_2;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}

	np->pkt_limit = NV_PKTLIMIT_1;
	if (id->driver_data & DEV_HAS_LARGEDESC)
		np->pkt_limit = NV_PKTLIMIT_2;

	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
#ifdef NETIF_F_TSO
		dev->features |= NETIF_F_TSO;
#endif
	}

	err = -ENOMEM;
	np->base = ioremap(addr, NV_PCI_REGSZ);
	if (!np->base)
		goto out_relreg;
	dev->base_addr = (unsigned long)np->base;

	dev->irq = pci_dev->irq;

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc) * (RX_RING + TX_RING),
					&np->ring_addr);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[RX_RING];
	} else {
		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
					&np->ring_addr);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[RX_RING];
	}

	dev->open = nv_open;
	dev->stop = nv_close;
	dev->hard_start_xmit = nv_start_xmit;
	dev->get_stats = nv_get_stats;
	dev->change_mtu = nv_change_mtu;
	dev->set_mac_address = nv_set_mac_address;
	dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = nv_poll_controller;
#endif
	SET_ETHTOOL_OPS(dev, &ops);
	dev->tx_timeout = nv_tx_timeout;
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	pci_set_drvdata(pci_dev, dev);

	/* read the mac address */
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);

	dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
	dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
	dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
	dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
	dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
	dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n",
			pci_name(pci_dev),
			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
		printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n");
		dev->dev_addr[0] = 0x00;
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x6c;
		get_random_bytes(&dev->dev_addr[3], 3);
	}

	dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;

	if (np->desc_ver == DESC_VER_1) {
		np->tx_flags = NV_TX_VALID;
	} else {
		np->tx_flags = NV_TX2_VALID;
	}
	np->irqmask = NVREG_IRQMASK_WANTED;
	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}

	/* find a suitable phy */
	for (i = 1; i < 32; i++) {
		int id1, id2;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, i, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, i, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
				pci_name(pci_dev), id1, id2, i);
		np->phyaddr = i;
		np->phy_oui = id1 | id2;
		break;
	}
	if (i == 32) {
		/* PHY in isolate mode? No phy attached and user wants to
		 * test loopback? Very odd, but can be correct.
		 */
		printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
				pci_name(pci_dev));
		goto out_freering;
	}

	/* reset it */
	phy_init(dev);

	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
		goto out_freering;
	}
	printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
			dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
			pci_name(pci_dev));

	return 0;

out_freering:
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
				np->rx_ring.orig, np->ring_addr);
	else
		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
				np->rx_ring.ex, np->ring_addr);
	pci_set_drvdata(pci_dev, NULL);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}

static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);

	unregister_netdev(dev);

	/* free all structures */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
	else
		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}

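/* Supported chips. The driver_data flags describe what each variant can do:
 * DEV_HAS_LARGEDESC selects the jumbo-capable descriptor format (packet
 * format 2), DEV_HAS_CHECKSUM enables hardware checksumming, and
 * DEV_HAS_HIGH_DMA allows 40-bit DMA addressing (packet format 3).
 */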
static struct pci_device_id pci_tbl[] = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{0,},
};

static struct pci_driver driver = {
	.name = "forcedeth",
	.id_table = pci_tbl,
	.probe = nv_probe,
	.remove = __devexit_p(nv_remove),
};

static int __init init_nic(void)
{
	printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
	return pci_module_init(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}

module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);