/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Changelog:
 *	0.01: 05 Oct 2003: First release that compiles without warnings.
 *	0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
 *			   Check all PCI BARs for the register window.
 *			   udelay added to mii_rw.
 *	0.03: 06 Oct 2003: Initialize dev->irq.
 *	0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
 *	0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
 *	0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
 *	0.07: 14 Oct 2003: Further irq mask updates.
 *	0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
 *			   added into irq handler, NULL check for drain_ring.
 *	0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
 *			   requested interrupt sources.
 *	0.10: 20 Oct 2003: First cleanup for release.
 *	0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
 *			   MAC Address init fix, set_multicast cleanup.
 *	0.12: 23 Oct 2003: Cleanups for release.
 *	0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
 *			   Set link speed correctly. start rx before starting
 *			   tx (nv_start_rx sets the link speed).
 *	0.14: 25 Oct 2003: Nic dependent irq mask.
 *	0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
 *	0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
 *			   increased to 1628 bytes.
 *	0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
 *	0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
 *	0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
 *			   addresses, really stop rx if already running
 *			   in nv_start_rx, clean up a bit.
 *	0.20: 07 Dec 2003: alloc fixes
 *	0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
 *	0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
 *	0.23: 26 Jan 2004: various small cleanups
 *	0.24: 27 Feb 2004: make driver even less anonymous in backtraces
 *	0.25: 09 Mar 2004: wol support
 *	0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
 *	0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
 *			   added CK804/MCP04 device IDs, code fixes
 *			   for registers, link status and other minor fixes.
 *	0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
 *	0.29: 31 Aug 2004: Add backup timer for link change notification.
 *	0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
 *			   into nv_close, otherwise reenabling for wol can
 *			   cause DMA to kfree'd memory.
 *	0.31: 14 Nov 2004: ethtool support for getting/setting link
 *	0.32: 16 Apr 2005: RX_ERROR4 handling added.
 *	0.33: 16 May 2005: Support for MCP51 added.
 *	0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
 *	0.35: 26 Jun 2005: Support for MCP55 added.
 *	0.36: 28 Jun 2005: Add jumbo frame support.
 *	0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
 *	0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
 *	0.39: 18 Jul 2005: Add 64bit descriptor support.
 *	0.40: 19 Jul 2005: Add support for mac address change.
 *	0.41: 30 Jul 2005: Write back original MAC in nv_close instead
 *	0.42: 06 Aug 2005: Fix lack of link speed initialization
 *			   in the second (and later) nv_open call
 *	0.43: 10 Aug 2005: Add support for tx checksum.
 *	0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
 *	0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
 *	0.46: 20 Oct 2005: Add irq optimization modes.
 *	0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
 *	0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
 *	0.49: 10 Dec 2005: Fix tso for large buffers.
 *	0.50: 20 Jan 2006: Add 8021pq tagging support.
 *	0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
 *	0.52: 20 Jan 2006: Add MSI/MSIX support.
 *	0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
 *	0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
 *	0.55: 22 Mar 2006: Add flow control (pause frame).
 *	0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
 *	0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
 *	0.58: 30 Oct 2006: Added support for sideband management unit.
 *	0.59: 30 Oct 2006: Added support for recoverable error.
 *	0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#ifdef CONFIG_FORCEDETH_NAPI
#define DRIVERNAPI "-NAPI"
#else
#define DRIVERNAPI ""
#endif
#define FORCEDETH_VERSION		"0.61"
#define DRV_NAME			"forcedeth"
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif
#define TX_WORK_PER_LOOP	64
#define RX_WORK_PER_LOOP	64
/*
 * Hardware access:
 */
#define DEV_NEED_TIMERIRQ	0x0001	/* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x0002	/* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x0004	/* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x0008	/* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x0010	/* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x0020	/* device supports vlan tagging and striping */
#define DEV_HAS_MSI		0x0040	/* device supports MSI */
#define DEV_HAS_MSI_X		0x0080	/* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x0100	/* device supports power savings */
#define DEV_HAS_PAUSEFRAME_TX	0x0200	/* device supports tx pause frames */
#define DEV_HAS_STATISTICS_V1	0x0400	/* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2	0x0800	/* device supports hw statistics version 2 */
#define DEV_HAS_TEST_EXTENDED	0x1000	/* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT	0x2000	/* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR	0x4000	/* device supports correct mac address order */
enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK		0x81ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_RX_FORCED		0x0080
#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQ_RECOVER_ERROR		0x8000
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
					NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL		3
/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	970 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
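/*
 * For reference: with the poll_interval formula given further below,
 * value = (time_in_micro_secs * 100) / 2^10, a 1 ms interval works out to
 * 1000 * 100 / 1024 ~= 97, matching the NVREG_POLL_DEFAULT=97 example in
 * the comment above.
 */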
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegRandomSeed = 0x9c,
#define NVREG_RNDSEED_MASK	0x00ff
#define NVREG_RNDSEED_FORCE	0x7f00
#define NVREG_RNDSEED_FORCE2	0x2d00
#define NVREG_RNDSEED_FORCE3	0x7400

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT	0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100	0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000	0x14050f
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE		0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE		0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x01ff0080
#define NVREG_TX_PAUSEFRAME_ENABLE	0x01800010
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK		0x000f
#define NVREG_MIISTAT_MASK2		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003

	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F11
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
};
/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
};
#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
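/* For reference: LEN_MASK_V1 evaluates to 0x0000ffff and LEN_MASK_V2 to
 * 0x00003fff, i.e. the buffer length lives in the low bits of flaglen while
 * the status flags defined below live in the FLAG_MASK_V1/V2 bits. */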
#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)

#define NV_RX3_VLAN_TAG_PRESENT	(1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4
/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		128
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384
/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100		/* Actual limit according to NVidia: 9202 */
#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)
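/* For reference: these are jiffies values, so OOM_REFILL is roughly 50 ms
 * and POLL_WAIT roughly 10 ms regardless of HZ; the +1 keeps the delay
 * non-zero on kernels where HZ/20 or HZ/100 would truncate to 0. */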
/*
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3
#define PHY_OUI_MARVELL		0x5043
#define PHY_OUI_CICADA		0x03f1
#define PHY_OUI_VITESSE		0x01c1
#define PHY_OUI_REALTEK		0x0732
#define PHYID1_OUI_MASK		0x03ff
#define PHYID1_OUI_SHFT		6
#define PHYID2_OUI_MASK		0xfc00
#define PHYID2_OUI_SHFT		10
#define PHYID2_MODEL_MASK		0x03f0
#define PHY_MODEL_MARVELL_E3016		0x220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17
#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_HALF	0x100
#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040
/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS  8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE        0x0010
#define NV_MSI_X_CAPABLE      0x0020
#define NV_MSI_ENABLED        0x0040
#define NV_MSI_X_ENABLED      0x0080

#define NV_MSI_X_VECTOR_ALL   0x0
#define NV_MSI_X_VECTOR_RX    0x0
#define NV_MSI_X_VECTOR_TX    0x1
#define NV_MSI_X_VECTOR_OTHER 0x2
#define NV_RESTART_TX         0x1
#define NV_RESTART_RX         0x2
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};
static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" },
};
struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;
};
#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
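/* For reference: the counts are derived from the struct layout above, i.e.
 * the number of u64 counters; version 1 hardware lacks the six version-2
 * counters, hence the "- 6". */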
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link      (online/offline)" },
	{ "register  (offline)       " },
	{ "interrupt (offline)       " },
	{ "loopback  (offline)       " }
};
struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0,0 }
};
struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len;
};
/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *   by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *   needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */
/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct nv_ethtool_stats estats;
	unsigned int phy_oui;
	unsigned int phy_model;
	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	struct vlan_group *vlangrp;
	/* msi/msi-x fields */
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
};
/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;
/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU
};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
/*
 * Poll interval for timer irq
 *
 * This interval determines how frequent an interrupt is generated.
 * This value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_DISABLED;

enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}
static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}
static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}
static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}
static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}
#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02
static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}
static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}
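/* The double shift above is deliberate: when dma_addr_t is only 32 bits
 * wide, a single shift by 32 would be undefined behaviour in C. ">>31>>1"
 * is well defined for both widths and yields the upper 32 address bits
 * (0 on 32-bit configurations). */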
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if (np->tx_skb)
		kfree(np->tx_skb);
}
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
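/* For reference: this returns 0 when the device runs on a single interrupt
 * (legacy/MSI, or MSI-X with just one shared vector) and 1 when separate
 * MSI-X vectors are used for rx, tx and other events, as exercised by
 * nv_enable_irq()/nv_disable_irq() below. */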
static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}
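/* Illustration of the XOR behaviour noted above: in MSI-X mode, writing a
 * bit that is already set in the irqmask clears it again, so callers must
 * write only the bits they intend to toggle rather than an absolute mask. */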
static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
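/*
 * Usage sketch (illustrative, based on the calls made throughout this
 * driver): a read passes MII_READ as the value and gets the register
 * contents (or -1 on failure) back; a write passes the value itself and
 * returns 0 on success:
 *
 *	int bmsr = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 *	if (mii_rw(dev, np->phyaddr, MII_BMCR, BMCR_ANENABLE))
 *		;	// handle the failed write
 */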
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	/* reset the phy
	 * (certain phys need bmcr to be setup with reset)
	 */
	if (phy_reset(dev, mii_control)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_CICADA_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_VITESSE) {
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
		phy_reserved |= PHY_VITESSE_INIT8;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		/* reset could have cleared these out, set them back */
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
		dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}
static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		  NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}
static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}
static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		  NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}
static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}
}
/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		dev->stats.tx_bytes = np->estats.tx_bytes;
		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
		dev->stats.rx_over_errors = np->estats.rx_over_errors;
		dev->stats.rx_errors = np->estats.rx_errors_total;
		dev->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &dev->stats;
}
/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc* less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex* less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
/* If rx bufs are exhausted called after 50ms to attempt to refresh */
#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	/* Just reschedule NAPI rx processing */
	netif_rx_schedule(dev, &np->napi);
}
#else
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	int retcode;

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		retcode = nv_alloc_rx(dev);
	else
		retcode = nv_alloc_rx_optimized(dev);
	if (retcode) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}
#endif
static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}
static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];

	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
	}
}
static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_init_tx(dev);
	nv_init_rx(dev);
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return nv_alloc_rx(dev);
	else
		return nv_alloc_rx_optimized(dev);
}
static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
{
	struct fe_priv *np = netdev_priv(dev);

	if (tx_skb->dma) {
		pci_unmap_page(np->pci_dev, tx_skb->dma,
			       tx_skb->dma_len,
			       PCI_DMA_TODEVICE);
		tx_skb->dma = 0;
	}
	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
		return 1;
	} else {
		return 0;
	}
}
static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		if (nv_release_txskb(dev, &np->tx_skb[i]))
			dev->stats.tx_dropped++;
	}
}
static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		wmb();
		if (np->rx_skb[i].skb) {
			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
					 (skb_end_pointer(np->rx_skb[i].skb) -
					  np->rx_skb[i].skb->data),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skb[i].skb);
			np->rx_skb[i].skb = NULL;
		}
	}
}
static void drain_ring(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}
static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
{
	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
}
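/* Worked example: with tx_ring_size = 256 and put_tx_ctx 10 slots ahead of
 * get_tx_ctx, this yields 256 - ((256 + 10) % 256) = 246 empty slots; when
 * the ring is idle (put == get) the whole ring size is reported as free. */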
/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc* put_tx;
	struct ring_desc* start_tx;
	struct ring_desc* prev_tx;
	struct nv_skb_map* prev_tx_ctx;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		spin_lock_irq(&np->lock);
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irq(&np->lock);
		return NETDEV_TX_BUSY;
	}

	start_tx = put_tx = np->put_tx.orig;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.orig))
			put_tx = np->first_tx.orig;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.orig))
				put_tx = np->first_tx.orig;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	spin_lock_irq(&np->lock);

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.orig = put_tx;

	spin_unlock_irq(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}
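/*
 * Note on the entries arithmetic above: each descriptor carries at most
 * NV_TX2_TSO_MAX_SIZE bytes, so a buffer of length size needs
 * size >> NV_TX2_TSO_MAX_SHIFT full descriptors plus one more for any
 * remainder. Illustrative example, assuming the limit is 1 <<
 * NV_TX2_TSO_MAX_SHIFT = 16384 bytes: a 40000 byte TSO frame needs
 * 40000 >> 14 = 2 full descriptors plus 1 partial, i.e. entries = 3
 * before the per-fragment contributions are added in.
 */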
static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra;
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc_ex* put_tx;
	struct ring_desc_ex* start_tx;
	struct ring_desc_ex* prev_tx;
	struct nv_skb_map* prev_tx_ctx;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		spin_lock_irq(&np->lock);
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irq(&np->lock);
		return NETDEV_TX_BUSY;
	}

	start_tx = put_tx = np->put_tx.ex;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
		put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = NV_TX2_VALID;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.ex))
			put_tx = np->first_tx.ex;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
			put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.ex))
				put_tx = np->first_tx.ex;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	/* vlan tag */
	if (likely(!np->vlangrp)) {
		start_tx->txvlan = 0;
	} else {
		if (vlan_tx_tag_present(skb))
			start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
		else
			start_tx->txvlan = 0;
	}

	spin_lock_irq(&np->lock);

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.ex = put_tx;

	spin_unlock_irq(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}
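/*
 * The bufhigh/buflow split above is what distinguishes the version 3
 * ("optimized") descriptor from the 32 bit layouts: dma_high() and
 * dma_low() carve a dma_addr_t into the two 32 bit halves the hardware
 * expects, so the same code path works whether the platform hands the
 * driver 32 or 64 bit bus addresses.
 */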
/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static void nv_tx_done(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	struct ring_desc* orig_get_tx = np->get_tx.orig;

	while ((np->get_tx.orig != np->put_tx.orig) &&
	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
			dev->name, flags);

		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
			       np->get_tx_ctx->dma_len,
			       PCI_DMA_TODEVICE);
		np->get_tx_ctx->dma = 0;

		if (np->desc_ver == DESC_VER_1) {
			if (flags & NV_TX_LASTPACKET) {
				if (flags & NV_TX_ERROR) {
					if (flags & NV_TX_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
			}
		} else {
			if (flags & NV_TX2_LASTPACKET) {
				if (flags & NV_TX2_ERROR) {
					if (flags & NV_TX2_UNDERFLOW)
						dev->stats.tx_fifo_errors++;
					if (flags & NV_TX2_CARRIERLOST)
						dev->stats.tx_carrier_errors++;
					dev->stats.tx_errors++;
				} else {
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
			}
		}
		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
			np->get_tx.orig = np->first_tx.orig;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
}
static void nv_tx_done_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	struct ring_desc_ex* orig_get_tx = np->get_tx.ex;

	while ((np->get_tx.ex != np->put_tx.ex) &&
	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
	       (limit-- > 0)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
			dev->name, flags);

		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
			       np->get_tx_ctx->dma_len,
			       PCI_DMA_TODEVICE);
		np->get_tx_ctx->dma = 0;

		if (flags & NV_TX2_LASTPACKET) {
			if (!(flags & NV_TX2_ERROR))
				dev->stats.tx_packets++;
			dev_kfree_skb_any(np->get_tx_ctx->skb);
			np->get_tx_ctx->skb = NULL;
		}
		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
			np->get_tx.ex = np->first_tx.ex;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
}
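/*
 * Unlike nv_tx_done(), this variant takes an explicit work limit so the
 * MSI-X tx interrupt handler can bound how many descriptors it reaps per
 * call (TX_WORK_PER_LOOP), while nv_tx_timeout() passes the whole ring
 * size to drain everything in one pass.
 */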
/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx\n",
		       dev->name, (unsigned long)np->ring_addr);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i=0;i<=np->register_size;i+= 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
					i,
					readl(base + i + 0), readl(base + i + 4),
					readl(base + i + 8), readl(base + i + 12),
					readl(base + i + 16), readl(base + i + 20),
					readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i=0;i<np->tx_ring_size;i+= 4) {
			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.orig[i].buf),
				       le32_to_cpu(np->tx_ring.orig[i].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+1].buf),
				       le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+2].buf),
				       le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+3].buf),
				       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.ex[i].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i].buflow),
				       le32_to_cpu(np->tx_ring.ex[i].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+1].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+2].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+3].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) check that the packets were not sent already: */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		nv_tx_done(dev);
	else
		nv_tx_done_optimized(dev, np->tx_ring_size);

	/* 3) if there are dead entries: clear everything */
	if (np->get_tx_ctx != np->put_tx_ctx) {
		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
		nv_drain_tx(dev);
		nv_init_tx(dev);
		setup_hw_rings(dev, NV_SETUP_TX_RING);
	}

	netif_wake_queue(dev);

	/* 4) restart tx engine */
	nv_start_tx(dev);
	spin_unlock_irq(&np->lock);
}
/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
		protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
				dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
					dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
					dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
					dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, datalen);
		return datalen;
	}
}
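/*
 * Example of the checks above (illustrative numbers): a 100 byte frame
 * whose 802.3 length field announces 46 bytes of payload gives
 * protolen = 46 + 14 = 60; datalen >= protolen, so the frame is
 * accepted and trimmed to 60 bytes. A frame shorter than the length its
 * header announces is discarded as corrupt instead.
 */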
static int nv_rx_process(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while((np->get_rx.orig != np->put_rx.orig) &&
	      !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
	      (rx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
					dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V1;
				if (unlikely(flags & NV_RX_ERROR)) {
					if (flags & NV_RX_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if (flags & NV_RX_FRAMINGERR) {
						if (flags & NV_RX_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX_MISSEDFRAME)
							dev->stats.rx_missed_errors++;
						if (flags & NV_RX_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		} else {
			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V2;
				if (unlikely(flags & NV_RX2_ERROR)) {
					if (flags & NV_RX2_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							dev->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if (flags & NV_RX2_FRAMINGERR) {
						if (flags & NV_RX2_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX2_CRCERR)
							dev->stats.rx_crc_errors++;
						if (flags & NV_RX2_OVERFLOW)
							dev->stats.rx_over_errors++;
						dev->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
					skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		}
		/* got a valid packet - forward it to the network core */
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
					dev->name, len, skb->protocol);
#ifdef CONFIG_FORCEDETH_NAPI
		netif_receive_skb(skb);
#else
		netif_rx(skb);
#endif
		dev->last_rx = jiffies;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
next_pkt:
		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
			np->get_rx.orig = np->first_rx.orig;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}
static int nv_rx_process_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	u32 vlanflags = 0;
	int rx_work = 0;
	struct sk_buff *skb;
	int len;

	while((np->get_rx.ex != np->put_rx.ex) &&
	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
	      (rx_work < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
					dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
			len = flags & LEN_MASK_V2;
			if (unlikely(flags & NV_RX2_ERROR)) {
				if (flags & NV_RX2_ERROR4) {
					len = nv_getlen(dev, skb->data, len);
					if (len < 0) {
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				else if (flags & NV_RX2_FRAMINGERR) {
					if (flags & NV_RX2_SUBSTRACT1) {
						len--;
					}
				}
				/* the rest are hard errors */
				else {
					dev_kfree_skb(skb);
					goto next_pkt;
				}
			}

			if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
			    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* got a valid packet - forward it to the network core */
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);
			prefetch(skb->data);

			dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
				dev->name, len, skb->protocol);

			if (likely(!np->vlangrp)) {
#ifdef CONFIG_FORCEDETH_NAPI
				netif_receive_skb(skb);
#else
				netif_rx(skb);
#endif
			} else {
				vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
				if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
#ifdef CONFIG_FORCEDETH_NAPI
					vlan_hwaccel_receive_skb(skb, np->vlangrp,
								 vlanflags & NV_RX3_VLAN_TAG_MASK);
#else
					vlan_hwaccel_rx(skb, np->vlangrp,
							vlanflags & NV_RX3_VLAN_TAG_MASK);
#endif
				} else {
#ifdef CONFIG_FORCEDETH_NAPI
					netif_receive_skb(skb);
#else
					netif_rx(skb);
#endif
				}
			}

			dev->last_rx = jiffies;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		} else {
			dev_kfree_skb(skb);
		}
next_pkt:
		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
			np->get_rx.ex = np->first_rx.ex;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;

		rx_work++;
	}

	return rx_work;
}
static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}
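/*
 * Example values for the computation above (illustrative): with the
 * standard 1500 byte MTU, rx_buf_sz stays at ETH_DATA_LEN +
 * NV_RX_HEADERS; a jumbo MTU of 9000 gives 9000 + NV_RX_HEADERS, so a
 * full frame plus the header slack always fits in one receive buffer.
 */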
/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *np = netdev_priv(dev);
	int old_mtu;

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed, there is probably a simpler approach.
		 * Changing the MTU is a rare event, it shouldn't matter.
		 */
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		nv_txrx_reset(dev);
		/* drain rx queue */
		nv_drain_rx(dev);
		nv_drain_tx(dev);
		/* reinit driver view of the rx queue */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the rx queue */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart rx engine */
		nv_start_rx(dev);
		nv_start_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
static void nv_copy_mac_to_hw(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 mac[2];

	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

	writel(mac[0], base + NvRegMacAddrA);
	writel(mac[1], base + NvRegMacAddrB);
}
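/*
 * A minimal sketch of the register packing above (hypothetical helper,
 * not used by the driver): the six address octets are packed
 * little-endian, the first four into MacAddrA and the last two into the
 * low half of MacAddrB.
 */
#if 0
static u32 example_pack_mac_low(const u8 *addr)
{
	/* addr[0] ends up in the least significant byte, matching
	 * nv_copy_mac_to_hw() above */
	return addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
}
#endif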
/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sockaddr *macaddr = (struct sockaddr *)addr;

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	/* synchronized against open : rtnl_lock() held by caller */
	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);

	if (netif_running(dev)) {
		netif_tx_lock_bh(dev);
		spin_lock_irq(&np->lock);

		/* stop rx engine */
		nv_stop_rx(dev);

		/* set mac address */
		nv_copy_mac_to_hw(dev);

		/* restart rx engine */
		nv_start_rx(dev);
		spin_unlock_irq(&np->lock);
		netif_tx_unlock_bh(dev);
	} else {
		nv_copy_mac_to_hw(dev);
	}
	return 0;
}
/*
 * nv_set_multicast: dev->set_multicast function
 * Called with netif_tx_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 addr[2];
	u32 mask[2];
	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

	if (dev->flags & IFF_PROMISC) {
		pff |= NVREG_PFF_PROMISC;
	} else {
		pff |= NVREG_PFF_MYADDR;

		if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
			u32 alwaysOff[2];
			u32 alwaysOn[2];

			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
			if (dev->flags & IFF_ALLMULTI) {
				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
			} else {
				struct dev_mc_list *walk;

				walk = dev->mc_list;
				while (walk != NULL) {
					u32 a, b;
					a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
					b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
					alwaysOn[0] &= a;
					alwaysOff[0] &= ~a;
					alwaysOn[1] &= b;
					alwaysOff[1] &= ~b;
					walk = walk->next;
				}
			}
			addr[0] = alwaysOn[0];
			addr[1] = alwaysOn[1];
			mask[0] = alwaysOn[0] | alwaysOff[0];
			mask[1] = alwaysOn[1] | alwaysOff[1];
		} else {
			mask[0] = NVREG_MCASTMASKA_NONE;
			mask[1] = NVREG_MCASTMASKB_NONE;
		}
	}
	addr[0] |= NVREG_MCASTADDRA_FORCE;
	pff |= NVREG_PFF_ALWAYS;
	spin_lock_irq(&np->lock);
	nv_stop_rx(dev);
	writel(addr[0], base + NvRegMulticastAddrA);
	writel(addr[1], base + NvRegMulticastAddrB);
	writel(mask[0], base + NvRegMulticastMaskA);
	writel(mask[1], base + NvRegMulticastMaskB);
	writel(pff, base + NvRegPacketFilterFlags);
	dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
		dev->name);
	nv_start_rx(dev);
	spin_unlock_irq(&np->lock);
}
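/*
 * How the walk above builds the hardware filter: alwaysOn accumulates
 * (bitwise AND) the bits that are set in every multicast address on the
 * list, alwaysOff the bits that are clear in every address. A bit that
 * is constant across the whole list appears in alwaysOn | alwaysOff and
 * becomes part of the mask; addr then supplies the value the hardware
 * must match on those bits. Addresses that agree on the masked bits but
 * differ elsewhere are accepted too - the filter is approximate, exact
 * filtering is left to the stack.
 */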
static void nv_update_pause(struct net_device *dev, u32 pause_flags)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);

	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		} else {
			writel(pff, base + NvRegPacketFilterFlags);
		}
	}
	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
			writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		} else {
			writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
			writel(regmisc, base + NvRegMisc1);
		}
	}
}
/*
 * nv_update_linkspeed: Setup the MAC according to the link partner
 * @dev: Network device to be configured
 *
 * The function queries the PHY and checks if there is a link partner.
 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
 * set to 10 MBit HD.
 *
 * The function returns 0 if there is no link partner and 1 if there is
 * a good link partner.
 */
static int nv_update_linkspeed(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int adv = 0;
	int lpa = 0;
	int adv_lpa, adv_pause, lpa_pause;
	int newls = np->linkspeed;
	int newdup = np->duplex;
	int mii_status;
	int retval = 0;
	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
	u32 txrxFlags = 0;

	/* BMSR_LSTATUS is latched, read it twice:
	 * we want the current value.
	 */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	if (!(mii_status & BMSR_LSTATUS)) {
		dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
				dev->name);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		goto set_speed;
	}

	if (np->autoneg == 0) {
		dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
				dev->name, np->fixed_mode);
		if (np->fixed_mode & LPA_100FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 1;
		} else if (np->fixed_mode & LPA_100HALF) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 0;
		} else if (np->fixed_mode & LPA_10FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 1;
		} else {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 0;
		}
		retval = 1;
		goto set_speed;
	}
	/* check auto negotiation is complete */
	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
		goto set_speed;
	}

	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
				dev->name, adv, lpa);

	retval = 1;
	if (np->gigabit == PHY_GIGABIT) {
		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);

		if ((control_1000 & ADVERTISE_1000FULL) &&
			(status_1000 & LPA_1000FULL)) {
			dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
				dev->name);
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
			newdup = 1;
			goto set_speed;
		}
	}

	/* FIXME: handle parallel detection properly */
	adv_lpa = lpa & adv;
	if (adv_lpa & LPA_100FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 1;
	} else if (adv_lpa & LPA_100HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 0;
	} else if (adv_lpa & LPA_10FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 1;
	} else if (adv_lpa & LPA_10HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	} else {
		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	}

set_speed:
	if (np->duplex == newdup && np->linkspeed == newls)
		return retval;

	dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
			dev->name, np->linkspeed, np->duplex, newls, newdup);

	np->duplex = newdup;
	np->linkspeed = newls;

	/* The transmitter and receiver must be restarted for safe update */
	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
		txrxFlags |= NV_RESTART_TX;
		nv_stop_tx(dev);
	}
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		txrxFlags |= NV_RESTART_RX;
		nv_stop_rx(dev);
	}

	if (np->gigabit == PHY_GIGABIT) {
		phyreg = readl(base + NvRegRandomSeed);
		phyreg &= ~(0x3FF00);
		if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
			phyreg |= NVREG_RNDSEED_FORCE3;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
			phyreg |= NVREG_RNDSEED_FORCE2;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_RNDSEED_FORCE;
		writel(phyreg, base + NvRegRandomSeed);
	}

	phyreg = readl(base + NvRegPhyInterface);
	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	if (phyreg & PHY_RGMII) {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
		else
			txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
	} else {
		txreg = NVREG_TX_DEFERRAL_DEFAULT;
	}
	writel(txreg, base + NvRegTxDeferral);

	if (np->desc_ver == DESC_VER_1) {
		txreg = NVREG_TX_WM_DESC1_DEFAULT;
	} else {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_WM_DESC2_3_1000;
		else
			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
	}
	writel(txreg, base + NvRegTxWatermark);

	writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
		base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);

	pause_flags = 0;
	/* setup pause frame */
	if (np->duplex != 0) {
		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			switch (adv_pause) {
			case ADVERTISE_PAUSE_CAP:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_ASYM:
				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				break;
			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				if (lpa_pause == LPA_PAUSE_ASYM)
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
				break;
			}
		} else {
			pause_flags = np->pause_flags;
		}
	}
	nv_update_pause(dev, pause_flags);

	if (txrxFlags & NV_RESTART_TX)
		nv_start_tx(dev);
	if (txrxFlags & NV_RESTART_RX)
		nv_start_rx(dev);

	return retval;
}
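/*
 * Summary of the pause resolution in the switch above (this restates
 * the standard 802.3x negotiation matrix): advertising symmetric pause
 * only enables rx (and tx if requested) when the partner is also
 * symmetric; advertising asym only enables tx when the partner offers
 * cap+asym; advertising cap+asym enables rx against a symmetric partner
 * or rx-only against an asym-only partner.
 */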
static void nv_linkchange(struct net_device *dev)
{
	if (nv_update_linkspeed(dev)) {
		if (!netif_carrier_ok(dev)) {
			netif_carrier_on(dev);
			printk(KERN_INFO "%s: link up.\n", dev->name);
			nv_start_rx(dev);
		}
	} else {
		if (netif_carrier_ok(dev)) {
			netif_carrier_off(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
			nv_stop_rx(dev);
		}
	}
}
static void nv_link_irq(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miistat;

	miistat = readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}
static irqreturn_t nv_nic_irq(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);

	for (i=0; ; i++) {
		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		} else {
			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
		}
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock(&np->lock);
		nv_tx_done(dev);
		spin_unlock(&np->lock);

#ifdef CONFIG_FORCEDETH_NAPI
		if (events & NVREG_IRQ_RX_ALL) {
			netif_rx_schedule(dev, &np->napi);

			/* Disable further receive irqs */
			spin_lock(&np->lock);
			np->irqmask &= ~NVREG_IRQ_RX_ALL;

			if (np->msi_flags & NV_MSI_X_ENABLED)
				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			spin_unlock(&np->lock);
		}
#else
		if (nv_rx_process(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}
#endif
		if (unlikely(events & NVREG_IRQ_LINK)) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
		if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
						dev->name, events);
		}
		if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
						dev->name, events);
		}
		if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			break;
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);

	return IRQ_RETVAL(i);
}
/*
 * All _optimized functions are used to help increase performance
 * (reduce CPU and increase throughput). They use descriptor version 3,
 * compiler directives, and reduce memory accesses.
 */
static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);

	for (i=0; ; i++) {
		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		} else {
			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
		}
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock(&np->lock);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock(&np->lock);

#ifdef CONFIG_FORCEDETH_NAPI
		if (events & NVREG_IRQ_RX_ALL) {
			netif_rx_schedule(dev, &np->napi);

			/* Disable further receive irqs */
			spin_lock(&np->lock);
			np->irqmask &= ~NVREG_IRQ_RX_ALL;

			if (np->msi_flags & NV_MSI_X_ENABLED)
				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			spin_unlock(&np->lock);
		}
#else
		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}
#endif
		if (unlikely(events & NVREG_IRQ_LINK)) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
		if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
						dev->name, events);
		}
		if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
						dev->name, events);
		}
		if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			break;
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_tx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
						dev->name, events);
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
#ifdef CONFIG_FORCEDETH_NAPI
static int nv_napi_poll(struct napi_struct *napi, int budget)
{
	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
	struct net_device *dev = np->dev;
	u8 __iomem *base = get_hwbase(dev);
	unsigned long flags;
	int pkts, retcode;

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		pkts = nv_rx_process(dev, budget);
		retcode = nv_alloc_rx(dev);
	} else {
		pkts = nv_rx_process_optimized(dev, budget);
		retcode = nv_alloc_rx_optimized(dev);
	}

	if (retcode) {
		spin_lock_irqsave(&np->lock, flags);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irqrestore(&np->lock, flags);
	}

	if (pkts < budget) {
		/* re-enable receive interrupts */
		spin_lock_irqsave(&np->lock, flags);

		__netif_rx_complete(dev, napi);

		np->irqmask |= NVREG_IRQ_RX_ALL;
		if (np->msi_flags & NV_MSI_X_ENABLED)
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
		else
			writel(np->irqmask, base + NvRegIrqMask);

		spin_unlock_irqrestore(&np->lock, flags);
	}
	return pkts;
}
#endif
#ifdef CONFIG_FORCEDETH_NAPI
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
	writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);

	if (events) {
		netif_rx_schedule(dev, &np->napi);
		/* disable receive interrupts on the nic */
		writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
		pci_push(base);
	}
	return IRQ_HANDLED;
}
#else
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock_irqsave(&np->lock, flags);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock_irqrestore(&np->lock, flags);
			}
		}

		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
#endif
static irqreturn_t nv_nic_irq_other(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);

	for (i=0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		/* check tx in case we reached max loop limit in tx isr */
		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (events & NVREG_IRQ_LINK) {
			spin_lock_irqsave(&np->lock, flags);
			nv_link_irq(dev);
			spin_unlock_irqrestore(&np->lock, flags);
		}
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock_irqsave(&np->lock, flags);
			nv_linkchange(dev);
			spin_unlock_irqrestore(&np->lock, flags);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & NVREG_IRQ_RECOVER_ERROR) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irq(&np->lock);
			break;
		}
		if (events & (NVREG_IRQ_UNKNOWN)) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
						dev->name, events);
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irqrestore(&np->lock, flags);
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_test(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
	} else {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
	}
	pci_push(base);
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
	if (!(events & NVREG_IRQ_TIMER))
		return IRQ_RETVAL(0);

	spin_lock(&np->lock);
	np->intr_test = 1;
	spin_unlock(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);

	return IRQ_RETVAL(1);
}
static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 msixmap = 0;

	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
	 * the remaining 8 interrupts.
	 */
	for (i = 0; i < 8; i++) {
		if ((irqmask >> i) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);

	msixmap = 0;
	for (i = 0; i < 8; i++) {
		if ((irqmask >> (i + 8)) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
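/*
 * Worked example for the mapping above (illustrative values): mapping
 * vector 2 onto an irqmask with bits 0 and 3 set yields
 * msixmap = (2 << 0) | (2 << 12) = 0x2002 in NvRegMSIXMap0, since each
 * interrupt bit owns a 4 bit nibble in the map register.
 */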
static int nv_request_irq(struct net_device *dev, int intr_test)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int i;
	irqreturn_t (*handler)(int foo, void *data);

	if (intr_test) {
		handler = nv_nic_irq_test;
	} else {
		if (np->desc_ver == DESC_VER_3)
			handler = nv_nic_irq_optimized;
		else
			handler = nv_nic_irq;
	}

	if (np->msi_flags & NV_MSI_X_CAPABLE) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			np->msi_x_entry[i].entry = i;
		}
		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
			np->msi_flags |= NV_MSI_X_ENABLED;
			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
				/* Request irq for rx handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}
				/* Request irq for tx handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_rx;
				}
				/* Request irq for link and timer handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_tx;
				}
				/* map interrupts to their respective vector */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
			} else {
				/* Request irq for all interrupts */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}

				/* map interrupts to vector 0 */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
			}
		}
	}
	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
			np->msi_flags |= NV_MSI_ENABLED;
			dev->irq = np->pci_dev->irq;
			if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
				pci_disable_msi(np->pci_dev);
				np->msi_flags &= ~NV_MSI_ENABLED;
				dev->irq = np->pci_dev->irq;
				goto out_err;
			}

			/* map interrupts to vector 0 */
			writel(0, base + NvRegMSIMap0);
			writel(0, base + NvRegMSIMap1);
			/* enable msi vector 0 */
			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
		}
	}
	if (ret != 0) {
		if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
			goto out_err;
	}

	return 0;
out_free_tx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
out_free_rx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
out_err:
	return 1;
}
static void nv_free_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			free_irq(np->msi_x_entry[i].vector, dev);
		}
		pci_disable_msix(np->pci_dev);
		np->msi_flags &= ~NV_MSI_X_ENABLED;
	} else {
		free_irq(np->pci_dev->irq, dev);
		if (np->msi_flags & NV_MSI_ENABLED) {
			pci_disable_msi(np->pci_dev);
			np->msi_flags &= ~NV_MSI_ENABLED;
		}
	}
}
static void nv_do_nic_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 mask = 0;

	/*
	 * First disable irq(s) and then
	 * reenable interrupts on the nic, we have to do this before calling
	 * nv_nic_irq because that may decide to do otherwise
	 */

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq_lockdep(np->pci_dev->irq);
		mask = np->irqmask;
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
			mask |= NVREG_IRQ_RX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
			mask |= NVREG_IRQ_TX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
			mask |= NVREG_IRQ_OTHER;
		}
	}
	np->nic_poll_irq = 0;

	/* disable_irq() contains synchronize_irq, thus no irq handler can run now */

	if (np->recover_error) {
		np->recover_error = 0;
		printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
		if (netif_running(dev)) {
			netif_tx_lock_bh(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rx(dev);
			nv_stop_tx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rx(dev);
			nv_drain_tx(dev);
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
				base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);

			/* restart rx engine */
			nv_start_rx(dev);
			nv_start_tx(dev);
			spin_unlock(&np->lock);
			netif_tx_unlock_bh(dev);
		}
	}

	writel(mask, base + NvRegIrqMask);
	pci_push(base);

	if (!using_multi_irqs(dev)) {
		if (np->desc_ver == DESC_VER_3)
			nv_nic_irq_optimized(0, dev);
		else
			nv_nic_irq(0, dev);
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq_lockdep(np->pci_dev->irq);
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			nv_nic_irq_rx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			nv_nic_irq_tx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			nv_nic_irq_other(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
		}
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
	nv_do_nic_poll((unsigned long) dev);
}
#endif
static void nv_do_stats_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	nv_get_hw_stats(dev);

	if (!np->in_shutdown)
		mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
}
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct fe_priv *np = netdev_priv(dev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, FORCEDETH_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	wolinfo->supported = WAKE_MAGIC;

	spin_lock_irq(&np->lock);
	if (np->wolenabled)
		wolinfo->wolopts = WAKE_MAGIC;
	spin_unlock_irq(&np->lock);
}
static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 flags = 0;

	if (wolinfo->wolopts == 0) {
		np->wolenabled = 0;
	} else if (wolinfo->wolopts & WAKE_MAGIC) {
		np->wolenabled = 1;
		flags = NVREG_WAKEUPFLAGS_ENABLE;
	}
	if (netif_running(dev)) {
		spin_lock_irq(&np->lock);
		writel(flags, base + NvRegWakeUpFlags);
		spin_unlock_irq(&np->lock);
	}
	return 0;
}
static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv;

	spin_lock_irq(&np->lock);
	ecmd->port = PORT_MII;
	if (!netif_running(dev)) {
		/* We do not track link speed / duplex setting if the
		 * interface is disabled. Force a link check */
		if (nv_update_linkspeed(dev)) {
			if (!netif_carrier_ok(dev))
				netif_carrier_on(dev);
		} else {
			if (netif_carrier_ok(dev))
				netif_carrier_off(dev);
		}
	}

	if (netif_carrier_ok(dev)) {
		switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
		case NVREG_LINKSPEED_10:
			ecmd->speed = SPEED_10;
			break;
		case NVREG_LINKSPEED_100:
			ecmd->speed = SPEED_100;
			break;
		case NVREG_LINKSPEED_1000:
			ecmd->speed = SPEED_1000;
			break;
		}
		ecmd->duplex = DUPLEX_HALF;
		if (np->duplex)
			ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = np->autoneg;

	ecmd->advertising = ADVERTISED_MII;
	if (np->autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		if (adv & ADVERTISE_10HALF)
			ecmd->advertising |= ADVERTISED_10baseT_Half;
		if (adv & ADVERTISE_10FULL)
			ecmd->advertising |= ADVERTISED_10baseT_Full;
		if (adv & ADVERTISE_100HALF)
			ecmd->advertising |= ADVERTISED_100baseT_Half;
		if (adv & ADVERTISE_100FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			if (adv & ADVERTISE_1000FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	}
	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_MII);
	if (np->gigabit == PHY_GIGABIT)
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->phy_address = np->phyaddr;
	ecmd->transceiver = XCVR_EXTERNAL;

	/* ignore maxtxpkt, maxrxpkt for now */
	spin_unlock_irq(&np->lock);
	return 0;
}
static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
    struct fe_priv *np = netdev_priv(dev);

    if (ecmd->port != PORT_MII)
        return -EINVAL;
    if (ecmd->transceiver != XCVR_EXTERNAL)
        return -EINVAL;
    if (ecmd->phy_address != np->phyaddr) {
        /* TODO: support switching between multiple phys. Should be
         * trivial, but not enabled due to lack of test hardware. */
        return -EINVAL;
    }
    if (ecmd->autoneg == AUTONEG_ENABLE) {
        u32 mask;

        mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
               ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
        if (np->gigabit == PHY_GIGABIT)
            mask |= ADVERTISED_1000baseT_Full;

        if ((ecmd->advertising & mask) == 0)
            return -EINVAL;

    } else if (ecmd->autoneg == AUTONEG_DISABLE) {
        /* Note: autonegotiation disable, speed 1000 intentionally
         * forbidden - noone should need that. */

        if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
            return -EINVAL;
        if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
            return -EINVAL;
    } else {
        return -EINVAL;
    }

    netif_carrier_off(dev);
    if (netif_running(dev)) {
        nv_disable_irq(dev);
        netif_tx_lock_bh(dev);
        spin_lock(&np->lock);
        /* stop engines */
        nv_stop_rx(dev);
        nv_stop_tx(dev);
        spin_unlock(&np->lock);
        netif_tx_unlock_bh(dev);
    }

    if (ecmd->autoneg == AUTONEG_ENABLE) {
        int adv, bmcr;

        np->autoneg = 1;

        /* advertise only what has been requested */
        adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
        adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
        if (ecmd->advertising & ADVERTISED_10baseT_Half)
            adv |= ADVERTISE_10HALF;
        if (ecmd->advertising & ADVERTISED_10baseT_Full)
            adv |= ADVERTISE_10FULL;
        if (ecmd->advertising & ADVERTISED_100baseT_Half)
            adv |= ADVERTISE_100HALF;
        if (ecmd->advertising & ADVERTISED_100baseT_Full)
            adv |= ADVERTISE_100FULL;
        if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */
            adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
            adv |= ADVERTISE_PAUSE_ASYM;
        mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

        if (np->gigabit == PHY_GIGABIT) {
            adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
            adv &= ~ADVERTISE_1000FULL;
            if (ecmd->advertising & ADVERTISED_1000baseT_Full)
                adv |= ADVERTISE_1000FULL;
            mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
        }

        if (netif_running(dev))
            printk(KERN_INFO "%s: link down.\n", dev->name);
        bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
        if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
            bmcr |= BMCR_ANENABLE;
            /* reset the phy in order for settings to stick,
             * and cause autoneg to start */
            if (phy_reset(dev, bmcr)) {
                printk(KERN_INFO "%s: phy reset failed\n", dev->name);
                return -EINVAL;
            }
        } else {
            bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
            mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
        }
    } else {
        int adv, bmcr;

        np->autoneg = 0;

        adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
        adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
        if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
            adv |= ADVERTISE_10HALF;
        if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
            adv |= ADVERTISE_10FULL;
        if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
            adv |= ADVERTISE_100HALF;
        if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
            adv |= ADVERTISE_100FULL;
        np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
        if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisments but disable tx pause */
            adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
            np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
        }
        if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
            adv |= ADVERTISE_PAUSE_ASYM;
            np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
        }
        mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
        np->fixed_mode = adv;

        if (np->gigabit == PHY_GIGABIT) {
            adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
            adv &= ~ADVERTISE_1000FULL;
            mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
        }

        bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
        bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
        if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
            bmcr |= BMCR_FULLDPLX;
        if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
            bmcr |= BMCR_SPEED100;
        if (np->phy_oui == PHY_OUI_MARVELL) {
            /* reset the phy in order for forced mode settings to stick */
            if (phy_reset(dev, bmcr)) {
                printk(KERN_INFO "%s: phy reset failed\n", dev->name);
                return -EINVAL;
            }
        } else {
            mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
            if (netif_running(dev)) {
                /* Wait a bit and then reconfigure the nic. */
                udelay(10);
                nv_linkchange(dev);
            }
        }
    }

    if (netif_running(dev)) {
        nv_start_rx(dev);
        nv_start_tx(dev);
        nv_enable_irq(dev);
    }

    return 0;
}
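
/* Register dump support for ethtool -d: the window size depends on the chip
 * revision (np->register_size), so the length callback reports it and
 * nv_get_regs copies the raw registers out under the driver lock. */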
#define FORCEDETH_REGS_VER 1

static int nv_get_regs_len(struct net_device *dev)
{
    struct fe_priv *np = netdev_priv(dev);
    return np->register_size;
}

static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base = get_hwbase(dev);
    u32 *rbuf = buf;
    int i;

    regs->version = FORCEDETH_REGS_VER;
    spin_lock_irq(&np->lock);
    for (i = 0; i <= np->register_size/sizeof(u32); i++)
        rbuf[i] = readl(base + i*sizeof(u32));
    spin_unlock_irq(&np->lock);
}
static int nv_nway_reset(struct net_device *dev)
{
    struct fe_priv *np = netdev_priv(dev);
    int ret;

    if (np->autoneg) {
        int bmcr;

        netif_carrier_off(dev);
        if (netif_running(dev)) {
            nv_disable_irq(dev);
            netif_tx_lock_bh(dev);
            spin_lock(&np->lock);
            /* stop engines */
            nv_stop_rx(dev);
            nv_stop_tx(dev);
            spin_unlock(&np->lock);
            netif_tx_unlock_bh(dev);
            printk(KERN_INFO "%s: link down.\n", dev->name);
        }

        bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
        if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
            bmcr |= BMCR_ANENABLE;
            /* reset the phy in order for settings to stick*/
            if (phy_reset(dev, bmcr)) {
                printk(KERN_INFO "%s: phy reset failed\n", dev->name);
                return -EINVAL;
            }
        } else {
            bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
            mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
        }

        if (netif_running(dev)) {
            nv_start_rx(dev);
            nv_start_tx(dev);
            nv_enable_irq(dev);
        }
        ret = 0;
    } else {
        ret = -EINVAL;
    }

    return ret;
}
static int nv_set_tso(struct net_device *dev, u32 value)
{
    struct fe_priv *np = netdev_priv(dev);

    if ((np->driver_data & DEV_HAS_CHECKSUM))
        return ethtool_op_set_tso(dev, value);
    else
        return -EOPNOTSUPP;
}
static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
    struct fe_priv *np = netdev_priv(dev);

    ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
    ring->rx_mini_max_pending = 0;
    ring->rx_jumbo_max_pending = 0;
    ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;

    ring->rx_pending = np->rx_ring_size;
    ring->rx_mini_pending = 0;
    ring->rx_jumbo_pending = 0;
    ring->tx_pending = np->tx_ring_size;
}
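
/* nv_set_ringparam: resize the rx/tx descriptor rings. New rings are
 * allocated first so that the old ones survive an allocation failure; only
 * then are the engines stopped, the queues drained and the nic repointed
 * at the new rings. */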
static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base = get_hwbase(dev);
    u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
    dma_addr_t ring_addr;

    if (ring->rx_pending < RX_RING_MIN ||
        ring->tx_pending < TX_RING_MIN ||
        ring->rx_mini_pending != 0 ||
        ring->rx_jumbo_pending != 0 ||
        (np->desc_ver == DESC_VER_1 &&
         (ring->rx_pending > RING_MAX_DESC_VER_1 ||
          ring->tx_pending > RING_MAX_DESC_VER_1)) ||
        (np->desc_ver != DESC_VER_1 &&
         (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
          ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
        return -EINVAL;
    }

    /* allocate new rings */
    if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
        rxtx_ring = pci_alloc_consistent(np->pci_dev,
                        sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
                        &ring_addr);
    } else {
        rxtx_ring = pci_alloc_consistent(np->pci_dev,
                        sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
                        &ring_addr);
    }
    rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
    tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
    if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
        /* fall back to old rings */
        if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
            if (rxtx_ring)
                pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
                                    rxtx_ring, ring_addr);
        } else {
            if (rxtx_ring)
                pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
                                    rxtx_ring, ring_addr);
        }
        if (rx_skbuff)
            kfree(rx_skbuff);
        if (tx_skbuff)
            kfree(tx_skbuff);
        return -ENOMEM;
    }

    if (netif_running(dev)) {
        nv_disable_irq(dev);
        netif_tx_lock_bh(dev);
        spin_lock(&np->lock);
        /* stop engines */
        nv_stop_rx(dev);
        nv_stop_tx(dev);
        nv_txrx_reset(dev);
        /* drain queues */
        nv_drain_rx(dev);
        nv_drain_tx(dev);
        /* delete queues */
        free_rings(dev);
    }

    /* set new values */
    np->rx_ring_size = ring->rx_pending;
    np->tx_ring_size = ring->tx_pending;
    if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
        np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
        np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
    } else {
        np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
        np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
    }
    np->rx_skb = (struct nv_skb_map *)rx_skbuff;
    np->tx_skb = (struct nv_skb_map *)tx_skbuff;
    np->ring_addr = ring_addr;

    memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
    memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);

    if (netif_running(dev)) {
        /* reinit driver view of the queues */
        set_bufsize(dev);
        if (nv_init_ring(dev)) {
            if (!np->in_shutdown)
                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
        }

        /* reinit nic view of the queues */
        writel(np->rx_buf_sz, base + NvRegOffloadConfig);
        setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
        writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
               base + NvRegRingSizes);
        pci_push(base);
        writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
        pci_push(base);

        /* restart engines */
        nv_start_rx(dev);
        nv_start_tx(dev);
        spin_unlock(&np->lock);
        netif_tx_unlock_bh(dev);
        nv_enable_irq(dev);
    }
    return 0;
}
static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
    struct fe_priv *np = netdev_priv(dev);

    pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
    pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
    pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
}
static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
    struct fe_priv *np = netdev_priv(dev);
    int adv, bmcr;

    if ((!np->autoneg && np->duplex == 0) ||
        (np->autoneg && !pause->autoneg && np->duplex == 0)) {
        printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
               dev->name);
        return -EINVAL;
    }
    if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
        printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
        return -EINVAL;
    }

    netif_carrier_off(dev);
    if (netif_running(dev)) {
        nv_disable_irq(dev);
        netif_tx_lock_bh(dev);
        spin_lock(&np->lock);
        /* stop engines */
        nv_stop_rx(dev);
        nv_stop_tx(dev);
        spin_unlock(&np->lock);
        netif_tx_unlock_bh(dev);
    }

    np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
    if (pause->rx_pause)
        np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
    if (pause->tx_pause)
        np->pause_flags |= NV_PAUSEFRAME_TX_REQ;

    if (np->autoneg && pause->autoneg) {
        np->pause_flags |= NV_PAUSEFRAME_AUTONEG;

        adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
        adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
        if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */
            adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
            adv |= ADVERTISE_PAUSE_ASYM;
        mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

        if (netif_running(dev))
            printk(KERN_INFO "%s: link down.\n", dev->name);
        bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
        bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
        mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
    } else {
        np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
        if (pause->rx_pause)
            np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
        if (pause->tx_pause)
            np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;

        if (!netif_running(dev))
            nv_update_linkspeed(dev);
        else
            nv_update_pause(dev, np->pause_flags);
    }

    if (netif_running(dev)) {
        nv_start_rx(dev);
        nv_start_tx(dev);
        nv_enable_irq(dev);
    }
    return 0;
}
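
/* Checksum offload configuration. Note: rx checksum offload also gates
 * hardware vlan stripping, so NVREG_TXRXCTL_RXCHECK is only cleared when
 * vlan support is disabled as well. */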
static u32 nv_get_rx_csum(struct net_device *dev)
{
    struct fe_priv *np = netdev_priv(dev);
    return (np->rx_csum) != 0;
}
static int nv_set_rx_csum(struct net_device *dev, u32 data)
{
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base = get_hwbase(dev);
    int retcode = 0;

    if (np->driver_data & DEV_HAS_CHECKSUM) {
        if (data) {
            np->rx_csum = 1;
            np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
        } else {
            np->rx_csum = 0;
            /* vlan is dependent on rx checksum offload */
            if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
                np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
        }
        if (netif_running(dev)) {
            spin_lock_irq(&np->lock);
            writel(np->txrxctl_bits, base + NvRegTxRxControl);
            spin_unlock_irq(&np->lock);
        }
    } else {
        return -EINVAL;
    }

    return retcode;
}
static int nv_set_tx_csum(struct net_device *dev, u32 data)
{
    struct fe_priv *np = netdev_priv(dev);

    if (np->driver_data & DEV_HAS_CHECKSUM)
        return ethtool_op_set_tx_hw_csum(dev, data);
    else
        return -EOPNOTSUPP;
}
static int nv_set_sg(struct net_device *dev, u32 data)
{
    struct fe_priv *np = netdev_priv(dev);

    if (np->driver_data & DEV_HAS_CHECKSUM)
        return ethtool_op_set_sg(dev, data);
    else
        return -EOPNOTSUPP;
}
static int nv_get_sset_count(struct net_device *dev, int sset)
{
    struct fe_priv *np = netdev_priv(dev);

    switch (sset) {
    case ETH_SS_TEST:
        if (np->driver_data & DEV_HAS_TEST_EXTENDED)
            return NV_TEST_COUNT_EXTENDED;
        else
            return NV_TEST_COUNT_BASE;
    case ETH_SS_STATS:
        if (np->driver_data & DEV_HAS_STATISTICS_V1)
            return NV_DEV_STATISTICS_V1_COUNT;
        else if (np->driver_data & DEV_HAS_STATISTICS_V2)
            return NV_DEV_STATISTICS_V2_COUNT;
        else
            return 0;
    default:
        return -EOPNOTSUPP;
    }
}
static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
{
    struct fe_priv *np = netdev_priv(dev);

    /* update stats */
    nv_do_stats_poll((unsigned long)dev);

    memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
}
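
/* Self-test helpers for ethtool -t: link status, register read/write,
 * interrupt delivery and an internal MAC loopback are each exercised by a
 * dedicated routine below, and the results are collected in nv_self_test. */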
static int nv_link_test(struct net_device *dev)
{
    struct fe_priv *np = netdev_priv(dev);
    int mii_status;

    mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
    mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

    /* check phy link status */
    if (!(mii_status & BMSR_LSTATUS))
        return 0;
    else
        return 1;
}
static int nv_register_test(struct net_device *dev)
{
    u8 __iomem *base = get_hwbase(dev);
    int i = 0;
    u32 orig_read, new_read;

    do {
        orig_read = readl(base + nv_registers_test[i].reg);

        /* xor with mask to toggle bits */
        orig_read ^= nv_registers_test[i].mask;

        writel(orig_read, base + nv_registers_test[i].reg);

        new_read = readl(base + nv_registers_test[i].reg);

        if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
            return 0;

        /* restore original value */
        orig_read ^= nv_registers_test[i].mask;
        writel(orig_read, base + nv_registers_test[i].reg);

    } while (nv_registers_test[++i].reg != 0);

    return 1;
}
static int nv_interrupt_test(struct net_device *dev)
{
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base = get_hwbase(dev);
    int ret = 1;
    int testcnt;
    u32 save_msi_flags, save_poll_interval = 0;

    if (netif_running(dev)) {
        /* free current irq */
        nv_free_irq(dev);
        save_poll_interval = readl(base+NvRegPollingInterval);
    }

    /* flag to test interrupt handler */
    np->intr_test = 0;

    /* setup test irq */
    save_msi_flags = np->msi_flags;
    np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
    np->msi_flags |= 0x001; /* setup 1 vector */
    if (nv_request_irq(dev, 1))
        return 0;

    /* setup timer interrupt */
    writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
    writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);

    nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);

    /* wait for at least one interrupt */
    msleep(100);

    spin_lock_irq(&np->lock);

    /* flag should be set within ISR */
    testcnt = np->intr_test;
    if (!testcnt)
        ret = 2;

    nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
    if (!(np->msi_flags & NV_MSI_X_ENABLED))
        writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
    else
        writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

    spin_unlock_irq(&np->lock);

    nv_free_irq(dev);

    np->msi_flags = save_msi_flags;

    if (netif_running(dev)) {
        writel(save_poll_interval, base + NvRegPollingInterval);
        writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
        /* restore original irq */
        if (nv_request_irq(dev, 0))
            return 0;
    }

    return ret;
}
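
/* nv_loopback_test: put the MAC into loopback, transmit one pattern-filled
 * frame of ETH_DATA_LEN bytes and verify that the same data comes back on
 * the first rx descriptor. */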
static int nv_loopback_test(struct net_device *dev)
{
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base = get_hwbase(dev);
    struct sk_buff *tx_skb, *rx_skb;
    dma_addr_t test_dma_addr;
    u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
    u32 flags;
    int len, i, pkt_len;
    u8 *pkt_data;
    u32 filter_flags = 0;
    u32 misc1_flags = 0;
    int ret = 1;

    if (netif_running(dev)) {
        nv_disable_irq(dev);
        filter_flags = readl(base + NvRegPacketFilterFlags);
        misc1_flags = readl(base + NvRegMisc1);
    } else {
        nv_txrx_reset(dev);
    }

    /* reinit driver view of the rx queue */
    set_bufsize(dev);
    nv_init_ring(dev);

    /* setup hardware for loopback */
    writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
    writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);

    /* reinit nic view of the rx queue */
    writel(np->rx_buf_sz, base + NvRegOffloadConfig);
    setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
    writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
           base + NvRegRingSizes);
    pci_push(base);

    /* restart rx engine */
    nv_start_rx(dev);
    nv_start_tx(dev);

    /* setup packet for tx */
    pkt_len = ETH_DATA_LEN;
    tx_skb = dev_alloc_skb(pkt_len);
    if (!tx_skb) {
        printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
               " of %s\n", dev->name);
        ret = 0;
        goto out;
    }
    test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
                                   skb_tailroom(tx_skb),
                                   PCI_DMA_FROMDEVICE);
    pkt_data = skb_put(tx_skb, pkt_len);
    for (i = 0; i < pkt_len; i++)
        pkt_data[i] = (u8)(i & 0xff);

    if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
        np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
        np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
    } else {
        np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
        np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
        np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
    }
    writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
    pci_push(get_hwbase(dev));

    msleep(500);

    /* check for rx of the packet */
    if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
        flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
        len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
    } else {
        flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
        len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
    }

    if (flags & NV_RX_AVAIL) {
        ret = 0;
    } else if (np->desc_ver == DESC_VER_1) {
        if (flags & NV_RX_ERROR)
            ret = 0;
    } else {
        if (flags & NV_RX2_ERROR) {
            ret = 0;
        }
    }

    if (ret) {
        if (len != pkt_len) {
            ret = 0;
            dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
                    dev->name, len, pkt_len);
        } else {
            rx_skb = np->rx_skb[0].skb;
            for (i = 0; i < pkt_len; i++) {
                if (rx_skb->data[i] != (u8)(i & 0xff)) {
                    ret = 0;
                    dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
                            dev->name, i);
                    break;
                }
            }
        }
    } else {
        dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
    }

    pci_unmap_page(np->pci_dev, test_dma_addr,
                   (skb_end_pointer(tx_skb) - tx_skb->data),
                   PCI_DMA_TODEVICE);
    dev_kfree_skb_any(tx_skb);
 out:
    /* stop engines */
    nv_stop_rx(dev);
    nv_stop_tx(dev);
    nv_txrx_reset(dev);
    /* drain rx queue */
    nv_drain_rx(dev);
    nv_drain_tx(dev);

    if (netif_running(dev)) {
        writel(misc1_flags, base + NvRegMisc1);
        writel(filter_flags, base + NvRegPacketFilterFlags);
        nv_enable_irq(dev);
    }

    return ret;
}
static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
{
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base = get_hwbase(dev);
    int result;

    memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));

    if (!nv_link_test(dev)) {
        test->flags |= ETH_TEST_FL_FAILED;
        buffer[0] = 1;
    }

    if (test->flags & ETH_TEST_FL_OFFLINE) {
        if (netif_running(dev)) {
            netif_stop_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
            napi_disable(&np->napi);
#endif
            netif_tx_lock_bh(dev);
            spin_lock_irq(&np->lock);
            nv_disable_hw_interrupts(dev, np->irqmask);
            if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
                writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
            } else {
                writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
            }
            /* stop engines */
            nv_stop_rx(dev);
            nv_stop_tx(dev);
            nv_txrx_reset(dev);
            /* drain rx queue */
            nv_drain_rx(dev);
            nv_drain_tx(dev);
            spin_unlock_irq(&np->lock);
            netif_tx_unlock_bh(dev);
        }

        if (!nv_register_test(dev)) {
            test->flags |= ETH_TEST_FL_FAILED;
            buffer[1] = 1;
        }

        result = nv_interrupt_test(dev);
        if (result != 1) {
            test->flags |= ETH_TEST_FL_FAILED;
            buffer[2] = 1;
        }
        if (result == 0) {
            /* bail out */
            return;
        }

        if (!nv_loopback_test(dev)) {
            test->flags |= ETH_TEST_FL_FAILED;
            buffer[3] = 1;
        }

        if (netif_running(dev)) {
            /* reinit driver view of the rx queue */
            set_bufsize(dev);
            if (nv_init_ring(dev)) {
                if (!np->in_shutdown)
                    mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
            }
            /* reinit nic view of the rx queue */
            writel(np->rx_buf_sz, base + NvRegOffloadConfig);
            setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
            writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
                   base + NvRegRingSizes);
            pci_push(base);
            writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
            pci_push(base);
            /* restart rx engine */
            nv_start_rx(dev);
            nv_start_tx(dev);
            netif_start_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
            napi_enable(&np->napi);
#endif
            nv_enable_hw_interrupts(dev, np->irqmask);
        }
    }
}
static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
{
    switch (stringset) {
    case ETH_SS_STATS:
        memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
        break;
    case ETH_SS_TEST:
        memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
        break;
    }
}
static const struct ethtool_ops ops = {
    .get_drvinfo = nv_get_drvinfo,
    .get_link = ethtool_op_get_link,
    .get_wol = nv_get_wol,
    .set_wol = nv_set_wol,
    .get_settings = nv_get_settings,
    .set_settings = nv_set_settings,
    .get_regs_len = nv_get_regs_len,
    .get_regs = nv_get_regs,
    .nway_reset = nv_nway_reset,
    .set_tso = nv_set_tso,
    .get_ringparam = nv_get_ringparam,
    .set_ringparam = nv_set_ringparam,
    .get_pauseparam = nv_get_pauseparam,
    .set_pauseparam = nv_set_pauseparam,
    .get_rx_csum = nv_get_rx_csum,
    .set_rx_csum = nv_set_rx_csum,
    .set_tx_csum = nv_set_tx_csum,
    .set_sg = nv_set_sg,
    .get_strings = nv_get_strings,
    .get_ethtool_stats = nv_get_ethtool_stats,
    .get_sset_count = nv_get_sset_count,
    .self_test = nv_self_test,
};
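
/* vlan acceleration: once the vlan bits in NvRegTxRxControl are set, the MAC
 * strips and inserts tags itself, so registering a vlan group only needs to
 * toggle those bits. */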
static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
    struct fe_priv *np = get_nvpriv(dev);

    spin_lock_irq(&np->lock);

    /* save vlan group */
    np->vlangrp = grp;

    if (grp) {
        /* enable vlan on MAC */
        np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
    } else {
        /* disable vlan on MAC */
        np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
        np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
    }

    writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

    spin_unlock_irq(&np->lock);
}
/* The mgmt unit and driver use a semaphore to access the phy during init */
static int nv_mgmt_acquire_sema(struct net_device *dev)
{
    u8 __iomem *base = get_hwbase(dev);
    int i;
    u32 tx_ctrl, mgmt_sema;

    for (i = 0; i < 10; i++) {
        mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
        if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
            break;
        msleep(500);
    }

    if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
        return 0;

    for (i = 0; i < 2; i++) {
        tx_ctrl = readl(base + NvRegTransmitterControl);
        tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
        writel(tx_ctrl, base + NvRegTransmitterControl);

        /* verify that semaphore was acquired */
        tx_ctrl = readl(base + NvRegTransmitterControl);
        if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
            ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE))
            return 1;
        else
            udelay(50);
    }

    return 0;
}
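
/* nv_open: bring the nic from an unknown (bios/mgmt unit) state into a fully
 * configured one: clear stale configuration, publish the rings, program link
 * and irq moderation settings, then request the irq and start the engines. */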
static int nv_open(struct net_device *dev)
{
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base = get_hwbase(dev);
    int ret = 1;
    int oom, i;

    dprintk(KERN_DEBUG "nv_open: begin\n");

    /* erase previous misconfiguration */
    if (np->driver_data & DEV_HAS_POWER_CNTRL)
        nv_mac_reset(dev);
    writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
    writel(0, base + NvRegMulticastAddrB);
    writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
    writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
    writel(0, base + NvRegPacketFilterFlags);

    writel(0, base + NvRegTransmitterControl);
    writel(0, base + NvRegReceiverControl);

    writel(0, base + NvRegAdapterControl);

    if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
        writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);

    /* initialize descriptor rings */
    set_bufsize(dev);
    oom = nv_init_ring(dev);

    writel(0, base + NvRegLinkSpeed);
    writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
    nv_txrx_reset(dev);
    writel(0, base + NvRegUnknownSetupReg6);

    np->in_shutdown = 0;

    /* give hw rings */
    setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
    writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
           base + NvRegRingSizes);

    writel(np->linkspeed, base + NvRegLinkSpeed);
    if (np->desc_ver == DESC_VER_1)
        writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
    else
        writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
    writel(np->txrxctl_bits, base + NvRegTxRxControl);
    writel(np->vlanctl_bits, base + NvRegVlanControl);
    pci_push(base);
    writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
    reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
              NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
              KERN_INFO "open: SetupReg5, Bit 31 remained off\n");

    writel(0, base + NvRegMIIMask);
    writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
    writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);

    writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
    writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
    writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
    writel(np->rx_buf_sz, base + NvRegOffloadConfig);

    writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
    get_random_bytes(&i, sizeof(i));
    writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
    writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
    writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
    if (poll_interval == -1) {
        if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
            writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
        else
            writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
    } else
        writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
    writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
    writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
           base + NvRegAdapterControl);
    writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
    writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
    if (np->wolenabled)
        writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);

    i = readl(base + NvRegPowerState);
    if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
        writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

    pci_push(base);
    udelay(10);
    writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

    nv_disable_hw_interrupts(dev, np->irqmask);
    pci_push(base);
    writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
    writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
    pci_push(base);

    if (nv_request_irq(dev, 0)) {
        goto out_drain;
    }

    /* ask for interrupts */
    nv_enable_hw_interrupts(dev, np->irqmask);

    spin_lock_irq(&np->lock);
    writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
    writel(0, base + NvRegMulticastAddrB);
    writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
    writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
    writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
    /* One manual link speed update: Interrupts are enabled, future link
     * speed changes cause interrupts and are handled by nv_link_irq().
     */
    {
        u32 miistat;
        miistat = readl(base + NvRegMIIStatus);
        writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
        dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
    }
    /* set linkspeed to invalid value, thus force nv_update_linkspeed
     * to init hw */
    np->linkspeed = 0;
    ret = nv_update_linkspeed(dev);
    nv_start_rx(dev);
    nv_start_tx(dev);
    netif_start_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
    napi_enable(&np->napi);
#endif

    if (ret) {
        netif_carrier_on(dev);
    } else {
        printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
        netif_carrier_off(dev);
    }
    if (oom)
        mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

    /* start statistics timer */
    if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2))
        mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);

    spin_unlock_irq(&np->lock);

    return 0;
out_drain:
    drain_ring(dev);
    return ret;
}
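
/* nv_close: the reverse of nv_open. The rx engine is deliberately restarted
 * with the packet filter armed when wake-on-lan is enabled, since the
 * hardware must keep receiving to see the magic packet. */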
static int nv_close(struct net_device *dev)
{
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base;

    spin_lock_irq(&np->lock);
    np->in_shutdown = 1;
    spin_unlock_irq(&np->lock);
#ifdef CONFIG_FORCEDETH_NAPI
    napi_disable(&np->napi);
#endif
    synchronize_irq(np->pci_dev->irq);

    del_timer_sync(&np->oom_kick);
    del_timer_sync(&np->nic_poll);
    del_timer_sync(&np->stats_poll);

    netif_stop_queue(dev);
    spin_lock_irq(&np->lock);
    nv_stop_tx(dev);
    nv_stop_rx(dev);
    nv_txrx_reset(dev);

    /* disable interrupts on the nic or we will lock up */
    base = get_hwbase(dev);
    nv_disable_hw_interrupts(dev, np->irqmask);
    pci_push(base);
    dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

    spin_unlock_irq(&np->lock);

    nv_free_irq(dev);

    drain_ring(dev);

    if (np->wolenabled) {
        writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
        nv_start_rx(dev);
    }

    /* FIXME: power down nic */

    return 0;
}
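
/* nv_probe: per-device initialization. Most behaviour differences between
 * the mac revisions are encoded in id->driver_data feature flags, which
 * select register window size, descriptor format, offloads and irq modes. */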
static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
    struct net_device *dev;
    struct fe_priv *np;
    unsigned long addr;
    u8 __iomem *base;
    int err, i;
    u32 powerstate, txreg;
    u32 phystate_orig = 0, phystate;
    int phyinitialized = 0;
    DECLARE_MAC_BUF(mac);
    static int printed_version;

    if (!printed_version++)
        printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
               " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);

    dev = alloc_etherdev(sizeof(struct fe_priv));
    err = -ENOMEM;
    if (!dev)
        goto out;

    np = netdev_priv(dev);
    np->dev = dev;
    np->pci_dev = pci_dev;
    spin_lock_init(&np->lock);
    SET_NETDEV_DEV(dev, &pci_dev->dev);

    init_timer(&np->oom_kick);
    np->oom_kick.data = (unsigned long) dev;
    np->oom_kick.function = &nv_do_rx_refill; /* timer handler */
    init_timer(&np->nic_poll);
    np->nic_poll.data = (unsigned long) dev;
    np->nic_poll.function = &nv_do_nic_poll; /* timer handler */
    init_timer(&np->stats_poll);
    np->stats_poll.data = (unsigned long) dev;
    np->stats_poll.function = &nv_do_stats_poll; /* timer handler */

    err = pci_enable_device(pci_dev);
    if (err)
        goto out_free;

    pci_set_master(pci_dev);

    err = pci_request_regions(pci_dev, DRV_NAME);
    if (err < 0)
        goto out_disable;

    if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
        np->register_size = NV_PCI_REGSZ_VER3;
    else if (id->driver_data & DEV_HAS_STATISTICS_V1)
        np->register_size = NV_PCI_REGSZ_VER2;
    else
        np->register_size = NV_PCI_REGSZ_VER1;
    err = -EINVAL;
    addr = 0;
    for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
        dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
                pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
                pci_resource_len(pci_dev, i),
                pci_resource_flags(pci_dev, i));
        if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
            pci_resource_len(pci_dev, i) >= np->register_size) {
            addr = pci_resource_start(pci_dev, i);
            break;
        }
    }
    if (i == DEVICE_COUNT_RESOURCE) {
        dev_printk(KERN_INFO, &pci_dev->dev,
                   "Couldn't find register window\n");
        goto out_relreg;
    }

    /* copy of driver data */
    np->driver_data = id->driver_data;

    /* handle different descriptor versions */
    if (id->driver_data & DEV_HAS_HIGH_DMA) {
        /* packet format 3: supports 40-bit addressing */
        np->desc_ver = DESC_VER_3;
        np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
        if (dma_64bit) {
            if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK))
                dev_printk(KERN_INFO, &pci_dev->dev,
                           "64-bit DMA failed, using 32-bit addressing\n");
            else
                dev->features |= NETIF_F_HIGHDMA;
            if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
                dev_printk(KERN_INFO, &pci_dev->dev,
                           "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
            }
        }
    } else if (id->driver_data & DEV_HAS_LARGEDESC) {
        /* packet format 2: supports jumbo frames */
        np->desc_ver = DESC_VER_2;
        np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
    } else {
        /* original packet format */
        np->desc_ver = DESC_VER_1;
        np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
    }

    np->pkt_limit = NV_PKTLIMIT_1;
    if (id->driver_data & DEV_HAS_LARGEDESC)
        np->pkt_limit = NV_PKTLIMIT_2;

    if (id->driver_data & DEV_HAS_CHECKSUM) {
        np->rx_csum = 1;
        np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
        dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
        dev->features |= NETIF_F_TSO;
    }

    np->vlanctl_bits = 0;
    if (id->driver_data & DEV_HAS_VLAN) {
        np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
        dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
        dev->vlan_rx_register = nv_vlan_rx_register;
    }

    np->msi_flags = 0;
    if ((id->driver_data & DEV_HAS_MSI) && msi) {
        np->msi_flags |= NV_MSI_CAPABLE;
    }
    if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
        np->msi_flags |= NV_MSI_X_CAPABLE;
    }

    np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
    if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
        np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
    }
    err = -ENOMEM;
    np->base = ioremap(addr, np->register_size);
    if (!np->base)
        goto out_relreg;
    dev->base_addr = (unsigned long)np->base;

    dev->irq = pci_dev->irq;

    np->rx_ring_size = RX_RING_DEFAULT;
    np->tx_ring_size = TX_RING_DEFAULT;

    if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
        np->rx_ring.orig = pci_alloc_consistent(pci_dev,
                               sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
                               &np->ring_addr);
        if (!np->rx_ring.orig)
            goto out_unmap;
        np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
    } else {
        np->rx_ring.ex = pci_alloc_consistent(pci_dev,
                             sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
                             &np->ring_addr);
        if (!np->rx_ring.ex)
            goto out_unmap;
        np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
    }
    np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
    np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
    if (!np->rx_skb || !np->tx_skb)
        goto out_freering;

    dev->open = nv_open;
    dev->stop = nv_close;
    if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
        dev->hard_start_xmit = nv_start_xmit;
    else
        dev->hard_start_xmit = nv_start_xmit_optimized;
    dev->get_stats = nv_get_stats;
    dev->change_mtu = nv_change_mtu;
    dev->set_mac_address = nv_set_mac_address;
    dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
    dev->poll_controller = nv_poll_controller;
#endif
#ifdef CONFIG_FORCEDETH_NAPI
    netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
#endif
    SET_ETHTOOL_OPS(dev, &ops);
    dev->tx_timeout = nv_tx_timeout;
    dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

    pci_set_drvdata(pci_dev, dev);
    /* read the mac address */
    base = get_hwbase(dev);
    np->orig_mac[0] = readl(base + NvRegMacAddrA);
    np->orig_mac[1] = readl(base + NvRegMacAddrB);

    /* check the workaround bit for correct mac address order */
    txreg = readl(base + NvRegTransmitPoll);
    if ((txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) ||
        (id->driver_data & DEV_HAS_CORRECT_MACADDR)) {
        /* mac address is already in correct order */
        dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
        dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
        dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
        dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
        dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
        dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
    } else {
        /* need to reverse mac address to correct order */
        dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
        dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
        dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
        dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
        dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
        dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
        writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
    }
    memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

    if (!is_valid_ether_addr(dev->perm_addr)) {
        /*
         * Bad mac address. At least one bios sets the mac address
         * to 01:23:45:67:89:ab
         */
        dev_printk(KERN_ERR, &pci_dev->dev,
                   "Invalid Mac address detected: %s\n",
                   print_mac(mac, dev->dev_addr));
        dev_printk(KERN_ERR, &pci_dev->dev,
                   "Please complain to your hardware vendor. Switching to a random MAC.\n");
        dev->dev_addr[0] = 0x00;
        dev->dev_addr[1] = 0x00;
        dev->dev_addr[2] = 0x6c;
        get_random_bytes(&dev->dev_addr[3], 3);
    }

    dprintk(KERN_DEBUG "%s: MAC Address %s\n",
            pci_name(pci_dev), print_mac(mac, dev->dev_addr));

    /* set mac address */
    nv_copy_mac_to_hw(dev);
    /* disable WOL */
    writel(0, base + NvRegWakeUpFlags);
    np->wolenabled = 0;

    if (id->driver_data & DEV_HAS_POWER_CNTRL) {

        /* take phy and nic out of low power mode */
        powerstate = readl(base + NvRegPowerState2);
        powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
        if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
             id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
            pci_dev->revision >= 0xA3)
            powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
        writel(powerstate, base + NvRegPowerState2);
    }

    if (np->desc_ver == DESC_VER_1) {
        np->tx_flags = NV_TX_VALID;
    } else {
        np->tx_flags = NV_TX2_VALID;
    }
    if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
        np->irqmask = NVREG_IRQMASK_THROUGHPUT;
        if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
            np->msi_flags |= 0x0003;
    } else {
        np->irqmask = NVREG_IRQMASK_CPU;
        if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
            np->msi_flags |= 0x0001;
    }

    if (id->driver_data & DEV_NEED_TIMERIRQ)
        np->irqmask |= NVREG_IRQ_TIMER;
    if (id->driver_data & DEV_NEED_LINKTIMER) {
        dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
        np->need_linktimer = 1;
        np->link_timeout = jiffies + LINK_TIMEOUT;
    } else {
        dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
        np->need_linktimer = 0;
    }

    /* clear phy state and temporarily halt phy interrupts */
    writel(0, base + NvRegMIIMask);
    phystate = readl(base + NvRegAdapterControl);
    if (phystate & NVREG_ADAPTCTL_RUNNING) {
        phystate_orig = 1;
        phystate &= ~NVREG_ADAPTCTL_RUNNING;
        writel(phystate, base + NvRegAdapterControl);
    }
    writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
    if (id->driver_data & DEV_HAS_MGMT_UNIT) {
        /* management unit running on the mac? */
        if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
            np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
            dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
            if (nv_mgmt_acquire_sema(dev)) {
                /* management unit setup the phy already? */
                if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
                    NVREG_XMITCTL_SYNC_PHY_INIT) {
                    /* phy is inited by mgmt unit */
                    phyinitialized = 1;
                    dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
                } else {
                    /* we need to init the phy */
                }
            }
        }
    }

    /* find a suitable phy */
    for (i = 1; i <= 32; i++) {
        int id1, id2;
        int phyaddr = i & 0x1F;

        spin_lock_irq(&np->lock);
        id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
        spin_unlock_irq(&np->lock);
        if (id1 < 0 || id1 == 0xffff)
            continue;
        spin_lock_irq(&np->lock);
        id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
        spin_unlock_irq(&np->lock);
        if (id2 < 0 || id2 == 0xffff)
            continue;

        np->phy_model = id2 & PHYID2_MODEL_MASK;
        id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
        id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
        dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
                pci_name(pci_dev), id1, id2, phyaddr);
        np->phyaddr = phyaddr;
        np->phy_oui = id1 | id2;
        break;
    }
    if (i == 33) {
        dev_printk(KERN_INFO, &pci_dev->dev,
                   "open: Could not find a valid PHY.\n");
        goto out_error;
    }
    if (!phyinitialized) {
        /* reset it */
        phy_init(dev);
    } else {
        /* see if it is a gigabit phy */
        u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
        if (mii_status & PHY_GIGABIT) {
            np->gigabit = PHY_GIGABIT;
        }
    }

    /* set default link speed settings */
    np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
    np->duplex = 0;
    np->autoneg = 1;

    err = register_netdev(dev);
    if (err) {
        dev_printk(KERN_INFO, &pci_dev->dev,
                   "unable to register netdev: %d\n", err);
        goto out_error;
    }

    dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
               "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
               dev->name, np->phy_oui, np->phyaddr,
               dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
               dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

    dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
               dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
               dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ?
                   "csum " : "",
               dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
                   "vlan " : "",
               id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
               id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
               id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
               np->gigabit == PHY_GIGABIT ? "gbit " : "",
               np->need_linktimer ? "lnktim " : "",
               np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
               np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
               np->desc_ver);

    return 0;

out_error:
    if (phystate_orig)
        writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
    pci_set_drvdata(pci_dev, NULL);
out_freering:
    free_rings(dev);
out_unmap:
    iounmap(get_hwbase(dev));
out_relreg:
    pci_release_regions(pci_dev);
out_disable:
    pci_disable_device(pci_dev);
out_free:
    free_netdev(dev);
out:
    return err;
}
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
    struct net_device *dev = pci_get_drvdata(pci_dev);
    struct fe_priv *np = netdev_priv(dev);
    u8 __iomem *base = get_hwbase(dev);

    unregister_netdev(dev);

    /* special op: write back the misordered MAC address - otherwise
     * the next nv_probe would see a wrong address.
     */
    writel(np->orig_mac[0], base + NvRegMacAddrA);
    writel(np->orig_mac[1], base + NvRegMacAddrB);
    writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
           base + NvRegTransmitPoll);

    /* free all structures */
    free_rings(dev);
    iounmap(get_hwbase(dev));
    pci_release_regions(pci_dev);
    pci_disable_device(pci_dev);
    free_netdev(dev);
    pci_set_drvdata(pci_dev, NULL);
}
#ifdef CONFIG_PM
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct fe_priv *np = netdev_priv(dev);

    if (!netif_running(dev))
        goto out;

    netif_device_detach(dev);

    /* shut the device down before saving pci state */
    nv_close(dev);

    pci_save_state(pdev);
    pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
    pci_set_power_state(pdev, pci_choose_state(pdev, state));
out:
    return 0;
}

static int nv_resume(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    int rc = 0;

    if (!netif_running(dev))
        goto out;

    netif_device_attach(dev);

    pci_set_power_state(pdev, PCI_D0);
    pci_restore_state(pdev);
    pci_enable_wake(pdev, PCI_D0, 0);

    rc = nv_open(dev);
out:
    return rc;
}
#else
#define nv_suspend NULL
#define nv_resume NULL
#endif /* CONFIG_PM */
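
/* PCI device table: one entry per nForce mac revision; driver_data carries
 * the DEV_* feature flags that nv_probe uses to configure the device. */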
static struct pci_device_id pci_tbl[] = {
    { /* nForce Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
    },
    { /* nForce2 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
    },
    { /* nForce3 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
    },
    { /* nForce3 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
    },
    { /* nForce3 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
    },
    { /* nForce3 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
    },
    { /* nForce3 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
    },
    { /* CK804 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
    },
    { /* CK804 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
    },
    { /* MCP04 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
    },
    { /* MCP04 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
    },
    { /* MCP51 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
    },
    { /* MCP51 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
    },
    { /* MCP55 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
    },
    { /* MCP55 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
    },
    { /* MCP61 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP61 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP61 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP61 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP65 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP65 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP65 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP65 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP67 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP67 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP67 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP67 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP73 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP73 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP73 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP73 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP77 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP77 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP77 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP77 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP79 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP79 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP79 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    { /* MCP79 Ethernet Controller */
        PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
        .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
    },
    {0,},
};
static struct pci_driver driver = {
    .name = DRV_NAME,
    .id_table = pci_tbl,
    .probe = nv_probe,
    .remove = __devexit_p(nv_remove),
    .suspend = nv_suspend,
    .resume = nv_resume,
};
static int __init init_nic(void)
{
    return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
    pci_unregister_driver(&driver);
}
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "The interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);